Merge

author:    tschatzl
date:      Thu, 26 Sep 2013 12:18:21 +0200
changeset: 5775:461159cd7a91
parent:    5761:e8a0010ba69e
parent:    5774:03f493ce3a71
child:     5776:3da9fad1391e

     1.1 --- a/make/excludeSrc.make	Wed Sep 25 13:03:21 2013 -0400
     1.2 +++ b/make/excludeSrc.make	Thu Sep 26 12:18:21 2013 +0200
     1.3 @@ -88,7 +88,7 @@
     1.4  	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
     1.5  	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
     1.6  	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
     1.7 -	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     1.8 +	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     1.9  	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
    1.10  	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
    1.11  	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
     2.1 --- a/src/share/vm/classfile/symbolTable.cpp	Wed Sep 25 13:03:21 2013 -0400
     2.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Thu Sep 26 12:18:21 2013 +0200
     2.3 @@ -341,7 +341,7 @@
     2.4  
     2.5  Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
     2.6                                 unsigned int hashValue_arg, bool c_heap, TRAPS) {
     2.7 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
     2.8 +  assert(!Universe::heap()->is_in_reserved(name),
     2.9           "proposed name of symbol must be stable");
    2.10  
    2.11    // Don't allow symbols to be created which cannot fit in a Symbol*.
    2.12 @@ -685,7 +685,7 @@
    2.13    if (found_string != NULL) return found_string;
    2.14  
    2.15    debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
    2.16 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
    2.17 +  assert(!Universe::heap()->is_in_reserved(name),
    2.18           "proposed name of symbol must be stable");
    2.19  
    2.20    Handle string;
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Thu Sep 26 12:18:21 2013 +0200
     3.3 @@ -0,0 +1,141 @@
     3.4 +/*
     3.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
     3.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.7 + *
     3.8 + * This code is free software; you can redistribute it and/or modify it
     3.9 + * under the terms of the GNU General Public License version 2 only, as
    3.10 + * published by the Free Software Foundation.
    3.11 + *
    3.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    3.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    3.15 + * version 2 for more details (a copy is included in the LICENSE file that
    3.16 + * accompanied this code).
    3.17 + *
    3.18 + * You should have received a copy of the GNU General Public License version
    3.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    3.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    3.21 + *
    3.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    3.23 + * or visit www.oracle.com if you need additional information or have any
    3.24 + * questions.
    3.25 + *
    3.26 + */
    3.27 +
    3.28 +#include "precompiled.hpp"
    3.29 +#include "gc_implementation/g1/g1BiasedArray.hpp"
    3.30 +
    3.31 +#ifndef PRODUCT
    3.32 +void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
    3.33 +  guarantee(_base != NULL, "Array not initialized");
    3.34 +  guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
    3.35 +}
    3.36 +
    3.37 +void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
    3.38 +  guarantee(_biased_base != NULL, "Array not initialized");
    3.39 +  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
    3.40 +    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
    3.41 +}
    3.42 +
    3.43 +void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
    3.44 +  guarantee(_biased_base != NULL, "Array not initialized");
    3.45 +  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
    3.46 +    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
    3.47 +}
    3.48 +
    3.49 +class TestMappedArray : public G1BiasedMappedArray<int> {
    3.50 +protected:
    3.51 +  virtual int default_value() const { return 0xBAADBABE; }
    3.52 +public:
    3.53 +  static void test_biasedarray() {
    3.54 +    const size_t REGION_SIZE_IN_WORDS = 512;
    3.55 +    const size_t NUM_REGIONS = 20;
    3.56 +    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
    3.57 +
    3.58 +    TestMappedArray array;
    3.59 +    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
    3.60 +            REGION_SIZE_IN_WORDS * HeapWordSize);
    3.61 +    // Check address calculation (bounds)
    3.62 +    assert(array.bottom_address_mapped() == fake_heap,
    3.63 +      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
    3.64 +    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
    3.65 +
    3.66 +    int* bottom = array.address_mapped_to(fake_heap);
    3.67 +    assert((void*)bottom == (void*) array.base(), "must be");
    3.68 +    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
    3.69 +    assert((void*)end == (void*)(array.base() + array.length()), "must be");
    3.70 +    // The entire array should contain default value elements
    3.71 +    for (int* current = bottom; current < end; current++) {
    3.72 +      assert(*current == array.default_value(), "must be");
    3.73 +    }
    3.74 +
    3.75 +    // Test setting values in the table
    3.76 +
    3.77 +    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
    3.78 +    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
    3.79 +
    3.80 +    // Set/get by address tests: invert some value; first retrieve one
    3.81 +    int actual_value = array.get_by_index(NUM_REGIONS / 2);
    3.82 +    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
    3.83 +    // Get the same value by address, should correspond to the start of the "region"
    3.84 +    int value = array.get_by_address(region_start_address);
    3.85 +    assert(value == ~actual_value, "must be");
    3.86 +    // Get the same value by address, at one HeapWord before the start
    3.87 +    value = array.get_by_address(region_start_address - 1);
    3.88 +    assert(value == array.default_value(), "must be");
    3.89 +    // Get the same value by address, at the end of the "region"
    3.90 +    value = array.get_by_address(region_end_address);
    3.91 +    assert(value == ~actual_value, "must be");
    3.92 +    // Make sure the next value maps to another index
    3.93 +    value = array.get_by_address(region_end_address + 1);
    3.94 +    assert(value == array.default_value(), "must be");
    3.95 +
    3.96 +    // Reset the value in the array
    3.97 +    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
    3.98 +
    3.99 +    // The entire array should have the default value again
   3.100 +    for (int* current = bottom; current < end; current++) {
   3.101 +      assert(*current == array.default_value(), "must be");
   3.102 +    }
   3.103 +
   3.104 +    // Set/get by index tests: invert some value
   3.105 +    idx_t index = NUM_REGIONS / 2;
   3.106 +    actual_value = array.get_by_index(index);
   3.107 +    array.set_by_index(index, ~actual_value);
   3.108 +
   3.109 +    value = array.get_by_index(index);
   3.110 +    assert(value == ~actual_value, "must be");
   3.111 +
   3.112 +    value = array.get_by_index(index - 1);
   3.113 +    assert(value == array.default_value(), "must be");
   3.114 +
   3.115 +    value = array.get_by_index(index + 1);
   3.116 +    assert(value == array.default_value(), "must be");
   3.117 +
   3.118 +    array.set_by_index(0, 0);
   3.119 +    value = array.get_by_index(0);
   3.120 +    assert(value == 0, "must be");
   3.121 +
   3.122 +    array.set_by_index(array.length() - 1, 0);
   3.123 +    value = array.get_by_index(array.length() - 1);
   3.124 +    assert(value == 0, "must be");
   3.125 +
   3.126 +    array.set_by_index(index, 0);
   3.127 +
   3.128 +    // The array should have three zeros, and default values otherwise
   3.129 +    size_t num_zeros = 0;
   3.130 +    for (int* current = bottom; current < end; current++) {
   3.131 +      assert(*current == array.default_value() || *current == 0, "must be");
   3.132 +      if (*current == 0) {
   3.133 +        num_zeros++;
   3.134 +      }
   3.135 +    }
   3.136 +    assert(num_zeros == 3, "must be");
   3.137 +  }
   3.138 +};
   3.139 +
   3.140 +void TestG1BiasedArray_test() {
   3.141 +  TestMappedArray::test_biasedarray();
   3.142 +}
   3.143 +
   3.144 +#endif
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Thu Sep 26 12:18:21 2013 +0200
     4.3 @@ -0,0 +1,181 @@
     4.4 +/*
     4.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
     4.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.7 + *
     4.8 + * This code is free software; you can redistribute it and/or modify it
     4.9 + * under the terms of the GNU General Public License version 2 only, as
    4.10 + * published by the Free Software Foundation.
    4.11 + *
    4.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    4.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    4.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    4.15 + * version 2 for more details (a copy is included in the LICENSE file that
    4.16 + * accompanied this code).
    4.17 + *
    4.18 + * You should have received a copy of the GNU General Public License version
    4.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    4.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    4.21 + *
    4.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    4.23 + * or visit www.oracle.com if you need additional information or have any
    4.24 + * questions.
    4.25 + *
    4.26 + */
    4.27 +
    4.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
    4.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
    4.30 +
    4.31 +#include "utilities/debug.hpp"
    4.32 +#include "memory/allocation.inline.hpp"
    4.33 +
    4.34 +// Implements the common base functionality for arrays that contain provisions
    4.35 +// for accessing its elements using a biased index.
    4.36 +// The element type is defined by the instantiating the template.
    4.37 +class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
    4.38 +  friend class VMStructs;
    4.39 +public:
    4.40 +  typedef size_t idx_t;
    4.41 +protected:
    4.42 +  address _base;          // the real base address
    4.43 +  size_t _length;         // the length of the array
    4.44 +  address _biased_base;   // base address biased by "bias" elements
    4.45 +  size_t _bias;           // the bias, i.e. the offset biased_base is located to the right in elements
    4.46 +  uint _shift_by;         // the amount of bits to shift right when mapping to an index of the array.
    4.47 +
    4.48 +protected:
    4.49 +
    4.50 +  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
    4.51 +    _bias(0), _shift_by(0) { }
    4.52 +
    4.53 +  // Allocate a new array, generic version.
    4.54 +  static address create_new_base_array(size_t length, size_t elem_size) {
    4.55 +    assert(length > 0, "just checking");
    4.56 +    assert(elem_size > 0, "just checking");
    4.57 +    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
    4.58 +  }
    4.59 +
    4.60 +  // Initialize the members of this class. The biased start address of this array
    4.61 +  // is the bias (in elements) multiplied by the element size.
    4.62 +  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
    4.63 +    assert(base != NULL, "just checking");
    4.64 +    assert(length > 0, "just checking");
    4.65 +    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
    4.66 +    _base = base;
    4.67 +    _length = length;
    4.68 +    _biased_base = base - (bias * elem_size);
    4.69 +    _bias = bias;
    4.70 +    _shift_by = shift_by;
    4.71 +  }
    4.72 +
    4.73 +  // Allocate and initialize this array to cover the heap addresses in the range
    4.74 +  // of [bottom, end).
    4.75 +  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
    4.76 +    assert(mapping_granularity_in_bytes > 0, "just checking");
    4.77 +    assert(is_power_of_2(mapping_granularity_in_bytes),
    4.78 +      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
    4.79 +    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
    4.80 +      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
    4.81 +        mapping_granularity_in_bytes, bottom));
    4.82 +    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
    4.83 +      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
    4.84 +        mapping_granularity_in_bytes, end));
    4.85 +    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
    4.86 +    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
    4.87 +    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
    4.88 +    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
    4.89 +  }
    4.90 +
    4.91 +  size_t bias() const { return _bias; }
    4.92 +  uint shift_by() const { return _shift_by; }
    4.93 +
    4.94 +  void verify_index(idx_t index) const PRODUCT_RETURN;
    4.95 +  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
    4.96 +  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
    4.97 +
    4.98 +public:
    4.99 +   // Return the length of the array in elements.
   4.100 +   size_t length() const { return _length; }
   4.101 +};
   4.102 +
   4.103 +// Array that provides biased access and mapping from (valid) addresses in the
   4.104 +// heap into this array.
   4.105 +template<class T>
   4.106 +class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
   4.107 +public:
   4.108 +  typedef G1BiasedMappedArrayBase::idx_t idx_t;
   4.109 +
   4.110 +  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
   4.111 +  // Return the element of the given array at the given index. Assume
   4.112 +  // the index is valid. This is a convenience method that does sanity
   4.113 +  // checking on the index.
   4.114 +  T get_by_index(idx_t index) const {
   4.115 +    verify_index(index);
   4.116 +    return this->base()[index];
   4.117 +  }
   4.118 +
   4.119 +  // Set the element of the given array at the given index to the
   4.120 +  // given value. Assume the index is valid. This is a convenience
   4.121 +  // method that does sanity checking on the index.
   4.122 +  void set_by_index(idx_t index, T value) {
   4.123 +    verify_index(index);
   4.124 +    this->base()[index] = value;
   4.125 +  }
   4.126 +
   4.127 +  // The raw biased base pointer.
   4.128 +  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
   4.129 +
   4.130 +  // Return the element of the given array that covers the given word in the
   4.131 +  // heap. Assumes the index is valid.
   4.132 +  T get_by_address(HeapWord* value) const {
   4.133 +    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
   4.134 +    this->verify_biased_index(biased_index);
   4.135 +    return biased_base()[biased_index];
   4.136 +  }
   4.137 +
   4.138 +  // Set the value of the array entry that corresponds to the given array.
   4.139 +  void set_by_address(HeapWord * address, T value) {
   4.140 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
   4.141 +    this->verify_biased_index(biased_index);
   4.142 +    biased_base()[biased_index] = value;
   4.143 +  }
   4.144 +
   4.145 +protected:
   4.146 +  // Returns the address of the element the given address maps to
   4.147 +  T* address_mapped_to(HeapWord* address) {
   4.148 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
   4.149 +    this->verify_biased_index_inclusive_end(biased_index);
   4.150 +    return biased_base() + biased_index;
   4.151 +  }
   4.152 +
   4.153 +public:
   4.154 +  // Return the smallest address (inclusive) in the heap that this array covers.
   4.155 +  HeapWord* bottom_address_mapped() const {
   4.156 +    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
   4.157 +  }
   4.158 +
   4.159 +  // Return the highest address (exclusive) in the heap that this array covers.
   4.160 +  HeapWord* end_address_mapped() const {
   4.161 +    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
   4.162 +  }
   4.163 +
   4.164 +protected:
   4.165 +  virtual T default_value() const = 0;
   4.166 +  // Set all elements of the given array to the given value.
   4.167 +  void clear() {
   4.168 +    T value = default_value();
   4.169 +    for (idx_t i = 0; i < length(); i++) {
   4.170 +      set_by_index(i, value);
   4.171 +    }
   4.172 +  }
   4.173 +public:
   4.174 +  G1BiasedMappedArray() {}
   4.175 +
   4.176 +  // Allocate and initialize this array to cover the heap addresses in the range
   4.177 +  // of [bottom, end).
   4.178 +  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
   4.179 +    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
   4.180 +    this->clear();
   4.181 +  }
   4.182 +};
   4.183 +
   4.184 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
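
For readers skimming the new G1BiasedMappedArray above, the following standalone sketch is illustrative only and not part of this changeset; the concrete heap address, granularity, and variable names are assumptions chosen for the example. It walks through the same bias/shift arithmetic that initialize(), get_by_address() and bottom_address_mapped() perform:

// Illustrative sketch of the biased-index arithmetic (assumed example values).
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  // Assumed setup: the array covers a 1 MB heap range starting at a
  // granularity-aligned address, one array element per 64 KB of heap.
  const uintptr_t heap_bottom = 0x100000;  // must be a multiple of the granularity
  const size_t granularity    = 0x10000;   // 64 KB, a power of 2
  const size_t heap_bytes     = 0x100000;  // 1 MB covered
  const unsigned shift_by     = 16;        // log2(granularity), as in initialize_base()

  // As in G1BiasedMappedArrayBase::initialize(): one element per granularity unit,
  // and the bias is the heap bottom expressed in granularity units.
  const size_t length = heap_bytes / granularity;   // 16 elements
  const size_t bias   = heap_bottom / granularity;  // 16

  // get_by_address(): shift the address, then index the biased base pointer.
  // Since _biased_base = _base - bias * elem_size, this equals base[biased_index - bias].
  uintptr_t addr = heap_bottom + 3 * granularity + 123;  // somewhere in the 4th "region"
  size_t biased_index = addr >> shift_by;
  assert(biased_index >= bias && biased_index < bias + length);  // verify_biased_index()
  size_t plain_index = biased_index - bias;
  assert(plain_index == 3);

  // bottom_address_mapped() / end_address_mapped() recover the covered range.
  assert((uintptr_t)(bias << shift_by) == heap_bottom);
  assert((uintptr_t)((bias + length) << shift_by) == heap_bottom + heap_bytes);
  return 0;
}

In the class itself the subtraction never happens at lookup time: the biased base pointer is computed once during initialization, so get_by_address() reduces to a single shift plus an array access.
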
     5.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Sep 25 13:03:21 2013 -0400
     5.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 26 12:18:21 2013 +0200
     5.3 @@ -2069,8 +2069,10 @@
     5.4    _g1_storage.initialize(g1_rs, 0);
     5.5    _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
     5.6    _hrs.initialize((HeapWord*) _g1_reserved.start(),
     5.7 -                  (HeapWord*) _g1_reserved.end(),
     5.8 -                  _expansion_regions);
     5.9 +                  (HeapWord*) _g1_reserved.end());
    5.10 +  assert(_hrs.max_length() == _expansion_regions,
    5.11 +         err_msg("max length: %u expansion regions: %u",
    5.12 +                 _hrs.max_length(), _expansion_regions));
    5.13  
    5.14    // Do later initialization work for concurrent refinement.
    5.15    _cg1r->init();
     6.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Sep 25 13:03:21 2013 -0400
     6.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Sep 26 12:18:21 2013 +0200
     6.3 @@ -71,27 +71,16 @@
     6.4  
     6.5  // Public
     6.6  
     6.7 -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
     6.8 -                               uint max_length) {
     6.9 +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
    6.10    assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
    6.11           "bottom should be heap region aligned");
    6.12    assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
    6.13           "end should be heap region aligned");
    6.14  
    6.15 -  _length = 0;
    6.16 -  _heap_bottom = bottom;
    6.17 -  _heap_end = end;
    6.18 -  _region_shift = HeapRegion::LogOfHRGrainBytes;
    6.19    _next_search_index = 0;
    6.20    _allocated_length = 0;
    6.21 -  _max_length = max_length;
    6.22  
    6.23 -  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
    6.24 -  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
    6.25 -  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
    6.26 -
    6.27 -  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
    6.28 -         "bottom should be included in the region with index 0");
    6.29 +  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
    6.30  }
    6.31  
    6.32  MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
    6.33 @@ -101,15 +90,15 @@
    6.34    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    6.35  
    6.36    HeapWord* next_bottom = old_end;
    6.37 -  assert(_heap_bottom <= next_bottom, "invariant");
    6.38 +  assert(heap_bottom() <= next_bottom, "invariant");
    6.39    while (next_bottom < new_end) {
    6.40 -    assert(next_bottom < _heap_end, "invariant");
    6.41 +    assert(next_bottom < heap_end(), "invariant");
    6.42      uint index = length();
    6.43  
    6.44 -    assert(index < _max_length, "otherwise we cannot expand further");
    6.45 +    assert(index < max_length(), "otherwise we cannot expand further");
    6.46      if (index == 0) {
    6.47        // We have not allocated any regions so far
    6.48 -      assert(next_bottom == _heap_bottom, "invariant");
    6.49 +      assert(next_bottom == heap_bottom(), "invariant");
    6.50      } else {
    6.51        // next_bottom should match the end of the last/previous region
    6.52        assert(next_bottom == at(index - 1)->end(), "invariant");
    6.53 @@ -122,8 +111,8 @@
    6.54          // allocation failed, we bail out and return what we have done so far
    6.55          return MemRegion(old_end, next_bottom);
    6.56        }
    6.57 -      assert(_regions[index] == NULL, "invariant");
    6.58 -      _regions[index] = new_hr;
    6.59 +      assert(_regions.get_by_index(index) == NULL, "invariant");
    6.60 +      _regions.set_by_index(index, new_hr);
    6.61        increment_allocated_length();
    6.62      }
    6.63      // Have to increment the length first, otherwise we will get an
    6.64 @@ -228,26 +217,26 @@
    6.65  
    6.66  #ifndef PRODUCT
    6.67  void HeapRegionSeq::verify_optional() {
    6.68 -  guarantee(_length <= _allocated_length,
    6.69 +  guarantee(length() <= _allocated_length,
    6.70              err_msg("invariant: _length: %u _allocated_length: %u",
    6.71 -                    _length, _allocated_length));
    6.72 -  guarantee(_allocated_length <= _max_length,
    6.73 +                    length(), _allocated_length));
    6.74 +  guarantee(_allocated_length <= max_length(),
    6.75              err_msg("invariant: _allocated_length: %u _max_length: %u",
    6.76 -                    _allocated_length, _max_length));
    6.77 -  guarantee(_next_search_index <= _length,
    6.78 +                    _allocated_length, max_length()));
    6.79 +  guarantee(_next_search_index <= length(),
    6.80              err_msg("invariant: _next_search_index: %u _length: %u",
    6.81 -                    _next_search_index, _length));
    6.82 +                    _next_search_index, length()));
    6.83  
    6.84 -  HeapWord* prev_end = _heap_bottom;
    6.85 +  HeapWord* prev_end = heap_bottom();
    6.86    for (uint i = 0; i < _allocated_length; i += 1) {
    6.87 -    HeapRegion* hr = _regions[i];
    6.88 +    HeapRegion* hr = _regions.get_by_index(i);
    6.89      guarantee(hr != NULL, err_msg("invariant: i: %u", i));
    6.90      guarantee(hr->bottom() == prev_end,
    6.91                err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
    6.92                        i, HR_FORMAT_PARAMS(hr), prev_end));
    6.93      guarantee(hr->hrs_index() == i,
    6.94                err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
    6.95 -    if (i < _length) {
    6.96 +    if (i < length()) {
    6.97        // Asserts will fire if i is >= _length
    6.98        HeapWord* addr = hr->bottom();
    6.99        guarantee(addr_to_region(addr) == hr, "sanity");
   6.100 @@ -265,8 +254,8 @@
   6.101        prev_end = hr->end();
   6.102      }
   6.103    }
   6.104 -  for (uint i = _allocated_length; i < _max_length; i += 1) {
   6.105 -    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
   6.106 +  for (uint i = _allocated_length; i < max_length(); i += 1) {
   6.107 +    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   6.108    }
   6.109  }
   6.110  #endif // PRODUCT
     7.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Sep 25 13:03:21 2013 -0400
     7.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Sep 26 12:18:21 2013 +0200
     7.3 @@ -25,10 +25,17 @@
     7.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
     7.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
     7.6  
     7.7 +#include "gc_implementation/g1/g1BiasedArray.hpp"
     7.8 +
     7.9  class HeapRegion;
    7.10  class HeapRegionClosure;
    7.11  class FreeRegionList;
    7.12  
    7.13 +class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
    7.14 + protected:
    7.15 +   virtual HeapRegion* default_value() const { return NULL; }
    7.16 +};
    7.17 +
    7.18  // This class keeps track of the region metadata (i.e., HeapRegion
    7.19  // instances). They are kept in the _regions array in address
    7.20  // order. A region's index in the array corresponds to its index in
    7.21 @@ -44,35 +51,21 @@
    7.22  //
    7.23  // We keep track of three lengths:
    7.24  //
    7.25 -// * _length (returned by length()) is the number of currently
    7.26 +// * _committed_length (returned by length()) is the number of currently
    7.27  //   committed regions.
    7.28  // * _allocated_length (not exposed outside this class) is the
    7.29  //   number of regions for which we have HeapRegions.
    7.30 -// * _max_length (returned by max_length()) is the maximum number of
    7.31 -//   regions the heap can have.
    7.32 +// * max_length() returns the maximum number of regions the heap can have.
    7.33  //
    7.34 -// and maintain that: _length <= _allocated_length <= _max_length
    7.35 +// and maintain that: _committed_length <= _allocated_length <= max_length()
    7.36  
    7.37  class HeapRegionSeq: public CHeapObj<mtGC> {
    7.38    friend class VMStructs;
    7.39  
    7.40 -  // The array that holds the HeapRegions.
    7.41 -  HeapRegion** _regions;
    7.42 -
    7.43 -  // Version of _regions biased to address 0
    7.44 -  HeapRegion** _regions_biased;
    7.45 +  G1HeapRegionTable _regions;
    7.46  
    7.47    // The number of regions committed in the heap.
    7.48 -  uint _length;
    7.49 -
    7.50 -  // The address of the first reserved word in the heap.
    7.51 -  HeapWord* _heap_bottom;
    7.52 -
    7.53 -  // The address of the last reserved word in the heap - 1.
    7.54 -  HeapWord* _heap_end;
    7.55 -
    7.56 -  // The log of the region byte size.
    7.57 -  uint _region_shift;
    7.58 +  uint _committed_length;
    7.59  
    7.60    // A hint for which index to start searching from for humongous
    7.61    // allocations.
    7.62 @@ -81,37 +74,33 @@
    7.63    // The number of regions for which we have allocated HeapRegions for.
    7.64    uint _allocated_length;
    7.65  
    7.66 -  // The maximum number of regions in the heap.
    7.67 -  uint _max_length;
    7.68 -
    7.69    // Find a contiguous set of empty regions of length num, starting
    7.70    // from the given index.
    7.71    uint find_contiguous_from(uint from, uint num);
    7.72  
    7.73 -  // Map a heap address to a biased region index. Assume that the
    7.74 -  // address is valid.
    7.75 -  inline uintx addr_to_index_biased(HeapWord* addr) const;
    7.76 -
    7.77    void increment_allocated_length() {
    7.78 -    assert(_allocated_length < _max_length, "pre-condition");
    7.79 +    assert(_allocated_length < max_length(), "pre-condition");
    7.80      _allocated_length++;
    7.81    }
    7.82  
    7.83    void increment_length() {
    7.84 -    assert(_length < _max_length, "pre-condition");
    7.85 -    _length++;
    7.86 +    assert(length() < max_length(), "pre-condition");
    7.87 +    _committed_length++;
    7.88    }
    7.89  
    7.90    void decrement_length() {
    7.91 -    assert(_length > 0, "pre-condition");
    7.92 -    _length--;
    7.93 +    assert(length() > 0, "pre-condition");
    7.94 +    _committed_length--;
    7.95    }
    7.96  
    7.97 +  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
    7.98 +  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
    7.99 +
   7.100   public:
   7.101    // Empty contructor, we'll initialize it with the initialize() method.
   7.102 -  HeapRegionSeq() { }
   7.103 +  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
   7.104  
   7.105 -  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
   7.106 +  void initialize(HeapWord* bottom, HeapWord* end);
   7.107  
   7.108    // Return the HeapRegion at the given index. Assume that the index
   7.109    // is valid.
   7.110 @@ -126,10 +115,10 @@
   7.111    inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
   7.112  
   7.113    // Return the number of regions that have been committed in the heap.
   7.114 -  uint length() const { return _length; }
   7.115 +  uint length() const { return _committed_length; }
   7.116  
   7.117    // Return the maximum number of regions in the heap.
   7.118 -  uint max_length() const { return _max_length; }
   7.119 +  uint max_length() const { return (uint)_regions.length(); }
   7.120  
   7.121    // Expand the sequence to reflect that the heap has grown from
   7.122    // old_end to new_end. Either create new HeapRegions, or re-use
     8.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Sep 25 13:03:21 2013 -0400
     8.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Thu Sep 26 12:18:21 2013 +0200
     8.3 @@ -28,28 +28,16 @@
     8.4  #include "gc_implementation/g1/heapRegion.hpp"
     8.5  #include "gc_implementation/g1/heapRegionSeq.hpp"
     8.6  
     8.7 -inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
     8.8 -  assert(_heap_bottom <= addr && addr < _heap_end,
     8.9 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
    8.10 -                 addr, _heap_bottom, _heap_end));
    8.11 -  uintx index = (uintx) addr >> _region_shift;
    8.12 -  return index;
    8.13 -}
    8.14 -
    8.15  inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
    8.16 -  assert(_heap_bottom <= addr && addr < _heap_end,
    8.17 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
    8.18 -                 addr, _heap_bottom, _heap_end));
    8.19 -  uintx index_biased = addr_to_index_biased(addr);
    8.20 -  HeapRegion* hr = _regions_biased[index_biased];
    8.21 +  HeapRegion* hr = _regions.get_by_address(addr);
    8.22    assert(hr != NULL, "invariant");
    8.23    return hr;
    8.24  }
    8.25  
    8.26  inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
    8.27 -  if (addr != NULL && addr < _heap_end) {
    8.28 -    assert(addr >= _heap_bottom,
    8.29 -          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
    8.30 +  if (addr != NULL && addr < heap_end()) {
    8.31 +    assert(addr >= heap_bottom(),
    8.32 +          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
    8.33      return addr_to_region_unsafe(addr);
    8.34    }
    8.35    return NULL;
    8.36 @@ -57,7 +45,7 @@
    8.37  
    8.38  inline HeapRegion* HeapRegionSeq::at(uint index) const {
    8.39    assert(index < length(), "pre-condition");
    8.40 -  HeapRegion* hr = _regions[index];
    8.41 +  HeapRegion* hr = _regions.get_by_index(index);
    8.42    assert(hr != NULL, "sanity");
    8.43    assert(hr->hrs_index() == index, "sanity");
    8.44    return hr;
     9.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Sep 25 13:03:21 2013 -0400
     9.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Sep 26 12:18:21 2013 +0200
     9.3 @@ -34,8 +34,14 @@
     9.4    static_field(HeapRegion, GrainBytes,        size_t)                         \
     9.5    static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
     9.6                                                                                \
     9.7 -  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
     9.8 -  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
     9.9 +  nonstatic_field(G1HeapRegionTable, _base,             address)              \
    9.10 +  nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
    9.11 +  nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
    9.12 +  nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
    9.13 +  nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
    9.14 +                                                                              \
    9.15 +  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
    9.16 +  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
    9.17                                                                                \
    9.18    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
    9.19    nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
    9.20 @@ -58,6 +64,8 @@
    9.21  
    9.22  #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
    9.23                                                                                \
    9.24 +  declare_toplevel_type(G1HeapRegionTable)                                    \
    9.25 +                                                                              \
    9.26    declare_type(G1CollectedHeap, SharedHeap)                                   \
    9.27                                                                                \
    9.28    declare_type(HeapRegion, ContiguousSpace)                                   \
    10.1 --- a/src/share/vm/memory/gcLocker.cpp	Wed Sep 25 13:03:21 2013 -0400
    10.2 +++ b/src/share/vm/memory/gcLocker.cpp	Thu Sep 26 12:18:21 2013 +0200
    10.3 @@ -122,7 +122,7 @@
    10.4      // strictly needed. It's added here to make it clear that
    10.5      // the GC will NOT be performed if any other caller
    10.6      // of GC_locker::lock() still needs GC locked.
    10.7 -    if (!is_active()) {
    10.8 +    if (!is_active_internal()) {
    10.9        _doing_gc = true;
   10.10        {
   10.11          // Must give up the lock while at a safepoint
    11.1 --- a/src/share/vm/memory/gcLocker.hpp	Wed Sep 25 13:03:21 2013 -0400
    11.2 +++ b/src/share/vm/memory/gcLocker.hpp	Thu Sep 26 12:18:21 2013 +0200
    11.3 @@ -88,7 +88,7 @@
    11.4   public:
    11.5    // Accessors
    11.6    static bool is_active() {
    11.7 -    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    11.8 +    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    11.9      return is_active_internal();
   11.10    }
   11.11    static bool needs_gc()       { return _needs_gc;                        }
    12.1 --- a/src/share/vm/memory/metaspace.cpp	Wed Sep 25 13:03:21 2013 -0400
    12.2 +++ b/src/share/vm/memory/metaspace.cpp	Thu Sep 26 12:18:21 2013 +0200
    12.3 @@ -23,6 +23,7 @@
    12.4   */
    12.5  #include "precompiled.hpp"
    12.6  #include "gc_interface/collectedHeap.hpp"
    12.7 +#include "memory/allocation.hpp"
    12.8  #include "memory/binaryTreeDictionary.hpp"
    12.9  #include "memory/freeList.hpp"
   12.10  #include "memory/collectorPolicy.hpp"
   12.11 @@ -111,7 +112,7 @@
   12.12  // Has three lists of free chunks, and a total size and
   12.13  // count that includes all three
   12.14  
   12.15 -class ChunkManager VALUE_OBJ_CLASS_SPEC {
   12.16 +class ChunkManager : public CHeapObj<mtInternal> {
   12.17  
   12.18    // Free list of chunks of different sizes.
   12.19    //   SpecializedChunk
   12.20 @@ -158,7 +159,12 @@
   12.21  
   12.22   public:
   12.23  
   12.24 -  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
   12.25 +  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
   12.26 +      : _free_chunks_total(0), _free_chunks_count(0) {
   12.27 +    _free_chunks[SpecializedIndex].set_size(specialized_size);
   12.28 +    _free_chunks[SmallIndex].set_size(small_size);
   12.29 +    _free_chunks[MediumIndex].set_size(medium_size);
   12.30 +  }
   12.31  
   12.32    // add or delete (return) a chunk to the global freelist.
   12.33    Metachunk* chunk_freelist_allocate(size_t word_size);
   12.34 @@ -219,7 +225,7 @@
   12.35    void locked_print_free_chunks(outputStream* st);
   12.36    void locked_print_sum_free_chunks(outputStream* st);
   12.37  
   12.38 -  void print_on(outputStream* st);
   12.39 +  void print_on(outputStream* st) const;
   12.40  };
   12.41  
   12.42  // Used to manage the free list of Metablocks (a block corresponds
   12.43 @@ -276,11 +282,6 @@
   12.44    // VirtualSpace
   12.45    Metachunk* first_chunk() { return (Metachunk*) bottom(); }
   12.46  
   12.47 -  void inc_container_count();
   12.48 -#ifdef ASSERT
   12.49 -  uint container_count_slow();
   12.50 -#endif
   12.51 -
   12.52   public:
   12.53  
   12.54    VirtualSpaceNode(size_t byte_size);
   12.55 @@ -314,8 +315,10 @@
   12.56    void inc_top(size_t word_size) { _top += word_size; }
   12.57  
   12.58    uintx container_count() { return _container_count; }
   12.59 +  void inc_container_count();
   12.60    void dec_container_count();
   12.61  #ifdef ASSERT
   12.62 +  uint container_count_slow();
   12.63    void verify_container_count();
   12.64  #endif
   12.65  
   12.66 @@ -421,8 +424,6 @@
   12.67    VirtualSpaceNode* _virtual_space_list;
   12.68    // virtual space currently being used for allocations
   12.69    VirtualSpaceNode* _current_virtual_space;
   12.70 -  // Free chunk list for all other metadata
   12.71 -  ChunkManager      _chunk_manager;
   12.72  
   12.73    // Can this virtual list allocate >1 spaces?  Also, used to determine
   12.74    // whether to allocate unlimited small chunks in this virtual space
   12.75 @@ -475,7 +476,6 @@
   12.76      return _current_virtual_space;
   12.77    }
   12.78  
   12.79 -  ChunkManager* chunk_manager() { return &_chunk_manager; }
   12.80    bool is_class() const { return _is_class; }
   12.81  
   12.82    // Allocate the first virtualspace.
   12.83 @@ -494,14 +494,7 @@
   12.84    void dec_virtual_space_count();
   12.85  
   12.86    // Unlink empty VirtualSpaceNodes and free it.
   12.87 -  void purge();
   12.88 -
   12.89 -  // Used and capacity in the entire list of virtual spaces.
   12.90 -  // These are global values shared by all Metaspaces
   12.91 -  size_t capacity_words_sum();
   12.92 -  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
   12.93 -  size_t used_words_sum();
   12.94 -  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
   12.95 +  void purge(ChunkManager* chunk_manager);
   12.96  
   12.97    bool contains(const void *ptr);
   12.98  
   12.99 @@ -582,18 +575,12 @@
  12.100    // Type of metadata allocated.
  12.101    Metaspace::MetadataType _mdtype;
  12.102  
  12.103 -  // Chunk related size
  12.104 -  size_t _medium_chunk_bunch;
  12.105 -
  12.106    // List of chunks in use by this SpaceManager.  Allocations
  12.107    // are done from the current chunk.  The list is used for deallocating
  12.108    // chunks when the SpaceManager is freed.
  12.109    Metachunk* _chunks_in_use[NumberOfInUseLists];
  12.110    Metachunk* _current_chunk;
  12.111  
  12.112 -  // Virtual space where allocation comes from.
  12.113 -  VirtualSpaceList* _vs_list;
  12.114 -
  12.115    // Number of small chunks to allocate to a manager
  12.116    // If class space manager, small chunks are unlimited
  12.117    static uint const _small_chunk_limit;
  12.118 @@ -626,7 +613,9 @@
  12.119    }
  12.120  
  12.121    Metaspace::MetadataType mdtype() { return _mdtype; }
  12.122 -  VirtualSpaceList* vs_list() const    { return _vs_list; }
  12.123 +
  12.124 +  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  12.125 +  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
  12.126  
  12.127    Metachunk* current_chunk() const { return _current_chunk; }
  12.128    void set_current_chunk(Metachunk* v) {
  12.129 @@ -648,18 +637,19 @@
  12.130  
  12.131   public:
  12.132    SpaceManager(Metaspace::MetadataType mdtype,
  12.133 -               Mutex* lock,
  12.134 -               VirtualSpaceList* vs_list);
  12.135 +               Mutex* lock);
  12.136    ~SpaceManager();
  12.137  
  12.138    enum ChunkMultiples {
  12.139      MediumChunkMultiple = 4
  12.140    };
  12.141  
  12.142 +  bool is_class() { return _mdtype == Metaspace::ClassType; }
  12.143 +
  12.144    // Accessors
  12.145    size_t specialized_chunk_size() { return SpecializedChunk; }
  12.146 -  size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
  12.147 -  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
  12.148 +  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  12.149 +  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  12.150    size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
  12.151  
  12.152    size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  12.153 @@ -762,7 +752,7 @@
  12.154    _container_count++;
  12.155    assert(_container_count == container_count_slow(),
  12.156           err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  12.157 -                 "container_count_slow() " SIZE_FORMAT,
  12.158 +                 " container_count_slow() " SIZE_FORMAT,
  12.159                   _container_count, container_count_slow()));
  12.160  }
  12.161  
  12.162 @@ -775,7 +765,7 @@
  12.163  void VirtualSpaceNode::verify_container_count() {
  12.164    assert(_container_count == container_count_slow(),
  12.165      err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  12.166 -            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  12.167 +            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  12.168  }
  12.169  #endif
  12.170  
  12.171 @@ -1020,7 +1010,7 @@
  12.172  // Walk the list of VirtualSpaceNodes and delete
  12.173  // nodes with a 0 container_count.  Remove Metachunks in
  12.174  // the node from their respective freelists.
  12.175 -void VirtualSpaceList::purge() {
  12.176 +void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  12.177    assert_lock_strong(SpaceManager::expand_lock());
  12.178    // Don't use a VirtualSpaceListIterator because this
  12.179    // list is being changed and a straightforward use of an iterator is not safe.
  12.180 @@ -1042,7 +1032,7 @@
  12.181          prev_vsl->set_next(vsl->next());
  12.182        }
  12.183  
  12.184 -      vsl->purge(chunk_manager());
  12.185 +      vsl->purge(chunk_manager);
  12.186        dec_reserved_words(vsl->reserved_words());
  12.187        dec_committed_words(vsl->committed_words());
  12.188        dec_virtual_space_count();
  12.189 @@ -1064,36 +1054,6 @@
  12.190  #endif
  12.191  }
  12.192  
  12.193 -size_t VirtualSpaceList::used_words_sum() {
  12.194 -  size_t allocated_by_vs = 0;
  12.195 -  VirtualSpaceListIterator iter(virtual_space_list());
  12.196 -  while (iter.repeat()) {
  12.197 -    VirtualSpaceNode* vsl = iter.get_next();
  12.198 -    // Sum used region [bottom, top) in each virtualspace
  12.199 -    allocated_by_vs += vsl->used_words_in_vs();
  12.200 -  }
  12.201 -  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
  12.202 -    err_msg("Total in free chunks " SIZE_FORMAT
  12.203 -            " greater than total from virtual_spaces " SIZE_FORMAT,
  12.204 -            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
  12.205 -  size_t used =
  12.206 -    allocated_by_vs - chunk_manager()->free_chunks_total_words();
  12.207 -  return used;
  12.208 -}
  12.209 -
  12.210 -// Space available in all MetadataVirtualspaces allocated
  12.211 -// for metadata.  This is the upper limit on the capacity
  12.212 -// of chunks allocated out of all the MetadataVirtualspaces.
  12.213 -size_t VirtualSpaceList::capacity_words_sum() {
  12.214 -  size_t capacity = 0;
  12.215 -  VirtualSpaceListIterator iter(virtual_space_list());
  12.216 -  while (iter.repeat()) {
  12.217 -    VirtualSpaceNode* vsl = iter.get_next();
  12.218 -    capacity += vsl->capacity_words_in_vs();
  12.219 -  }
  12.220 -  return capacity;
  12.221 -}
  12.222 -
  12.223  VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  12.224                                     _is_class(false),
  12.225                                     _virtual_space_list(NULL),
  12.226 @@ -1104,10 +1064,6 @@
  12.227    MutexLockerEx cl(SpaceManager::expand_lock(),
  12.228                     Mutex::_no_safepoint_check_flag);
  12.229    bool initialization_succeeded = grow_vs(word_size);
  12.230 -
  12.231 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  12.232 -  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  12.233 -  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  12.234    assert(initialization_succeeded,
  12.235      " VirtualSpaceList initialization should not fail");
  12.236  }
  12.237 @@ -1123,9 +1079,6 @@
  12.238                     Mutex::_no_safepoint_check_flag);
  12.239    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  12.240    bool succeeded = class_entry->initialize();
  12.241 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  12.242 -  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  12.243 -  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  12.244    assert(succeeded, " VirtualSpaceList initialization should not fail");
  12.245    link_vs(class_entry);
  12.246  }
  12.247 @@ -1142,7 +1095,7 @@
  12.248    }
  12.249    // Reserve the space
  12.250    size_t vs_byte_size = vs_word_size * BytesPerWord;
  12.251 -  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
  12.252 +  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
  12.253  
  12.254    // Allocate the meta virtual space and initialize it.
  12.255    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  12.256 @@ -1195,15 +1148,8 @@
  12.257                                             size_t grow_chunks_by_words,
  12.258                                             size_t medium_chunk_bunch) {
  12.259  
  12.260 -  // Get a chunk from the chunk freelist
  12.261 -  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  12.262 -
  12.263 -  if (next != NULL) {
  12.264 -    next->container()->inc_container_count();
  12.265 -  } else {
  12.266 -    // Allocate a chunk out of the current virtual space.
  12.267 -    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  12.268 -  }
  12.269 +  // Allocate a chunk out of the current virtual space.
  12.270 +  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  12.271  
  12.272    if (next == NULL) {
  12.273      // Not enough room in current virtual space.  Try to commit
  12.274 @@ -1221,12 +1167,14 @@
  12.275        // being used for CompressedHeaders, don't allocate a new virtualspace.
  12.276        if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  12.277          // Get another virtual space.
  12.278 -          size_t grow_vs_words =
  12.279 -            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  12.280 +        size_t allocation_aligned_expand_words =
  12.281 +            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
  12.282 +        size_t grow_vs_words =
  12.283 +            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
  12.284          if (grow_vs(grow_vs_words)) {
  12.285            // Got it.  It's on the list now.  Get a chunk from it.
  12.286            assert(current_virtual_space()->expanded_words() == 0,
  12.287 -              "New virtuals space nodes should not have expanded");
  12.288 +              "New virtual space nodes should not have expanded");
  12.289  
  12.290            size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
  12.291                                                                page_size_words);
  12.292 @@ -1342,8 +1290,9 @@
  12.293    // reserved space, because this is a larger space prereserved for compressed
  12.294    // class pointers.
  12.295    if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  12.296 -    size_t real_allocated = Metaspace::space_list()->reserved_words() +
  12.297 -              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  12.298 +    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  12.299 +    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  12.300 +    size_t real_allocated     = nonclass_allocated + class_allocated;
  12.301      if (real_allocated >= MaxMetaspaceSize) {
  12.302        return false;
  12.303      }
  12.304 @@ -1536,15 +1485,15 @@
  12.305        if (dummy_chunk == NULL) {
  12.306          break;
  12.307        }
  12.308 -      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  12.309 +      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  12.310  
  12.311        if (TraceMetadataChunkAllocation && Verbose) {
  12.312          gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  12.313                                 sm->sum_count_in_chunks_in_use());
  12.314          dummy_chunk->print_on(gclog_or_tty);
  12.315          gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  12.316 -                               vsl->chunk_manager()->free_chunks_total_words(),
  12.317 -                               vsl->chunk_manager()->free_chunks_count());
  12.318 +                               sm->chunk_manager()->free_chunks_total_words(),
  12.319 +                               sm->chunk_manager()->free_chunks_count());
  12.320        }
  12.321      }
  12.322    } else {
  12.323 @@ -1796,6 +1745,8 @@
  12.324    // work.
  12.325    chunk->set_is_free(false);
  12.326  #endif
  12.327 +  chunk->container()->inc_container_count();
  12.328 +
  12.329    slow_locked_verify();
  12.330    return chunk;
  12.331  }
  12.332 @@ -1830,9 +1781,9 @@
  12.333    return chunk;
  12.334  }
  12.335  
  12.336 -void ChunkManager::print_on(outputStream* out) {
  12.337 +void ChunkManager::print_on(outputStream* out) const {
  12.338    if (PrintFLSStatistics != 0) {
  12.339 -    humongous_dictionary()->report_statistics();
  12.340 +    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  12.341    }
  12.342  }
  12.343  
  12.344 @@ -1979,8 +1930,8 @@
  12.345      }
  12.346    }
  12.347  
  12.348 -  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  12.349 -  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  12.350 +  chunk_manager()->locked_print_free_chunks(st);
  12.351 +  chunk_manager()->locked_print_sum_free_chunks(st);
  12.352  }
  12.353  
  12.354  size_t SpaceManager::calc_chunk_size(size_t word_size) {
  12.355 @@ -2084,9 +2035,7 @@
  12.356  }
  12.357  
  12.358  SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  12.359 -                           Mutex* lock,
  12.360 -                           VirtualSpaceList* vs_list) :
  12.361 -  _vs_list(vs_list),
  12.362 +                           Mutex* lock) :
  12.363    _mdtype(mdtype),
  12.364    _allocated_blocks_words(0),
  12.365    _allocated_chunks_words(0),
  12.366 @@ -2172,9 +2121,7 @@
  12.367    MutexLockerEx fcl(SpaceManager::expand_lock(),
  12.368                      Mutex::_no_safepoint_check_flag);
  12.369  
  12.370 -  ChunkManager* chunk_manager = vs_list()->chunk_manager();
  12.371 -
  12.372 -  chunk_manager->slow_locked_verify();
  12.373 +  chunk_manager()->slow_locked_verify();
  12.374  
  12.375    dec_total_from_size_metrics();
  12.376  
  12.377 @@ -2188,8 +2135,8 @@
  12.378  
  12.379    // Have to update before the chunks_in_use lists are emptied
  12.380    // below.
  12.381 -  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
  12.382 -                                       sum_count_in_chunks_in_use());
  12.383 +  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
  12.384 +                                         sum_count_in_chunks_in_use());
  12.385  
  12.386    // Add all the chunks in use by this space manager
  12.387    // to the global list of free chunks.
  12.388 @@ -2204,11 +2151,11 @@
  12.389                               chunk_size_name(i));
  12.390      }
  12.391      Metachunk* chunks = chunks_in_use(i);
  12.392 -    chunk_manager->return_chunks(i, chunks);
  12.393 +    chunk_manager()->return_chunks(i, chunks);
  12.394      set_chunks_in_use(i, NULL);
  12.395      if (TraceMetadataChunkAllocation && Verbose) {
  12.396        gclog_or_tty->print_cr("updated freelist count %d %s",
  12.397 -                             chunk_manager->free_chunks(i)->count(),
  12.398 +                             chunk_manager()->free_chunks(i)->count(),
  12.399                               chunk_size_name(i));
  12.400      }
  12.401      assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  12.402 @@ -2245,16 +2192,16 @@
  12.403                     humongous_chunks->word_size(), HumongousChunkGranularity));
  12.404      Metachunk* next_humongous_chunks = humongous_chunks->next();
  12.405      humongous_chunks->container()->dec_container_count();
  12.406 -    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  12.407 +    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
  12.408      humongous_chunks = next_humongous_chunks;
  12.409    }
  12.410    if (TraceMetadataChunkAllocation && Verbose) {
  12.411      gclog_or_tty->print_cr("");
  12.412      gclog_or_tty->print_cr("updated dictionary count %d %s",
  12.413 -                     chunk_manager->humongous_dictionary()->total_count(),
  12.414 +                     chunk_manager()->humongous_dictionary()->total_count(),
  12.415                       chunk_size_name(HumongousIndex));
  12.416    }
  12.417 -  chunk_manager->slow_locked_verify();
  12.418 +  chunk_manager()->slow_locked_verify();
  12.419  }
  12.420  
  12.421  const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  12.422 @@ -2343,9 +2290,7 @@
  12.423      gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  12.424                          sum_count_in_chunks_in_use());
  12.425      new_chunk->print_on(gclog_or_tty);
  12.426 -    if (vs_list() != NULL) {
  12.427 -      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  12.428 -    }
  12.429 +    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  12.430    }
  12.431  }
  12.432  
  12.433 @@ -2361,10 +2306,14 @@
  12.434  
  12.435  Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  12.436                                         size_t grow_chunks_by_words) {
  12.437 -
  12.438 -  Metachunk* next = vs_list()->get_new_chunk(word_size,
  12.439 -                                             grow_chunks_by_words,
  12.440 -                                             medium_chunk_bunch());
  12.441 +  // Get a chunk from the chunk freelist
  12.442 +  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  12.443 +
  12.444 +  if (next == NULL) {
  12.445 +    next = vs_list()->get_new_chunk(word_size,
  12.446 +                                    grow_chunks_by_words,
  12.447 +                                    medium_chunk_bunch());
  12.448 +  }
  12.449  
  12.450    if (TraceMetadataHumongousAllocation && next != NULL &&
  12.451        SpaceManager::is_humongous(next->word_size())) {
  12.452 @@ -2644,13 +2593,12 @@
  12.453  size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  12.454  
  12.455  size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  12.456 -  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  12.457 -  if (list == NULL) {
  12.458 +  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  12.459 +  if (chunk_manager == NULL) {
  12.460      return 0;
  12.461    }
  12.462 -  ChunkManager* chunk = list->chunk_manager();
  12.463 -  chunk->slow_verify();
  12.464 -  return chunk->free_chunks_total_words();
  12.465 +  chunk_manager->slow_verify();
  12.466 +  return chunk_manager->free_chunks_total_words();
  12.467  }
  12.468  
  12.469  size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  12.470 @@ -2801,9 +2749,9 @@
  12.471  }
  12.472  
  12.473  void MetaspaceAux::verify_free_chunks() {
  12.474 -  Metaspace::space_list()->chunk_manager()->verify();
  12.475 +  Metaspace::chunk_manager_metadata()->verify();
  12.476    if (Metaspace::using_class_space()) {
  12.477 -    Metaspace::class_space_list()->chunk_manager()->verify();
  12.478 +    Metaspace::chunk_manager_class()->verify();
  12.479    }
  12.480  }
  12.481  
  12.482 @@ -2874,6 +2822,9 @@
  12.483  VirtualSpaceList* Metaspace::_space_list = NULL;
  12.484  VirtualSpaceList* Metaspace::_class_space_list = NULL;
  12.485  
  12.486 +ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
  12.487 +ChunkManager* Metaspace::_chunk_manager_class = NULL;
  12.488 +
  12.489  #define VIRTUALSPACEMULTIPLIER 2
  12.490  
  12.491  #ifdef _LP64
  12.492 @@ -2981,6 +2932,7 @@
  12.493           err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  12.494    assert(using_class_space(), "Must be using class space");
  12.495    _class_space_list = new VirtualSpaceList(rs);
  12.496 +  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
  12.497  }
  12.498  
  12.499  #endif
  12.500 @@ -3006,6 +2958,7 @@
  12.501      // remainder is the misc code and data chunks.
  12.502      cds_total = FileMapInfo::shared_spaces_size();
  12.503      _space_list = new VirtualSpaceList(cds_total/wordSize);
  12.504 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  12.505  
  12.506  #ifdef _LP64
  12.507      // Set the compressed klass pointer base so that decoding of these pointers works
  12.508 @@ -3073,15 +3026,30 @@
  12.509      size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
  12.510      // Initialize the list of virtual spaces.
  12.511      _space_list = new VirtualSpaceList(word_size);
  12.512 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  12.513    }
  12.514  }
  12.515  
  12.516 +Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  12.517 +                                               size_t chunk_word_size,
  12.518 +                                               size_t chunk_bunch) {
  12.519 +  // Get a chunk from the chunk freelist
  12.520 +  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  12.521 +  if (chunk != NULL) {
  12.522 +    return chunk;
  12.523 +  }
  12.524 +
  12.525 +  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
  12.526 +}
  12.527 +
  12.528  void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  12.529  
  12.530    assert(space_list() != NULL,
  12.531      "Metadata VirtualSpaceList has not been initialized");
  12.532 -
  12.533 -  _vsm = new SpaceManager(NonClassType, lock, space_list());
  12.534 +  assert(chunk_manager_metadata() != NULL,
  12.535 +    "Metadata ChunkManager has not been initialized");
  12.536 +
  12.537 +  _vsm = new SpaceManager(NonClassType, lock);
  12.538    if (_vsm == NULL) {
  12.539      return;
  12.540    }
  12.541 @@ -3090,11 +3058,13 @@
  12.542    vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
  12.543  
  12.544    if (using_class_space()) {
  12.545 -    assert(class_space_list() != NULL,
  12.546 -      "Class VirtualSpaceList has not been initialized");
  12.547 +    assert(class_space_list() != NULL,
  12.548 +      "Class VirtualSpaceList has not been initialized");
  12.549 +    assert(chunk_manager_class() != NULL,
  12.550 +      "Class ChunkManager has not been initialized");
  12.551  
  12.552      // Allocate SpaceManager for classes.
  12.553 -    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
  12.554 +    _class_vsm = new SpaceManager(ClassType, lock);
  12.555      if (_class_vsm == NULL) {
  12.556        return;
  12.557      }
  12.558 @@ -3103,9 +3073,9 @@
  12.559    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  12.560  
  12.561    // Allocate chunk for metadata objects
  12.562 -  Metachunk* new_chunk =
  12.563 -     space_list()->get_initialization_chunk(word_size,
  12.564 -                                            vsm()->medium_chunk_bunch());
  12.565 +  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
  12.566 +                                                  word_size,
  12.567 +                                                  vsm()->medium_chunk_bunch());
  12.568    assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  12.569    if (new_chunk != NULL) {
  12.570      // Add to this manager's list of chunks in use and current_chunk().
  12.571 @@ -3114,9 +3084,9 @@
  12.572  
  12.573    // Allocate chunk for class metadata objects
  12.574    if (using_class_space()) {
  12.575 -    Metachunk* class_chunk =
  12.576 -       class_space_list()->get_initialization_chunk(class_word_size,
  12.577 -                                                    class_vsm()->medium_chunk_bunch());
  12.578 +    Metachunk* class_chunk = get_initialization_chunk(ClassType,
  12.579 +                                                      class_word_size,
  12.580 +                                                      class_vsm()->medium_chunk_bunch());
  12.581      if (class_chunk != NULL) {
  12.582        class_vsm()->add_chunk(class_chunk, true);
  12.583      }
  12.584 @@ -3333,12 +3303,16 @@
  12.585    }
  12.586  }
  12.587  
  12.588 +void Metaspace::purge(MetadataType mdtype) {
  12.589 +  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
  12.590 +}
  12.591 +
  12.592  void Metaspace::purge() {
  12.593    MutexLockerEx cl(SpaceManager::expand_lock(),
  12.594                     Mutex::_no_safepoint_check_flag);
  12.595 -  space_list()->purge();
  12.596 +  purge(NonClassType);
  12.597    if (using_class_space()) {
  12.598 -    class_space_list()->purge();
  12.599 +    purge(ClassType);
  12.600    }
  12.601  }
  12.602  
  12.603 @@ -3385,7 +3359,7 @@
  12.604  
  12.605  #ifndef PRODUCT
  12.606  
  12.607 -class MetaspaceAuxTest : AllStatic {
  12.608 +class TestMetaspaceAuxTest : AllStatic {
  12.609   public:
  12.610    static void test_reserved() {
  12.611      size_t reserved = MetaspaceAux::reserved_bytes();
  12.612 @@ -3425,14 +3399,25 @@
  12.613      }
  12.614    }
  12.615  
  12.616 +  static void test_virtual_space_list_large_chunk() {
  12.617 +    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
  12.618 +    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  12.619 +    // Use a size larger than VirtualSpaceSize (256k), plus one page so that the size
  12.620 +    // is _not_ vm_allocation_granularity aligned on Windows.
  12.621 +    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
  12.622 +    large_size += (os::vm_page_size()/BytesPerWord);
  12.623 +    vs_list->get_new_chunk(large_size, large_size, 0);
  12.624 +  }
  12.625 +
  12.626    static void test() {
  12.627      test_reserved();
  12.628      test_committed();
  12.629 +    test_virtual_space_list_large_chunk();
  12.630    }
  12.631  };
  12.632  
  12.633 -void MetaspaceAux_test() {
  12.634 -  MetaspaceAuxTest::test();
  12.635 +void TestMetaspaceAux_test() {
  12.636 +  TestMetaspaceAuxTest::test();
  12.637  }
  12.638  
  12.639  #endif
    13.1 --- a/src/share/vm/memory/metaspace.hpp	Wed Sep 25 13:03:21 2013 -0400
    13.2 +++ b/src/share/vm/memory/metaspace.hpp	Thu Sep 26 12:18:21 2013 +0200
    13.3 @@ -56,12 +56,15 @@
    13.4  //                       +-------------------+
    13.5  //
    13.6  
    13.7 +class ChunkManager;
    13.8  class ClassLoaderData;
    13.9  class Metablock;
   13.10 +class Metachunk;
   13.11  class MetaWord;
   13.12  class Mutex;
   13.13  class outputStream;
   13.14  class SpaceManager;
   13.15 +class VirtualSpaceList;
   13.16  
   13.17  // Metaspaces each have a  SpaceManager and allocations
   13.18  // are done by the SpaceManager.  Allocations are done
   13.19 @@ -76,8 +79,6 @@
   13.20  // allocate() method returns a block for use as a
   13.21  // quantum of metadata.
   13.22  
   13.23 -class VirtualSpaceList;
   13.24 -
   13.25  class Metaspace : public CHeapObj<mtClass> {
   13.26    friend class VMStructs;
   13.27    friend class SpaceManager;
   13.28 @@ -102,6 +103,10 @@
   13.29   private:
   13.30    void initialize(Mutex* lock, MetaspaceType type);
   13.31  
   13.32 +  Metachunk* get_initialization_chunk(MetadataType mdtype,
   13.33 +                                      size_t chunk_word_size,
   13.34 +                                      size_t chunk_bunch);
   13.35 +
   13.36    // Align up the word size to the allocation word size
   13.37    static size_t align_word_size_up(size_t);
   13.38  
   13.39 @@ -134,6 +139,10 @@
   13.40    static VirtualSpaceList* _space_list;
   13.41    static VirtualSpaceList* _class_space_list;
   13.42  
   13.43 +  static ChunkManager* _chunk_manager_metadata;
   13.44 +  static ChunkManager* _chunk_manager_class;
   13.45 +
   13.46 + public:
   13.47    static VirtualSpaceList* space_list()       { return _space_list; }
   13.48    static VirtualSpaceList* class_space_list() { return _class_space_list; }
   13.49    static VirtualSpaceList* get_space_list(MetadataType mdtype) {
   13.50 @@ -141,6 +150,14 @@
   13.51      return mdtype == ClassType ? class_space_list() : space_list();
   13.52    }
   13.53  
   13.54 +  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
   13.55 +  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
   13.56 +  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
   13.57 +    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
   13.58 +    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
   13.59 +  }
   13.60 +
   13.61 + private:
   13.62    // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   13.63    // maintain a single list for now.
   13.64    void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
   13.65 @@ -199,6 +216,7 @@
   13.66    void dump(outputStream* const out) const;
   13.67  
   13.68    // Free empty virtualspaces
   13.69 +  static void purge(MetadataType mdtype);
   13.70    static void purge();
   13.71  
   13.72    void print_on(outputStream* st) const;
    14.1 --- a/src/share/vm/prims/jni.cpp	Wed Sep 25 13:03:21 2013 -0400
    14.2 +++ b/src/share/vm/prims/jni.cpp	Thu Sep 26 12:18:21 2013 +0200
    14.3 @@ -5046,7 +5046,10 @@
    14.4  void TestReservedSpace_test();
    14.5  void TestReserveMemorySpecial_test();
    14.6  void TestVirtualSpace_test();
    14.7 -void MetaspaceAux_test();
    14.8 +void TestMetaspaceAux_test();
    14.9 +#if INCLUDE_ALL_GCS
   14.10 +void TestG1BiasedArray_test();
   14.11 +#endif
   14.12  
   14.13  void execute_internal_vm_tests() {
   14.14    if (ExecuteInternalVMTests) {
   14.15 @@ -5054,7 +5057,7 @@
   14.16      run_unit_test(TestReservedSpace_test());
   14.17      run_unit_test(TestReserveMemorySpecial_test());
   14.18      run_unit_test(TestVirtualSpace_test());
   14.19 -    run_unit_test(MetaspaceAux_test());
   14.20 +    run_unit_test(TestMetaspaceAux_test());
   14.21      run_unit_test(GlobalDefinitions::test_globals());
   14.22      run_unit_test(GCTimerAllTest::all());
   14.23      run_unit_test(arrayOopDesc::test_max_array_length());
   14.24 @@ -5066,6 +5069,7 @@
   14.25      run_unit_test(VMStructs::test());
   14.26  #endif
   14.27  #if INCLUDE_ALL_GCS
   14.28 +    run_unit_test(TestG1BiasedArray_test());
   14.29      run_unit_test(HeapRegionRemSet::test_prt());
   14.30  #endif
   14.31      tty->print_cr("All internal VM tests passed");
