8024547: MaxMetaspaceSize should limit the committed memory used by the metaspaces

author     stefank
date       Mon, 07 Oct 2013 15:51:08 +0200
changeset  5863:85c1ca43713f
parent     5862:82af7d7a0128
child      5864:a6414751d537

8024547: MaxMetaspaceSize should limit the committed memory used by the metaspaces
Reviewed-by: brutisso, jmasa, coleenp

src/share/vm/gc_implementation/shared/vmGCOperations.hpp
src/share/vm/gc_interface/collectedHeap.cpp
src/share/vm/gc_interface/collectedHeap.hpp
src/share/vm/memory/collectorPolicy.cpp
src/share/vm/memory/filemap.hpp
src/share/vm/memory/metaspace.cpp
src/share/vm/memory/metaspace.hpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/globals.hpp
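
Note: the central change in this patch is that MaxMetaspaceSize now bounds
committed metaspace memory rather than reserved memory. Below is a minimal
standalone sketch of that policy, not HotSpot code; the names mirror
MetaspaceGC::can_expand() and MetaspaceGC::allowed_expansion() from the
patch, and the constant is illustrative.

    #include <algorithm>
    #include <cstddef>

    const size_t max_metaspace_size = 256 * 1024 * 1024; // e.g. -XX:MaxMetaspaceSize
    size_t committed_bytes = 0;    // sum of committed memory over all nodes
    size_t capacity_until_gc = 0;  // GC high-water mark, updated atomically

    // Refuse any commit that would push the committed total past the limit.
    bool can_expand(size_t request_bytes) {
      return committed_bytes + request_bytes <= max_metaspace_size;
    }

    // Allow committing only up to the GC high-water mark and the hard limit.
    size_t allowed_expansion_bytes() {
      size_t left_until_max = max_metaspace_size - committed_bytes;
      if (capacity_until_gc <= committed_bytes) {
        return 0;  // induce a GC before committing more
      }
      size_t left_until_gc = capacity_until_gc - committed_bytes;
      return std::min(left_until_gc, left_until_max);
    }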
     1.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed Oct 09 10:57:01 2013 +0200
     1.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Oct 07 15:51:08 2013 +0200
     1.3 @@ -214,9 +214,6 @@
     1.4      : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
     1.5        _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
     1.6    }
     1.7 -  ~VM_CollectForMetadataAllocation()  {
     1.8 -    MetaspaceGC::set_expand_after_GC(false);
     1.9 -  }
    1.10    virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
    1.11    virtual void doit();
    1.12    MetaWord* result() const       { return _result; }
     2.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp	Wed Oct 09 10:57:01 2013 +0200
     2.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp	Mon Oct 07 15:51:08 2013 +0200
     2.3 @@ -202,12 +202,6 @@
     2.4        ShouldNotReachHere(); // Unexpected use of this function
     2.5    }
     2.6  }
     2.7 -MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
     2.8 -                                              ClassLoaderData* loader_data,
     2.9 -                                              size_t size, Metaspace::MetadataType mdtype) {
    2.10 -  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
    2.11 -}
    2.12 -
    2.13  
    2.14  void CollectedHeap::pre_initialize() {
    2.15    // Used for ReduceInitialCardMarks (when COMPILER2 is used);
     3.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Oct 09 10:57:01 2013 +0200
     3.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Mon Oct 07 15:51:08 2013 +0200
     3.3 @@ -475,11 +475,6 @@
     3.4    // the context of the vm thread.
     3.5    virtual void collect_as_vm_thread(GCCause::Cause cause);
     3.6  
     3.7 -  // Callback from VM_CollectForMetadataAllocation operation.
     3.8 -  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
     3.9 -                                               size_t size,
    3.10 -                                               Metaspace::MetadataType mdtype);
    3.11 -
    3.12    // Returns the barrier set for this heap
    3.13    BarrierSet* barrier_set() { return _barrier_set; }
    3.14  
     4.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Wed Oct 09 10:57:01 2013 +0200
     4.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Oct 07 15:51:08 2013 +0200
     4.3 @@ -47,11 +47,6 @@
     4.4  
     4.5  // CollectorPolicy methods.
     4.6  
     4.7 -// Align down. If the aligning result in 0, return 'alignment'.
     4.8 -static size_t restricted_align_down(size_t size, size_t alignment) {
     4.9 -  return MAX2(alignment, align_size_down_(size, alignment));
    4.10 -}
    4.11 -
    4.12  void CollectorPolicy::initialize_flags() {
    4.13    assert(_max_alignment >= _min_alignment,
    4.14           err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
    4.15 @@ -64,34 +59,7 @@
    4.16      vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
    4.17    }
    4.18  
    4.19 -  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
    4.20 -  // override if MaxMetaspaceSize was set on the command line or not.
    4.21 -  // This information is needed later to conform to the specification of the
    4.22 -  // java.lang.management.MemoryUsage API.
    4.23 -  //
    4.24 -  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
    4.25 -  // globals.hpp to the aligned value, but this is not possible, since the
    4.26 -  // alignment depends on other flags being parsed.
    4.27 -  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _max_alignment);
    4.28 -
    4.29 -  if (MetaspaceSize > MaxMetaspaceSize) {
    4.30 -    MetaspaceSize = MaxMetaspaceSize;
    4.31 -  }
    4.32 -
    4.33 -  MetaspaceSize = restricted_align_down(MetaspaceSize, _min_alignment);
    4.34 -
    4.35 -  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
    4.36 -
    4.37 -  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _min_alignment);
    4.38 -  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _min_alignment);
    4.39 -
    4.40    MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
    4.41 -
    4.42 -  assert(MetaspaceSize    % _min_alignment == 0, "metapace alignment");
    4.43 -  assert(MaxMetaspaceSize % _max_alignment == 0, "maximum metaspace alignment");
    4.44 -  if (MetaspaceSize < 256*K) {
    4.45 -    vm_exit_during_initialization("Too small initial Metaspace size");
    4.46 -  }
    4.47  }
    4.48  
    4.49  void CollectorPolicy::initialize_size_info() {
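Note: restricted_align_down() is removed from CollectorPolicy here and
re-introduced in metaspace.cpp further down. Its contract is "align down,
but never below the alignment itself". A standalone equivalent with a worked
example, assuming a power-of-two alignment (which align_size_down_ requires):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    static size_t restricted_align_down(size_t size, size_t alignment) {
      size_t aligned = size & ~(alignment - 1);  // align down (power-of-two alignment)
      return std::max(alignment, aligned);
    }

    int main() {
      assert(restricted_align_down(1000, 256) == 768);  // ordinary align-down
      assert(restricted_align_down(100,  256) == 256);  // clamped up to the alignment
      return 0;
    }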
     5.1 --- a/src/share/vm/memory/filemap.hpp	Wed Oct 09 10:57:01 2013 +0200
     5.2 +++ b/src/share/vm/memory/filemap.hpp	Mon Oct 07 15:51:08 2013 +0200
     5.3 @@ -26,6 +26,7 @@
     5.4  #define SHARE_VM_MEMORY_FILEMAP_HPP
     5.5  
     5.6  #include "memory/metaspaceShared.hpp"
     5.7 +#include "memory/metaspace.hpp"
     5.8  
     5.9  // Layout of the file:
    5.10  //  header: dump of archive instance plus versioning info, datestamp, etc.
     6.1 --- a/src/share/vm/memory/metaspace.cpp	Wed Oct 09 10:57:01 2013 +0200
     6.2 +++ b/src/share/vm/memory/metaspace.cpp	Mon Oct 07 15:51:08 2013 +0200
     6.3 @@ -29,13 +29,16 @@
     6.4  #include "memory/collectorPolicy.hpp"
     6.5  #include "memory/filemap.hpp"
     6.6  #include "memory/freeList.hpp"
     6.7 +#include "memory/gcLocker.hpp"
     6.8  #include "memory/metablock.hpp"
     6.9  #include "memory/metachunk.hpp"
    6.10  #include "memory/metaspace.hpp"
    6.11  #include "memory/metaspaceShared.hpp"
    6.12  #include "memory/resourceArea.hpp"
    6.13  #include "memory/universe.hpp"
    6.14 +#include "runtime/atomic.inline.hpp"
    6.15  #include "runtime/globals.hpp"
    6.16 +#include "runtime/init.hpp"
    6.17  #include "runtime/java.hpp"
    6.18  #include "runtime/mutex.hpp"
    6.19  #include "runtime/orderAccess.hpp"
    6.20 @@ -84,13 +87,7 @@
    6.21    return (ChunkIndex) (i+1);
    6.22  }
    6.23  
    6.24 -// Originally _capacity_until_GC was set to MetaspaceSize here but
    6.25 -// the default MetaspaceSize before argument processing was being
    6.26 -// used which was not the desired value.  See the code
    6.27 -// in should_expand() to see how the initialization is handled
    6.28 -// now.
    6.29 -size_t MetaspaceGC::_capacity_until_GC = 0;
    6.30 -bool MetaspaceGC::_expand_after_GC = false;
    6.31 +volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
    6.32  uint MetaspaceGC::_shrink_factor = 0;
    6.33  bool MetaspaceGC::_should_concurrent_collect = false;
    6.34  
    6.35 @@ -293,9 +290,10 @@
    6.36    MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
    6.37  
    6.38    size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
    6.39 -  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
    6.40    size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
    6.41  
    6.42 +  bool is_pre_committed() const { return _virtual_space.special(); }
    6.43 +
    6.44    // address of next available space in _virtual_space;
    6.45    // Accessors
    6.46    VirtualSpaceNode* next() { return _next; }
    6.47 @@ -337,7 +335,7 @@
    6.48  
    6.49    // Expands/shrinks the committed space in a virtual space.  Delegates
    6.50    // to Virtualspace
    6.51 -  bool expand_by(size_t words, bool pre_touch = false);
    6.52 +  bool expand_by(size_t min_words, size_t preferred_words);
    6.53  
    6.54    // In preparation for deleting this node, remove all the chunks
    6.55    // in the node from any freelist.
    6.56 @@ -351,29 +349,64 @@
    6.57    void print_on(outputStream* st) const;
    6.58  };
    6.59  
    6.60 +#define assert_is_ptr_aligned(ptr, alignment) \
    6.61 +  assert(is_ptr_aligned(ptr, alignment),      \
    6.62 +    err_msg(PTR_FORMAT " is not aligned to "  \
    6.63 +      SIZE_FORMAT, ptr, alignment))
    6.64 +
    6.65 +#define assert_is_size_aligned(size, alignment) \
    6.66 +  assert(is_size_aligned(size, alignment),      \
    6.67 +    err_msg(SIZE_FORMAT " is not aligned to "   \
    6.68 +       SIZE_FORMAT, size, alignment))
    6.69 +
    6.70 +
    6.71 +// Decide if large pages should be committed when the memory is reserved.
    6.72 +static bool should_commit_large_pages_when_reserving(size_t bytes) {
    6.73 +  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    6.74 +    size_t words = bytes / BytesPerWord;
    6.75 +    bool is_class = false; // We never reserve large pages for the class space.
    6.76 +    if (MetaspaceGC::can_expand(words, is_class) &&
    6.77 +        MetaspaceGC::allowed_expansion() >= words) {
    6.78 +      return true;
    6.79 +    }
    6.80 +  }
    6.81 +
    6.82 +  return false;
    6.83 +}
    6.84 +
    6.85    // byte_size is the size of the associated virtualspace.
    6.86 -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
    6.87 -  // align up to vm allocation granularity
    6.88 -  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
    6.89 +VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
    6.90 +  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
    6.91  
    6.92    // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
    6.93    // configurable address, generally at the top of the Java heap so other
    6.94    // memory addresses don't conflict.
    6.95    if (DumpSharedSpaces) {
    6.96 -    char* shared_base = (char*)SharedBaseAddress;
    6.97 -    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    6.98 +    bool large_pages = false; // No large pages when dumping the CDS archive.
    6.99 +    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
   6.100 +
   6.101 +    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
   6.102      if (_rs.is_reserved()) {
   6.103        assert(shared_base == 0 || _rs.base() == shared_base, "should match");
   6.104      } else {
   6.105        // Get a mmap region anywhere if the SharedBaseAddress fails.
   6.106 -      _rs = ReservedSpace(byte_size);
   6.107 +      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   6.108      }
   6.109      MetaspaceShared::set_shared_rs(&_rs);
   6.110    } else {
   6.111 -    _rs = ReservedSpace(byte_size);
   6.112 +    bool large_pages = should_commit_large_pages_when_reserving(bytes);
   6.113 +
   6.114 +    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   6.115    }
   6.116  
   6.117 -  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   6.118 +  if (_rs.is_reserved()) {
   6.119 +    assert(_rs.base() != NULL, "Catch if we get a NULL address");
   6.120 +    assert(_rs.size() != 0, "Catch if we get a 0 size");
   6.121 +    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
   6.122 +    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
   6.123 +
   6.124 +    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   6.125 +  }
   6.126  }
   6.127  
   6.128  void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
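Note: should_commit_large_pages_when_reserving() covers platforms where large
pages cannot be committed piecewise (os::can_commit_large_page_memory() is
false): there the whole reservation must be committed up front, which is only
safe when the expansion checks would have permitted it anyway. Such a
ReservedSpace is "special", and is_pre_committed() returns true for the node.
For example, large pages in metaspace are opted into with (the host must have
large pages configured):

    java -XX:+UseLargePages -XX:+UseLargePagesInMetaspace -version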
   6.129 @@ -410,8 +443,6 @@
   6.130  #endif
   6.131  
   6.132  // List of VirtualSpaces for metadata allocation.
   6.133 -// It has a  _next link for singly linked list and a MemRegion
   6.134 -// for total space in the VirtualSpace.
   6.135  class VirtualSpaceList : public CHeapObj<mtClass> {
   6.136    friend class VirtualSpaceNode;
   6.137  
   6.138 @@ -419,16 +450,13 @@
   6.139      VirtualSpaceSize = 256 * K
   6.140    };
   6.141  
   6.142 -  // Global list of virtual spaces
   6.143    // Head of the list
   6.144    VirtualSpaceNode* _virtual_space_list;
   6.145    // virtual space currently being used for allocations
   6.146    VirtualSpaceNode* _current_virtual_space;
   6.147  
   6.148 -  // Can this virtual list allocate >1 spaces?  Also, used to determine
   6.149 -  // whether to allocate unlimited small chunks in this virtual space
   6.150 +  // Is this VirtualSpaceList used for the compressed class space
   6.151    bool _is_class;
   6.152 -  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
   6.153  
   6.154    // Sum of reserved and committed memory in the virtual spaces
   6.155    size_t _reserved_words;
   6.156 @@ -453,7 +481,7 @@
   6.157    // Get another virtual space and add it to the list.  This
   6.158    // is typically prompted by a failed attempt to allocate a chunk
   6.159    // and is typically followed by the allocation of a chunk.
   6.160 -  bool grow_vs(size_t vs_word_size);
   6.161 +  bool create_new_virtual_space(size_t vs_word_size);
   6.162  
   6.163   public:
   6.164    VirtualSpaceList(size_t word_size);
   6.165 @@ -465,12 +493,12 @@
   6.166                             size_t grow_chunks_by_words,
   6.167                             size_t medium_chunk_bunch);
   6.168  
   6.169 -  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
   6.170 -
   6.171 -  // Get the first chunk for a Metaspace.  Used for
   6.172 -  // special cases such as the boot class loader, reflection
   6.173 -  // class loader and anonymous class loader.
   6.174 -  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
   6.175 +  bool expand_node_by(VirtualSpaceNode* node,
   6.176 +                      size_t min_words,
   6.177 +                      size_t preferred_words);
   6.178 +
   6.179 +  bool expand_by(size_t min_words,
   6.180 +                 size_t preferred_words);
   6.181  
   6.182    VirtualSpaceNode* current_virtual_space() {
   6.183      return _current_virtual_space;
   6.184 @@ -478,8 +506,7 @@
   6.185  
   6.186    bool is_class() const { return _is_class; }
   6.187  
   6.188 -  // Allocate the first virtualspace.
   6.189 -  void initialize(size_t word_size);
   6.190 +  bool initialization_succeeded() { return _virtual_space_list != NULL; }
   6.191  
   6.192    size_t reserved_words()  { return _reserved_words; }
   6.193    size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
   6.194 @@ -869,6 +896,12 @@
   6.195    MetaWord* chunk_limit = top();
   6.196    assert(chunk_limit != NULL, "Not safe to call this method");
   6.197  
   6.198 +  // The virtual spaces are always expanded by the
   6.199 +  // commit granularity to enforce the following condition.
    6.200 +  // Without this, the is_available check will not work correctly.
   6.201 +  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
   6.202 +      "The committed memory doesn't match the expanded memory.");
   6.203 +
   6.204    if (!is_available(chunk_word_size)) {
   6.205      if (TraceMetadataChunkAllocation) {
   6.206        gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
   6.207 @@ -888,14 +921,21 @@
   6.208  
   6.209  
   6.210  // Expand the virtual space (commit more of the reserved space)
   6.211 -bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
   6.212 -  size_t bytes = words * BytesPerWord;
   6.213 -  bool result =  virtual_space()->expand_by(bytes, pre_touch);
   6.214 -  if (TraceMetavirtualspaceAllocation && !result) {
   6.215 -    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
   6.216 -                           "for byte size " SIZE_FORMAT, bytes);
   6.217 -    virtual_space()->print_on(gclog_or_tty);
   6.218 +bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
   6.219 +  size_t min_bytes = min_words * BytesPerWord;
   6.220 +  size_t preferred_bytes = preferred_words * BytesPerWord;
   6.221 +
   6.222 +  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
   6.223 +
   6.224 +  if (uncommitted < min_bytes) {
   6.225 +    return false;
   6.226    }
   6.227 +
   6.228 +  size_t commit = MIN2(preferred_bytes, uncommitted);
   6.229 +  bool result = virtual_space()->expand_by(commit, false);
   6.230 +
   6.231 +  assert(result, "Failed to commit memory");
   6.232 +
   6.233    return result;
   6.234  }
   6.235  
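Note: VirtualSpaceNode::expand_by() now takes a (min, preferred) pair: it
fails outright if even min_words cannot be committed within the remaining
reservation, and otherwise commits as much of preferred_words as fits. A
minimal illustrative model of that contract (plain C++, not HotSpot code):

    #include <algorithm>
    #include <cstddef>

    // Returns the number of bytes committed, or 0 on failure.
    size_t expand_by(size_t reserved_bytes, size_t& committed_bytes,
                     size_t min_bytes, size_t preferred_bytes) {
      size_t uncommitted = reserved_bytes - committed_bytes;
      if (uncommitted < min_bytes) {
        return 0;  // cannot satisfy even the minimum request
      }
      size_t commit = std::min(preferred_bytes, uncommitted);
      committed_bytes += commit;  // the real code commits via VirtualSpace
      return commit;
    }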
   6.236 @@ -914,12 +954,23 @@
   6.237      return false;
   6.238    }
   6.239  
   6.240 -  // An allocation out of this Virtualspace that is larger
   6.241 -  // than an initial commit size can waste that initial committed
   6.242 -  // space.
   6.243 -  size_t committed_byte_size = 0;
   6.244 -  bool result = virtual_space()->initialize(_rs, committed_byte_size);
    6.245 +  // These are necessary restrictions to make sure that the virtual space always
    6.246 +  // grows in steps of Metaspace::commit_alignment(). If both base and size are
    6.247 +  // aligned, only the middle alignment of the VirtualSpace is used.
   6.248 +  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
   6.249 +  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
   6.250 +
   6.251 +  // ReservedSpaces marked as special will have the entire memory
   6.252 +  // pre-committed. Setting a committed size will make sure that
    6.253 +  // committed_size and actual_committed_size agree.
   6.254 +  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
   6.255 +
   6.256 +  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
   6.257 +                                            Metaspace::commit_alignment());
   6.258    if (result) {
   6.259 +    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
   6.260 +        "Checking that the pre-committed memory was registered by the VirtualSpace");
   6.261 +
   6.262      set_top((MetaWord*)virtual_space()->low());
   6.263      set_reserved(MemRegion((HeapWord*)_rs.base(),
   6.264                   (HeapWord*)(_rs.base() + _rs.size())));
   6.265 @@ -976,13 +1027,23 @@
   6.266    _reserved_words = _reserved_words - v;
   6.267  }
   6.268  
   6.269 +#define assert_committed_below_limit()                             \
   6.270 +  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
   6.271 +      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
   6.272 +              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
   6.273 +          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
   6.274 +
   6.275  void VirtualSpaceList::inc_committed_words(size_t v) {
   6.276    assert_lock_strong(SpaceManager::expand_lock());
   6.277    _committed_words = _committed_words + v;
   6.278 +
   6.279 +  assert_committed_below_limit();
   6.280  }
   6.281  void VirtualSpaceList::dec_committed_words(size_t v) {
   6.282    assert_lock_strong(SpaceManager::expand_lock());
   6.283    _committed_words = _committed_words - v;
   6.284 +
   6.285 +  assert_committed_below_limit();
   6.286  }
   6.287  
   6.288  void VirtualSpaceList::inc_virtual_space_count() {
   6.289 @@ -1025,8 +1086,8 @@
   6.290      if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
   6.291        // Unlink it from the list
   6.292        if (prev_vsl == vsl) {
   6.293 -        // This is the case of the current note being the first note.
   6.294 -        assert(vsl == virtual_space_list(), "Expected to be the first note");
   6.295 +        // This is the case of the current node being the first node.
   6.296 +        assert(vsl == virtual_space_list(), "Expected to be the first node");
   6.297          set_virtual_space_list(vsl->next());
   6.298        } else {
   6.299          prev_vsl->set_next(vsl->next());
   6.300 @@ -1054,7 +1115,7 @@
   6.301  #endif
   6.302  }
   6.303  
   6.304 -VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
   6.305 +VirtualSpaceList::VirtualSpaceList(size_t word_size) :
   6.306                                     _is_class(false),
   6.307                                     _virtual_space_list(NULL),
   6.308                                     _current_virtual_space(NULL),
   6.309 @@ -1063,9 +1124,7 @@
   6.310                                     _virtual_space_count(0) {
   6.311    MutexLockerEx cl(SpaceManager::expand_lock(),
   6.312                     Mutex::_no_safepoint_check_flag);
   6.313 -  bool initialization_succeeded = grow_vs(word_size);
   6.314 -  assert(initialization_succeeded,
   6.315 -    " VirtualSpaceList initialization should not fail");
   6.316 +  create_new_virtual_space(word_size);
   6.317  }
   6.318  
   6.319  VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
   6.320 @@ -1079,8 +1138,9 @@
   6.321                     Mutex::_no_safepoint_check_flag);
   6.322    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
   6.323    bool succeeded = class_entry->initialize();
   6.324 -  assert(succeeded, " VirtualSpaceList initialization should not fail");
   6.325 -  link_vs(class_entry);
   6.326 +  if (succeeded) {
   6.327 +    link_vs(class_entry);
   6.328 +  }
   6.329  }
   6.330  
   6.331  size_t VirtualSpaceList::free_bytes() {
   6.332 @@ -1088,14 +1148,24 @@
   6.333  }
   6.334  
   6.335  // Allocate another meta virtual space and add it to the list.
   6.336 -bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
   6.337 +bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
   6.338    assert_lock_strong(SpaceManager::expand_lock());
   6.339 -  if (vs_word_size == 0) {
   6.340 +
   6.341 +  if (is_class()) {
   6.342 +    assert(false, "We currently don't support more than one VirtualSpace for"
   6.343 +                  " the compressed class space. The initialization of the"
   6.344 +                  " CCS uses another code path and should not hit this path.");
   6.345      return false;
   6.346    }
   6.347 +
   6.348 +  if (vs_word_size == 0) {
   6.349 +    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
   6.350 +    return false;
   6.351 +  }
   6.352 +
   6.353    // Reserve the space
   6.354    size_t vs_byte_size = vs_word_size * BytesPerWord;
   6.355 -  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
   6.356 +  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
   6.357  
   6.358    // Allocate the meta virtual space and initialize it.
   6.359    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
   6.360 @@ -1103,7 +1173,8 @@
   6.361      delete new_entry;
   6.362      return false;
   6.363    } else {
   6.364 -    assert(new_entry->reserved_words() == vs_word_size, "Must be");
   6.365 +    assert(new_entry->reserved_words() == vs_word_size,
   6.366 +        "Reserved memory size differs from requested memory size");
   6.367      // ensure lock-free iteration sees fully initialized node
   6.368      OrderAccess::storestore();
   6.369      link_vs(new_entry);
   6.370 @@ -1130,20 +1201,67 @@
   6.371    }
   6.372  }
   6.373  
   6.374 -bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
   6.375 +bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
   6.376 +                                      size_t min_words,
   6.377 +                                      size_t preferred_words) {
   6.378    size_t before = node->committed_words();
   6.379  
   6.380 -  bool result = node->expand_by(word_size, pre_touch);
   6.381 +  bool result = node->expand_by(min_words, preferred_words);
   6.382  
   6.383    size_t after = node->committed_words();
   6.384  
   6.385    // after and before can be the same if the memory was pre-committed.
   6.386 -  assert(after >= before, "Must be");
   6.387 +  assert(after >= before, "Inconsistency");
   6.388    inc_committed_words(after - before);
   6.389  
   6.390    return result;
   6.391  }
   6.392  
   6.393 +bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
   6.394 +  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
   6.395 +  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
   6.396 +  assert(min_words <= preferred_words, "Invalid arguments");
   6.397 +
   6.398 +  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    6.399 +    return false;
   6.400 +  }
   6.401 +
   6.402 +  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
   6.403 +  if (allowed_expansion_words < min_words) {
   6.404 +    return false;
   6.405 +  }
   6.406 +
   6.407 +  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
   6.408 +
    6.409 +  // Commit more memory from the current virtual space.
   6.410 +  bool vs_expanded = expand_node_by(current_virtual_space(),
   6.411 +                                    min_words,
   6.412 +                                    max_expansion_words);
   6.413 +  if (vs_expanded) {
   6.414 +    return true;
   6.415 +  }
   6.416 +
   6.417 +  // Get another virtual space.
   6.418 +  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
   6.419 +  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
   6.420 +
   6.421 +  if (create_new_virtual_space(grow_vs_words)) {
   6.422 +    if (current_virtual_space()->is_pre_committed()) {
   6.423 +      // The memory was pre-committed, so we are done here.
   6.424 +      assert(min_words <= current_virtual_space()->committed_words(),
   6.425 +          "The new VirtualSpace was pre-committed, so it"
   6.426 +          "should be large enough to fit the alloc request.");
   6.427 +      return true;
   6.428 +    }
   6.429 +
   6.430 +    return expand_node_by(current_virtual_space(),
   6.431 +                          min_words,
   6.432 +                          max_expansion_words);
   6.433 +  }
   6.434 +
   6.435 +  return false;
   6.436 +}
   6.437 +
   6.438  Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
   6.439                                             size_t grow_chunks_by_words,
   6.440                                             size_t medium_chunk_bunch) {
   6.441 @@ -1151,63 +1269,27 @@
   6.442    // Allocate a chunk out of the current virtual space.
   6.443    Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
   6.444  
   6.445 -  if (next == NULL) {
   6.446 -    // Not enough room in current virtual space.  Try to commit
   6.447 -    // more space.
   6.448 -    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
   6.449 -                                     grow_chunks_by_words);
   6.450 -    size_t page_size_words = os::vm_page_size() / BytesPerWord;
   6.451 -    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
   6.452 -                                                        page_size_words);
   6.453 -    bool vs_expanded =
   6.454 -      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
   6.455 -    if (!vs_expanded) {
   6.456 -      // Should the capacity of the metaspaces be expanded for
   6.457 -      // this allocation?  If it's the virtual space for classes and is
   6.458 -      // being used for CompressedHeaders, don't allocate a new virtualspace.
   6.459 -      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
   6.460 -        // Get another virtual space.
   6.461 -        size_t allocation_aligned_expand_words =
   6.462 -            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
   6.463 -        size_t grow_vs_words =
   6.464 -            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
   6.465 -        if (grow_vs(grow_vs_words)) {
   6.466 -          // Got it.  It's on the list now.  Get a chunk from it.
   6.467 -          assert(current_virtual_space()->expanded_words() == 0,
   6.468 -              "New virtual space nodes should not have expanded");
   6.469 -
   6.470 -          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
   6.471 -                                                              page_size_words);
   6.472 -          // We probably want to expand by aligned_expand_vs_by_words here.
   6.473 -          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
   6.474 -          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
   6.475 -        }
   6.476 -      } else {
   6.477 -        // Allocation will fail and induce a GC
   6.478 -        if (TraceMetadataChunkAllocation && Verbose) {
   6.479 -          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
   6.480 -            " Fail instead of expand the metaspace");
   6.481 -        }
   6.482 -      }
   6.483 -    } else {
   6.484 -      // The virtual space expanded, get a new chunk
   6.485 -      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
   6.486 -      assert(next != NULL, "Just expanded, should succeed");
   6.487 -    }
   6.488 +  if (next != NULL) {
   6.489 +    return next;
   6.490    }
   6.491  
   6.492 -  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
   6.493 -         "New chunk is still on some list");
   6.494 -  return next;
   6.495 -}
   6.496 -
   6.497 -Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
   6.498 -                                                      size_t chunk_bunch) {
   6.499 -  // Get a chunk from the chunk freelist
   6.500 -  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
   6.501 -                                       chunk_word_size,
   6.502 -                                       chunk_bunch);
   6.503 -  return new_chunk;
   6.504 +  // The expand amount is currently only determined by the requested sizes
   6.505 +  // and not how much committed memory is left in the current virtual space.
   6.506 +
   6.507 +  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
   6.508 +  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
   6.509 +  if (min_word_size >= preferred_word_size) {
   6.510 +    // Can happen when humongous chunks are allocated.
   6.511 +    preferred_word_size = min_word_size;
   6.512 +  }
   6.513 +
   6.514 +  bool expanded = expand_by(min_word_size, preferred_word_size);
   6.515 +  if (expanded) {
   6.516 +    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
   6.517 +    assert(next != NULL, "The allocation was expected to succeed after the expansion");
   6.518 +  }
   6.519 +
    6.520 +  return next;
   6.521  }
   6.522  
   6.523  void VirtualSpaceList::print_on(outputStream* st) const {
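Note: get_new_chunk() is now a straight-line sequence: try the current
virtual space, align the (min, preferred) expansion sizes to the commit
granularity, expand, and retry once. A condensed restatement of that control
flow follows; Chunk, try_allocate and expand are stand-ins for the HotSpot
types and calls:

    #include <cstddef>

    struct Chunk;
    Chunk* try_allocate(size_t words);               // stand-in: get_chunk_vs()
    bool   expand(size_t min_w, size_t preferred_w); // stand-in: expand_by()

    static size_t align_up(size_t v, size_t a) { return (v + a - 1) / a * a; }

    Chunk* get_new_chunk(size_t grow_words, size_t bunch_words, size_t align_words) {
      Chunk* next = try_allocate(grow_words);
      if (next != nullptr) return next;      // fast path: room in current space

      size_t min_w       = align_up(grow_words,  align_words);
      size_t preferred_w = align_up(bunch_words, align_words);
      if (min_w > preferred_w) preferred_w = min_w;  // humongous chunk case

      if (expand(min_w, preferred_w)) {
        next = try_allocate(grow_words);     // expansion succeeded; retry once
      }
      return next;  // may still be NULL; the caller induces a GC
    }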
   6.524 @@ -1256,96 +1338,96 @@
   6.525  // Calculate the amount to increase the high water mark (HWM).
   6.526  // Increase by a minimum amount (MinMetaspaceExpansion) so that
   6.527  // another expansion is not requested too soon.  If that is not
   6.528 -// enough to satisfy the allocation (i.e. big enough for a word_size
   6.529 -// allocation), increase by MaxMetaspaceExpansion.  If that is still
   6.530 -// not enough, expand by the size of the allocation (word_size) plus
   6.531 -// some.
   6.532 -size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
   6.533 -  size_t before_inc = MetaspaceGC::capacity_until_GC();
   6.534 -  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
   6.535 -  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
   6.536 -  size_t page_size_words = os::vm_page_size() / BytesPerWord;
   6.537 -  size_t size_delta_words = align_size_up(word_size, page_size_words);
   6.538 -  size_t delta_words = MAX2(size_delta_words, min_delta_words);
   6.539 -  if (delta_words > min_delta_words) {
   6.540 +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
   6.541 +// If that is still not enough, expand by the size of the allocation
   6.542 +// plus some.
   6.543 +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
   6.544 +  size_t min_delta = MinMetaspaceExpansion;
   6.545 +  size_t max_delta = MaxMetaspaceExpansion;
   6.546 +  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
   6.547 +
   6.548 +  if (delta <= min_delta) {
   6.549 +    delta = min_delta;
   6.550 +  } else if (delta <= max_delta) {
   6.551      // Don't want to hit the high water mark on the next
   6.552      // allocation so make the delta greater than just enough
   6.553      // for this allocation.
   6.554 -    delta_words = MAX2(delta_words, max_delta_words);
   6.555 -    if (delta_words > max_delta_words) {
   6.556 -      // This allocation is large but the next ones are probably not
   6.557 -      // so increase by the minimum.
   6.558 -      delta_words = delta_words + min_delta_words;
   6.559 -    }
   6.560 +    delta = max_delta;
   6.561 +  } else {
   6.562 +    // This allocation is large but the next ones are probably not
   6.563 +    // so increase by the minimum.
   6.564 +    delta = delta + min_delta;
   6.565    }
   6.566 -  return delta_words;
   6.567 +
   6.568 +  assert_is_size_aligned(delta, Metaspace::commit_alignment());
   6.569 +
   6.570 +  return delta;
   6.571  }
   6.572  
   6.573 -bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
   6.574 -
   6.575 -  // If the user wants a limit, impose one.
   6.576 -  // The reason for someone using this flag is to limit reserved space.  So
   6.577 -  // for non-class virtual space, compare against virtual spaces that are reserved.
   6.578 -  // For class virtual space, we only compare against the committed space, not
   6.579 -  // reserved space, because this is a larger space prereserved for compressed
   6.580 -  // class pointers.
   6.581 -  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
   6.582 -    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
   6.583 -    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
   6.584 -    size_t real_allocated     = nonclass_allocated + class_allocated;
   6.585 -    if (real_allocated >= MaxMetaspaceSize) {
   6.586 +size_t MetaspaceGC::capacity_until_GC() {
   6.587 +  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
    6.588 +  assert(value >= MetaspaceSize, "Not initialized properly?");
   6.589 +  return value;
   6.590 +}
   6.591 +
   6.592 +size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
   6.593 +  assert_is_size_aligned(v, Metaspace::commit_alignment());
   6.594 +
   6.595 +  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
   6.596 +}
   6.597 +
   6.598 +size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
   6.599 +  assert_is_size_aligned(v, Metaspace::commit_alignment());
   6.600 +
   6.601 +  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
   6.602 +}
   6.603 +
   6.604 +bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   6.605 +  // Check if the compressed class space is full.
   6.606 +  if (is_class && Metaspace::using_class_space()) {
   6.607 +    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
   6.608 +    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
   6.609        return false;
   6.610      }
   6.611    }
   6.612  
   6.613 -  // Class virtual space should always be expanded.  Call GC for the other
   6.614 -  // metadata virtual space.
   6.615 -  if (Metaspace::using_class_space() &&
   6.616 -      (vsl == Metaspace::class_space_list())) return true;
   6.617 -
   6.618 -  // If this is part of an allocation after a GC, expand
   6.619 -  // unconditionally.
   6.620 -  if (MetaspaceGC::expand_after_GC()) {
   6.621 -    return true;
   6.622 +  // Check if the user has imposed a limit on the metaspace memory.
   6.623 +  size_t committed_bytes = MetaspaceAux::committed_bytes();
   6.624 +  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
   6.625 +    return false;
   6.626    }
   6.627  
   6.628 -
   6.629 -  // If the capacity is below the minimum capacity, allow the
   6.630 -  // expansion.  Also set the high-water-mark (capacity_until_GC)
   6.631 -  // to that minimum capacity so that a GC will not be induced
   6.632 -  // until that minimum capacity is exceeded.
   6.633 -  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
   6.634 -  size_t metaspace_size_bytes = MetaspaceSize;
   6.635 -  if (committed_capacity_bytes < metaspace_size_bytes ||
   6.636 -      capacity_until_GC() == 0) {
   6.637 -    set_capacity_until_GC(metaspace_size_bytes);
   6.638 -    return true;
   6.639 -  } else {
   6.640 -    if (committed_capacity_bytes < capacity_until_GC()) {
   6.641 -      return true;
   6.642 -    } else {
   6.643 -      if (TraceMetadataChunkAllocation && Verbose) {
   6.644 -        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
   6.645 -                        "  capacity_until_GC " SIZE_FORMAT
   6.646 -                        "  allocated_capacity_bytes " SIZE_FORMAT,
   6.647 -                        word_size,
   6.648 -                        capacity_until_GC(),
   6.649 -                        MetaspaceAux::allocated_capacity_bytes());
   6.650 -      }
   6.651 -      return false;
   6.652 -    }
   6.653 +  return true;
   6.654 +}
   6.655 +
   6.656 +size_t MetaspaceGC::allowed_expansion() {
   6.657 +  size_t committed_bytes = MetaspaceAux::committed_bytes();
   6.658 +
   6.659 +  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
   6.660 +
    6.661 +  // Always grant expansion if we are initializing the JVM,
   6.662 +  // or if the GC_locker is preventing GCs.
   6.663 +  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
   6.664 +    return left_until_max / BytesPerWord;
   6.665    }
   6.666 +
   6.667 +  size_t capacity_until_gc = capacity_until_GC();
   6.668 +
   6.669 +  if (capacity_until_gc <= committed_bytes) {
   6.670 +    return 0;
   6.671 +  }
   6.672 +
   6.673 +  size_t left_until_GC = capacity_until_gc - committed_bytes;
   6.674 +  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
   6.675 +
   6.676 +  return left_to_commit / BytesPerWord;
   6.677  }
   6.678  
   6.679 -
   6.680 -
   6.681  void MetaspaceGC::compute_new_size() {
   6.682    assert(_shrink_factor <= 100, "invalid shrink factor");
   6.683    uint current_shrink_factor = _shrink_factor;
   6.684    _shrink_factor = 0;
   6.685  
   6.686 -  // Until a faster way of calculating the "used" quantity is implemented,
   6.687 -  // use "capacity".
   6.688    const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
   6.689    const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
   6.690  
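Note: the rewritten delta_capacity_until_GC() works in bytes and always
returns a commit-aligned delta. A worked restatement with assumed values
(4K commit alignment, 256K MinMetaspaceExpansion, 4M MaxMetaspaceExpansion;
treat these constants as assumptions, not authoritative defaults):

    #include <cassert>
    #include <cstddef>

    static size_t align_up(size_t v, size_t a) { return (v + a - 1) / a * a; }

    size_t delta_capacity_until_gc(size_t bytes) {
      const size_t k_min = 256 * 1024, k_max = 4 * 1024 * 1024, k_align = 4 * 1024;
      size_t delta = align_up(bytes, k_align);
      if (delta <= k_min) return k_min;  // small request: take the minimum step
      if (delta <= k_max) return k_max;  // medium request: take the maximum step
      return delta + k_min;              // large request: the size plus some headroom
    }

    int main() {
      assert(delta_capacity_until_gc(10 * 1024) == 256 * 1024);
      assert(delta_capacity_until_gc(1024 * 1024) == 4 * 1024 * 1024);
      assert(delta_capacity_until_gc(6 * 1024 * 1024) == (6 * 1024 + 256) * 1024);
      return 0;
    }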
   6.691 @@ -1377,9 +1459,10 @@
   6.692      // If we have less capacity below the metaspace HWM, then
   6.693      // increment the HWM.
   6.694      size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
   6.695 +    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
   6.696      // Don't expand unless it's significant
   6.697      if (expand_bytes >= MinMetaspaceExpansion) {
   6.698 -      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
   6.699 +      MetaspaceGC::inc_capacity_until_GC(expand_bytes);
   6.700      }
   6.701      if (PrintGCDetails && Verbose) {
   6.702        size_t new_capacity_until_GC = capacity_until_GC;
   6.703 @@ -1436,6 +1519,9 @@
   6.704        // on the third call, and 100% by the fourth call.  But if we recompute
   6.705        // size without shrinking, it goes back to 0%.
   6.706        shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
   6.707 +
   6.708 +      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
   6.709 +
   6.710        assert(shrink_bytes <= max_shrink_bytes,
   6.711          err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
   6.712            shrink_bytes, max_shrink_bytes));
   6.713 @@ -1467,7 +1553,7 @@
   6.714    // Don't shrink unless it's significant
   6.715    if (shrink_bytes >= MinMetaspaceExpansion &&
   6.716        ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
   6.717 -    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
   6.718 +    MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
   6.719    }
   6.720  }
   6.721  
   6.722 @@ -1700,7 +1786,6 @@
   6.723      assert(free_list != NULL, "Sanity check");
   6.724  
   6.725      chunk = free_list->head();
   6.726 -    debug_only(Metachunk* debug_head = chunk;)
   6.727  
   6.728      if (chunk == NULL) {
   6.729        return NULL;
   6.730 @@ -1709,9 +1794,6 @@
   6.731      // Remove the chunk as the head of the list.
   6.732      free_list->remove_chunk(chunk);
   6.733  
   6.734 -    // Chunk is being removed from the chunks free list.
   6.735 -    dec_free_chunks_total(chunk->capacity_word_size());
   6.736 -
   6.737      if (TraceMetadataChunkAllocation && Verbose) {
   6.738        gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
   6.739                               PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
   6.740 @@ -1722,21 +1804,22 @@
   6.741        word_size,
   6.742        FreeBlockDictionary<Metachunk>::atLeast);
   6.743  
   6.744 -    if (chunk != NULL) {
   6.745 -      if (TraceMetadataHumongousAllocation) {
   6.746 -        size_t waste = chunk->word_size() - word_size;
   6.747 -        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
   6.748 -                               SIZE_FORMAT " for requested size " SIZE_FORMAT
   6.749 -                               " waste " SIZE_FORMAT,
   6.750 -                               chunk->word_size(), word_size, waste);
   6.751 -      }
   6.752 -      // Chunk is being removed from the chunks free list.
   6.753 -      dec_free_chunks_total(chunk->capacity_word_size());
   6.754 -    } else {
   6.755 +    if (chunk == NULL) {
   6.756        return NULL;
   6.757      }
   6.758 +
   6.759 +    if (TraceMetadataHumongousAllocation) {
   6.760 +      size_t waste = chunk->word_size() - word_size;
   6.761 +      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
   6.762 +                             SIZE_FORMAT " for requested size " SIZE_FORMAT
   6.763 +                             " waste " SIZE_FORMAT,
   6.764 +                             chunk->word_size(), word_size, waste);
   6.765 +    }
   6.766    }
   6.767  
   6.768 +  // Chunk is being removed from the chunks free list.
   6.769 +  dec_free_chunks_total(chunk->capacity_word_size());
   6.770 +
   6.771    // Remove it from the links to this freelist
   6.772    chunk->set_next(NULL);
   6.773    chunk->set_prev(NULL);
   6.774 @@ -2002,15 +2085,21 @@
   6.775    size_t grow_chunks_by_words = calc_chunk_size(word_size);
   6.776    Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
   6.777  
   6.778 +  if (next != NULL) {
   6.779 +    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
   6.780 +  }
   6.781 +
   6.782 +  MetaWord* mem = NULL;
   6.783 +
   6.784    // If a chunk was available, add it to the in-use chunk list
   6.785    // and do an allocation from it.
   6.786    if (next != NULL) {
   6.787 -    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
   6.788      // Add to this manager's list of chunks in use.
   6.789      add_chunk(next, false);
   6.790 -    return next->allocate(word_size);
   6.791 +    mem = next->allocate(word_size);
   6.792    }
   6.793 -  return NULL;
   6.794 +
   6.795 +  return mem;
   6.796  }
   6.797  
   6.798  void SpaceManager::print_on(outputStream* st) const {
   6.799 @@ -2366,6 +2455,7 @@
   6.800      inc_used_metrics(word_size);
   6.801      return current_chunk()->allocate(word_size); // caller handles null result
   6.802    }
   6.803 +
   6.804    if (current_chunk() != NULL) {
   6.805      result = current_chunk()->allocate(word_size);
   6.806    }
   6.807 @@ -2373,7 +2463,8 @@
   6.808    if (result == NULL) {
   6.809      result = grow_and_allocate(word_size);
   6.810    }
   6.811 -  if (result != 0) {
   6.812 +
   6.813 +  if (result != NULL) {
   6.814      inc_used_metrics(word_size);
   6.815      assert(result != (MetaWord*) chunks_in_use(MediumIndex),
   6.816             "Head of the list is being allocated");
   6.817 @@ -2639,24 +2730,26 @@
   6.818  void MetaspaceAux::print_on(outputStream* out) {
   6.819    Metaspace::MetadataType nct = Metaspace::NonClassType;
   6.820  
   6.821 -  out->print_cr(" Metaspace total "
   6.822 -                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
   6.823 -                " reserved " SIZE_FORMAT "K",
   6.824 -                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
   6.825 -
   6.826 -  out->print_cr("  data space     "
   6.827 -                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
   6.828 -                " reserved " SIZE_FORMAT "K",
   6.829 -                allocated_capacity_bytes(nct)/K,
   6.830 -                allocated_used_bytes(nct)/K,
   6.831 -                reserved_bytes(nct)/K);
   6.832 +  out->print_cr(" Metaspace       "
   6.833 +                "used "      SIZE_FORMAT "K, "
   6.834 +                "capacity "  SIZE_FORMAT "K, "
   6.835 +                "committed " SIZE_FORMAT "K, "
   6.836 +                "reserved "  SIZE_FORMAT "K",
   6.837 +                allocated_used_bytes()/K,
   6.838 +                allocated_capacity_bytes()/K,
   6.839 +                committed_bytes()/K,
   6.840 +                reserved_bytes()/K);
   6.841 +
   6.842    if (Metaspace::using_class_space()) {
   6.843      Metaspace::MetadataType ct = Metaspace::ClassType;
   6.844      out->print_cr("  class space    "
   6.845 -                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
   6.846 -                  " reserved " SIZE_FORMAT "K",
   6.847 +                  "used "      SIZE_FORMAT "K, "
   6.848 +                  "capacity "  SIZE_FORMAT "K, "
   6.849 +                  "committed " SIZE_FORMAT "K, "
   6.850 +                  "reserved "  SIZE_FORMAT "K",
   6.851 +                  allocated_used_bytes(ct)/K,
   6.852                    allocated_capacity_bytes(ct)/K,
   6.853 -                  allocated_used_bytes(ct)/K,
   6.854 +                  committed_bytes(ct)/K,
   6.855                    reserved_bytes(ct)/K);
   6.856    }
   6.857  }
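Note: with the reworked MetaspaceAux::print_on(), the metaspace summary in
the GC logs reports all four quantities per space. With invented numbers,
the output would look like:

     Metaspace       used 2425K, capacity 4498K, committed 4864K, reserved 1056768K
      class space    used 262K, capacity 386K, committed 512K, reserved 1048576K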
   6.858 @@ -2808,6 +2901,9 @@
   6.859  size_t Metaspace::_first_chunk_word_size = 0;
   6.860  size_t Metaspace::_first_class_chunk_word_size = 0;
   6.861  
   6.862 +size_t Metaspace::_commit_alignment = 0;
   6.863 +size_t Metaspace::_reserve_alignment = 0;
   6.864 +
   6.865  Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
   6.866    initialize(lock, type);
   6.867  }
   6.868 @@ -2869,21 +2965,30 @@
   6.869    assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   6.870    assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
   6.871           "Metaspace size is too big");
   6.872 +  assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
   6.873 +  assert_is_ptr_aligned(cds_base,                _reserve_alignment);
   6.874 +  assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
   6.875 +
   6.876 +  // Don't use large pages for the class space.
   6.877 +  bool large_pages = false;
   6.878  
   6.879    ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
   6.880 -                                             os::vm_allocation_granularity(),
   6.881 -                                             false, requested_addr, 0);
   6.882 +                                             _reserve_alignment,
   6.883 +                                             large_pages,
   6.884 +                                             requested_addr, 0);
   6.885    if (!metaspace_rs.is_reserved()) {
   6.886      if (UseSharedSpaces) {
   6.887 +      size_t increment = align_size_up(1*G, _reserve_alignment);
   6.888 +
   6.889        // Keep trying to allocate the metaspace, increasing the requested_addr
   6.890        // by 1GB each time, until we reach an address that will no longer allow
   6.891        // use of CDS with compressed klass pointers.
   6.892        char *addr = requested_addr;
   6.893 -      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
   6.894 -             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
   6.895 -        addr = addr + 1*G;
   6.896 +      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
   6.897 +             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
   6.898 +        addr = addr + increment;
   6.899          metaspace_rs = ReservedSpace(class_metaspace_size(),
   6.900 -                                     os::vm_allocation_granularity(), false, addr, 0);
   6.901 +                                     _reserve_alignment, large_pages, addr, 0);
   6.902        }
   6.903      }
   6.904  
   6.905 @@ -2894,7 +2999,7 @@
   6.906      // So, UseCompressedClassPointers cannot be turned off at this point.
   6.907      if (!metaspace_rs.is_reserved()) {
   6.908        metaspace_rs = ReservedSpace(class_metaspace_size(),
   6.909 -                                   os::vm_allocation_granularity(), false);
   6.910 +                                   _reserve_alignment, large_pages);
   6.911        if (!metaspace_rs.is_reserved()) {
   6.912          vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
   6.913                                                class_metaspace_size()));
   6.914 @@ -2933,34 +3038,96 @@
   6.915    assert(using_class_space(), "Must be using class space");
   6.916    _class_space_list = new VirtualSpaceList(rs);
   6.917    _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
   6.918 +
   6.919 +  if (!_class_space_list->initialization_succeeded()) {
   6.920 +    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
   6.921 +  }
   6.922  }
   6.923  
   6.924  #endif
   6.925  
    6.926 +// Align down. If aligning results in 0, return 'alignment'.
   6.927 +static size_t restricted_align_down(size_t size, size_t alignment) {
   6.928 +  return MAX2(alignment, align_size_down_(size, alignment));
   6.929 +}
   6.930 +
   6.931 +void Metaspace::ergo_initialize() {
   6.932 +  if (DumpSharedSpaces) {
   6.933 +    // Using large pages when dumping the shared archive is currently not implemented.
   6.934 +    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
   6.935 +  }
   6.936 +
   6.937 +  size_t page_size = os::vm_page_size();
   6.938 +  if (UseLargePages && UseLargePagesInMetaspace) {
   6.939 +    page_size = os::large_page_size();
   6.940 +  }
   6.941 +
   6.942 +  _commit_alignment  = page_size;
   6.943 +  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
   6.944 +
   6.945 +  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
    6.946 +  // override whether MaxMetaspaceSize was set on the command line or not.
   6.947 +  // This information is needed later to conform to the specification of the
   6.948 +  // java.lang.management.MemoryUsage API.
   6.949 +  //
   6.950 +  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
   6.951 +  // globals.hpp to the aligned value, but this is not possible, since the
   6.952 +  // alignment depends on other flags being parsed.
   6.953 +  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
   6.954 +
   6.955 +  if (MetaspaceSize > MaxMetaspaceSize) {
   6.956 +    MetaspaceSize = MaxMetaspaceSize;
   6.957 +  }
   6.958 +
   6.959 +  MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
   6.960 +
   6.961 +  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
   6.962 +
   6.963 +  if (MetaspaceSize < 256*K) {
   6.964 +    vm_exit_during_initialization("Too small initial Metaspace size");
   6.965 +  }
   6.966 +
   6.967 +  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
   6.968 +  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
   6.969 +
   6.970 +  CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
   6.971 +  set_class_metaspace_size(CompressedClassSpaceSize);
   6.972 +}
   6.973 +
   6.974  void Metaspace::global_initialize() {
   6.975    // Initialize the alignment for shared spaces.
   6.976    int max_alignment = os::vm_page_size();
   6.977    size_t cds_total = 0;
   6.978  
   6.979 -  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
   6.980 -                                         os::vm_allocation_granularity()));
   6.981 -
   6.982    MetaspaceShared::set_max_alignment(max_alignment);
   6.983  
   6.984    if (DumpSharedSpaces) {
   6.985 -    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
   6.986 +    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
   6.987      SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
   6.988 -    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
   6.989 -    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
   6.990 +    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
   6.991 +    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
   6.992  
   6.993      // Initialize with the sum of the shared space sizes.  The read-only
   6.994      // and read write metaspace chunks will be allocated out of this and the
   6.995      // remainder is the misc code and data chunks.
   6.996      cds_total = FileMapInfo::shared_spaces_size();
   6.997 +    cds_total = align_size_up(cds_total, _reserve_alignment);
   6.998      _space_list = new VirtualSpaceList(cds_total/wordSize);
   6.999      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  6.1000  
  6.1001 +    if (!_space_list->initialization_succeeded()) {
  6.1002 +      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
  6.1003 +    }
  6.1004 +
  6.1005  #ifdef _LP64
  6.1006 +    if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
  6.1007 +      vm_exit_during_initialization("Unable to dump shared archive.",
  6.1008 +          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
  6.1009 +                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
  6.1010 +                  "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
  6.1011 +                  cds_total + class_metaspace_size(), (size_t)max_juint));
  6.1012 +    }
  6.1013 +
  6.1014      // Set the compressed klass pointer base so that decoding of these pointers works
  6.1015      // properly when creating the shared archive.
  6.1016      assert(UseCompressedOops && UseCompressedClassPointers,
  6.1017 @@ -2971,9 +3138,6 @@
  6.1018                               _space_list->current_virtual_space()->bottom());
  6.1019      }
  6.1020  
  6.1021 -    // Set the shift to zero.
  6.1022 -    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
  6.1023 -           "CDS region is too large");
  6.1024      Universe::set_narrow_klass_shift(0);
  6.1025  #endif
  6.1026  
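For context on the new max_juint guard in this hunk: with Universe::set_narrow_klass_shift(0), a compressed class pointer is a plain 32-bit offset from the encoding base, so the shared archive plus the compressed class space must together fit below 2^32 - 1 bytes (max_juint, just under 4 GB). For example, a 100 MB archive leaves a little under 4 GB of room for CompressedClassSpaceSize before the dump must be aborted.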
  6.1027 @@ -2992,12 +3156,12 @@
  6.1028        // Map in spaces now also
  6.1029        if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
  6.1030          FileMapInfo::set_current_info(mapinfo);
  6.1031 +        cds_total = FileMapInfo::shared_spaces_size();
  6.1032 +        cds_address = (address)mapinfo->region_base(0);
  6.1033        } else {
  6.1034          assert(!mapinfo->is_open() && !UseSharedSpaces,
  6.1035                 "archive file not closed or shared spaces not disabled.");
  6.1036        }
  6.1037 -      cds_total = FileMapInfo::shared_spaces_size();
  6.1038 -      cds_address = (address)mapinfo->region_base(0);
  6.1039      }
  6.1040  
  6.1041  #ifdef _LP64
  6.1042 @@ -3005,7 +3169,9 @@
  6.1043      // above the heap and above the CDS area (if it exists).
  6.1044      if (using_class_space()) {
  6.1045        if (UseSharedSpaces) {
  6.1046 -        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
  6.1047 +        char* cds_end = (char*)(cds_address + cds_total);
  6.1048 +        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
  6.1049 +        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
  6.1050        } else {
  6.1051          allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
  6.1052        }
  6.1053 @@ -3023,11 +3189,19 @@
  6.1054      _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  6.1055      // Arbitrarily set the initial virtual space to a multiple
  6.1056      // of the boot class loader size.
  6.1057 -    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
  6.1058 +    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  6.1059 +    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
  6.1060 +
  6.1061      // Initialize the list of virtual spaces.
  6.1062      _space_list = new VirtualSpaceList(word_size);
  6.1063      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  6.1064 +
  6.1065 +    if (!_space_list->initialization_succeeded()) {
  6.1066 +      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  6.1067 +    }
  6.1068    }
  6.1069 +
  6.1070 +  MetaspaceGC::initialize();
  6.1071  }
  6.1072  
  6.1073  Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  6.1074 @@ -3039,7 +3213,7 @@
  6.1075      return chunk;
  6.1076    }
  6.1077  
  6.1078 -  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
  6.1079 +  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
  6.1080  }
  6.1081  
  6.1082  void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  6.1083 @@ -3112,19 +3286,18 @@
  6.1084  }
  6.1085  
  6.1086  MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  6.1087 -  MetaWord* result;
  6.1088 -  MetaspaceGC::set_expand_after_GC(true);
  6.1089 -  size_t before_inc = MetaspaceGC::capacity_until_GC();
  6.1090 -  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  6.1091 -  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  6.1092 +  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  6.1093 +  assert(delta_bytes > 0, "Must be");
  6.1094 +
  6.1095 +  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  6.1096 +  size_t before_inc = after_inc - delta_bytes;
  6.1097 +
  6.1098    if (PrintGCDetails && Verbose) {
  6.1099      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
  6.1100 -      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  6.1101 +        " to " SIZE_FORMAT, before_inc, after_inc);
  6.1102    }
  6.1103  
  6.1104 -  result = allocate(word_size, mdtype);
  6.1105 -
  6.1106 -  return result;
  6.1107 +  return allocate(word_size, mdtype);
  6.1108  }
  6.1109  
  6.1110  // Space allocated in the Metaspace.  This may
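The rewritten expand_and_allocate above derives before_inc from the return value of inc_capacity_until_GC instead of sampling capacity_until_GC() separately: with _capacity_until_GC now updated concurrently (see the volatile declaration in metaspace.hpp below), two independent reads could interleave with another thread's increment. A minimal standalone sketch of that pattern, using std::atomic in place of HotSpot's internal atomics:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    static std::atomic<size_t> capacity_until_gc{0};

    // Returns the value *after* the increment; fetch_add returns the old
    // value, so one atomic op yields a consistent before/after pair.
    static size_t inc_capacity_until_gc(size_t delta_bytes) {
      return capacity_until_gc.fetch_add(delta_bytes) + delta_bytes;
    }

    int main() {
      capacity_until_gc = 20 * 1024 * 1024;      // stand-in for MetaspaceSize
      size_t after  = inc_capacity_until_gc(4096);
      size_t before = after - 4096;              // consistent snapshot
      std::printf("Increase capacity to GC from %zu to %zu\n", before, after);
      return 0;
    }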
  6.1111 @@ -3206,6 +3379,7 @@
  6.1112    }
  6.1113  }
  6.1114  
  6.1115 +
  6.1116  Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  6.1117                                bool read_only, MetaspaceObj::Type type, TRAPS) {
  6.1118    if (HAS_PENDING_EXCEPTION) {
  6.1119 @@ -3213,20 +3387,16 @@
  6.1120      return NULL;  // caller does a CHECK_NULL too
  6.1121    }
  6.1122  
  6.1123 -  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
  6.1124 -
  6.1125 -  // SSS: Should we align the allocations and make sure the sizes are aligned.
  6.1126 -  MetaWord* result = NULL;
  6.1127 -
  6.1128    assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
  6.1129          "ClassLoaderData::the_null_class_loader_data() should have been used.");
  6.1130 +
  6.1131    // Allocate in metaspaces without taking out a lock, because it deadlocks
  6.1132    // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  6.1133    // to revisit this for application class data sharing.
  6.1134    if (DumpSharedSpaces) {
  6.1135      assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
  6.1136      Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
  6.1137 -    result = space->allocate(word_size, NonClassType);
  6.1138 +    MetaWord* result = space->allocate(word_size, NonClassType);
  6.1139      if (result == NULL) {
  6.1140        report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  6.1141      } else {
  6.1142 @@ -3235,42 +3405,64 @@
  6.1143      return Metablock::initialize(result, word_size);
  6.1144    }
  6.1145  
  6.1146 -  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  6.1147 +  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
  6.1148 +
  6.1149 +  // Try to allocate metadata.
  6.1150 +  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  6.1151  
  6.1152    if (result == NULL) {
  6.1153 -    // Try to clean out some memory and retry.
  6.1154 -    result =
  6.1155 -      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
  6.1156 -        loader_data, word_size, mdtype);
  6.1157 -
  6.1158 -    // If result is still null, we are out of memory.
  6.1159 -    if (result == NULL) {
  6.1160 -      if (Verbose && TraceMetadataChunkAllocation) {
  6.1161 -        gclog_or_tty->print_cr("Metaspace allocation failed for size "
  6.1162 -          SIZE_FORMAT, word_size);
  6.1163 -        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
  6.1164 -        MetaspaceAux::dump(gclog_or_tty);
  6.1165 -      }
  6.1166 -      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  6.1167 -      const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
  6.1168 -                                                                     "Metadata space";
  6.1169 -      report_java_out_of_memory(space_string);
  6.1170 -
  6.1171 -      if (JvmtiExport::should_post_resource_exhausted()) {
  6.1172 -        JvmtiExport::post_resource_exhausted(
  6.1173 -            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
  6.1174 -            space_string);
  6.1175 -      }
  6.1176 -      if (is_class_space_allocation(mdtype)) {
  6.1177 -        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
  6.1178 -      } else {
  6.1179 -        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
  6.1180 -      }
  6.1181 +    // Allocation failed.
  6.1182 +    if (is_init_completed()) {
  6.1183 +      // Only start a GC if the bootstrapping has completed.
  6.1184 +
  6.1185 +      // Try to clean out some memory and retry.
  6.1186 +      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
  6.1187 +          loader_data, word_size, mdtype);
  6.1188      }
  6.1189    }
  6.1190 +
  6.1191 +  if (result == NULL) {
  6.1192 +    report_metadata_oome(loader_data, word_size, mdtype, THREAD);
  6.1193 +    // Will not reach here.
  6.1194 +    return NULL;
  6.1195 +  }
  6.1196 +
  6.1197    return Metablock::initialize(result, word_size);
  6.1198  }
  6.1199  
  6.1200 +void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
   6.1201 +  // Reaching this point means the allocation failed and we are out of memory.
  6.1202 +  if (Verbose && TraceMetadataChunkAllocation) {
  6.1203 +    gclog_or_tty->print_cr("Metaspace allocation failed for size "
  6.1204 +        SIZE_FORMAT, word_size);
  6.1205 +    if (loader_data->metaspace_or_null() != NULL) {
  6.1206 +      loader_data->dump(gclog_or_tty);
  6.1207 +    }
  6.1208 +    MetaspaceAux::dump(gclog_or_tty);
  6.1209 +  }
  6.1210 +
  6.1211 +  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  6.1212 +  const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
  6.1213 +                                                                 "Metadata space";
  6.1214 +  report_java_out_of_memory(space_string);
  6.1215 +
  6.1216 +  if (JvmtiExport::should_post_resource_exhausted()) {
  6.1217 +    JvmtiExport::post_resource_exhausted(
  6.1218 +        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
  6.1219 +        space_string);
  6.1220 +  }
  6.1221 +
  6.1222 +  if (!is_init_completed()) {
  6.1223 +    vm_exit_during_initialization("OutOfMemoryError", space_string);
  6.1224 +  }
  6.1225 +
  6.1226 +  if (is_class_space_allocation(mdtype)) {
  6.1227 +    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  6.1228 +  } else {
  6.1229 +    THROW_OOP(Universe::out_of_memory_error_metaspace());
  6.1230 +  }
  6.1231 +}
  6.1232 +
  6.1233  void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  6.1234    assert(DumpSharedSpaces, "sanity");
  6.1235  
     7.1 --- a/src/share/vm/memory/metaspace.hpp	Wed Oct 09 10:57:01 2013 +0200
     7.2 +++ b/src/share/vm/memory/metaspace.hpp	Mon Oct 07 15:51:08 2013 +0200
     7.3 @@ -87,9 +87,10 @@
     7.4    friend class MetaspaceAux;
     7.5  
     7.6   public:
     7.7 -  enum MetadataType {ClassType = 0,
     7.8 -                     NonClassType = ClassType + 1,
     7.9 -                     MetadataTypeCount = ClassType + 2
    7.10 +  enum MetadataType {
    7.11 +    ClassType,
    7.12 +    NonClassType,
    7.13 +    MetadataTypeCount
    7.14    };
    7.15    enum MetaspaceType {
    7.16      StandardMetaspaceType,
    7.17 @@ -103,6 +104,9 @@
    7.18   private:
    7.19    void initialize(Mutex* lock, MetaspaceType type);
    7.20  
    7.21 +  // Get the first chunk for a Metaspace.  Used for
    7.22 +  // special cases such as the boot class loader, reflection
    7.23 +  // class loader and anonymous class loader.
    7.24    Metachunk* get_initialization_chunk(MetadataType mdtype,
    7.25                                        size_t chunk_word_size,
    7.26                                        size_t chunk_bunch);
    7.27 @@ -123,6 +127,9 @@
    7.28    static size_t _first_chunk_word_size;
    7.29    static size_t _first_class_chunk_word_size;
    7.30  
    7.31 +  static size_t _commit_alignment;
    7.32 +  static size_t _reserve_alignment;
    7.33 +
    7.34    SpaceManager* _vsm;
    7.35    SpaceManager* vsm() const { return _vsm; }
    7.36  
    7.37 @@ -191,12 +198,17 @@
    7.38    Metaspace(Mutex* lock, MetaspaceType type);
    7.39    ~Metaspace();
    7.40  
    7.41 -  // Initialize globals for Metaspace
    7.42 +  static void ergo_initialize();
    7.43    static void global_initialize();
    7.44  
    7.45    static size_t first_chunk_word_size() { return _first_chunk_word_size; }
    7.46    static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
    7.47  
    7.48 +  static size_t reserve_alignment()       { return _reserve_alignment; }
    7.49 +  static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
    7.50 +  static size_t commit_alignment()        { return _commit_alignment; }
    7.51 +  static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
    7.52 +
    7.53    char*  bottom() const;
    7.54    size_t used_words_slow(MetadataType mdtype) const;
    7.55    size_t free_words_slow(MetadataType mdtype) const;
    7.56 @@ -219,6 +231,9 @@
    7.57    static void purge(MetadataType mdtype);
    7.58    static void purge();
    7.59  
    7.60 +  static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
    7.61 +                                   MetadataType mdtype, TRAPS);
    7.62 +
    7.63    void print_on(outputStream* st) const;
    7.64    // Debugging support
    7.65    void verify();
    7.66 @@ -352,17 +367,10 @@
    7.67  
    7.68  class MetaspaceGC : AllStatic {
    7.69  
    7.70 -  // The current high-water-mark for inducing a GC.  When
    7.71 -  // the capacity of all space in the virtual lists reaches this value,
    7.72 -  // a GC is induced and the value is increased.  This should be changed
    7.73 -  // to the space actually used for allocations to avoid affects of
    7.74 -  // fragmentation losses to partially used chunks.  Size is in words.
    7.75 -  static size_t _capacity_until_GC;
    7.76 -
    7.77 -  // After a GC is done any allocation that fails should try to expand
    7.78 -  // the capacity of the Metaspaces.  This flag is set during attempts
    7.79 -  // to allocate in the VMGCOperation that does the GC.
    7.80 -  static bool _expand_after_GC;
    7.81 +  // The current high-water-mark for inducing a GC.
    7.82 +  // When committed memory of all metaspaces reaches this value,
    7.83 +  // a GC is induced and the value is increased. Size is in bytes.
    7.84 +  static volatile intptr_t _capacity_until_GC;
    7.85  
    7.86    // For a CMS collection, signal that a concurrent collection should
    7.87    // be started.
    7.88 @@ -370,20 +378,16 @@
    7.89  
    7.90    static uint _shrink_factor;
    7.91  
    7.92 -  static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
    7.93 -
    7.94    static size_t shrink_factor() { return _shrink_factor; }
    7.95    void set_shrink_factor(uint v) { _shrink_factor = v; }
    7.96  
    7.97   public:
    7.98  
    7.99 -  static size_t capacity_until_GC() { return _capacity_until_GC; }
   7.100 -  static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
   7.101 -  static void dec_capacity_until_GC(size_t v) {
   7.102 -    _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
   7.103 -  }
   7.104 -  static bool expand_after_GC()           { return _expand_after_GC; }
   7.105 -  static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
   7.106 +  static void initialize() { _capacity_until_GC = MetaspaceSize; }
   7.107 +
   7.108 +  static size_t capacity_until_GC();
   7.109 +  static size_t inc_capacity_until_GC(size_t v);
   7.110 +  static size_t dec_capacity_until_GC(size_t v);
   7.111  
   7.112    static bool should_concurrent_collect() { return _should_concurrent_collect; }
   7.113    static void set_should_concurrent_collect(bool v) {
   7.114 @@ -391,11 +395,14 @@
   7.115    }
   7.116  
   7.117    // The amount to increase the high-water-mark (_capacity_until_GC)
   7.118 -  static size_t delta_capacity_until_GC(size_t word_size);
   7.119 +  static size_t delta_capacity_until_GC(size_t bytes);
   7.120  
   7.121 -  // It is expected that this will be called when the current capacity
   7.122 -  // has been used and a GC should be considered.
   7.123 -  static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
    7.124 +  // Tells if we can expand metaspace without hitting set limits.
   7.125 +  static bool can_expand(size_t words, bool is_class);
   7.126 +
    7.127 +  // Returns the amount that we can expand without hitting a GC,
   7.128 +  // measured in words.
   7.129 +  static size_t allowed_expansion();
   7.130  
   7.131    // Calculate the new high-water mark at which to induce
   7.132    // a GC.
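With should_expand() replaced, the expansion decision is now split in two: can_expand() checks the hard limits (MaxMetaspaceSize, and the compressed class space size for class-space allocations), while allowed_expansion() caps how many words may be committed before the high-water mark forces a GC. A hypothetical caller, sketched with invented names (try_expand, expand_by, min_words, preferred_words) since the VirtualSpaceList side of the patch is not shown in this section:

    // Hypothetical sketch only; MIN2 is HotSpot's min macro.
    bool try_expand(size_t min_words, size_t preferred_words, bool is_class) {
      if (!MetaspaceGC::can_expand(min_words, is_class)) {
        return false;                      // hard limit hit -> OOM path
      }
      size_t allowed = MetaspaceGC::allowed_expansion();  // words until GC
      if (allowed < min_words) {
        return false;                      // caller should trigger a GC
      }
      size_t request = MIN2(preferred_words, allowed);
      return expand_by(request);           // commit more of the reservation
    }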
     8.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Oct 09 10:57:01 2013 +0200
     8.2 +++ b/src/share/vm/runtime/arguments.cpp	Mon Oct 07 15:51:08 2013 +0200
     8.3 @@ -3656,6 +3656,9 @@
     8.4    assert(verify_serial_gc_flags(), "SerialGC unset");
     8.5  #endif // INCLUDE_ALL_GCS
     8.6  
     8.7 +  // Initialize Metaspace flags and alignments.
     8.8 +  Metaspace::ergo_initialize();
     8.9 +
    8.10    // Set bytecode rewriting flags
    8.11    set_bytecode_flags();
    8.12  
     9.1 --- a/src/share/vm/runtime/globals.hpp	Wed Oct 09 10:57:01 2013 +0200
     9.2 +++ b/src/share/vm/runtime/globals.hpp	Mon Oct 07 15:51:08 2013 +0200
     9.3 @@ -502,6 +502,10 @@
     9.4    develop(bool, LargePagesIndividualAllocationInjectError, false,           \
     9.5            "Fail large pages individual allocation")                         \
     9.6                                                                              \
     9.7 +  product(bool, UseLargePagesInMetaspace, false,                            \
     9.8 +          "Use large page memory in metaspace. "                            \
     9.9 +          "Only used if UseLargePages is enabled.")                         \
    9.10 +                                                                            \
    9.11    develop(bool, TracePageSizes, false,                                      \
    9.12            "Trace page size selection and usage")                            \
    9.13                                                                              \
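A usage note on the new flag: UseLargePagesInMetaspace is ignored unless large pages are enabled at all, so an illustrative invocation would be java -XX:+UseLargePages -XX:+UseLargePagesInMetaspace (actual large-page availability depends on the platform configuration).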
