src/share/vm/memory/metaspace.cpp

changeset 0: f90c822e73f8
child 6876:  710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/metaspace.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,3829 @@
     1.4 +/*
     1.5 + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +#include "precompiled.hpp"
    1.28 +#include "gc_interface/collectedHeap.hpp"
    1.29 +#include "memory/allocation.hpp"
    1.30 +#include "memory/binaryTreeDictionary.hpp"
    1.31 +#include "memory/freeList.hpp"
    1.32 +#include "memory/collectorPolicy.hpp"
    1.33 +#include "memory/filemap.hpp"
    1.35 +#include "memory/gcLocker.hpp"
    1.36 +#include "memory/metachunk.hpp"
    1.37 +#include "memory/metaspace.hpp"
    1.38 +#include "memory/metaspaceGCThresholdUpdater.hpp"
    1.39 +#include "memory/metaspaceShared.hpp"
    1.40 +#include "memory/metaspaceTracer.hpp"
    1.41 +#include "memory/resourceArea.hpp"
    1.42 +#include "memory/universe.hpp"
    1.43 +#include "runtime/atomic.inline.hpp"
    1.44 +#include "runtime/globals.hpp"
    1.45 +#include "runtime/init.hpp"
    1.46 +#include "runtime/java.hpp"
    1.47 +#include "runtime/mutex.hpp"
    1.48 +#include "runtime/orderAccess.hpp"
    1.49 +#include "services/memTracker.hpp"
    1.50 +#include "services/memoryService.hpp"
    1.51 +#include "utilities/copy.hpp"
    1.52 +#include "utilities/debug.hpp"
    1.53 +
    1.54 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    1.55 +
    1.56 +typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
    1.57 +typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
    1.58 +
    1.59 +// Set this constant to enable slow integrity checking of the free chunk lists
    1.60 +const bool metaspace_slow_verify = false;
    1.61 +
    1.62 +size_t const allocation_from_dictionary_limit = 4 * K;
    1.63 +
    1.64 +MetaWord* last_allocated = 0;
    1.65 +
    1.66 +size_t Metaspace::_compressed_class_space_size;
    1.67 +const MetaspaceTracer* Metaspace::_tracer = NULL;
    1.68 +
    1.69 +// Used in declarations in SpaceManager and ChunkManager
    1.70 +enum ChunkIndex {
    1.71 +  ZeroIndex = 0,
    1.72 +  SpecializedIndex = ZeroIndex,
    1.73 +  SmallIndex = SpecializedIndex + 1,
    1.74 +  MediumIndex = SmallIndex + 1,
    1.75 +  HumongousIndex = MediumIndex + 1,
    1.76 +  NumberOfFreeLists = 3,
    1.77 +  NumberOfInUseLists = 4
    1.78 +};
    1.79 +
    1.80 +enum ChunkSizes {    // in words.
    1.81 +  ClassSpecializedChunk = 128,
    1.82 +  SpecializedChunk = 128,
    1.83 +  ClassSmallChunk = 256,
    1.84 +  SmallChunk = 512,
    1.85 +  ClassMediumChunk = 4 * K,
    1.86 +  MediumChunk = 8 * K
    1.87 +};
    1.88 +
    1.89 +static ChunkIndex next_chunk_index(ChunkIndex i) {
    1.90 +  assert(i < NumberOfInUseLists, "Out of bound");
    1.91 +  return (ChunkIndex) (i+1);
    1.92 +}
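
To make the tiering above concrete, here is a standalone sketch (names invented here, not part of this changeset) of how a non-class chunk size maps to a list index. The real mapping is ChunkManager::list_index() further down in this file, which also knows the class-space sizes and asserts that any other size is larger than MediumChunk.

#include <cstddef>

// Illustration only; mirrors the non-class values of the ChunkSizes enum
// above (SpecializedChunk = 128, SmallChunk = 512, MediumChunk = 8 * K words).
enum ExampleIndex { ExSpecialized, ExSmall, ExMedium, ExHumongous };

static ExampleIndex example_list_index(size_t word_size) {
  if (word_size == 128)      return ExSpecialized;
  if (word_size == 512)      return ExSmall;
  if (word_size == 8 * 1024) return ExMedium;
  return ExHumongous;  // the real code asserts such sizes exceed MediumChunk
}
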
    1.93 +
    1.94 +volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
    1.95 +uint MetaspaceGC::_shrink_factor = 0;
    1.96 +bool MetaspaceGC::_should_concurrent_collect = false;
    1.97 +
    1.98 +typedef class FreeList<Metachunk> ChunkList;
    1.99 +
   1.100 +// Manages the global free lists of chunks.
   1.101 +class ChunkManager : public CHeapObj<mtInternal> {
   1.102 +  friend class TestVirtualSpaceNodeTest;
   1.103 +
   1.104 +  // Free list of chunks of different sizes.
   1.105 +  //   SpecializedChunk
   1.106 +  //   SmallChunk
   1.107 +  //   MediumChunk
   1.108 +  //   HumongousChunk
   1.109 +  ChunkList _free_chunks[NumberOfFreeLists];
   1.110 +
   1.111 +  //   HumongousChunk
   1.112 +  ChunkTreeDictionary _humongous_dictionary;
   1.113 +
    1.114 +  // Running totals (size in words, and count) of the chunks on all free lists of this ChunkManager
   1.115 +  size_t _free_chunks_total;
   1.116 +  size_t _free_chunks_count;
   1.117 +
   1.118 +  void dec_free_chunks_total(size_t v) {
   1.119 +    assert(_free_chunks_count > 0 &&
   1.120 +             _free_chunks_total > 0,
   1.121 +             "About to go negative");
   1.122 +    Atomic::add_ptr(-1, &_free_chunks_count);
   1.123 +    jlong minus_v = (jlong) - (jlong) v;
   1.124 +    Atomic::add_ptr(minus_v, &_free_chunks_total);
   1.125 +  }
   1.126 +
   1.127 +  // Debug support
   1.128 +
   1.129 +  size_t sum_free_chunks();
   1.130 +  size_t sum_free_chunks_count();
   1.131 +
   1.132 +  void locked_verify_free_chunks_total();
   1.133 +  void slow_locked_verify_free_chunks_total() {
   1.134 +    if (metaspace_slow_verify) {
   1.135 +      locked_verify_free_chunks_total();
   1.136 +    }
   1.137 +  }
   1.138 +  void locked_verify_free_chunks_count();
   1.139 +  void slow_locked_verify_free_chunks_count() {
   1.140 +    if (metaspace_slow_verify) {
   1.141 +      locked_verify_free_chunks_count();
   1.142 +    }
   1.143 +  }
   1.144 +  void verify_free_chunks_count();
   1.145 +
   1.146 + public:
   1.147 +
   1.148 +  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
   1.149 +      : _free_chunks_total(0), _free_chunks_count(0) {
   1.150 +    _free_chunks[SpecializedIndex].set_size(specialized_size);
   1.151 +    _free_chunks[SmallIndex].set_size(small_size);
   1.152 +    _free_chunks[MediumIndex].set_size(medium_size);
   1.153 +  }
   1.154 +
    1.155 +  // Remove a chunk of the given word size from the global free list and return it.
   1.156 +  Metachunk* chunk_freelist_allocate(size_t word_size);
   1.157 +
   1.158 +  // Map a size to a list index assuming that there are lists
   1.159 +  // for special, small, medium, and humongous chunks.
   1.160 +  static ChunkIndex list_index(size_t size);
   1.161 +
   1.162 +  // Remove the chunk from its freelist.  It is
   1.163 +  // expected to be on one of the _free_chunks[] lists.
   1.164 +  void remove_chunk(Metachunk* chunk);
   1.165 +
   1.166 +  // Add the simple linked list of chunks to the freelist of chunks
   1.167 +  // of type index.
   1.168 +  void return_chunks(ChunkIndex index, Metachunk* chunks);
   1.169 +
   1.170 +  // Total of the space in the free chunks list
   1.171 +  size_t free_chunks_total_words();
   1.172 +  size_t free_chunks_total_bytes();
   1.173 +
   1.174 +  // Number of chunks in the free chunks list
   1.175 +  size_t free_chunks_count();
   1.176 +
   1.177 +  void inc_free_chunks_total(size_t v, size_t count = 1) {
   1.178 +    Atomic::add_ptr(count, &_free_chunks_count);
   1.179 +    Atomic::add_ptr(v, &_free_chunks_total);
   1.180 +  }
   1.181 +  ChunkTreeDictionary* humongous_dictionary() {
   1.182 +    return &_humongous_dictionary;
   1.183 +  }
   1.184 +
   1.185 +  ChunkList* free_chunks(ChunkIndex index);
   1.186 +
   1.187 +  // Returns the list for the given chunk word size.
   1.188 +  ChunkList* find_free_chunks_list(size_t word_size);
   1.189 +
   1.190 +  // Remove from a list by size.  Selects list based on size of chunk.
   1.191 +  Metachunk* free_chunks_get(size_t chunk_word_size);
   1.192 +
   1.193 +#define index_bounds_check(index)                                         \
   1.194 +  assert(index == SpecializedIndex ||                                     \
   1.195 +         index == SmallIndex ||                                           \
   1.196 +         index == MediumIndex ||                                          \
   1.197 +         index == HumongousIndex, err_msg("Bad index: %d", (int) index))
   1.198 +
   1.199 +  size_t num_free_chunks(ChunkIndex index) const {
   1.200 +    index_bounds_check(index);
   1.201 +
   1.202 +    if (index == HumongousIndex) {
   1.203 +      return _humongous_dictionary.total_free_blocks();
   1.204 +    }
   1.205 +
   1.206 +    ssize_t count = _free_chunks[index].count();
   1.207 +    return count == -1 ? 0 : (size_t) count;
   1.208 +  }
   1.209 +
   1.210 +  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
   1.211 +    index_bounds_check(index);
   1.212 +
   1.213 +    size_t word_size = 0;
   1.214 +    if (index == HumongousIndex) {
   1.215 +      word_size = _humongous_dictionary.total_size();
   1.216 +    } else {
   1.217 +      const size_t size_per_chunk_in_words = _free_chunks[index].size();
   1.218 +      word_size = size_per_chunk_in_words * num_free_chunks(index);
   1.219 +    }
   1.220 +
   1.221 +    return word_size * BytesPerWord;
   1.222 +  }
   1.223 +
   1.224 +  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
   1.225 +    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
   1.226 +                                         num_free_chunks(SmallIndex),
   1.227 +                                         num_free_chunks(MediumIndex),
   1.228 +                                         num_free_chunks(HumongousIndex),
   1.229 +                                         size_free_chunks_in_bytes(SpecializedIndex),
   1.230 +                                         size_free_chunks_in_bytes(SmallIndex),
   1.231 +                                         size_free_chunks_in_bytes(MediumIndex),
   1.232 +                                         size_free_chunks_in_bytes(HumongousIndex));
   1.233 +  }
   1.234 +
   1.235 +  // Debug support
   1.236 +  void verify();
   1.237 +  void slow_verify() {
   1.238 +    if (metaspace_slow_verify) {
   1.239 +      verify();
   1.240 +    }
   1.241 +  }
   1.242 +  void locked_verify();
   1.243 +  void slow_locked_verify() {
   1.244 +    if (metaspace_slow_verify) {
   1.245 +      locked_verify();
   1.246 +    }
   1.247 +  }
   1.248 +  void verify_free_chunks_total();
   1.249 +
   1.250 +  void locked_print_free_chunks(outputStream* st);
   1.251 +  void locked_print_sum_free_chunks(outputStream* st);
   1.252 +
   1.253 +  void print_on(outputStream* st) const;
   1.254 +};
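
The fields _free_chunks_total and _free_chunks_count are redundant running sums over the four containers above; the slow_*verify methods exist to cross-check them against a fresh walk of the lists. A toy model of that invariant (a sketch, not code from this changeset):

#include <cassert>
#include <cstddef>

// Toy model: per-tier chunk counts plus the redundant running sums that
// locked_verify_free_chunks_total()/_count() cross-check in the real
// ChunkManager.
struct ToyChunkManager {
  size_t count[3];       // free chunks per tier
  size_t tier_words[3];  // chunk size per tier, in words
  size_t total_words;    // running sum, kept in step with count[]
  size_t total_count;

  void verify() const {  // cf. sum_free_chunks()/sum_free_chunks_count()
    size_t words = 0, chunks = 0;
    for (int i = 0; i < 3; i++) {
      words  += count[i] * tier_words[i];
      chunks += count[i];
    }
    assert(words == total_words && chunks == total_count);
  }
};
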
   1.255 +
   1.256 +// Used to manage the free list of Metablocks (a block corresponds
   1.257 +// to the allocation of a quantum of metadata).
   1.258 +class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   1.259 +  BlockTreeDictionary* _dictionary;
   1.260 +
   1.261 +  // Only allocate and split from freelist if the size of the allocation
   1.262 +  // is at least 1/4th the size of the available block.
   1.263 +  const static int WasteMultiplier = 4;
   1.264 +
   1.265 +  // Accessors
   1.266 +  BlockTreeDictionary* dictionary() const { return _dictionary; }
   1.267 +
   1.268 + public:
   1.269 +  BlockFreelist();
   1.270 +  ~BlockFreelist();
   1.271 +
   1.272 +  // Get and return a block to the free list
   1.273 +  MetaWord* get_block(size_t word_size);
   1.274 +  void return_block(MetaWord* p, size_t word_size);
   1.275 +
    1.276 +  size_t total_size() {
    1.277 +    if (dictionary() == NULL) {
    1.278 +      return 0;
    1.279 +    } else {
    1.280 +      return dictionary()->total_size();
    1.281 +    }
    1.282 +  }
   1.283 +
   1.284 +  void print_on(outputStream* st) const;
   1.285 +};
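
The WasteMultiplier rule above means a free block is only handed out when the request is at least a quarter of the block's size. A hedged sketch of that acceptance test, with names invented here:

#include <cstddef>

// Illustration of the BlockFreelist acceptance rule: a candidate block is
// usable only if it is at most WasteMultiplier (4) times the request size.
static bool example_block_acceptable(size_t block_words, size_t request_words) {
  const size_t WasteMultiplier = 4;
  return block_words <= WasteMultiplier * request_words;
}
// e.g. a 100-word block satisfies a 30-word request (100 <= 120) but is
// rejected for a 20-word request (100 > 80) and stays in the dictionary.
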
   1.286 +
   1.287 +// A VirtualSpaceList node.
   1.288 +class VirtualSpaceNode : public CHeapObj<mtClass> {
   1.289 +  friend class VirtualSpaceList;
   1.290 +
   1.291 +  // Link to next VirtualSpaceNode
   1.292 +  VirtualSpaceNode* _next;
   1.293 +
    1.294 +  // The memory region reserved for this VirtualSpace
   1.295 +  MemRegion _reserved;
   1.296 +  ReservedSpace _rs;
   1.297 +  VirtualSpace _virtual_space;
    1.298 +  MetaWord* _top;  // address of the next available space in _virtual_space
   1.299 +  // count of chunks contained in this VirtualSpace
   1.300 +  uintx _container_count;
   1.301 +
   1.302 +  // Convenience functions to access the _virtual_space
   1.303 +  char* low()  const { return virtual_space()->low(); }
   1.304 +  char* high() const { return virtual_space()->high(); }
   1.305 +
   1.306 +  // The first Metachunk will be allocated at the bottom of the
   1.307 +  // VirtualSpace
   1.308 +  Metachunk* first_chunk() { return (Metachunk*) bottom(); }
   1.309 +
   1.310 +  // Committed but unused space in the virtual space
   1.311 +  size_t free_words_in_vs() const;
   1.312 + public:
   1.313 +
   1.314 +  VirtualSpaceNode(size_t byte_size);
   1.315 +  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   1.316 +  ~VirtualSpaceNode();
   1.317 +
   1.318 +  // Convenience functions for logical bottom and end
   1.319 +  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   1.320 +  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   1.321 +
   1.322 +  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
   1.323 +
   1.324 +  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   1.325 +  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
   1.326 +
   1.327 +  bool is_pre_committed() const { return _virtual_space.special(); }
   1.328 +
    1.330 +  // Accessors
   1.331 +  VirtualSpaceNode* next() { return _next; }
   1.332 +  void set_next(VirtualSpaceNode* v) { _next = v; }
   1.333 +
   1.334 +  void set_reserved(MemRegion const v) { _reserved = v; }
   1.335 +  void set_top(MetaWord* v) { _top = v; }
   1.336 +
   1.337 +  // Accessors
   1.338 +  MemRegion* reserved() { return &_reserved; }
   1.339 +  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
   1.340 +
   1.341 +  // Returns true if "word_size" is available in the VirtualSpace
   1.342 +  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
   1.343 +
   1.344 +  MetaWord* top() const { return _top; }
   1.345 +  void inc_top(size_t word_size) { _top += word_size; }
   1.346 +
   1.347 +  uintx container_count() { return _container_count; }
   1.348 +  void inc_container_count();
   1.349 +  void dec_container_count();
   1.350 +#ifdef ASSERT
   1.351 +  uint container_count_slow();
   1.352 +  void verify_container_count();
   1.353 +#endif
   1.354 +
   1.355 +  // used and capacity in this single entry in the list
   1.356 +  size_t used_words_in_vs() const;
   1.357 +  size_t capacity_words_in_vs() const;
   1.358 +
   1.359 +  bool initialize();
   1.360 +
   1.361 +  // get space from the virtual space
   1.362 +  Metachunk* take_from_committed(size_t chunk_word_size);
   1.363 +
   1.364 +  // Allocate a chunk from the virtual space and return it.
   1.365 +  Metachunk* get_chunk_vs(size_t chunk_word_size);
   1.366 +
   1.367 +  // Expands/shrinks the committed space in a virtual space.  Delegates
    1.368 +  // to VirtualSpace.
   1.369 +  bool expand_by(size_t min_words, size_t preferred_words);
   1.370 +
   1.371 +  // In preparation for deleting this node, remove all the chunks
   1.372 +  // in the node from any freelist.
   1.373 +  void purge(ChunkManager* chunk_manager);
   1.374 +
   1.375 +  // If an allocation doesn't fit in the current node a new node is created.
   1.376 +  // Allocate chunks out of the remaining committed space in this node
   1.377 +  // to avoid wasting that memory.
    1.378 +  // The leftover space always divides up exactly because all the chunk
    1.379 +  // sizes are multiples of the smallest chunk size.
   1.380 +  void retire(ChunkManager* chunk_manager);
   1.381 +
   1.382 +#ifdef ASSERT
   1.383 +  // Debug support
   1.384 +  void mangle();
   1.385 +#endif
   1.386 +
   1.387 +  void print_on(outputStream* st) const;
   1.388 +};
   1.389 +
   1.390 +#define assert_is_ptr_aligned(ptr, alignment) \
   1.391 +  assert(is_ptr_aligned(ptr, alignment),      \
   1.392 +    err_msg(PTR_FORMAT " is not aligned to "  \
   1.393 +      SIZE_FORMAT, ptr, alignment))
   1.394 +
   1.395 +#define assert_is_size_aligned(size, alignment) \
   1.396 +  assert(is_size_aligned(size, alignment),      \
   1.397 +    err_msg(SIZE_FORMAT " is not aligned to "   \
   1.398 +       SIZE_FORMAT, size, alignment))
   1.399 +
   1.400 +
   1.401 +// Decide if large pages should be committed when the memory is reserved.
   1.402 +static bool should_commit_large_pages_when_reserving(size_t bytes) {
   1.403 +  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
   1.404 +    size_t words = bytes / BytesPerWord;
   1.405 +    bool is_class = false; // We never reserve large pages for the class space.
   1.406 +    if (MetaspaceGC::can_expand(words, is_class) &&
   1.407 +        MetaspaceGC::allowed_expansion() >= words) {
   1.408 +      return true;
   1.409 +    }
   1.410 +  }
   1.411 +
   1.412 +  return false;
   1.413 +}
   1.414 +
    1.415 +// 'bytes' is the size of the associated VirtualSpace.
   1.416 +VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   1.417 +  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
   1.418 +
    1.419 +  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
    1.420 +  // at a configurable address, generally at the top of the Java heap, so
    1.421 +  // other memory addresses don't conflict.
   1.422 +  if (DumpSharedSpaces) {
   1.423 +    bool large_pages = false; // No large pages when dumping the CDS archive.
   1.424 +    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
   1.425 +
   1.426 +    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
   1.427 +    if (_rs.is_reserved()) {
   1.428 +      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
   1.429 +    } else {
   1.430 +      // Get a mmap region anywhere if the SharedBaseAddress fails.
   1.431 +      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   1.432 +    }
   1.433 +    MetaspaceShared::set_shared_rs(&_rs);
   1.434 +  } else {
   1.435 +    bool large_pages = should_commit_large_pages_when_reserving(bytes);
   1.436 +
   1.437 +    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   1.438 +  }
   1.439 +
   1.440 +  if (_rs.is_reserved()) {
   1.441 +    assert(_rs.base() != NULL, "Catch if we get a NULL address");
   1.442 +    assert(_rs.size() != 0, "Catch if we get a 0 size");
   1.443 +    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
   1.444 +    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
   1.445 +
   1.446 +    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   1.447 +  }
   1.448 +}
   1.449 +
   1.450 +void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   1.451 +  Metachunk* chunk = first_chunk();
   1.452 +  Metachunk* invalid_chunk = (Metachunk*) top();
   1.453 +  while (chunk < invalid_chunk ) {
   1.454 +    assert(chunk->is_tagged_free(), "Should be tagged free");
   1.455 +    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
   1.456 +    chunk_manager->remove_chunk(chunk);
   1.457 +    assert(chunk->next() == NULL &&
   1.458 +           chunk->prev() == NULL,
   1.459 +           "Was not removed from its list");
   1.460 +    chunk = (Metachunk*) next;
   1.461 +  }
   1.462 +}
   1.463 +
   1.464 +#ifdef ASSERT
   1.465 +uint VirtualSpaceNode::container_count_slow() {
   1.466 +  uint count = 0;
   1.467 +  Metachunk* chunk = first_chunk();
   1.468 +  Metachunk* invalid_chunk = (Metachunk*) top();
   1.469 +  while (chunk < invalid_chunk ) {
   1.470 +    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
   1.471 +    // Don't count the chunks on the free lists.  Those are
   1.472 +    // still part of the VirtualSpaceNode but not currently
   1.473 +    // counted.
   1.474 +    if (!chunk->is_tagged_free()) {
   1.475 +      count++;
   1.476 +    }
   1.477 +    chunk = (Metachunk*) next;
   1.478 +  }
   1.479 +  return count;
   1.480 +}
   1.481 +#endif
   1.482 +
   1.483 +// List of VirtualSpaces for metadata allocation.
   1.484 +class VirtualSpaceList : public CHeapObj<mtClass> {
   1.485 +  friend class VirtualSpaceNode;
   1.486 +
   1.487 +  enum VirtualSpaceSizes {
   1.488 +    VirtualSpaceSize = 256 * K
   1.489 +  };
   1.490 +
   1.491 +  // Head of the list
   1.492 +  VirtualSpaceNode* _virtual_space_list;
   1.493 +  // virtual space currently being used for allocations
   1.494 +  VirtualSpaceNode* _current_virtual_space;
   1.495 +
   1.496 +  // Is this VirtualSpaceList used for the compressed class space
   1.497 +  bool _is_class;
   1.498 +
   1.499 +  // Sum of reserved and committed memory in the virtual spaces
   1.500 +  size_t _reserved_words;
   1.501 +  size_t _committed_words;
   1.502 +
   1.503 +  // Number of virtual spaces
   1.504 +  size_t _virtual_space_count;
   1.505 +
   1.506 +  ~VirtualSpaceList();
   1.507 +
   1.508 +  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
   1.509 +
   1.510 +  void set_virtual_space_list(VirtualSpaceNode* v) {
   1.511 +    _virtual_space_list = v;
   1.512 +  }
   1.513 +  void set_current_virtual_space(VirtualSpaceNode* v) {
   1.514 +    _current_virtual_space = v;
   1.515 +  }
   1.516 +
   1.517 +  void link_vs(VirtualSpaceNode* new_entry);
   1.518 +
   1.519 +  // Get another virtual space and add it to the list.  This
   1.520 +  // is typically prompted by a failed attempt to allocate a chunk
   1.521 +  // and is typically followed by the allocation of a chunk.
   1.522 +  bool create_new_virtual_space(size_t vs_word_size);
   1.523 +
   1.524 +  // Chunk up the unused committed space in the current
   1.525 +  // virtual space and add the chunks to the free list.
   1.526 +  void retire_current_virtual_space();
   1.527 +
   1.528 + public:
   1.529 +  VirtualSpaceList(size_t word_size);
   1.530 +  VirtualSpaceList(ReservedSpace rs);
   1.531 +
   1.532 +  size_t free_bytes();
   1.533 +
   1.534 +  Metachunk* get_new_chunk(size_t word_size,
   1.535 +                           size_t grow_chunks_by_words,
   1.536 +                           size_t medium_chunk_bunch);
   1.537 +
   1.538 +  bool expand_node_by(VirtualSpaceNode* node,
   1.539 +                      size_t min_words,
   1.540 +                      size_t preferred_words);
   1.541 +
   1.542 +  bool expand_by(size_t min_words,
   1.543 +                 size_t preferred_words);
   1.544 +
   1.545 +  VirtualSpaceNode* current_virtual_space() {
   1.546 +    return _current_virtual_space;
   1.547 +  }
   1.548 +
   1.549 +  bool is_class() const { return _is_class; }
   1.550 +
   1.551 +  bool initialization_succeeded() { return _virtual_space_list != NULL; }
   1.552 +
   1.553 +  size_t reserved_words()  { return _reserved_words; }
   1.554 +  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
   1.555 +  size_t committed_words() { return _committed_words; }
   1.556 +  size_t committed_bytes() { return committed_words() * BytesPerWord; }
   1.557 +
   1.558 +  void inc_reserved_words(size_t v);
   1.559 +  void dec_reserved_words(size_t v);
   1.560 +  void inc_committed_words(size_t v);
   1.561 +  void dec_committed_words(size_t v);
   1.562 +  void inc_virtual_space_count();
   1.563 +  void dec_virtual_space_count();
   1.564 +
   1.565 +  bool contains(const void* ptr);
   1.566 +
    1.567 +  // Unlink empty VirtualSpaceNodes and free them.
   1.568 +  void purge(ChunkManager* chunk_manager);
   1.569 +
   1.570 +  void print_on(outputStream* st) const;
   1.571 +
   1.572 +  class VirtualSpaceListIterator : public StackObj {
   1.573 +    VirtualSpaceNode* _virtual_spaces;
   1.574 +   public:
   1.575 +    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
   1.576 +      _virtual_spaces(virtual_spaces) {}
   1.577 +
   1.578 +    bool repeat() {
   1.579 +      return _virtual_spaces != NULL;
   1.580 +    }
   1.581 +
   1.582 +    VirtualSpaceNode* get_next() {
   1.583 +      VirtualSpaceNode* result = _virtual_spaces;
   1.584 +      if (_virtual_spaces != NULL) {
   1.585 +        _virtual_spaces = _virtual_spaces->next();
   1.586 +      }
   1.587 +      return result;
   1.588 +    }
   1.589 +  };
   1.590 +};
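
The repeat()/get_next() idiom of VirtualSpaceListIterator is used throughout the rest of the file (see ~VirtualSpaceList() below for a live use). A standalone model of the same shape, for illustration only:

#include <cstddef>

struct ToyNode { ToyNode* next; };

// Mirrors the iterator above: repeat() tests for more nodes, get_next()
// returns the current node and advances.
struct ToyIterator {
  ToyNode* _cur;
  explicit ToyIterator(ToyNode* head) : _cur(head) {}
  bool repeat() const { return _cur != NULL; }
  ToyNode* get_next() {
    ToyNode* result = _cur;
    if (_cur != NULL) { _cur = _cur->next; }
    return result;
  }
};
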
   1.591 +
   1.592 +class Metadebug : AllStatic {
   1.593 +  // Debugging support for Metaspaces
   1.594 +  static int _allocation_fail_alot_count;
   1.595 +
   1.596 + public:
   1.597 +
   1.598 +  static void init_allocation_fail_alot_count();
   1.599 +#ifdef ASSERT
   1.600 +  static bool test_metadata_failure();
   1.601 +#endif
   1.602 +};
   1.603 +
   1.604 +int Metadebug::_allocation_fail_alot_count = 0;
   1.605 +
   1.606 +//  SpaceManager - used by Metaspace to handle allocations
   1.607 +class SpaceManager : public CHeapObj<mtClass> {
   1.608 +  friend class Metaspace;
   1.609 +  friend class Metadebug;
   1.610 +
   1.611 + private:
   1.612 +
   1.613 +  // protects allocations
   1.614 +  Mutex* const _lock;
   1.615 +
   1.616 +  // Type of metadata allocated.
   1.617 +  Metaspace::MetadataType _mdtype;
   1.618 +
   1.619 +  // List of chunks in use by this SpaceManager.  Allocations
   1.620 +  // are done from the current chunk.  The list is used for deallocating
   1.621 +  // chunks when the SpaceManager is freed.
   1.622 +  Metachunk* _chunks_in_use[NumberOfInUseLists];
   1.623 +  Metachunk* _current_chunk;
   1.624 +
   1.625 +  // Number of small chunks to allocate to a manager
   1.626 +  // If class space manager, small chunks are unlimited
   1.627 +  static uint const _small_chunk_limit;
   1.628 +
   1.629 +  // Sum of all space in allocated chunks
   1.630 +  size_t _allocated_blocks_words;
   1.631 +
   1.632 +  // Sum of all allocated chunks
   1.633 +  size_t _allocated_chunks_words;
   1.634 +  size_t _allocated_chunks_count;
   1.635 +
   1.636 +  // Free lists of blocks are per SpaceManager since they
   1.637 +  // are assumed to be in chunks in use by the SpaceManager
   1.638 +  // and all chunks in use by a SpaceManager are freed when
   1.639 +  // the class loader using the SpaceManager is collected.
   1.640 +  BlockFreelist _block_freelists;
   1.641 +
   1.642 +  // protects virtualspace and chunk expansions
   1.643 +  static const char*  _expand_lock_name;
   1.644 +  static const int    _expand_lock_rank;
   1.645 +  static Mutex* const _expand_lock;
   1.646 +
   1.647 + private:
   1.648 +  // Accessors
   1.649 +  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
   1.650 +  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
   1.651 +    _chunks_in_use[index] = v;
   1.652 +  }
   1.653 +
   1.654 +  BlockFreelist* block_freelists() const {
   1.655 +    return (BlockFreelist*) &_block_freelists;
   1.656 +  }
   1.657 +
   1.658 +  Metaspace::MetadataType mdtype() { return _mdtype; }
   1.659 +
   1.660 +  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
   1.661 +  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
   1.662 +
   1.663 +  Metachunk* current_chunk() const { return _current_chunk; }
   1.664 +  void set_current_chunk(Metachunk* v) {
   1.665 +    _current_chunk = v;
   1.666 +  }
   1.667 +
   1.668 +  Metachunk* find_current_chunk(size_t word_size);
   1.669 +
   1.670 +  // Add chunk to the list of chunks in use
   1.671 +  void add_chunk(Metachunk* v, bool make_current);
   1.672 +  void retire_current_chunk();
   1.673 +
   1.674 +  Mutex* lock() const { return _lock; }
   1.675 +
   1.676 +  const char* chunk_size_name(ChunkIndex index) const;
   1.677 +
   1.678 + protected:
   1.679 +  void initialize();
   1.680 +
   1.681 + public:
   1.682 +  SpaceManager(Metaspace::MetadataType mdtype,
   1.683 +               Mutex* lock);
   1.684 +  ~SpaceManager();
   1.685 +
   1.686 +  enum ChunkMultiples {
   1.687 +    MediumChunkMultiple = 4
   1.688 +  };
   1.689 +
   1.690 +  bool is_class() { return _mdtype == Metaspace::ClassType; }
   1.691 +
   1.692 +  // Accessors
    1.693 +  size_t specialized_chunk_size() { return (size_t) (is_class() ? ClassSpecializedChunk : SpecializedChunk); }
    1.694 +  size_t small_chunk_size()       { return (size_t) (is_class() ? ClassSmallChunk : SmallChunk); }
    1.695 +  size_t medium_chunk_size()      { return (size_t) (is_class() ? ClassMediumChunk : MediumChunk); }
   1.696 +  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
   1.697 +
   1.698 +  size_t smallest_chunk_size()  { return specialized_chunk_size(); }
   1.699 +
   1.700 +  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   1.701 +  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
   1.702 +  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
   1.703 +  size_t allocated_chunks_count() const { return _allocated_chunks_count; }
   1.704 +
   1.705 +  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
   1.706 +
   1.707 +  static Mutex* expand_lock() { return _expand_lock; }
   1.708 +
    1.709 +  // Increment the per Metaspace and global running sums for Metachunks
    1.710 +  // by the given size.  This is used when a Metachunk is added to
    1.711 +  // the in-use list.
   1.712 +  void inc_size_metrics(size_t words);
    1.713 +  // Increment the per Metaspace and global running sums for Metablocks
    1.714 +  // by the given size.  This is used when a Metablock is allocated.
   1.715 +  void inc_used_metrics(size_t words);
    1.716 +  // Delete this SpaceManager's portion of the running sums.  That is,
    1.717 +  // the global running sums for the Metachunks and Metablocks are
    1.718 +  // decremented for all the Metachunks in use by this SpaceManager.
   1.719 +  void dec_total_from_size_metrics();
   1.720 +
   1.721 +  // Set the sizes for the initial chunks.
   1.722 +  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
   1.723 +                               size_t* chunk_word_size,
   1.724 +                               size_t* class_chunk_word_size);
   1.725 +
   1.726 +  size_t sum_capacity_in_chunks_in_use() const;
   1.727 +  size_t sum_used_in_chunks_in_use() const;
   1.728 +  size_t sum_free_in_chunks_in_use() const;
   1.729 +  size_t sum_waste_in_chunks_in_use() const;
   1.730 +  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
   1.731 +
   1.732 +  size_t sum_count_in_chunks_in_use();
   1.733 +  size_t sum_count_in_chunks_in_use(ChunkIndex i);
   1.734 +
   1.735 +  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
   1.736 +
   1.737 +  // Block allocation and deallocation.
   1.738 +  // Allocates a block from the current chunk
   1.739 +  MetaWord* allocate(size_t word_size);
   1.740 +
   1.741 +  // Helper for allocations
   1.742 +  MetaWord* allocate_work(size_t word_size);
   1.743 +
   1.744 +  // Returns a block to the per manager freelist
   1.745 +  void deallocate(MetaWord* p, size_t word_size);
   1.746 +
    1.747 +  // Based on the allocation size and a minimum chunk size, compute the
    1.748 +  // chunk size to request when expanding space for a chunk allocation.
   1.749 +  size_t calc_chunk_size(size_t allocation_word_size);
   1.750 +
   1.751 +  // Called when an allocation from the current chunk fails.
   1.752 +  // Gets a new chunk (may require getting a new virtual space),
   1.753 +  // and allocates from that chunk.
   1.754 +  MetaWord* grow_and_allocate(size_t word_size);
   1.755 +
   1.756 +  // Notify memory usage to MemoryService.
   1.757 +  void track_metaspace_memory_usage();
   1.758 +
   1.759 +  // debugging support.
   1.760 +
   1.761 +  void dump(outputStream* const out) const;
   1.762 +  void print_on(outputStream* st) const;
   1.763 +  void locked_print_chunks_in_use_on(outputStream* st) const;
   1.764 +
   1.765 +  void verify();
   1.766 +  void verify_chunk_size(Metachunk* chunk);
   1.767 +  NOT_PRODUCT(void mangle_freed_chunks();)
   1.768 +#ifdef ASSERT
   1.769 +  void verify_allocated_blocks_words();
   1.770 +#endif
   1.771 +
   1.772 +  size_t get_raw_word_size(size_t word_size) {
   1.773 +    size_t byte_size = word_size * BytesPerWord;
   1.774 +
   1.775 +    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
   1.776 +    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
   1.777 +
   1.778 +    size_t raw_word_size = raw_bytes_size / BytesPerWord;
   1.779 +    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
   1.780 +
   1.781 +    return raw_word_size;
   1.782 +  }
   1.783 +};
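
get_raw_word_size() above pads every request to at least sizeof(Metablock) and then rounds up to the chunk object alignment, so that a freed block can always hold a Metablock header. A worked sketch of that arithmetic, with assumed constants (8-byte words, a 16-byte Metablock, 8-byte alignment; plausible 64-bit values, not taken from this changeset):

#include <cstddef>

static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

// Sketch of the raw-size computation with assumed constants.
static size_t example_raw_word_size(size_t word_size) {
  const size_t BytesPerWord  = 8;   // assumed 64-bit
  const size_t MinBlockBytes = 16;  // stand-in for sizeof(Metablock)
  const size_t Alignment     = 8;   // stand-in for Metachunk::object_alignment()
  size_t byte_size = word_size * BytesPerWord;
  size_t raw_bytes = align_up(byte_size < MinBlockBytes ? MinBlockBytes
                                                        : byte_size,
                              Alignment);
  return raw_bytes / BytesPerWord;
}
// e.g. a 1-word request becomes 2 words (16 bytes) under these assumptions.
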
   1.784 +
   1.785 +uint const SpaceManager::_small_chunk_limit = 4;
   1.786 +
   1.787 +const char* SpaceManager::_expand_lock_name =
   1.788 +  "SpaceManager chunk allocation lock";
   1.789 +const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
   1.790 +Mutex* const SpaceManager::_expand_lock =
   1.791 +  new Mutex(SpaceManager::_expand_lock_rank,
   1.792 +            SpaceManager::_expand_lock_name,
   1.793 +            Mutex::_allow_vm_block_flag);
   1.794 +
   1.795 +void VirtualSpaceNode::inc_container_count() {
   1.796 +  assert_lock_strong(SpaceManager::expand_lock());
   1.797 +  _container_count++;
   1.798 +  assert(_container_count == container_count_slow(),
    1.799 +         err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
   1.800 +                 " container_count_slow() " SIZE_FORMAT,
   1.801 +                 _container_count, container_count_slow()));
   1.802 +}
   1.803 +
   1.804 +void VirtualSpaceNode::dec_container_count() {
   1.805 +  assert_lock_strong(SpaceManager::expand_lock());
   1.806 +  _container_count--;
   1.807 +}
   1.808 +
   1.809 +#ifdef ASSERT
   1.810 +void VirtualSpaceNode::verify_container_count() {
   1.811 +  assert(_container_count == container_count_slow(),
    1.812 +    err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
   1.813 +            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
   1.814 +}
   1.815 +#endif
   1.816 +
   1.817 +// BlockFreelist methods
   1.818 +
   1.819 +BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
   1.820 +
   1.821 +BlockFreelist::~BlockFreelist() {
   1.822 +  if (_dictionary != NULL) {
   1.823 +    if (Verbose && TraceMetadataChunkAllocation) {
   1.824 +      _dictionary->print_free_lists(gclog_or_tty);
   1.825 +    }
   1.826 +    delete _dictionary;
   1.827 +  }
   1.828 +}
   1.829 +
   1.830 +void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
   1.831 +  Metablock* free_chunk = ::new (p) Metablock(word_size);
   1.832 +  if (dictionary() == NULL) {
   1.833 +   _dictionary = new BlockTreeDictionary();
   1.834 +  }
   1.835 +  dictionary()->return_chunk(free_chunk);
   1.836 +}
   1.837 +
   1.838 +MetaWord* BlockFreelist::get_block(size_t word_size) {
   1.839 +  if (dictionary() == NULL) {
   1.840 +    return NULL;
   1.841 +  }
   1.842 +
   1.843 +  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
   1.844 +    // Dark matter.  Too small for dictionary.
   1.845 +    return NULL;
   1.846 +  }
   1.847 +
   1.848 +  Metablock* free_block =
   1.849 +    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
   1.850 +  if (free_block == NULL) {
   1.851 +    return NULL;
   1.852 +  }
   1.853 +
   1.854 +  const size_t block_size = free_block->size();
   1.855 +  if (block_size > WasteMultiplier * word_size) {
   1.856 +    return_block((MetaWord*)free_block, block_size);
   1.857 +    return NULL;
   1.858 +  }
   1.859 +
   1.860 +  MetaWord* new_block = (MetaWord*)free_block;
   1.861 +  assert(block_size >= word_size, "Incorrect size of block from freelist");
   1.862 +  const size_t unused = block_size - word_size;
   1.863 +  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
   1.864 +    return_block(new_block + word_size, unused);
   1.865 +  }
   1.866 +
   1.867 +  return new_block;
   1.868 +}
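
The tail of get_block() returns the unused remainder of an oversized block to the dictionary when the remainder is big enough to be tracked; anything smaller becomes "dark matter" until the whole chunk is freed. A worked sketch, assuming a minimum trackable size of 4 words (a stand-in for the TreeChunk minimum, not a value from this changeset):

#include <cstddef>

// Illustration of the block split; assumes block_words >= request_words,
// which the 'atLeast' dictionary lookup above guarantees.
struct ExampleSplit { size_t used; size_t returned; size_t dark_matter; };

static ExampleSplit example_split(size_t block_words, size_t request_words) {
  const size_t min_dictionary_size = 4;  // assumed TreeChunk min_size()
  ExampleSplit r = { request_words, 0, 0 };
  size_t unused = block_words - request_words;
  if (unused >= min_dictionary_size) {
    r.returned = unused;     // cf. return_block(new_block + word_size, unused)
  } else {
    r.dark_matter = unused;  // too small to track; wasted for now
  }
  return r;
}
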
   1.869 +
   1.870 +void BlockFreelist::print_on(outputStream* st) const {
   1.871 +  if (dictionary() == NULL) {
   1.872 +    return;
   1.873 +  }
   1.874 +  dictionary()->print_free_lists(st);
   1.875 +}
   1.876 +
   1.877 +// VirtualSpaceNode methods
   1.878 +
   1.879 +VirtualSpaceNode::~VirtualSpaceNode() {
   1.880 +  _rs.release();
   1.881 +#ifdef ASSERT
   1.882 +  size_t word_size = sizeof(*this) / BytesPerWord;
   1.883 +  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
   1.884 +#endif
   1.885 +}
   1.886 +
   1.887 +size_t VirtualSpaceNode::used_words_in_vs() const {
   1.888 +  return pointer_delta(top(), bottom(), sizeof(MetaWord));
   1.889 +}
   1.890 +
   1.891 +// Space committed in the VirtualSpace
   1.892 +size_t VirtualSpaceNode::capacity_words_in_vs() const {
   1.893 +  return pointer_delta(end(), bottom(), sizeof(MetaWord));
   1.894 +}
   1.895 +
   1.896 +size_t VirtualSpaceNode::free_words_in_vs() const {
   1.897 +  return pointer_delta(end(), top(), sizeof(MetaWord));
   1.898 +}
   1.899 +
   1.900 +// Allocates the chunk from the virtual space only.
   1.901 +// This interface is also used internally for debugging.  Not all
   1.902 +// chunks removed here are necessarily used for allocation.
   1.903 +Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
   1.904 +  // Bottom of the new chunk
   1.905 +  MetaWord* chunk_limit = top();
   1.906 +  assert(chunk_limit != NULL, "Not safe to call this method");
   1.907 +
   1.908 +  // The virtual spaces are always expanded by the
   1.909 +  // commit granularity to enforce the following condition.
   1.910 +  // Without this the is_available check will not work correctly.
   1.911 +  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
   1.912 +      "The committed memory doesn't match the expanded memory.");
   1.913 +
   1.914 +  if (!is_available(chunk_word_size)) {
   1.915 +    if (TraceMetadataChunkAllocation) {
    1.916 +      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
   1.917 +      // Dump some information about the virtual space that is nearly full
   1.918 +      print_on(gclog_or_tty);
   1.919 +    }
   1.920 +    return NULL;
   1.921 +  }
   1.922 +
    1.923 +  // Take the space (bump top on the current virtual space).
   1.924 +  inc_top(chunk_word_size);
   1.925 +
   1.926 +  // Initialize the chunk
   1.927 +  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
   1.928 +  return result;
   1.929 +}
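
take_from_committed() is a bump-pointer carve-out: is_available() checks the distance between _top and the committed limit, and inc_top() advances _top past the new chunk. A self-contained model of that discipline (names hypothetical):

#include <cstddef>

// Bump-pointer model of the committed-region carve-out above (illustration).
struct ToyRegion {
  char* _top;  // cf. VirtualSpaceNode::_top
  char* _end;  // committed limit

  bool is_available(size_t bytes) const {
    return bytes <= (size_t)(_end - _top);
  }
  void* take(size_t bytes) {           // cf. take_from_committed()
    if (!is_available(bytes)) return NULL;
    void* chunk = _top;                // new chunk starts at the old top
    _top += bytes;                     // cf. inc_top()
    return chunk;
  }
};
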
   1.930 +
   1.931 +
   1.932 +// Expand the virtual space (commit more of the reserved space)
   1.933 +bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
   1.934 +  size_t min_bytes = min_words * BytesPerWord;
   1.935 +  size_t preferred_bytes = preferred_words * BytesPerWord;
   1.936 +
   1.937 +  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
   1.938 +
   1.939 +  if (uncommitted < min_bytes) {
   1.940 +    return false;
   1.941 +  }
   1.942 +
   1.943 +  size_t commit = MIN2(preferred_bytes, uncommitted);
   1.944 +  bool result = virtual_space()->expand_by(commit, false);
   1.945 +
   1.946 +  assert(result, "Failed to commit memory");
   1.947 +
   1.948 +  return result;
   1.949 +}
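
The expand decision above commits min(preferred, uncommitted) bytes and fails only when even the minimum does not fit in the remaining reservation. A compact model of that sizing choice (illustration, not the implementation):

#include <cstddef>

// Returns the number of bytes VirtualSpaceNode::expand_by() would commit,
// or 0 for failure, under the policy shown above.
static size_t example_commit_size(size_t uncommitted, size_t min_bytes,
                                  size_t preferred_bytes) {
  if (uncommitted < min_bytes) return 0;                   // cannot satisfy minimum
  return preferred_bytes < uncommitted ? preferred_bytes   // preferred fits
                                       : uncommitted;      // else take what's left
}
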
   1.950 +
   1.951 +Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   1.952 +  assert_lock_strong(SpaceManager::expand_lock());
   1.953 +  Metachunk* result = take_from_committed(chunk_word_size);
   1.954 +  if (result != NULL) {
   1.955 +    inc_container_count();
   1.956 +  }
   1.957 +  return result;
   1.958 +}
   1.959 +
   1.960 +bool VirtualSpaceNode::initialize() {
   1.961 +
   1.962 +  if (!_rs.is_reserved()) {
   1.963 +    return false;
   1.964 +  }
   1.965 +
    1.966 +  // These are necessary restrictions to make sure that the virtual space always
   1.967 +  // grows in steps of Metaspace::commit_alignment(). If both base and size are
   1.968 +  // aligned only the middle alignment of the VirtualSpace is used.
   1.969 +  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
   1.970 +  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
   1.971 +
   1.972 +  // ReservedSpaces marked as special will have the entire memory
   1.973 +  // pre-committed. Setting a committed size will make sure that
    1.974 +  // committed_size and actual_committed_size agree.
   1.975 +  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
   1.976 +
   1.977 +  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
   1.978 +                                            Metaspace::commit_alignment());
   1.979 +  if (result) {
   1.980 +    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
   1.981 +        "Checking that the pre-committed memory was registered by the VirtualSpace");
   1.982 +
   1.983 +    set_top((MetaWord*)virtual_space()->low());
   1.984 +    set_reserved(MemRegion((HeapWord*)_rs.base(),
   1.985 +                 (HeapWord*)(_rs.base() + _rs.size())));
   1.986 +
   1.987 +    assert(reserved()->start() == (HeapWord*) _rs.base(),
   1.988 +      err_msg("Reserved start was not set properly " PTR_FORMAT
   1.989 +        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
   1.990 +    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
   1.991 +      err_msg("Reserved size was not set properly " SIZE_FORMAT
   1.992 +        " != " SIZE_FORMAT, reserved()->word_size(),
   1.993 +        _rs.size() / BytesPerWord));
   1.994 +  }
   1.995 +
   1.996 +  return result;
   1.997 +}
   1.998 +
   1.999 +void VirtualSpaceNode::print_on(outputStream* st) const {
  1.1000 +  size_t used = used_words_in_vs();
  1.1001 +  size_t capacity = capacity_words_in_vs();
  1.1002 +  VirtualSpace* vs = virtual_space();
  1.1003 +  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
  1.1004 +           "[" PTR_FORMAT ", " PTR_FORMAT ", "
  1.1005 +           PTR_FORMAT ", " PTR_FORMAT ")",
  1.1006 +           vs, capacity / K,
  1.1007 +           capacity == 0 ? 0 : used * 100 / capacity,
  1.1008 +           bottom(), top(), end(),
  1.1009 +           vs->high_boundary());
  1.1010 +}
  1.1011 +
  1.1012 +#ifdef ASSERT
  1.1013 +void VirtualSpaceNode::mangle() {
  1.1014 +  size_t word_size = capacity_words_in_vs();
  1.1015 +  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
  1.1016 +}
  1.1017 +#endif // ASSERT
  1.1018 +
  1.1019 +// VirtualSpaceList methods
  1.1020 +// Space allocated from the VirtualSpace
  1.1021 +
  1.1022 +VirtualSpaceList::~VirtualSpaceList() {
  1.1023 +  VirtualSpaceListIterator iter(virtual_space_list());
  1.1024 +  while (iter.repeat()) {
  1.1025 +    VirtualSpaceNode* vsl = iter.get_next();
  1.1026 +    delete vsl;
  1.1027 +  }
  1.1028 +}
  1.1029 +
  1.1030 +void VirtualSpaceList::inc_reserved_words(size_t v) {
  1.1031 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1032 +  _reserved_words = _reserved_words + v;
  1.1033 +}
  1.1034 +void VirtualSpaceList::dec_reserved_words(size_t v) {
  1.1035 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1036 +  _reserved_words = _reserved_words - v;
  1.1037 +}
  1.1038 +
  1.1039 +#define assert_committed_below_limit()                             \
  1.1040 +  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
  1.1041 +      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
  1.1042 +              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
  1.1043 +          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
  1.1044 +
  1.1045 +void VirtualSpaceList::inc_committed_words(size_t v) {
  1.1046 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1047 +  _committed_words = _committed_words + v;
  1.1048 +
  1.1049 +  assert_committed_below_limit();
  1.1050 +}
  1.1051 +void VirtualSpaceList::dec_committed_words(size_t v) {
  1.1052 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1053 +  _committed_words = _committed_words - v;
  1.1054 +
  1.1055 +  assert_committed_below_limit();
  1.1056 +}
  1.1057 +
  1.1058 +void VirtualSpaceList::inc_virtual_space_count() {
  1.1059 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1060 +  _virtual_space_count++;
  1.1061 +}
  1.1062 +void VirtualSpaceList::dec_virtual_space_count() {
  1.1063 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1064 +  _virtual_space_count--;
  1.1065 +}
  1.1066 +
  1.1067 +void ChunkManager::remove_chunk(Metachunk* chunk) {
  1.1068 +  size_t word_size = chunk->word_size();
  1.1069 +  ChunkIndex index = list_index(word_size);
  1.1070 +  if (index != HumongousIndex) {
  1.1071 +    free_chunks(index)->remove_chunk(chunk);
  1.1072 +  } else {
  1.1073 +    humongous_dictionary()->remove_chunk(chunk);
  1.1074 +  }
  1.1075 +
  1.1076 +  // Chunk is being removed from the chunks free list.
  1.1077 +  dec_free_chunks_total(chunk->word_size());
  1.1078 +}
  1.1079 +
  1.1080 +// Walk the list of VirtualSpaceNodes and delete
  1.1081 +// nodes with a 0 container_count.  Remove Metachunks in
  1.1082 +// the node from their respective freelists.
  1.1083 +void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  1.1084 +  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  1.1085 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1086 +  // Don't use a VirtualSpaceListIterator because this
  1.1087 +  // list is being changed and a straightforward use of an iterator is not safe.
  1.1088 +  VirtualSpaceNode* purged_vsl = NULL;
  1.1089 +  VirtualSpaceNode* prev_vsl = virtual_space_list();
  1.1090 +  VirtualSpaceNode* next_vsl = prev_vsl;
  1.1091 +  while (next_vsl != NULL) {
  1.1092 +    VirtualSpaceNode* vsl = next_vsl;
  1.1093 +    next_vsl = vsl->next();
  1.1094 +    // Don't free the current virtual space since it will likely
  1.1095 +    // be needed soon.
  1.1096 +    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
  1.1097 +      // Unlink it from the list
  1.1098 +      if (prev_vsl == vsl) {
  1.1099 +        // This is the case of the current node being the first node.
  1.1100 +        assert(vsl == virtual_space_list(), "Expected to be the first node");
  1.1101 +        set_virtual_space_list(vsl->next());
  1.1102 +      } else {
  1.1103 +        prev_vsl->set_next(vsl->next());
  1.1104 +      }
  1.1105 +
  1.1106 +      vsl->purge(chunk_manager);
  1.1107 +      dec_reserved_words(vsl->reserved_words());
  1.1108 +      dec_committed_words(vsl->committed_words());
  1.1109 +      dec_virtual_space_count();
  1.1110 +      purged_vsl = vsl;
  1.1111 +      delete vsl;
  1.1112 +    } else {
  1.1113 +      prev_vsl = vsl;
  1.1114 +    }
  1.1115 +  }
  1.1116 +#ifdef ASSERT
  1.1117 +  if (purged_vsl != NULL) {
  1.1118 +    // List should be stable enough to use an iterator here.
  1.1119 +    VirtualSpaceListIterator iter(virtual_space_list());
  1.1120 +    while (iter.repeat()) {
  1.1121 +      VirtualSpaceNode* vsl = iter.get_next();
  1.1122 +      assert(vsl != purged_vsl, "Purge of vsl failed");
  1.1123 +    }
  1.1124 +  }
  1.1125 +#endif
  1.1126 +}
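
purge() is a classic unlink-while-walking pass over a singly linked list, carrying a trailing prev pointer and special-casing removal of the head. A generic sketch of the same shape; unlike the prev_vsl == vsl test above, this version uses a NULL prev sentinel for the head case:

#include <cstddef>

struct Node { Node* next; bool empty; };

// Generic unlink-while-walking (mirrors the prev_vsl/next_vsl dance above).
static Node* example_purge(Node* head) {
  Node* prev = NULL;
  Node* cur  = head;
  while (cur != NULL) {
    Node* next = cur->next;   // read before cur may be deleted
    if (cur->empty) {
      if (prev == NULL) head = next;   // removing the current head
      else              prev->next = next;
      delete cur;                      // prev is left unchanged
    } else {
      prev = cur;
    }
    cur = next;
  }
  return head;
}
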
  1.1127 +
  1.1128 +
  1.1129 +// This function looks at the mmap regions in the metaspace without locking.
   1.1130 +// The chunks are added with store ordering and not deleted except at
  1.1131 +// unloading time during a safepoint.
  1.1132 +bool VirtualSpaceList::contains(const void* ptr) {
  1.1133 +  // List should be stable enough to use an iterator here because removing virtual
  1.1134 +  // space nodes is only allowed at a safepoint.
  1.1135 +  VirtualSpaceListIterator iter(virtual_space_list());
  1.1136 +  while (iter.repeat()) {
  1.1137 +    VirtualSpaceNode* vsn = iter.get_next();
  1.1138 +    if (vsn->contains(ptr)) {
  1.1139 +      return true;
  1.1140 +    }
  1.1141 +  }
  1.1142 +  return false;
  1.1143 +}
  1.1144 +
  1.1145 +void VirtualSpaceList::retire_current_virtual_space() {
  1.1146 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1147 +
  1.1148 +  VirtualSpaceNode* vsn = current_virtual_space();
  1.1149 +
  1.1150 +  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
  1.1151 +                                  Metaspace::chunk_manager_metadata();
  1.1152 +
  1.1153 +  vsn->retire(cm);
  1.1154 +}
  1.1155 +
  1.1156 +void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  1.1157 +  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
  1.1158 +    ChunkIndex index = (ChunkIndex)i;
  1.1159 +    size_t chunk_size = chunk_manager->free_chunks(index)->size();
  1.1160 +
  1.1161 +    while (free_words_in_vs() >= chunk_size) {
  1.1162 +      DEBUG_ONLY(verify_container_count();)
  1.1163 +      Metachunk* chunk = get_chunk_vs(chunk_size);
  1.1164 +      assert(chunk != NULL, "allocation should have been successful");
  1.1165 +
  1.1166 +      chunk_manager->return_chunks(index, chunk);
  1.1167 +      chunk_manager->inc_free_chunks_total(chunk_size);
  1.1168 +      DEBUG_ONLY(verify_container_count();)
  1.1169 +    }
  1.1170 +  }
  1.1171 +  assert(free_words_in_vs() == 0, "should be empty now");
  1.1172 +}
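
retire() carves the leftover committed space into free chunks, largest tier first; because each non-class tier size (8192, 512, 128 words) is a multiple of the smallest, the leftover is always consumed exactly. A standalone trace of that carving under those assumed sizes:

#include <cassert>
#include <cstddef>

int main() {
  size_t free_words = 10240;                 // leftover committed words (example)
  const size_t tiers[3] = { 8192, 512, 128 };
  size_t carved[3] = { 0, 0, 0 };
  for (int i = 0; i < 3; i++) {
    while (free_words >= tiers[i]) {         // cf. free_words_in_vs() >= chunk_size
      free_words -= tiers[i];                // cf. get_chunk_vs(chunk_size)
      carved[i]++;                           // chunk handed to the ChunkManager
    }
  }
  assert(carved[0] == 1 && carved[1] == 4 && carved[2] == 0);
  assert(free_words == 0);                   // matches the assert in retire()
  return 0;
}
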
  1.1173 +
  1.1174 +VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  1.1175 +                                   _is_class(false),
  1.1176 +                                   _virtual_space_list(NULL),
  1.1177 +                                   _current_virtual_space(NULL),
  1.1178 +                                   _reserved_words(0),
  1.1179 +                                   _committed_words(0),
  1.1180 +                                   _virtual_space_count(0) {
  1.1181 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1182 +                   Mutex::_no_safepoint_check_flag);
  1.1183 +  create_new_virtual_space(word_size);
  1.1184 +}
  1.1185 +
  1.1186 +VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  1.1187 +                                   _is_class(true),
  1.1188 +                                   _virtual_space_list(NULL),
  1.1189 +                                   _current_virtual_space(NULL),
  1.1190 +                                   _reserved_words(0),
  1.1191 +                                   _committed_words(0),
  1.1192 +                                   _virtual_space_count(0) {
  1.1193 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1194 +                   Mutex::_no_safepoint_check_flag);
  1.1195 +  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  1.1196 +  bool succeeded = class_entry->initialize();
  1.1197 +  if (succeeded) {
  1.1198 +    link_vs(class_entry);
  1.1199 +  }
  1.1200 +}
  1.1201 +
  1.1202 +size_t VirtualSpaceList::free_bytes() {
  1.1203 +  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
  1.1204 +}
  1.1205 +
  1.1206 +// Allocate another meta virtual space and add it to the list.
  1.1207 +bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  1.1208 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1209 +
  1.1210 +  if (is_class()) {
  1.1211 +    assert(false, "We currently don't support more than one VirtualSpace for"
  1.1212 +                  " the compressed class space. The initialization of the"
  1.1213 +                  " CCS uses another code path and should not hit this path.");
  1.1214 +    return false;
  1.1215 +  }
  1.1216 +
  1.1217 +  if (vs_word_size == 0) {
  1.1218 +    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
  1.1219 +    return false;
  1.1220 +  }
  1.1221 +
  1.1222 +  // Reserve the space
  1.1223 +  size_t vs_byte_size = vs_word_size * BytesPerWord;
  1.1224 +  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
  1.1225 +
  1.1226 +  // Allocate the meta virtual space and initialize it.
  1.1227 +  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  1.1228 +  if (!new_entry->initialize()) {
  1.1229 +    delete new_entry;
  1.1230 +    return false;
  1.1231 +  } else {
  1.1232 +    assert(new_entry->reserved_words() == vs_word_size,
  1.1233 +        "Reserved memory size differs from requested memory size");
  1.1234 +    // ensure lock-free iteration sees fully initialized node
  1.1235 +    OrderAccess::storestore();
  1.1236 +    link_vs(new_entry);
  1.1237 +    return true;
  1.1238 +  }
  1.1239 +}
  1.1240 +
  1.1241 +void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  1.1242 +  if (virtual_space_list() == NULL) {
   1.1243 +    set_virtual_space_list(new_entry);
  1.1244 +  } else {
  1.1245 +    current_virtual_space()->set_next(new_entry);
  1.1246 +  }
  1.1247 +  set_current_virtual_space(new_entry);
  1.1248 +  inc_reserved_words(new_entry->reserved_words());
  1.1249 +  inc_committed_words(new_entry->committed_words());
  1.1250 +  inc_virtual_space_count();
  1.1251 +#ifdef ASSERT
  1.1252 +  new_entry->mangle();
  1.1253 +#endif
  1.1254 +  if (TraceMetavirtualspaceAllocation && Verbose) {
  1.1255 +    VirtualSpaceNode* vsl = current_virtual_space();
  1.1256 +    vsl->print_on(gclog_or_tty);
  1.1257 +  }
  1.1258 +}
  1.1259 +
  1.1260 +bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
  1.1261 +                                      size_t min_words,
  1.1262 +                                      size_t preferred_words) {
  1.1263 +  size_t before = node->committed_words();
  1.1264 +
  1.1265 +  bool result = node->expand_by(min_words, preferred_words);
  1.1266 +
  1.1267 +  size_t after = node->committed_words();
  1.1268 +
  1.1269 +  // after and before can be the same if the memory was pre-committed.
  1.1270 +  assert(after >= before, "Inconsistency");
  1.1271 +  inc_committed_words(after - before);
  1.1272 +
  1.1273 +  return result;
  1.1274 +}
  1.1275 +
  1.1276 +bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  1.1277 +  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  1.1278 +  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  1.1279 +  assert(min_words <= preferred_words, "Invalid arguments");
  1.1280 +
  1.1281 +  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
  1.1282 +    return  false;
  1.1283 +  }
  1.1284 +
  1.1285 +  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  1.1286 +  if (allowed_expansion_words < min_words) {
  1.1287 +    return false;
  1.1288 +  }
  1.1289 +
  1.1290 +  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
  1.1291 +
   1.1292 +  // Commit more memory from the current virtual space.
  1.1293 +  bool vs_expanded = expand_node_by(current_virtual_space(),
  1.1294 +                                    min_words,
  1.1295 +                                    max_expansion_words);
  1.1296 +  if (vs_expanded) {
  1.1297 +    return true;
  1.1298 +  }
  1.1299 +  retire_current_virtual_space();
  1.1300 +
  1.1301 +  // Get another virtual space.
  1.1302 +  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  1.1303 +  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
  1.1304 +
  1.1305 +  if (create_new_virtual_space(grow_vs_words)) {
  1.1306 +    if (current_virtual_space()->is_pre_committed()) {
  1.1307 +      // The memory was pre-committed, so we are done here.
  1.1308 +      assert(min_words <= current_virtual_space()->committed_words(),
  1.1309 +          "The new VirtualSpace was pre-committed, so it"
  1.1310 +          "should be large enough to fit the alloc request.");
  1.1311 +      return true;
  1.1312 +    }
  1.1313 +
  1.1314 +    return expand_node_by(current_virtual_space(),
  1.1315 +                          min_words,
  1.1316 +                          max_expansion_words);
  1.1317 +  }
  1.1318 +
  1.1319 +  return false;
  1.1320 +}
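   +
   +// Worked example for expand_by() (illustrative values, in bytes for
   +// readability; not part of the original change): with min = 64K,
   +// preferred = 2M and MetaspaceGC::allowed_expansion() reporting 1M,
   +// the commit is clamped to MIN2(2M, 1M) = 1M.  The current node is then
   +// grown by at least 64K and at most 1M.  Only if it cannot commit even
   +// the 64K minimum is it retired and a fresh VirtualSpaceNode reserved,
   +// sized at MAX2(VirtualSpaceSize, preferred) rounded up to the reserve
   +// alignment.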
  1.1321 +
  1.1322 +Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  1.1323 +                                           size_t grow_chunks_by_words,
  1.1324 +                                           size_t medium_chunk_bunch) {
  1.1325 +
  1.1326 +  // Allocate a chunk out of the current virtual space.
  1.1327 +  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1.1328 +
  1.1329 +  if (next != NULL) {
  1.1330 +    return next;
  1.1331 +  }
  1.1332 +
  1.1333 +  // The expand amount is currently only determined by the requested sizes
  1.1334 +  // and not how much committed memory is left in the current virtual space.
  1.1335 +
  1.1336 +  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  1.1337 +  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  1.1338 +  if (min_word_size >= preferred_word_size) {
  1.1339 +    // Can happen when humongous chunks are allocated.
  1.1340 +    preferred_word_size = min_word_size;
  1.1341 +  }
  1.1342 +
  1.1343 +  bool expanded = expand_by(min_word_size, preferred_word_size);
  1.1344 +  if (expanded) {
  1.1345 +    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1.1346 +    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  1.1347 +  }
  1.1348 +
   1.1349 +  return next;
  1.1350 +}
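   +
   +// Sizing sketch for get_new_chunk() (assumed, illustrative behavior): for
   +// a medium data chunk request, grow_chunks_by_words is rounded up to the
   +// commit alignment to form min_word_size, while medium_chunk_bunch
   +// supplies the larger preferred_word_size so several future chunks are
   +// committed in one step.  For a humongous request, min_word_size can
   +// exceed the bunch size, in which case preferred_word_size is raised to
   +// match and only the amount actually needed is committed.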
  1.1351 +
  1.1352 +void VirtualSpaceList::print_on(outputStream* st) const {
  1.1353 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.1354 +    VirtualSpaceListIterator iter(virtual_space_list());
  1.1355 +    while (iter.repeat()) {
  1.1356 +      VirtualSpaceNode* node = iter.get_next();
  1.1357 +      node->print_on(st);
  1.1358 +    }
  1.1359 +  }
  1.1360 +}
  1.1361 +
  1.1362 +// MetaspaceGC methods
  1.1363 +
   1.1364 +// VM_CollectForMetadataAllocation is the VM operation used to GC.
   1.1365 +// Within the VM operation, after the GC, the attempt to allocate the metadata
   1.1366 +// should succeed.  If the GC did not free enough space for the metaspace
   1.1367 +// allocation, the HWM is increased so that another virtual space will be
   1.1368 +// allocated for the metadata.  With the perm gen, the increase in the perm
   1.1369 +// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
   1.1370 +// metaspace policy uses those as the small and large steps for the HWM.
  1.1371 +//
  1.1372 +// After the GC the compute_new_size() for MetaspaceGC is called to
  1.1373 +// resize the capacity of the metaspaces.  The current implementation
  1.1374 +// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
  1.1375 +// to resize the Java heap by some GC's.  New flags can be implemented
  1.1376 +// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
  1.1377 +// free space is desirable in the metaspace capacity to decide how much
  1.1378 +// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
  1.1379 +// free space is desirable in the metaspace capacity before decreasing
  1.1380 +// the HWM.
  1.1381 +
  1.1382 +// Calculate the amount to increase the high water mark (HWM).
  1.1383 +// Increase by a minimum amount (MinMetaspaceExpansion) so that
  1.1384 +// another expansion is not requested too soon.  If that is not
  1.1385 +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
  1.1386 +// If that is still not enough, expand by the size of the allocation
  1.1387 +// plus some.
  1.1388 +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  1.1389 +  size_t min_delta = MinMetaspaceExpansion;
  1.1390 +  size_t max_delta = MaxMetaspaceExpansion;
  1.1391 +  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
  1.1392 +
  1.1393 +  if (delta <= min_delta) {
  1.1394 +    delta = min_delta;
  1.1395 +  } else if (delta <= max_delta) {
  1.1396 +    // Don't want to hit the high water mark on the next
  1.1397 +    // allocation so make the delta greater than just enough
  1.1398 +    // for this allocation.
  1.1399 +    delta = max_delta;
  1.1400 +  } else {
  1.1401 +    // This allocation is large but the next ones are probably not
  1.1402 +    // so increase by the minimum.
  1.1403 +    delta = delta + min_delta;
  1.1404 +  }
  1.1405 +
  1.1406 +  assert_is_size_aligned(delta, Metaspace::commit_alignment());
  1.1407 +
  1.1408 +  return delta;
  1.1409 +}
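   +
   +// Worked example (illustrative only; assumes commit_alignment() = 64K,
   +// MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M, typical
   +// defaults):
   +//   bytes = 10K  -> align up to 64K; 64K <= 256K, so delta = 256K
   +//   bytes = 1M   -> 1M is between 256K and 4M,   so delta = 4M
   +//   bytes = 8M   -> 8M exceeds 4M,               so delta = 8M + 256K
   +// i.e. small requests step by the minimum, mid-sized requests by the
   +// maximum, and oversized requests by their own size plus some slack.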
  1.1410 +
  1.1411 +size_t MetaspaceGC::capacity_until_GC() {
  1.1412 +  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
   1.1413 +  assert(value >= MetaspaceSize, "Not initialized properly?");
  1.1414 +  return value;
  1.1415 +}
  1.1416 +
  1.1417 +size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  1.1418 +  assert_is_size_aligned(v, Metaspace::commit_alignment());
  1.1419 +
  1.1420 +  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
  1.1421 +}
  1.1422 +
  1.1423 +size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  1.1424 +  assert_is_size_aligned(v, Metaspace::commit_alignment());
  1.1425 +
  1.1426 +  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
  1.1427 +}
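   +
   +// Hypothetical caller sketch (illustrative only; the real call sites are
   +// defined elsewhere in this file): after a GC fails to free enough
   +// metaspace, the HWM is typically raised along these lines:
   +//
   +//   size_t delta = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
   +//   size_t new_hwm = MetaspaceGC::inc_capacity_until_GC(delta);
   +//
   +// Atomic::add_ptr() returns the post-update value, so concurrent
   +// expanders each observe a consistent new threshold.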
  1.1428 +
  1.1429 +void MetaspaceGC::initialize() {
   1.1430 +  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
   1.1431 +  // we can't do a GC during initialization.
  1.1432 +  _capacity_until_GC = MaxMetaspaceSize;
  1.1433 +}
  1.1434 +
  1.1435 +void MetaspaceGC::post_initialize() {
  1.1436 +  // Reset the high-water mark once the VM initialization is done.
  1.1437 +  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
  1.1438 +}
  1.1439 +
  1.1440 +bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  1.1441 +  // Check if the compressed class space is full.
  1.1442 +  if (is_class && Metaspace::using_class_space()) {
  1.1443 +    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
  1.1444 +    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
  1.1445 +      return false;
  1.1446 +    }
  1.1447 +  }
  1.1448 +
  1.1449 +  // Check if the user has imposed a limit on the metaspace memory.
  1.1450 +  size_t committed_bytes = MetaspaceAux::committed_bytes();
  1.1451 +  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
  1.1452 +    return false;
  1.1453 +  }
  1.1454 +
  1.1455 +  return true;
  1.1456 +}
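   +
   +// Worked example (illustrative values): with CompressedClassSpaceSize =
   +// 1G and class_committed = 1G - 4K, a class-space request of 1K words
   +// (8K bytes on a 64-bit VM) is rejected, since 1G - 4K + 8K > 1G.  The
   +// MaxMetaspaceSize check below applies the same arithmetic to the total
   +// committed metaspace.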
  1.1457 +
  1.1458 +size_t MetaspaceGC::allowed_expansion() {
  1.1459 +  size_t committed_bytes = MetaspaceAux::committed_bytes();
  1.1460 +  size_t capacity_until_gc = capacity_until_GC();
  1.1461 +
  1.1462 +  assert(capacity_until_gc >= committed_bytes,
  1.1463 +        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
  1.1464 +                capacity_until_gc, committed_bytes));
  1.1465 +
  1.1466 +  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  1.1467 +  size_t left_until_GC = capacity_until_gc - committed_bytes;
  1.1468 +  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  1.1469 +
  1.1470 +  return left_to_commit / BytesPerWord;
  1.1471 +}
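   +
   +// Worked example (illustrative values): committed_bytes = 30M,
   +// capacity_until_GC() = 40M and MaxMetaspaceSize = 50M give
   +//   left_until_GC  = 40M - 30M = 10M
   +//   left_until_max = 50M - 30M = 20M
   +// so allowed_expansion() returns MIN2(10M, 20M) / BytesPerWord, i.e.
   +// 10M worth of words.  Committing beyond that requires a GC to raise
   +// the HWM first.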
  1.1472 +
  1.1473 +void MetaspaceGC::compute_new_size() {
  1.1474 +  assert(_shrink_factor <= 100, "invalid shrink factor");
  1.1475 +  uint current_shrink_factor = _shrink_factor;
  1.1476 +  _shrink_factor = 0;
  1.1477 +
  1.1478 +  // Using committed_bytes() for used_after_gc is an overestimation, since the
  1.1479 +  // chunk free lists are included in committed_bytes() and the memory in an
  1.1480 +  // un-fragmented chunk free list is available for future allocations.
   1.1481 +  // However, if the chunk free lists become fragmented, then the memory may
  1.1482 +  // not be available for future allocations and the memory is therefore "in use".
  1.1483 +  // Including the chunk free lists in the definition of "in use" is therefore
  1.1484 +  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  1.1485 +  // shrink below committed_bytes() and this has caused serious bugs in the past.
  1.1486 +  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  1.1487 +  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
  1.1488 +
  1.1489 +  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  1.1490 +  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1.1491 +
  1.1492 +  const double min_tmp = used_after_gc / maximum_used_percentage;
  1.1493 +  size_t minimum_desired_capacity =
  1.1494 +    (size_t)MIN2(min_tmp, double(max_uintx));
   1.1495 +  // Don't shrink below the initial metaspace size (MetaspaceSize)
  1.1496 +  minimum_desired_capacity = MAX2(minimum_desired_capacity,
  1.1497 +                                  MetaspaceSize);
  1.1498 +
  1.1499 +  if (PrintGCDetails && Verbose) {
  1.1500 +    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
  1.1501 +    gclog_or_tty->print_cr("  "
  1.1502 +                  "  minimum_free_percentage: %6.2f"
  1.1503 +                  "  maximum_used_percentage: %6.2f",
  1.1504 +                  minimum_free_percentage,
  1.1505 +                  maximum_used_percentage);
  1.1506 +    gclog_or_tty->print_cr("  "
  1.1507 +                  "   used_after_gc       : %6.1fKB",
  1.1508 +                  used_after_gc / (double) K);
  1.1509 +  }
  1.1510 +
  1.1511 +
  1.1512 +  size_t shrink_bytes = 0;
  1.1513 +  if (capacity_until_GC < minimum_desired_capacity) {
   1.1514 +    // If the current HWM (capacity_until_GC) is below the minimum
   1.1515 +    // desired capacity, then increment the HWM.
  1.1516 +    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
  1.1517 +    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
  1.1518 +    // Don't expand unless it's significant
  1.1519 +    if (expand_bytes >= MinMetaspaceExpansion) {
  1.1520 +      size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
  1.1521 +      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
  1.1522 +                                               new_capacity_until_GC,
  1.1523 +                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
  1.1524 +      if (PrintGCDetails && Verbose) {
  1.1525 +        gclog_or_tty->print_cr("    expanding:"
  1.1526 +                      "  minimum_desired_capacity: %6.1fKB"
  1.1527 +                      "  expand_bytes: %6.1fKB"
  1.1528 +                      "  MinMetaspaceExpansion: %6.1fKB"
  1.1529 +                      "  new metaspace HWM:  %6.1fKB",
  1.1530 +                      minimum_desired_capacity / (double) K,
  1.1531 +                      expand_bytes / (double) K,
  1.1532 +                      MinMetaspaceExpansion / (double) K,
  1.1533 +                      new_capacity_until_GC / (double) K);
  1.1534 +      }
  1.1535 +    }
  1.1536 +    return;
  1.1537 +  }
  1.1538 +
  1.1539 +  // No expansion, now see if we want to shrink
  1.1540 +  // We would never want to shrink more than this
  1.1541 +  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  1.1542 +  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
  1.1543 +    max_shrink_bytes));
  1.1544 +
  1.1545 +  // Should shrinking be considered?
  1.1546 +  if (MaxMetaspaceFreeRatio < 100) {
  1.1547 +    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
  1.1548 +    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1.1549 +    const double max_tmp = used_after_gc / minimum_used_percentage;
  1.1550 +    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
  1.1551 +    maximum_desired_capacity = MAX2(maximum_desired_capacity,
  1.1552 +                                    MetaspaceSize);
  1.1553 +    if (PrintGCDetails && Verbose) {
  1.1554 +      gclog_or_tty->print_cr("  "
  1.1555 +                             "  maximum_free_percentage: %6.2f"
  1.1556 +                             "  minimum_used_percentage: %6.2f",
  1.1557 +                             maximum_free_percentage,
  1.1558 +                             minimum_used_percentage);
  1.1559 +      gclog_or_tty->print_cr("  "
  1.1560 +                             "  minimum_desired_capacity: %6.1fKB"
  1.1561 +                             "  maximum_desired_capacity: %6.1fKB",
  1.1562 +                             minimum_desired_capacity / (double) K,
  1.1563 +                             maximum_desired_capacity / (double) K);
  1.1564 +    }
  1.1565 +
  1.1566 +    assert(minimum_desired_capacity <= maximum_desired_capacity,
  1.1567 +           "sanity check");
  1.1568 +
  1.1569 +    if (capacity_until_GC > maximum_desired_capacity) {
  1.1570 +      // Capacity too large, compute shrinking size
  1.1571 +      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
   1.1572 +      // We don't want to shrink all the way back to initSize if people call
   1.1573 +      // System.gc(), because some programs do that between "phases" and then
   1.1574 +      // we'd just have to grow the metaspace up again for the next phase.  So we
  1.1575 +      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
  1.1576 +      // on the third call, and 100% by the fourth call.  But if we recompute
  1.1577 +      // size without shrinking, it goes back to 0%.
  1.1578 +      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
  1.1579 +
  1.1580 +      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
  1.1581 +
  1.1582 +      assert(shrink_bytes <= max_shrink_bytes,
  1.1583 +        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  1.1584 +          shrink_bytes, max_shrink_bytes));
  1.1585 +      if (current_shrink_factor == 0) {
  1.1586 +        _shrink_factor = 10;
  1.1587 +      } else {
  1.1588 +        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
  1.1589 +      }
  1.1590 +      if (PrintGCDetails && Verbose) {
  1.1591 +        gclog_or_tty->print_cr("  "
  1.1592 +                      "  shrinking:"
  1.1593 +                      "  initSize: %.1fK"
  1.1594 +                      "  maximum_desired_capacity: %.1fK",
  1.1595 +                      MetaspaceSize / (double) K,
  1.1596 +                      maximum_desired_capacity / (double) K);
  1.1597 +        gclog_or_tty->print_cr("  "
  1.1598 +                      "  shrink_bytes: %.1fK"
  1.1599 +                      "  current_shrink_factor: %d"
  1.1600 +                      "  new shrink factor: %d"
  1.1601 +                      "  MinMetaspaceExpansion: %.1fK",
  1.1602 +                      shrink_bytes / (double) K,
  1.1603 +                      current_shrink_factor,
  1.1604 +                      _shrink_factor,
  1.1605 +                      MinMetaspaceExpansion / (double) K);
  1.1606 +      }
  1.1607 +    }
  1.1608 +  }
  1.1609 +
  1.1610 +  // Don't shrink unless it's significant
  1.1611 +  if (shrink_bytes >= MinMetaspaceExpansion &&
  1.1612 +      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
  1.1613 +    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
  1.1614 +    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
  1.1615 +                                             new_capacity_until_GC,
  1.1616 +                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  1.1617 +  }
  1.1618 +}
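   +
   +// Illustrative shrink sequence (assumed values): suppose repeated
   +// System.gc() calls each find capacity_until_GC 100M above
   +// maximum_desired_capacity.  Starting from _shrink_factor = 0, the
   +// successive calls shrink by 0M, 10M, 40M and then 100M (each amount
   +// aligned down to the commit alignment and applied only if it clears
   +// MinMetaspaceExpansion), while the factor steps 0 -> 10 -> 40 -> 100.
   +// Any invocation that does not shrink resets the factor to 0.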
  1.1619 +
  1.1620 +// Metadebug methods
  1.1621 +
  1.1622 +void Metadebug::init_allocation_fail_alot_count() {
  1.1623 +  if (MetadataAllocationFailALot) {
  1.1624 +    _allocation_fail_alot_count =
  1.1625 +      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  1.1626 +  }
  1.1627 +}
  1.1628 +
  1.1629 +#ifdef ASSERT
  1.1630 +bool Metadebug::test_metadata_failure() {
  1.1631 +  if (MetadataAllocationFailALot &&
  1.1632 +      Threads::is_vm_complete()) {
  1.1633 +    if (_allocation_fail_alot_count > 0) {
  1.1634 +      _allocation_fail_alot_count--;
  1.1635 +    } else {
  1.1636 +      if (TraceMetadataChunkAllocation && Verbose) {
  1.1637 +        gclog_or_tty->print_cr("Metadata allocation failing for "
  1.1638 +                               "MetadataAllocationFailALot");
  1.1639 +      }
  1.1640 +      init_allocation_fail_alot_count();
  1.1641 +      return true;
  1.1642 +    }
  1.1643 +  }
  1.1644 +  return false;
  1.1645 +}
  1.1646 +#endif
  1.1647 +
  1.1648 +// ChunkManager methods
  1.1649 +
  1.1650 +size_t ChunkManager::free_chunks_total_words() {
  1.1651 +  return _free_chunks_total;
  1.1652 +}
  1.1653 +
  1.1654 +size_t ChunkManager::free_chunks_total_bytes() {
  1.1655 +  return free_chunks_total_words() * BytesPerWord;
  1.1656 +}
  1.1657 +
  1.1658 +size_t ChunkManager::free_chunks_count() {
  1.1659 +#ifdef ASSERT
  1.1660 +  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
  1.1661 +    MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1662 +                     Mutex::_no_safepoint_check_flag);
  1.1663 +    // This lock is only needed in debug because the verification
   1.1664 +    // of the _free_chunks_total walks the list of free chunks
  1.1665 +    slow_locked_verify_free_chunks_count();
  1.1666 +  }
  1.1667 +#endif
  1.1668 +  return _free_chunks_count;
  1.1669 +}
  1.1670 +
  1.1671 +void ChunkManager::locked_verify_free_chunks_total() {
  1.1672 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1673 +  assert(sum_free_chunks() == _free_chunks_total,
  1.1674 +    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
  1.1675 +           " same as sum " SIZE_FORMAT, _free_chunks_total,
  1.1676 +           sum_free_chunks()));
  1.1677 +}
  1.1678 +
  1.1679 +void ChunkManager::verify_free_chunks_total() {
  1.1680 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1681 +                     Mutex::_no_safepoint_check_flag);
  1.1682 +  locked_verify_free_chunks_total();
  1.1683 +}
  1.1684 +
  1.1685 +void ChunkManager::locked_verify_free_chunks_count() {
  1.1686 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1687 +  assert(sum_free_chunks_count() == _free_chunks_count,
  1.1688 +    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
  1.1689 +           " same as sum " SIZE_FORMAT, _free_chunks_count,
  1.1690 +           sum_free_chunks_count()));
  1.1691 +}
  1.1692 +
  1.1693 +void ChunkManager::verify_free_chunks_count() {
  1.1694 +#ifdef ASSERT
  1.1695 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1696 +                     Mutex::_no_safepoint_check_flag);
  1.1697 +  locked_verify_free_chunks_count();
  1.1698 +#endif
  1.1699 +}
  1.1700 +
  1.1701 +void ChunkManager::verify() {
  1.1702 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1703 +                     Mutex::_no_safepoint_check_flag);
  1.1704 +  locked_verify();
  1.1705 +}
  1.1706 +
  1.1707 +void ChunkManager::locked_verify() {
  1.1708 +  locked_verify_free_chunks_count();
  1.1709 +  locked_verify_free_chunks_total();
  1.1710 +}
  1.1711 +
  1.1712 +void ChunkManager::locked_print_free_chunks(outputStream* st) {
  1.1713 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1714 +  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1.1715 +                _free_chunks_total, _free_chunks_count);
  1.1716 +}
  1.1717 +
  1.1718 +void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  1.1719 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1720 +  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1.1721 +                sum_free_chunks(), sum_free_chunks_count());
  1.1722 +}
  1.1723 +ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  1.1724 +  return &_free_chunks[index];
  1.1725 +}
  1.1726 +
   1.1727 +// These methods, which sum the free chunk lists, are used by printing
   1.1728 +// methods that run in product builds.
  1.1729 +size_t ChunkManager::sum_free_chunks() {
  1.1730 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1731 +  size_t result = 0;
  1.1732 +  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.1733 +    ChunkList* list = free_chunks(i);
  1.1734 +
  1.1735 +    if (list == NULL) {
  1.1736 +      continue;
  1.1737 +    }
  1.1738 +
  1.1739 +    result = result + list->count() * list->size();
  1.1740 +  }
  1.1741 +  result = result + humongous_dictionary()->total_size();
  1.1742 +  return result;
  1.1743 +}
  1.1744 +
  1.1745 +size_t ChunkManager::sum_free_chunks_count() {
  1.1746 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1747 +  size_t count = 0;
  1.1748 +  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.1749 +    ChunkList* list = free_chunks(i);
  1.1750 +    if (list == NULL) {
  1.1751 +      continue;
  1.1752 +    }
  1.1753 +    count = count + list->count();
  1.1754 +  }
  1.1755 +  count = count + humongous_dictionary()->total_free_blocks();
  1.1756 +  return count;
  1.1757 +}
  1.1758 +
  1.1759 +ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  1.1760 +  ChunkIndex index = list_index(word_size);
  1.1761 +  assert(index < HumongousIndex, "No humongous list");
  1.1762 +  return free_chunks(index);
  1.1763 +}
  1.1764 +
  1.1765 +Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  1.1766 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1767 +
  1.1768 +  slow_locked_verify();
  1.1769 +
  1.1770 +  Metachunk* chunk = NULL;
  1.1771 +  if (list_index(word_size) != HumongousIndex) {
  1.1772 +    ChunkList* free_list = find_free_chunks_list(word_size);
  1.1773 +    assert(free_list != NULL, "Sanity check");
  1.1774 +
  1.1775 +    chunk = free_list->head();
  1.1776 +
  1.1777 +    if (chunk == NULL) {
  1.1778 +      return NULL;
  1.1779 +    }
  1.1780 +
  1.1781 +    // Remove the chunk as the head of the list.
  1.1782 +    free_list->remove_chunk(chunk);
  1.1783 +
  1.1784 +    if (TraceMetadataChunkAllocation && Verbose) {
  1.1785 +      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  1.1786 +                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  1.1787 +                             free_list, chunk, chunk->word_size());
  1.1788 +    }
  1.1789 +  } else {
  1.1790 +    chunk = humongous_dictionary()->get_chunk(
  1.1791 +      word_size,
  1.1792 +      FreeBlockDictionary<Metachunk>::atLeast);
  1.1793 +
  1.1794 +    if (chunk == NULL) {
  1.1795 +      return NULL;
  1.1796 +    }
  1.1797 +
  1.1798 +    if (TraceMetadataHumongousAllocation) {
  1.1799 +      size_t waste = chunk->word_size() - word_size;
  1.1800 +      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
  1.1801 +                             SIZE_FORMAT " for requested size " SIZE_FORMAT
  1.1802 +                             " waste " SIZE_FORMAT,
  1.1803 +                             chunk->word_size(), word_size, waste);
  1.1804 +    }
  1.1805 +  }
  1.1806 +
  1.1807 +  // Chunk is being removed from the chunks free list.
  1.1808 +  dec_free_chunks_total(chunk->word_size());
  1.1809 +
  1.1810 +  // Remove it from the links to this freelist
  1.1811 +  chunk->set_next(NULL);
  1.1812 +  chunk->set_prev(NULL);
  1.1813 +#ifdef ASSERT
   1.1814 +  // Chunk is no longer on any freelist. Setting to false makes
   1.1815 +  // container_count_slow() work.
  1.1816 +  chunk->set_is_tagged_free(false);
  1.1817 +#endif
  1.1818 +  chunk->container()->inc_container_count();
  1.1819 +
  1.1820 +  slow_locked_verify();
  1.1821 +  return chunk;
  1.1822 +}
  1.1823 +
  1.1824 +Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  1.1825 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1826 +  slow_locked_verify();
  1.1827 +
  1.1828 +  // Take from the beginning of the list
  1.1829 +  Metachunk* chunk = free_chunks_get(word_size);
  1.1830 +  if (chunk == NULL) {
  1.1831 +    return NULL;
  1.1832 +  }
  1.1833 +
  1.1834 +  assert((word_size <= chunk->word_size()) ||
   1.1835 +         (list_index(chunk->word_size()) == HumongousIndex),
  1.1836 +         "Non-humongous variable sized chunk");
  1.1837 +  if (TraceMetadataChunkAllocation) {
  1.1838 +    size_t list_count;
  1.1839 +    if (list_index(word_size) < HumongousIndex) {
  1.1840 +      ChunkList* list = find_free_chunks_list(word_size);
  1.1841 +      list_count = list->count();
  1.1842 +    } else {
  1.1843 +      list_count = humongous_dictionary()->total_count();
  1.1844 +    }
  1.1845 +    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  1.1846 +                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  1.1847 +                        this, chunk, chunk->word_size(), list_count);
  1.1848 +    locked_print_free_chunks(gclog_or_tty);
  1.1849 +  }
  1.1850 +
  1.1851 +  return chunk;
  1.1852 +}
  1.1853 +
  1.1854 +void ChunkManager::print_on(outputStream* out) const {
  1.1855 +  if (PrintFLSStatistics != 0) {
  1.1856 +    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  1.1857 +  }
  1.1858 +}
  1.1859 +
  1.1860 +// SpaceManager methods
  1.1861 +
  1.1862 +void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
  1.1863 +                                           size_t* chunk_word_size,
  1.1864 +                                           size_t* class_chunk_word_size) {
  1.1865 +  switch (type) {
  1.1866 +  case Metaspace::BootMetaspaceType:
  1.1867 +    *chunk_word_size = Metaspace::first_chunk_word_size();
  1.1868 +    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
  1.1869 +    break;
  1.1870 +  case Metaspace::ROMetaspaceType:
  1.1871 +    *chunk_word_size = SharedReadOnlySize / wordSize;
  1.1872 +    *class_chunk_word_size = ClassSpecializedChunk;
  1.1873 +    break;
  1.1874 +  case Metaspace::ReadWriteMetaspaceType:
  1.1875 +    *chunk_word_size = SharedReadWriteSize / wordSize;
  1.1876 +    *class_chunk_word_size = ClassSpecializedChunk;
  1.1877 +    break;
  1.1878 +  case Metaspace::AnonymousMetaspaceType:
  1.1879 +  case Metaspace::ReflectionMetaspaceType:
  1.1880 +    *chunk_word_size = SpecializedChunk;
  1.1881 +    *class_chunk_word_size = ClassSpecializedChunk;
  1.1882 +    break;
  1.1883 +  default:
  1.1884 +    *chunk_word_size = SmallChunk;
  1.1885 +    *class_chunk_word_size = ClassSmallChunk;
  1.1886 +    break;
  1.1887 +  }
  1.1888 +  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
  1.1889 +    err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
  1.1890 +            " class " SIZE_FORMAT,
  1.1891 +            *chunk_word_size, *class_chunk_word_size));
  1.1892 +}
  1.1893 +
  1.1894 +size_t SpaceManager::sum_free_in_chunks_in_use() const {
  1.1895 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.1896 +  size_t free = 0;
  1.1897 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.1898 +    Metachunk* chunk = chunks_in_use(i);
  1.1899 +    while (chunk != NULL) {
  1.1900 +      free += chunk->free_word_size();
  1.1901 +      chunk = chunk->next();
  1.1902 +    }
  1.1903 +  }
  1.1904 +  return free;
  1.1905 +}
  1.1906 +
  1.1907 +size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  1.1908 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.1909 +  size_t result = 0;
  1.1910 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
   1.1911 +    result += sum_waste_in_chunks_in_use(i);
  1.1912 +  }
  1.1913 +
  1.1914 +  return result;
  1.1915 +}
  1.1916 +
  1.1917 +size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  1.1918 +  size_t result = 0;
  1.1919 +  Metachunk* chunk = chunks_in_use(index);
   1.1920 +  // Count the free space in all the chunks except the
   1.1921 +  // current chunk, from which allocations are still being done.
  1.1922 +  while (chunk != NULL) {
  1.1923 +    if (chunk != current_chunk()) {
  1.1924 +      result += chunk->free_word_size();
  1.1925 +    }
  1.1926 +    chunk = chunk->next();
  1.1927 +  }
  1.1928 +  return result;
  1.1929 +}
  1.1930 +
  1.1931 +size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
   1.1932 +  // For CMS use "allocated_chunks_words()", which does not need the
   1.1933 +  // Metaspace lock.  For the other collectors sum over the
   1.1934 +  // lists.  Using both methods provides a check that "allocated_chunks_words()"
   1.1935 +  // is correct.  That is, summing over the lists is too expensive
   1.1936 +  // to use in the product, so allocated_chunks_words() should be used,
   1.1937 +  // but this path allows checking that allocated_chunks_words() returns
   1.1938 +  // the same value as sum_capacity_in_chunks_in_use(), which is the
   1.1939 +  // definitive answer.
  1.1940 +  if (UseConcMarkSweepGC) {
  1.1941 +    return allocated_chunks_words();
  1.1942 +  } else {
  1.1943 +    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.1944 +    size_t sum = 0;
  1.1945 +    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.1946 +      Metachunk* chunk = chunks_in_use(i);
  1.1947 +      while (chunk != NULL) {
  1.1948 +        sum += chunk->word_size();
  1.1949 +        chunk = chunk->next();
  1.1950 +      }
  1.1951 +    }
   1.1952 +    return sum;
  1.1953 +  }
  1.1954 +}
  1.1955 +
  1.1956 +size_t SpaceManager::sum_count_in_chunks_in_use() {
  1.1957 +  size_t count = 0;
  1.1958 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.1959 +    count = count + sum_count_in_chunks_in_use(i);
  1.1960 +  }
  1.1961 +
  1.1962 +  return count;
  1.1963 +}
  1.1964 +
  1.1965 +size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  1.1966 +  size_t count = 0;
  1.1967 +  Metachunk* chunk = chunks_in_use(i);
  1.1968 +  while (chunk != NULL) {
  1.1969 +    count++;
  1.1970 +    chunk = chunk->next();
  1.1971 +  }
  1.1972 +  return count;
  1.1973 +}
  1.1974 +
  1.1975 +
  1.1976 +size_t SpaceManager::sum_used_in_chunks_in_use() const {
  1.1977 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.1978 +  size_t used = 0;
  1.1979 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.1980 +    Metachunk* chunk = chunks_in_use(i);
  1.1981 +    while (chunk != NULL) {
  1.1982 +      used += chunk->used_word_size();
  1.1983 +      chunk = chunk->next();
  1.1984 +    }
  1.1985 +  }
  1.1986 +  return used;
  1.1987 +}
  1.1988 +
  1.1989 +void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
  1.1990 +
  1.1991 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.1992 +    Metachunk* chunk = chunks_in_use(i);
  1.1993 +    st->print("SpaceManager: %s " PTR_FORMAT,
  1.1994 +                 chunk_size_name(i), chunk);
  1.1995 +    if (chunk != NULL) {
  1.1996 +      st->print_cr(" free " SIZE_FORMAT,
  1.1997 +                   chunk->free_word_size());
  1.1998 +    } else {
  1.1999 +      st->cr();
  1.2000 +    }
  1.2001 +  }
  1.2002 +
  1.2003 +  chunk_manager()->locked_print_free_chunks(st);
  1.2004 +  chunk_manager()->locked_print_sum_free_chunks(st);
  1.2005 +}
  1.2006 +
  1.2007 +size_t SpaceManager::calc_chunk_size(size_t word_size) {
  1.2008 +
  1.2009 +  // Decide between a small chunk and a medium chunk.  Up to
  1.2010 +  // _small_chunk_limit small chunks can be allocated but
  1.2011 +  // once a medium chunk has been allocated, no more small
  1.2012 +  // chunks will be allocated.
  1.2013 +  size_t chunk_word_size;
  1.2014 +  if (chunks_in_use(MediumIndex) == NULL &&
  1.2015 +      sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
  1.2016 +    chunk_word_size = (size_t) small_chunk_size();
  1.2017 +    if (word_size + Metachunk::overhead() > small_chunk_size()) {
  1.2018 +      chunk_word_size = medium_chunk_size();
  1.2019 +    }
  1.2020 +  } else {
  1.2021 +    chunk_word_size = medium_chunk_size();
  1.2022 +  }
  1.2023 +
  1.2024 +  // Might still need a humongous chunk.  Enforce
   1.2025 +  // humongous allocation sizes to be aligned up to
  1.2026 +  // the smallest chunk size.
  1.2027 +  size_t if_humongous_sized_chunk =
  1.2028 +    align_size_up(word_size + Metachunk::overhead(),
  1.2029 +                  smallest_chunk_size());
  1.2030 +  chunk_word_size =
  1.2031 +    MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
  1.2032 +
  1.2033 +  assert(!SpaceManager::is_humongous(word_size) ||
  1.2034 +         chunk_word_size == if_humongous_sized_chunk,
  1.2035 +         err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
  1.2036 +                 " chunk_word_size " SIZE_FORMAT,
  1.2037 +                 word_size, chunk_word_size));
  1.2038 +  if (TraceMetadataHumongousAllocation &&
  1.2039 +      SpaceManager::is_humongous(word_size)) {
  1.2040 +    gclog_or_tty->print_cr("Metadata humongous allocation:");
  1.2041 +    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
  1.2042 +    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
  1.2043 +                           chunk_word_size);
  1.2044 +    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
  1.2045 +                           Metachunk::overhead());
  1.2046 +  }
  1.2047 +  return chunk_word_size;
  1.2048 +}
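   +
   +// Worked example for calc_chunk_size() (illustrative; relies on the
   +// chunk sizes defined earlier in this file): while no medium chunk
   +// exists and fewer than _small_chunk_limit small chunks are in use, a
   +// request with word_size + Metachunk::overhead() <= small_chunk_size()
   +// gets a small chunk, anything larger a medium chunk.  A humongous
   +// request, say 100000 words, is instead rounded up to a multiple of
   +// smallest_chunk_size() and served by a single oversized chunk.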
  1.2049 +
  1.2050 +void SpaceManager::track_metaspace_memory_usage() {
  1.2051 +  if (is_init_completed()) {
  1.2052 +    if (is_class()) {
  1.2053 +      MemoryService::track_compressed_class_memory_usage();
  1.2054 +    }
  1.2055 +    MemoryService::track_metaspace_memory_usage();
  1.2056 +  }
  1.2057 +}
  1.2058 +
  1.2059 +MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  1.2060 +  assert(vs_list()->current_virtual_space() != NULL,
  1.2061 +         "Should have been set");
  1.2062 +  assert(current_chunk() == NULL ||
  1.2063 +         current_chunk()->allocate(word_size) == NULL,
  1.2064 +         "Don't need to expand");
  1.2065 +  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.2066 +
  1.2067 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2068 +    size_t words_left = 0;
  1.2069 +    size_t words_used = 0;
  1.2070 +    if (current_chunk() != NULL) {
  1.2071 +      words_left = current_chunk()->free_word_size();
  1.2072 +      words_used = current_chunk()->used_word_size();
  1.2073 +    }
  1.2074 +    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
  1.2075 +                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
  1.2076 +                           " words left",
  1.2077 +                            word_size, words_used, words_left);
  1.2078 +  }
  1.2079 +
  1.2080 +  // Get another chunk out of the virtual space
  1.2081 +  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  1.2082 +  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  1.2083 +
  1.2084 +  MetaWord* mem = NULL;
  1.2085 +
  1.2086 +  // If a chunk was available, add it to the in-use chunk list
  1.2087 +  // and do an allocation from it.
  1.2088 +  if (next != NULL) {
  1.2089 +    // Add to this manager's list of chunks in use.
  1.2090 +    add_chunk(next, false);
  1.2091 +    mem = next->allocate(word_size);
  1.2092 +  }
  1.2093 +
  1.2094 +  // Track metaspace memory usage statistic.
  1.2095 +  track_metaspace_memory_usage();
  1.2096 +
  1.2097 +  return mem;
  1.2098 +}
  1.2099 +
  1.2100 +void SpaceManager::print_on(outputStream* st) const {
  1.2101 +
  1.2102 +  for (ChunkIndex i = ZeroIndex;
  1.2103 +       i < NumberOfInUseLists ;
  1.2104 +       i = next_chunk_index(i) ) {
  1.2105 +    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
  1.2106 +                 chunks_in_use(i),
  1.2107 +                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  1.2108 +  }
  1.2109 +  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
  1.2110 +               " Humongous " SIZE_FORMAT,
  1.2111 +               sum_waste_in_chunks_in_use(SmallIndex),
  1.2112 +               sum_waste_in_chunks_in_use(MediumIndex),
  1.2113 +               sum_waste_in_chunks_in_use(HumongousIndex));
  1.2114 +  // block free lists
  1.2115 +  if (block_freelists() != NULL) {
  1.2116 +    st->print_cr("total in block free lists " SIZE_FORMAT,
  1.2117 +      block_freelists()->total_size());
  1.2118 +  }
  1.2119 +}
  1.2120 +
  1.2121 +SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  1.2122 +                           Mutex* lock) :
  1.2123 +  _mdtype(mdtype),
  1.2124 +  _allocated_blocks_words(0),
  1.2125 +  _allocated_chunks_words(0),
  1.2126 +  _allocated_chunks_count(0),
  1.2127 +  _lock(lock)
  1.2128 +{
  1.2129 +  initialize();
  1.2130 +}
  1.2131 +
  1.2132 +void SpaceManager::inc_size_metrics(size_t words) {
  1.2133 +  assert_lock_strong(SpaceManager::expand_lock());
   1.2134 +  // Total words in allocated Metachunks and count of allocated
   1.2135 +  // Metachunks for each SpaceManager
  1.2136 +  _allocated_chunks_words = _allocated_chunks_words + words;
  1.2137 +  _allocated_chunks_count++;
  1.2138 +  // Global total of capacity in allocated Metachunks
  1.2139 +  MetaspaceAux::inc_capacity(mdtype(), words);
  1.2140 +  // Global total of allocated Metablocks.
  1.2141 +  // used_words_slow() includes the overhead in each
  1.2142 +  // Metachunk so include it in the used when the
  1.2143 +  // Metachunk is first added (so only added once per
  1.2144 +  // Metachunk).
  1.2145 +  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
  1.2146 +}
  1.2147 +
  1.2148 +void SpaceManager::inc_used_metrics(size_t words) {
  1.2149 +  // Add to the per SpaceManager total
  1.2150 +  Atomic::add_ptr(words, &_allocated_blocks_words);
  1.2151 +  // Add to the global total
  1.2152 +  MetaspaceAux::inc_used(mdtype(), words);
  1.2153 +}
  1.2154 +
  1.2155 +void SpaceManager::dec_total_from_size_metrics() {
  1.2156 +  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  1.2157 +  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  1.2158 +  // Also deduct the overhead per Metachunk
  1.2159 +  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
  1.2160 +}
  1.2161 +
  1.2162 +void SpaceManager::initialize() {
  1.2163 +  Metadebug::init_allocation_fail_alot_count();
  1.2164 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.2165 +    _chunks_in_use[i] = NULL;
  1.2166 +  }
  1.2167 +  _current_chunk = NULL;
  1.2168 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2169 +    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  1.2170 +  }
  1.2171 +}
  1.2172 +
  1.2173 +void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
  1.2174 +  if (chunks == NULL) {
  1.2175 +    return;
  1.2176 +  }
  1.2177 +  ChunkList* list = free_chunks(index);
  1.2178 +  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
  1.2179 +  assert_lock_strong(SpaceManager::expand_lock());
  1.2180 +  Metachunk* cur = chunks;
  1.2181 +
  1.2182 +  // This returns chunks one at a time.  If a new
  1.2183 +  // class List can be created that is a base class
  1.2184 +  // of FreeList then something like FreeList::prepend()
  1.2185 +  // can be used in place of this loop
  1.2186 +  while (cur != NULL) {
  1.2187 +    assert(cur->container() != NULL, "Container should have been set");
  1.2188 +    cur->container()->dec_container_count();
  1.2189 +    // Capture the next link before it is changed
  1.2190 +    // by the call to return_chunk_at_head();
  1.2191 +    Metachunk* next = cur->next();
  1.2192 +    DEBUG_ONLY(cur->set_is_tagged_free(true);)
  1.2193 +    list->return_chunk_at_head(cur);
  1.2194 +    cur = next;
  1.2195 +  }
  1.2196 +}
  1.2197 +
  1.2198 +SpaceManager::~SpaceManager() {
   1.2199 +  // This call takes this->_lock, which can't be done while holding expand_lock()
  1.2200 +  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
  1.2201 +    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
  1.2202 +            " allocated_chunks_words() " SIZE_FORMAT,
  1.2203 +            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
  1.2204 +
  1.2205 +  MutexLockerEx fcl(SpaceManager::expand_lock(),
  1.2206 +                    Mutex::_no_safepoint_check_flag);
  1.2207 +
  1.2208 +  chunk_manager()->slow_locked_verify();
  1.2209 +
  1.2210 +  dec_total_from_size_metrics();
  1.2211 +
  1.2212 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2213 +    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
  1.2214 +    locked_print_chunks_in_use_on(gclog_or_tty);
  1.2215 +  }
  1.2216 +
   1.2217 +  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
   1.2218 +  // is still used during the freeing of VirtualSpaceNodes.
  1.2219 +
  1.2220 +  // Have to update before the chunks_in_use lists are emptied
  1.2221 +  // below.
  1.2222 +  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
  1.2223 +                                         sum_count_in_chunks_in_use());
  1.2224 +
  1.2225 +  // Add all the chunks in use by this space manager
  1.2226 +  // to the global list of free chunks.
  1.2227 +
  1.2228 +  // Follow each list of chunks-in-use and add them to the
  1.2229 +  // free lists.  Each list is NULL terminated.
  1.2230 +
  1.2231 +  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
  1.2232 +    if (TraceMetadataChunkAllocation && Verbose) {
  1.2233 +      gclog_or_tty->print_cr("returned %d %s chunks to freelist",
  1.2234 +                             sum_count_in_chunks_in_use(i),
  1.2235 +                             chunk_size_name(i));
  1.2236 +    }
  1.2237 +    Metachunk* chunks = chunks_in_use(i);
  1.2238 +    chunk_manager()->return_chunks(i, chunks);
  1.2239 +    set_chunks_in_use(i, NULL);
  1.2240 +    if (TraceMetadataChunkAllocation && Verbose) {
  1.2241 +      gclog_or_tty->print_cr("updated freelist count %d %s",
  1.2242 +                             chunk_manager()->free_chunks(i)->count(),
  1.2243 +                             chunk_size_name(i));
  1.2244 +    }
  1.2245 +    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  1.2246 +  }
  1.2247 +
  1.2248 +  // The medium chunk case may be optimized by passing the head and
  1.2249 +  // tail of the medium chunk list to add_at_head().  The tail is often
  1.2250 +  // the current chunk but there are probably exceptions.
  1.2251 +
  1.2252 +  // Humongous chunks
  1.2253 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2254 +    gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
  1.2255 +                            sum_count_in_chunks_in_use(HumongousIndex),
  1.2256 +                            chunk_size_name(HumongousIndex));
  1.2257 +    gclog_or_tty->print("Humongous chunk dictionary: ");
  1.2258 +  }
  1.2259 +  // Humongous chunks are never the current chunk.
  1.2260 +  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
  1.2261 +
  1.2262 +  while (humongous_chunks != NULL) {
  1.2263 +#ifdef ASSERT
  1.2264 +    humongous_chunks->set_is_tagged_free(true);
  1.2265 +#endif
  1.2266 +    if (TraceMetadataChunkAllocation && Verbose) {
  1.2267 +      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
  1.2268 +                          humongous_chunks,
  1.2269 +                          humongous_chunks->word_size());
  1.2270 +    }
  1.2271 +    assert(humongous_chunks->word_size() == (size_t)
  1.2272 +           align_size_up(humongous_chunks->word_size(),
  1.2273 +                             smallest_chunk_size()),
  1.2274 +           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
  1.2275 +                   " granularity %d",
  1.2276 +                   humongous_chunks->word_size(), smallest_chunk_size()));
  1.2277 +    Metachunk* next_humongous_chunks = humongous_chunks->next();
  1.2278 +    humongous_chunks->container()->dec_container_count();
  1.2279 +    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
  1.2280 +    humongous_chunks = next_humongous_chunks;
  1.2281 +  }
  1.2282 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2283 +    gclog_or_tty->cr();
  1.2284 +    gclog_or_tty->print_cr("updated dictionary count %d %s",
  1.2285 +                     chunk_manager()->humongous_dictionary()->total_count(),
  1.2286 +                     chunk_size_name(HumongousIndex));
  1.2287 +  }
  1.2288 +  chunk_manager()->slow_locked_verify();
  1.2289 +}
  1.2290 +
  1.2291 +const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  1.2292 +  switch (index) {
  1.2293 +    case SpecializedIndex:
  1.2294 +      return "Specialized";
  1.2295 +    case SmallIndex:
  1.2296 +      return "Small";
  1.2297 +    case MediumIndex:
  1.2298 +      return "Medium";
  1.2299 +    case HumongousIndex:
  1.2300 +      return "Humongous";
  1.2301 +    default:
  1.2302 +      return NULL;
  1.2303 +  }
  1.2304 +}
  1.2305 +
  1.2306 +ChunkIndex ChunkManager::list_index(size_t size) {
  1.2307 +  switch (size) {
  1.2308 +    case SpecializedChunk:
  1.2309 +      assert(SpecializedChunk == ClassSpecializedChunk,
  1.2310 +             "Need branch for ClassSpecializedChunk");
  1.2311 +      return SpecializedIndex;
  1.2312 +    case SmallChunk:
  1.2313 +    case ClassSmallChunk:
  1.2314 +      return SmallIndex;
  1.2315 +    case MediumChunk:
  1.2316 +    case ClassMediumChunk:
  1.2317 +      return MediumIndex;
  1.2318 +    default:
  1.2319 +      assert(size > MediumChunk || size > ClassMediumChunk,
  1.2320 +             "Not a humongous chunk");
  1.2321 +      return HumongousIndex;
  1.2322 +  }
  1.2323 +}
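   +
   +// Mapping sketch (uses the chunk size constants declared earlier in
   +// this file): SmallChunk and ClassSmallChunk both map to SmallIndex even
   +// though their word sizes differ, and likewise for the medium pair;
   +// only the specialized size is shared between the data and class
   +// spaces.  Any size matching no fixed chunk size falls through to
   +// HumongousIndex, which the default case merely sanity-checks.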
  1.2324 +
  1.2325 +void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  1.2326 +  assert_lock_strong(_lock);
  1.2327 +  size_t raw_word_size = get_raw_word_size(word_size);
  1.2328 +  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
  1.2329 +  assert(raw_word_size >= min_size,
  1.2330 +         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
  1.2331 +  block_freelists()->return_block(p, raw_word_size);
  1.2332 +}
  1.2333 +
  1.2334 +// Adds a chunk to the list of chunks in use.
  1.2335 +void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
  1.2336 +
  1.2337 +  assert(new_chunk != NULL, "Should not be NULL");
  1.2338 +  assert(new_chunk->next() == NULL, "Should not be on a list");
  1.2339 +
  1.2340 +  new_chunk->reset_empty();
  1.2341 +
   1.2342 +  // Find the correct list and set the current
  1.2343 +  // chunk for that list.
  1.2344 +  ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
  1.2345 +
  1.2346 +  if (index != HumongousIndex) {
  1.2347 +    retire_current_chunk();
  1.2348 +    set_current_chunk(new_chunk);
  1.2349 +    new_chunk->set_next(chunks_in_use(index));
  1.2350 +    set_chunks_in_use(index, new_chunk);
  1.2351 +  } else {
   1.2352 +    // For null class loader data and DumpSharedSpaces, the first chunk isn't
   1.2353 +    // small, so the small chunk list will be null.  Link this first chunk
   1.2354 +    // as the current chunk.
  1.2355 +    if (make_current) {
  1.2356 +      // Set as the current chunk but otherwise treat as a humongous chunk.
  1.2357 +      set_current_chunk(new_chunk);
  1.2358 +    }
   1.2359 +    // Link at head.  The _current_chunk only points to a humongous chunk
   1.2360 +    // for the null class loader metaspace (class and data virtual space
   1.2361 +    // managers); since new humongous chunks are linked at the head, it
   1.2362 +    // will not point to the tail of the humongous chunks list.
  1.2363 +    new_chunk->set_next(chunks_in_use(HumongousIndex));
  1.2364 +    set_chunks_in_use(HumongousIndex, new_chunk);
  1.2365 +
  1.2366 +    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  1.2367 +  }
  1.2368 +
  1.2369 +  // Add to the running sum of capacity
  1.2370 +  inc_size_metrics(new_chunk->word_size());
  1.2371 +
  1.2372 +  assert(new_chunk->is_empty(), "Not ready for reuse");
  1.2373 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2374 +    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  1.2375 +                        sum_count_in_chunks_in_use());
  1.2376 +    new_chunk->print_on(gclog_or_tty);
  1.2377 +    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  1.2378 +  }
  1.2379 +}
  1.2380 +
  1.2381 +void SpaceManager::retire_current_chunk() {
  1.2382 +  if (current_chunk() != NULL) {
  1.2383 +    size_t remaining_words = current_chunk()->free_word_size();
  1.2384 +    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
  1.2385 +      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
  1.2386 +      inc_used_metrics(remaining_words);
  1.2387 +    }
  1.2388 +  }
  1.2389 +}
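   +
   +// Retirement sketch (illustrative numbers): if the current chunk still
   +// has, say, 30 free words and TreeChunk<Metablock, FreeList<Metablock> >::
   +// min_size() is below that, the 30-word tail is carved off with
   +// allocate() and handed to the block freelists for reuse.  Remainders
   +// smaller than min_size() are simply abandoned as dark matter, being
   +// too small to track on a freelist.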
  1.2390 +
  1.2391 +Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  1.2392 +                                       size_t grow_chunks_by_words) {
  1.2393 +  // Get a chunk from the chunk freelist
  1.2394 +  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  1.2395 +
  1.2396 +  if (next == NULL) {
  1.2397 +    next = vs_list()->get_new_chunk(word_size,
  1.2398 +                                    grow_chunks_by_words,
  1.2399 +                                    medium_chunk_bunch());
  1.2400 +  }
  1.2401 +
  1.2402 +  if (TraceMetadataHumongousAllocation && next != NULL &&
  1.2403 +      SpaceManager::is_humongous(next->word_size())) {
  1.2404 +    gclog_or_tty->print_cr("  new humongous chunk word size "
  1.2405 +                           PTR_FORMAT, next->word_size());
  1.2406 +  }
  1.2407 +
  1.2408 +  return next;
  1.2409 +}
  1.2410 +
  1.2411 +MetaWord* SpaceManager::allocate(size_t word_size) {
  1.2412 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2413 +
  1.2414 +  size_t raw_word_size = get_raw_word_size(word_size);
  1.2415 +  BlockFreelist* fl =  block_freelists();
  1.2416 +  MetaWord* p = NULL;
  1.2417 +  // Allocation from the dictionary is expensive in the sense that
  1.2418 +  // the dictionary has to be searched for a size.  Don't allocate
  1.2419 +  // from the dictionary until it starts to get fat.  Is this
   1.2420 +  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  1.2421 +  // for allocations.  Do some profiling.  JJJ
  1.2422 +  if (fl->total_size() > allocation_from_dictionary_limit) {
  1.2423 +    p = fl->get_block(raw_word_size);
  1.2424 +  }
  1.2425 +  if (p == NULL) {
  1.2426 +    p = allocate_work(raw_word_size);
  1.2427 +  }
  1.2428 +
  1.2429 +  return p;
  1.2430 +}
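   +
   +// Allocation path sketch: the block freelists are only consulted once
   +// they hold more than allocation_from_dictionary_limit in total, on the
   +// assumption that searching a nearly empty dictionary costs more than
   +// it saves; below that threshold the request goes straight to
   +// allocate_work(), i.e. to the current chunk or, failing that, to
   +// grow_and_allocate().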
  1.2431 +
   1.2432 +// Returns the address of space allocated for "word_size".
   1.2433 +// This method does not know about blocks (Metablocks).
  1.2434 +MetaWord* SpaceManager::allocate_work(size_t word_size) {
  1.2435 +  assert_lock_strong(_lock);
  1.2436 +#ifdef ASSERT
  1.2437 +  if (Metadebug::test_metadata_failure()) {
  1.2438 +    return NULL;
  1.2439 +  }
  1.2440 +#endif
  1.2441 +  // Is there space in the current chunk?
  1.2442 +  MetaWord* result = NULL;
  1.2443 +
  1.2444 +  // For DumpSharedSpaces, only allocate out of the current chunk which is
   1.2445 +  // never null because we gave it the size we wanted.  Caller reports out
  1.2446 +  // of memory if this returns null.
  1.2447 +  if (DumpSharedSpaces) {
  1.2448 +    assert(current_chunk() != NULL, "should never happen");
  1.2449 +    inc_used_metrics(word_size);
  1.2450 +    return current_chunk()->allocate(word_size); // caller handles null result
  1.2451 +  }
  1.2452 +
  1.2453 +  if (current_chunk() != NULL) {
  1.2454 +    result = current_chunk()->allocate(word_size);
  1.2455 +  }
  1.2456 +
  1.2457 +  if (result == NULL) {
  1.2458 +    result = grow_and_allocate(word_size);
  1.2459 +  }
  1.2460 +
  1.2461 +  if (result != NULL) {
  1.2462 +    inc_used_metrics(word_size);
  1.2463 +    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
  1.2464 +           "Head of the list is being allocated");
  1.2465 +  }
  1.2466 +
  1.2467 +  return result;
  1.2468 +}
  1.2469 +
  1.2470 +void SpaceManager::verify() {
  1.2471 +  // If there are blocks in the dictionary, then
   1.2472 +  // verification of chunks does not work since
  1.2473 +  // being in the dictionary alters a chunk.
  1.2474 +  if (block_freelists()->total_size() == 0) {
  1.2475 +    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1.2476 +      Metachunk* curr = chunks_in_use(i);
  1.2477 +      while (curr != NULL) {
  1.2478 +        curr->verify();
  1.2479 +        verify_chunk_size(curr);
  1.2480 +        curr = curr->next();
  1.2481 +      }
  1.2482 +    }
  1.2483 +  }
  1.2484 +}
  1.2485 +
  1.2486 +void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  1.2487 +  assert(is_humongous(chunk->word_size()) ||
  1.2488 +         chunk->word_size() == medium_chunk_size() ||
  1.2489 +         chunk->word_size() == small_chunk_size() ||
  1.2490 +         chunk->word_size() == specialized_chunk_size(),
  1.2491 +         "Chunk size is wrong");
  1.2492 +  return;
  1.2493 +}
  1.2494 +
  1.2495 +#ifdef ASSERT
  1.2496 +void SpaceManager::verify_allocated_blocks_words() {
  1.2497 +  // Verification is only guaranteed at a safepoint.
  1.2498 +  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
  1.2499 +    "Verification can fail if the applications is running");
  1.2500 +  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
  1.2501 +    err_msg("allocation total is not consistent " SIZE_FORMAT
  1.2502 +            " vs " SIZE_FORMAT,
  1.2503 +            allocated_blocks_words(), sum_used_in_chunks_in_use()));
  1.2504 +}
  1.2505 +
  1.2506 +#endif
  1.2507 +
  1.2508 +void SpaceManager::dump(outputStream* const out) const {
  1.2509 +  size_t curr_total = 0;
  1.2510 +  size_t waste = 0;
  1.2511 +  uint i = 0;
  1.2512 +  size_t used = 0;
  1.2513 +  size_t capacity = 0;
  1.2514 +
  1.2515 +  // Add up statistics for all chunks in this SpaceManager.
  1.2516 +  for (ChunkIndex index = ZeroIndex;
  1.2517 +       index < NumberOfInUseLists;
  1.2518 +       index = next_chunk_index(index)) {
  1.2519 +    for (Metachunk* curr = chunks_in_use(index);
  1.2520 +         curr != NULL;
  1.2521 +         curr = curr->next()) {
  1.2522 +      out->print("%d) ", i++);
  1.2523 +      curr->print_on(out);
  1.2524 +      curr_total += curr->word_size();
  1.2525 +      used += curr->used_word_size();
  1.2526 +      capacity += curr->word_size();
   1.2527 +      waste += curr->free_word_size() + curr->overhead();
  1.2528 +    }
  1.2529 +  }
  1.2530 +
  1.2531 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2532 +    block_freelists()->print_on(out);
  1.2533 +  }
  1.2534 +
  1.2535 +  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  1.2536 +  // Free space isn't wasted.
  1.2537 +  waste -= free;
  1.2538 +
  1.2539 +  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
  1.2540 +                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
  1.2541 +                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
  1.2542 +}
  1.2543 +
  1.2544 +#ifndef PRODUCT
  1.2545 +void SpaceManager::mangle_freed_chunks() {
  1.2546 +  for (ChunkIndex index = ZeroIndex;
  1.2547 +       index < NumberOfInUseLists;
  1.2548 +       index = next_chunk_index(index)) {
  1.2549 +    for (Metachunk* curr = chunks_in_use(index);
  1.2550 +         curr != NULL;
  1.2551 +         curr = curr->next()) {
  1.2552 +      curr->mangle();
  1.2553 +    }
  1.2554 +  }
  1.2555 +}
  1.2556 +#endif // PRODUCT
  1.2557 +
  1.2558 +// MetaspaceAux
  1.2559 +
  1.2560 +
  1.2561 +size_t MetaspaceAux::_capacity_words[] = {0, 0};
  1.2562 +size_t MetaspaceAux::_used_words[] = {0, 0};
  1.2563 +
  1.2564 +size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
  1.2565 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  1.2566 +  return list == NULL ? 0 : list->free_bytes();
  1.2567 +}
  1.2568 +
  1.2569 +size_t MetaspaceAux::free_bytes() {
  1.2570 +  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
  1.2571 +}
  1.2572 +
  1.2573 +void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  1.2574 +  assert_lock_strong(SpaceManager::expand_lock());
  1.2575 +  assert(words <= capacity_words(mdtype),
  1.2576 +    err_msg("About to decrement below 0: words " SIZE_FORMAT
  1.2577 +            " is greater than _capacity_words[%u] " SIZE_FORMAT,
  1.2578 +            words, mdtype, capacity_words(mdtype)));
  1.2579 +  _capacity_words[mdtype] -= words;
  1.2580 +}
  1.2581 +
  1.2582 +void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  1.2583 +  assert_lock_strong(SpaceManager::expand_lock());
  1.2584 +  // Needs to be atomic
  1.2585 +  _capacity_words[mdtype] += words;
  1.2586 +}
  1.2587 +
  1.2588 +void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  1.2589 +  assert(words <= used_words(mdtype),
  1.2590 +    err_msg("About to decrement below 0: words " SIZE_FORMAT
  1.2591 +            " is greater than _used_words[%u] " SIZE_FORMAT,
  1.2592 +            words, mdtype, used_words(mdtype)));
  1.2593 +  // With CMS, deallocation of Metaspaces occurs during the
  1.2594 +  // sweep, which is a concurrent phase.  Protection by the expand_lock()
  1.2595 +  // is not enough since allocation is done on a per-Metaspace basis
  1.2596 +  // and is protected by the Metaspace lock.
  1.2597 +  jlong minus_words = (jlong) - (jlong) words;
  1.2598 +  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
  1.2599 +}
  1.2600 +
  1.2601 +void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  1.2602 +  // _used_words tracks allocations for
  1.2603 +  // each piece of metadata.  Those allocations are
  1.2604 +  // generally done concurrently by different application
  1.2605 +  // threads, so the updates must be made atomically.
  1.2606 +  Atomic::add_ptr(words, &_used_words[mdtype]);
  1.2607 +}
  1.2608 +
  1.2609 +size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  1.2610 +  size_t used = 0;
  1.2611 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2612 +  while (iter.repeat()) {
  1.2613 +    Metaspace* msp = iter.get_next();
  1.2614 +    // Sum allocated_blocks_words for each metaspace
  1.2615 +    if (msp != NULL) {
  1.2616 +      used += msp->used_words_slow(mdtype);
  1.2617 +    }
  1.2618 +  }
  1.2619 +  return used * BytesPerWord;
  1.2620 +}
  1.2621 +
  1.2622 +size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  1.2623 +  size_t free = 0;
  1.2624 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2625 +  while (iter.repeat()) {
  1.2626 +    Metaspace* msp = iter.get_next();
  1.2627 +    if (msp != NULL) {
  1.2628 +      free += msp->free_words_slow(mdtype);
  1.2629 +    }
  1.2630 +  }
  1.2631 +  return free * BytesPerWord;
  1.2632 +}
  1.2633 +
  1.2634 +size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
  1.2635 +  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
  1.2636 +    return 0;
  1.2637 +  }
  1.2638 +  // Don't count the space in the freelists.  That space will be
  1.2639 +  // added to the capacity calculation as needed.
  1.2640 +  size_t capacity = 0;
  1.2641 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2642 +  while (iter.repeat()) {
  1.2643 +    Metaspace* msp = iter.get_next();
  1.2644 +    if (msp != NULL) {
  1.2645 +      capacity += msp->capacity_words_slow(mdtype);
  1.2646 +    }
  1.2647 +  }
  1.2648 +  return capacity * BytesPerWord;
  1.2649 +}
  1.2650 +
  1.2651 +size_t MetaspaceAux::capacity_bytes_slow() {
  1.2652 +#ifdef PRODUCT
  1.2653 +  // Use capacity_bytes() in PRODUCT instead of this function.
  1.2654 +  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
  1.2655 +#endif
  1.2656 +  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  1.2657 +  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  1.2658 +  assert(capacity_bytes() == class_capacity + non_class_capacity,
  1.2659 +      err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
  1.2660 +        " class_capacity + non_class_capacity " SIZE_FORMAT
  1.2661 +        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
  1.2662 +        capacity_bytes(), class_capacity + non_class_capacity,
  1.2663 +        class_capacity, non_class_capacity));
  1.2664 +
  1.2665 +  return class_capacity + non_class_capacity;
  1.2666 +}
  1.2667 +
  1.2668 +size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
  1.2669 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  1.2670 +  return list == NULL ? 0 : list->reserved_bytes();
  1.2671 +}
  1.2672 +
  1.2673 +size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
  1.2674 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  1.2675 +  return list == NULL ? 0 : list->committed_bytes();
  1.2676 +}
  1.2677 +
  1.2678 +size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  1.2679 +
  1.2680 +size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  1.2681 +  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  1.2682 +  if (chunk_manager == NULL) {
  1.2683 +    return 0;
  1.2684 +  }
  1.2685 +  chunk_manager->slow_verify();
  1.2686 +  return chunk_manager->free_chunks_total_words();
  1.2687 +}
  1.2688 +
  1.2689 +size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  1.2690 +  return free_chunks_total_words(mdtype) * BytesPerWord;
  1.2691 +}
  1.2692 +
  1.2693 +size_t MetaspaceAux::free_chunks_total_words() {
  1.2694 +  return free_chunks_total_words(Metaspace::ClassType) +
  1.2695 +         free_chunks_total_words(Metaspace::NonClassType);
  1.2696 +}
  1.2697 +
  1.2698 +size_t MetaspaceAux::free_chunks_total_bytes() {
  1.2699 +  return free_chunks_total_words() * BytesPerWord;
  1.2700 +}
  1.2701 +
  1.2702 +bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
  1.2703 +  return Metaspace::get_chunk_manager(mdtype) != NULL;
  1.2704 +}
  1.2705 +
  1.2706 +MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
  1.2707 +  if (!has_chunk_free_list(mdtype)) {
  1.2708 +    return MetaspaceChunkFreeListSummary();
  1.2709 +  }
  1.2710 +
  1.2711 +  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
  1.2712 +  return cm->chunk_free_list_summary();
  1.2713 +}
  1.2714 +
  1.2715 +void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  1.2716 +  gclog_or_tty->print(", [Metaspace:");
  1.2717 +  if (PrintGCDetails && Verbose) {
  1.2718 +    gclog_or_tty->print(" "  SIZE_FORMAT
  1.2719 +                        "->" SIZE_FORMAT
  1.2720 +                        "("  SIZE_FORMAT ")",
  1.2721 +                        prev_metadata_used,
  1.2722 +                        used_bytes(),
  1.2723 +                        reserved_bytes());
  1.2724 +  } else {
  1.2725 +    gclog_or_tty->print(" "  SIZE_FORMAT "K"
  1.2726 +                        "->" SIZE_FORMAT "K"
  1.2727 +                        "("  SIZE_FORMAT "K)",
  1.2728 +                        prev_metadata_used/K,
  1.2729 +                        used_bytes()/K,
  1.2730 +                        reserved_bytes()/K);
  1.2731 +  }
  1.2732 +
  1.2733 +  gclog_or_tty->print("]");
  1.2734 +}
  1.2735 +
  1.2736 +// This is printed when PrintGCDetails is enabled
  1.2737 +void MetaspaceAux::print_on(outputStream* out) {
  1.2738 +  Metaspace::MetadataType nct = Metaspace::NonClassType;
  1.2739 +
  1.2740 +  out->print_cr(" Metaspace       "
  1.2741 +                "used "      SIZE_FORMAT "K, "
  1.2742 +                "capacity "  SIZE_FORMAT "K, "
  1.2743 +                "committed " SIZE_FORMAT "K, "
  1.2744 +                "reserved "  SIZE_FORMAT "K",
  1.2745 +                used_bytes()/K,
  1.2746 +                capacity_bytes()/K,
  1.2747 +                committed_bytes()/K,
  1.2748 +                reserved_bytes()/K);
  1.2749 +
  1.2750 +  if (Metaspace::using_class_space()) {
  1.2751 +    Metaspace::MetadataType ct = Metaspace::ClassType;
  1.2752 +    out->print_cr("  class space    "
  1.2753 +                  "used "      SIZE_FORMAT "K, "
  1.2754 +                  "capacity "  SIZE_FORMAT "K, "
  1.2755 +                  "committed " SIZE_FORMAT "K, "
  1.2756 +                  "reserved "  SIZE_FORMAT "K",
  1.2757 +                  used_bytes(ct)/K,
  1.2758 +                  capacity_bytes(ct)/K,
  1.2759 +                  committed_bytes(ct)/K,
  1.2760 +                  reserved_bytes(ct)/K);
  1.2761 +  }
  1.2762 +}
  1.2763 +
  1.2764 +// Print information for class space and data space separately.
  1.2765 +// This is almost the same as above.
  1.2766 +void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  1.2767 +  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
  1.2768 +  size_t capacity_bytes = capacity_bytes_slow(mdtype);
  1.2769 +  size_t used_bytes = used_bytes_slow(mdtype);
  1.2770 +  size_t free_bytes = free_bytes_slow(mdtype);
  1.2771 +  size_t used_and_free = used_bytes + free_bytes +
  1.2772 +                           free_chunks_capacity_bytes;
  1.2773 +  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
  1.2774 +             "K + unused in chunks " SIZE_FORMAT "K  + "
  1.2775 +             " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
  1.2776 +             "K  capacity in allocated chunks " SIZE_FORMAT "K",
  1.2777 +             used_bytes / K,
  1.2778 +             free_bytes / K,
  1.2779 +             free_chunks_capacity_bytes / K,
  1.2780 +             used_and_free / K,
  1.2781 +             capacity_bytes / K);
  1.2782 +  // Accounting can only be correct if we got the values during a safepoint
  1.2783 +  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
  1.2784 +}
  1.2785 +
  1.2786 +// Print total fragmentation for class metaspaces
  1.2787 +void MetaspaceAux::print_class_waste(outputStream* out) {
  1.2788 +  assert(Metaspace::using_class_space(), "class metaspace not used");
  1.2789 +  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  1.2790 +  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  1.2791 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2792 +  while (iter.repeat()) {
  1.2793 +    Metaspace* msp = iter.get_next();
  1.2794 +    if (msp != NULL) {
  1.2795 +      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  1.2796 +      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  1.2797 +      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  1.2798 +      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
  1.2799 +      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  1.2800 +      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
  1.2801 +      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  1.2802 +    }
  1.2803 +  }
  1.2804 +  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  1.2805 +                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  1.2806 +                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  1.2807 +                "large count " SIZE_FORMAT,
  1.2808 +                cls_specialized_count, cls_specialized_waste,
  1.2809 +                cls_small_count, cls_small_waste,
  1.2810 +                cls_medium_count, cls_medium_waste, cls_humongous_count);
  1.2811 +}
  1.2812 +
  1.2813 +// Print total fragmentation for data and class metaspaces separately
  1.2814 +void MetaspaceAux::print_waste(outputStream* out) {
  1.2815 +  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  1.2816 +  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
  1.2817 +
  1.2818 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2819 +  while (iter.repeat()) {
  1.2820 +    Metaspace* msp = iter.get_next();
  1.2821 +    if (msp != NULL) {
  1.2822 +      specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  1.2823 +      specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  1.2824 +      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  1.2825 +      small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
  1.2826 +      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  1.2827 +      medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
  1.2828 +      humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  1.2829 +    }
  1.2830 +  }
  1.2831 +  out->print_cr("Total fragmentation waste (words) doesn't count free space");
  1.2832 +  out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  1.2833 +                        SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  1.2834 +                        SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  1.2835 +                        "large count " SIZE_FORMAT,
  1.2836 +             specialized_count, specialized_waste, small_count,
  1.2837 +             small_waste, medium_count, medium_waste, humongous_count);
  1.2838 +  if (Metaspace::using_class_space()) {
  1.2839 +    print_class_waste(out);
  1.2840 +  }
  1.2841 +}
  1.2842 +
  1.2843 +// Dump global metaspace things from the end of ClassLoaderDataGraph
  1.2844 +void MetaspaceAux::dump(outputStream* out) {
  1.2845 +  out->print_cr("All Metaspace:");
  1.2846 +  out->print("data space: "); print_on(out, Metaspace::NonClassType);
  1.2847 +  out->print("class space: "); print_on(out, Metaspace::ClassType);
  1.2848 +  print_waste(out);
  1.2849 +}
  1.2850 +
  1.2851 +void MetaspaceAux::verify_free_chunks() {
  1.2852 +  Metaspace::chunk_manager_metadata()->verify();
  1.2853 +  if (Metaspace::using_class_space()) {
  1.2854 +    Metaspace::chunk_manager_class()->verify();
  1.2855 +  }
  1.2856 +}
  1.2857 +
  1.2858 +void MetaspaceAux::verify_capacity() {
  1.2859 +#ifdef ASSERT
  1.2860 +  size_t running_sum_capacity_bytes = capacity_bytes();
  1.2861 +  // Verify the running sum against the capacity obtained by iterating over the metaspaces
  1.2862 +  size_t capacity_in_use_bytes = capacity_bytes_slow();
  1.2863 +  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
  1.2864 +    err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
  1.2865 +            " capacity_bytes_slow() " SIZE_FORMAT,
  1.2866 +            running_sum_capacity_bytes, capacity_in_use_bytes));
  1.2867 +  for (Metaspace::MetadataType i = Metaspace::ClassType;
  1.2868 +       i < Metaspace::MetadataTypeCount;
  1.2869 +       i = (Metaspace::MetadataType)(i + 1)) {
  1.2870 +    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
  1.2871 +    assert(capacity_bytes(i) == capacity_in_use_bytes,
  1.2872 +      err_msg("capacity_bytes(%u) " SIZE_FORMAT
  1.2873 +              " capacity_bytes_slow(%u) " SIZE_FORMAT,
  1.2874 +              i, capacity_bytes(i), i, capacity_in_use_bytes));
  1.2875 +  }
  1.2876 +#endif
  1.2877 +}
  1.2878 +
  1.2879 +void MetaspaceAux::verify_used() {
  1.2880 +#ifdef ASSERT
  1.2881 +  size_t running_sum_used_bytes = used_bytes();
  1.2882 +  // Verify the running sum against the used value obtained by iterating over the metaspaces
  1.2883 +  size_t used_in_use_bytes = used_bytes_slow();
  1.2884 +  assert(running_sum_used_bytes == used_in_use_bytes,
  1.2885 +    err_msg("used_bytes() " SIZE_FORMAT
  1.2886 +            " used_bytes_slow() " SIZE_FORMAT,
  1.2887 +            running_sum_used_bytes, used_in_use_bytes));
  1.2888 +  for (Metaspace::MetadataType i = Metaspace::ClassType;
  1.2889 +       i < Metaspace::MetadataTypeCount;
  1.2890 +       i = (Metaspace::MetadataType)(i + 1)) {
  1.2891 +    size_t used_in_use_bytes = used_bytes_slow(i);
  1.2892 +    assert(used_bytes(i) == used_in_use_bytes,
  1.2893 +      err_msg("used_bytes(%u) " SIZE_FORMAT
  1.2894 +              " used_bytes_slow(%u) " SIZE_FORMAT,
  1.2895 +              i, used_bytes(i), i, used_in_use_bytes));
  1.2896 +  }
  1.2897 +#endif
  1.2898 +}
  1.2899 +
  1.2900 +void MetaspaceAux::verify_metrics() {
  1.2901 +  verify_capacity();
  1.2902 +  verify_used();
  1.2903 +}
  1.2904 +
  1.2905 +
  1.2906 +// Metaspace methods
  1.2907 +
  1.2908 +size_t Metaspace::_first_chunk_word_size = 0;
  1.2909 +size_t Metaspace::_first_class_chunk_word_size = 0;
  1.2910 +
  1.2911 +size_t Metaspace::_commit_alignment = 0;
  1.2912 +size_t Metaspace::_reserve_alignment = 0;
  1.2913 +
  1.2914 +Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  1.2915 +  initialize(lock, type);
  1.2916 +}
  1.2917 +
  1.2918 +Metaspace::~Metaspace() {
  1.2919 +  delete _vsm;
  1.2920 +  if (using_class_space()) {
  1.2921 +    delete _class_vsm;
  1.2922 +  }
  1.2923 +}
  1.2924 +
  1.2925 +VirtualSpaceList* Metaspace::_space_list = NULL;
  1.2926 +VirtualSpaceList* Metaspace::_class_space_list = NULL;
  1.2927 +
  1.2928 +ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
  1.2929 +ChunkManager* Metaspace::_chunk_manager_class = NULL;
  1.2930 +
  1.2931 +#define VIRTUALSPACEMULTIPLIER 2
  1.2932 +
  1.2933 +#ifdef _LP64
  1.2934 +static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
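         +// (4G: the largest span a 32-bit narrow klass pointer can cover with shift 0.)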
  1.2935 +
  1.2936 +void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  1.2937 +  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  1.2938 +  // narrow_klass_base is the lower of the metaspace base and the cds base
  1.2939 +  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  1.2940 +  // between the lower base and higher address.
  1.2941 +  address lower_base;
  1.2942 +  address higher_address;
  1.2943 +  if (UseSharedSpaces) {
  1.2944 +    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  1.2945 +                          (address)(metaspace_base + compressed_class_space_size()));
  1.2946 +    lower_base = MIN2(metaspace_base, cds_base);
  1.2947 +  } else {
  1.2948 +    higher_address = metaspace_base + compressed_class_space_size();
  1.2949 +    lower_base = metaspace_base;
  1.2950 +
  1.2951 +    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
  1.2952 +    // If compressed class space fits in lower 32G, we don't need a base.
  1.2953 +    if (higher_address <= (address)klass_encoding_max) {
  1.2954 +      lower_base = 0; // effectively lower base is zero.
  1.2955 +    }
  1.2956 +  }
  1.2957 +
  1.2958 +  Universe::set_narrow_klass_base(lower_base);
  1.2959 +
  1.2960 +  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
  1.2961 +    Universe::set_narrow_klass_shift(0);
  1.2962 +  } else {
  1.2963 +    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
  1.2964 +    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  1.2965 +  }
  1.2966 +}
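         +
         +// Illustrative sketch only (the real decode lives in the compressed-klass
         +// helpers): with the base and shift chosen above, a narrow klass value nk
         +// decodes as
         +//   (Klass*)(narrow_klass_base + ((uintptr_t)nk << narrow_klass_shift))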
  1.2967 +
  1.2968 +// Return TRUE if the specified metaspace_base and cds_base are close enough
  1.2969 +// to work with compressed klass pointers.
  1.2970 +bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  1.2971 +  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  1.2972 +  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  1.2973 +  address lower_base = MIN2((address)metaspace_base, cds_base);
  1.2974 +  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  1.2975 +                                (address)(metaspace_base + compressed_class_space_size()));
  1.2976 +  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
  1.2977 +}
  1.2978 +
  1.2979 +// Try to allocate the metaspace at the requested addr.
  1.2980 +void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  1.2981 +  assert(using_class_space(), "called improperly");
  1.2982 +  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  1.2983 +  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
  1.2984 +         "Metaspace size is too big");
  1.2985 +  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
  1.2986 +  assert_is_ptr_aligned(cds_base, _reserve_alignment);
  1.2987 +  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
  1.2988 +
  1.2989 +  // Don't use large pages for the class space.
  1.2990 +  bool large_pages = false;
  1.2991 +
  1.2992 +  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
  1.2993 +                                             _reserve_alignment,
  1.2994 +                                             large_pages,
  1.2995 +                                             requested_addr, 0);
  1.2996 +  if (!metaspace_rs.is_reserved()) {
  1.2997 +    if (UseSharedSpaces) {
  1.2998 +      size_t increment = align_size_up(1*G, _reserve_alignment);
  1.2999 +
  1.3000 +      // Keep trying to allocate the metaspace, increasing the requested_addr
  1.3001 +      // by 1GB each time, until we reach an address that will no longer allow
  1.3002 +      // use of CDS with compressed klass pointers.
  1.3003 +      char *addr = requested_addr;
  1.3004 +      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
  1.3005 +             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
  1.3006 +        addr = addr + increment;
  1.3007 +        metaspace_rs = ReservedSpace(compressed_class_space_size(),
  1.3008 +                                     _reserve_alignment, large_pages, addr, 0);
  1.3009 +      }
  1.3010 +    }
  1.3011 +
  1.3012 +    // If no successful allocation then try to allocate the space anywhere.  If
  1.3013 +    // that fails then OOM doom.  At this point we cannot try allocating the
  1.3014 +    // metaspace as if UseCompressedClassPointers is off because too much
  1.3015 +    // initialization has happened that depends on UseCompressedClassPointers.
  1.3016 +    // So, UseCompressedClassPointers cannot be turned off at this point.
  1.3017 +    if (!metaspace_rs.is_reserved()) {
  1.3018 +      metaspace_rs = ReservedSpace(compressed_class_space_size(),
  1.3019 +                                   _reserve_alignment, large_pages);
  1.3020 +      if (!metaspace_rs.is_reserved()) {
  1.3021 +        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
  1.3022 +                                              compressed_class_space_size()));
  1.3023 +      }
  1.3024 +    }
  1.3025 +  }
  1.3026 +
  1.3027 +  // If we got here then the metaspace got allocated.
  1.3028 +  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
  1.3029 +
  1.3030 +  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  1.3031 +  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
  1.3032 +    FileMapInfo::stop_sharing_and_unmap(
  1.3033 +        "Could not allocate metaspace at a compatible address");
  1.3034 +  }
  1.3035 +
  1.3036 +  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
  1.3037 +                                  UseSharedSpaces ? (address)cds_base : 0);
  1.3038 +
  1.3039 +  initialize_class_space(metaspace_rs);
  1.3040 +
  1.3041 +  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
  1.3042 +    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
  1.3043 +                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
  1.3044 +    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
  1.3045 +                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
  1.3046 +  }
  1.3047 +}
  1.3048 +
  1.3049 +// For UseCompressedClassPointers the class space is reserved above the top of
  1.3050 +// the Java heap.  The argument passed in is at the base of the compressed space.
  1.3051 +void Metaspace::initialize_class_space(ReservedSpace rs) {
  1.3052 +  // The reserved space size may be bigger because of alignment, especially with UseLargePages
  1.3053 +  assert(rs.size() >= CompressedClassSpaceSize,
  1.3054 +         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  1.3055 +  assert(using_class_space(), "Must be using class space");
  1.3056 +  _class_space_list = new VirtualSpaceList(rs);
  1.3057 +  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
  1.3058 +
  1.3059 +  if (!_class_space_list->initialization_succeeded()) {
  1.3060 +    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  1.3061 +  }
  1.3062 +}
  1.3063 +
  1.3064 +#endif
  1.3065 +
  1.3066 +void Metaspace::ergo_initialize() {
  1.3067 +  if (DumpSharedSpaces) {
  1.3068 +    // Using large pages when dumping the shared archive is currently not implemented.
  1.3069 +    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  1.3070 +  }
  1.3071 +
  1.3072 +  size_t page_size = os::vm_page_size();
  1.3073 +  if (UseLargePages && UseLargePagesInMetaspace) {
  1.3074 +    page_size = os::large_page_size();
  1.3075 +  }
  1.3076 +
  1.3077 +  _commit_alignment  = page_size;
  1.3078 +  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  1.3079 +
  1.3080 +  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
  1.3081 +  // clobber the record of whether MaxMetaspaceSize was set on the command line.
  1.3082 +  // This information is needed later to conform to the specification of the
  1.3083 +  // java.lang.management.MemoryUsage API.
  1.3084 +  //
  1.3085 +  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  1.3086 +  // globals.hpp to the aligned value, but this is not possible, since the
  1.3087 +  // alignment depends on other flags being parsed.
  1.3088 +  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
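         +  // For illustration, assuming align_size_down_bounded(size, alignment)
         +  // behaves like MAX2(align_size_down(size, alignment), alignment): with a
         +  // 2M _reserve_alignment, 100M + 1K becomes 100M, and a value below 2M is
         +  // bounded up to 2M.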
  1.3089 +
  1.3090 +  if (MetaspaceSize > MaxMetaspaceSize) {
  1.3091 +    MetaspaceSize = MaxMetaspaceSize;
  1.3092 +  }
  1.3093 +
  1.3094 +  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
  1.3095 +
  1.3096 +  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
  1.3097 +
  1.3098 +  if (MetaspaceSize < 256*K) {
  1.3099 +    vm_exit_during_initialization("Too small initial Metaspace size");
  1.3100 +  }
  1.3101 +
  1.3102 +  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  1.3103 +  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
  1.3104 +
  1.3105 +  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
  1.3106 +  set_compressed_class_space_size(CompressedClassSpaceSize);
  1.3107 +}
  1.3108 +
  1.3109 +void Metaspace::global_initialize() {
  1.3110 +  MetaspaceGC::initialize();
  1.3111 +
  1.3112 +  // Initialize the alignment for shared spaces.
  1.3113 +  int max_alignment = os::vm_page_size();
  1.3114 +  size_t cds_total = 0;
  1.3115 +
  1.3116 +  MetaspaceShared::set_max_alignment(max_alignment);
  1.3117 +
  1.3118 +  if (DumpSharedSpaces) {
  1.3119 +    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
  1.3120 +    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
  1.3121 +    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
  1.3122 +    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
  1.3123 +
  1.3124 +    // Initialize with the sum of the shared space sizes.  The read-only
  1.3125 +    // and read write metaspace chunks will be allocated out of this and the
  1.3126 +    // remainder is the misc code and data chunks.
  1.3127 +    cds_total = FileMapInfo::shared_spaces_size();
  1.3128 +    cds_total = align_size_up(cds_total, _reserve_alignment);
  1.3129 +    _space_list = new VirtualSpaceList(cds_total/wordSize);
  1.3130 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  1.3131 +
  1.3132 +    if (!_space_list->initialization_succeeded()) {
  1.3133 +      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
  1.3134 +    }
  1.3135 +
  1.3136 +#ifdef _LP64
  1.3137 +    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
  1.3138 +      vm_exit_during_initialization("Unable to dump shared archive.",
  1.3139 +          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
  1.3140 +                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
  1.3141 +                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
  1.3142 +                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
  1.3143 +    }
  1.3144 +
  1.3145 +    // Set the compressed klass pointer base so that decoding of these pointers works
  1.3146 +    // properly when creating the shared archive.
  1.3147 +    assert(UseCompressedOops && UseCompressedClassPointers,
  1.3148 +      "UseCompressedOops and UseCompressedClassPointers must be set");
  1.3149 +    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
  1.3150 +    if (TraceMetavirtualspaceAllocation && Verbose) {
  1.3151 +      gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
  1.3152 +                             _space_list->current_virtual_space()->bottom());
  1.3153 +    }
  1.3154 +
  1.3155 +    Universe::set_narrow_klass_shift(0);
  1.3156 +#endif
  1.3157 +
  1.3158 +  } else {
  1.3159 +    // If using shared space, open the file that contains the shared space
  1.3160 +    // and map in the memory before initializing the rest of metaspace (so
  1.3161 +    // the addresses don't conflict)
  1.3162 +    address cds_address = NULL;
  1.3163 +    if (UseSharedSpaces) {
  1.3164 +      FileMapInfo* mapinfo = new FileMapInfo();
  1.3165 +      memset(mapinfo, 0, sizeof(FileMapInfo));
  1.3166 +
  1.3167 +      // Open the shared archive file, read and validate the header. If
  1.3168 +      // initialization fails, shared spaces [UseSharedSpaces] are
  1.3169 +      // disabled and the file is closed.
  1.3170 +      // Map in spaces now also
  1.3171 +      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
  1.3172 +        FileMapInfo::set_current_info(mapinfo);
  1.3173 +        cds_total = FileMapInfo::shared_spaces_size();
  1.3174 +        cds_address = (address)mapinfo->region_base(0);
  1.3175 +      } else {
  1.3176 +        assert(!mapinfo->is_open() && !UseSharedSpaces,
  1.3177 +               "archive file not closed or shared spaces not disabled.");
  1.3178 +      }
  1.3179 +    }
  1.3180 +
  1.3181 +#ifdef _LP64
  1.3182 +    // If UseCompressedClassPointers is set then allocate the metaspace area
  1.3183 +    // above the heap and above the CDS area (if it exists).
  1.3184 +    if (using_class_space()) {
  1.3185 +      if (UseSharedSpaces) {
  1.3186 +        char* cds_end = (char*)(cds_address + cds_total);
  1.3187 +        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
  1.3188 +        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
  1.3189 +      } else {
  1.3190 +        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
  1.3191 +        allocate_metaspace_compressed_klass_ptrs(base, 0);
  1.3192 +      }
  1.3193 +    }
  1.3194 +#endif
  1.3195 +
  1.3196 +    // Initialize these before initializing the VirtualSpaceList
  1.3197 +    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  1.3198 +    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  1.3199 +    // Make the first class chunk bigger than a medium chunk so it's not put
  1.3200 +    // on the medium chunk list.  The next chunk will be small and progress
  1.3201 +    // from there.  This size was calculated by running -version.
  1.3202 +    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
  1.3203 +                                       (CompressedClassSpaceSize/BytesPerWord)*2);
  1.3204 +    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  1.3205 +    // Arbitrarily set the initial virtual space to a multiple
  1.3206 +    // of the boot class loader size.
  1.3207 +    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  1.3208 +    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
  1.3209 +
  1.3210 +    // Initialize the list of virtual spaces.
  1.3211 +    _space_list = new VirtualSpaceList(word_size);
  1.3212 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  1.3213 +
  1.3214 +    if (!_space_list->initialization_succeeded()) {
  1.3215 +      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  1.3216 +    }
  1.3217 +  }
  1.3218 +
  1.3219 +  _tracer = new MetaspaceTracer();
  1.3220 +}
  1.3221 +
  1.3222 +void Metaspace::post_initialize() {
  1.3223 +  MetaspaceGC::post_initialize();
  1.3224 +}
  1.3225 +
  1.3226 +Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  1.3227 +                                               size_t chunk_word_size,
  1.3228 +                                               size_t chunk_bunch) {
  1.3229 +  // Get a chunk from the chunk freelist
  1.3230 +  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  1.3231 +  if (chunk != NULL) {
  1.3232 +    return chunk;
  1.3233 +  }
  1.3234 +
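         +  // No suitably sized chunk on the freelist; carve a new chunk out of the
         +  // virtual space list instead.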
  1.3235 +  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
  1.3236 +}
  1.3237 +
  1.3238 +void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  1.3239 +
  1.3240 +  assert(space_list() != NULL,
  1.3241 +    "Metadata VirtualSpaceList has not been initialized");
  1.3242 +  assert(chunk_manager_metadata() != NULL,
  1.3243 +    "Metadata ChunkManager has not been initialized");
  1.3244 +
  1.3245 +  _vsm = new SpaceManager(NonClassType, lock);
  1.3246 +  if (_vsm == NULL) {
  1.3247 +    return;
  1.3248 +  }
  1.3249 +  size_t word_size;
  1.3250 +  size_t class_word_size;
  1.3251 +  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
  1.3252 +
  1.3253 +  if (using_class_space()) {
  1.3254 +    assert(class_space_list() != NULL,
  1.3255 +      "Class VirtualSpaceList has not been initialized");
  1.3256 +    assert(chunk_manager_class() != NULL,
  1.3257 +      "Class ChunkManager has not been initialized");
  1.3258 +
  1.3259 +    // Allocate SpaceManager for classes.
  1.3260 +    _class_vsm = new SpaceManager(ClassType, lock);
  1.3261 +    if (_class_vsm == NULL) {
  1.3262 +      return;
  1.3263 +    }
  1.3264 +  }
  1.3265 +
  1.3266 +  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.3267 +
  1.3268 +  // Allocate chunk for metadata objects
  1.3269 +  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
  1.3270 +                                                  word_size,
  1.3271 +                                                  vsm()->medium_chunk_bunch());
  1.3272 +  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  1.3273 +  if (new_chunk != NULL) {
  1.3274 +    // Add to this manager's list of chunks in use and current_chunk().
  1.3275 +    vsm()->add_chunk(new_chunk, true);
  1.3276 +  }
  1.3277 +
  1.3278 +  // Allocate chunk for class metadata objects
  1.3279 +  if (using_class_space()) {
  1.3280 +    Metachunk* class_chunk = get_initialization_chunk(ClassType,
  1.3281 +                                                      class_word_size,
  1.3282 +                                                      class_vsm()->medium_chunk_bunch());
  1.3283 +    if (class_chunk != NULL) {
  1.3284 +      class_vsm()->add_chunk(class_chunk, true);
  1.3285 +    }
  1.3286 +  }
  1.3287 +
  1.3288 +  _alloc_record_head = NULL;
  1.3289 +  _alloc_record_tail = NULL;
  1.3290 +}
  1.3291 +
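         +// Worked example, assuming 8-byte words and a 4K allocation granularity:
         +// align_word_size_up(100) returns 512, i.e. 800 bytes rounded up to 4096.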
  1.3292 +size_t Metaspace::align_word_size_up(size_t word_size) {
  1.3293 +  size_t byte_size = word_size * wordSize;
  1.3294 +  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
  1.3295 +}
  1.3296 +
  1.3297 +MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  1.3298 +  // DumpSharedSpaces doesn't use class metadata area (yet)
  1.3299 +  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  1.3300 +  if (is_class_space_allocation(mdtype)) {
  1.3301 +    return class_vsm()->allocate(word_size);
  1.3302 +  } else {
  1.3303 +    return vsm()->allocate(word_size);
  1.3304 +  }
  1.3305 +}
  1.3306 +
  1.3307 +MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  1.3308 +  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  1.3309 +  assert(delta_bytes > 0, "Must be");
  1.3310 +
  1.3311 +  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  1.3312 +
  1.3313 +  // capacity_until_GC might be updated concurrently; derive the previous value from the result.
  1.3314 +  size_t before_inc = after_inc - delta_bytes;
  1.3315 +
  1.3316 +  tracer()->report_gc_threshold(before_inc, after_inc,
  1.3317 +                                MetaspaceGCThresholdUpdater::ExpandAndAllocate);
  1.3318 +  if (PrintGCDetails && Verbose) {
  1.3319 +    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
  1.3320 +        " to " SIZE_FORMAT, before_inc, after_inc);
  1.3321 +  }
  1.3322 +
  1.3323 +  return allocate(word_size, mdtype);
  1.3324 +}
  1.3325 +
  1.3326 +// Space allocated in the Metaspace.  This may
  1.3327 +// be across several metadata virtual spaces.
  1.3328 +char* Metaspace::bottom() const {
  1.3329 +  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  1.3330 +  return (char*)vsm()->current_chunk()->bottom();
  1.3331 +}
  1.3332 +
  1.3333 +size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  1.3334 +  if (mdtype == ClassType) {
  1.3335 +    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  1.3336 +  } else {
  1.3337 +    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  1.3338 +  }
  1.3339 +}
  1.3340 +
  1.3341 +size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  1.3342 +  if (mdtype == ClassType) {
  1.3343 +    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  1.3344 +  } else {
  1.3345 +    return vsm()->sum_free_in_chunks_in_use();
  1.3346 +  }
  1.3347 +}
  1.3348 +
  1.3349 +// Space capacity in the Metaspace.  It includes
  1.3350 +// space in the list of chunks from which allocations
  1.3351 +// have been made. Don't include space in the global freelist and
  1.3352 +// in the space available in the dictionary which
  1.3353 +// is already counted in some chunk.
  1.3354 +size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  1.3355 +  if (mdtype == ClassType) {
  1.3356 +    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  1.3357 +  } else {
  1.3358 +    return vsm()->sum_capacity_in_chunks_in_use();
  1.3359 +  }
  1.3360 +}
  1.3361 +
  1.3362 +size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  1.3363 +  return used_words_slow(mdtype) * BytesPerWord;
  1.3364 +}
  1.3365 +
  1.3366 +size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  1.3367 +  return capacity_words_slow(mdtype) * BytesPerWord;
  1.3368 +}
  1.3369 +
  1.3370 +void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  1.3371 +  if (SafepointSynchronize::is_at_safepoint()) {
  1.3372 +    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
  1.3373 +    // Don't take Heap_lock
  1.3374 +    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
  1.3375 +    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
  1.3376 +      // Dark matter.  Too small for dictionary.
  1.3377 +#ifdef ASSERT
  1.3378 +      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
  1.3379 +#endif
  1.3380 +      return;
  1.3381 +    }
  1.3382 +    if (is_class && using_class_space()) {
  1.3383 +      class_vsm()->deallocate(ptr, word_size);
  1.3384 +    } else {
  1.3385 +      vsm()->deallocate(ptr, word_size);
  1.3386 +    }
  1.3387 +  } else {
  1.3388 +    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
  1.3389 +
  1.3390 +    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
  1.3391 +      // Dark matter.  Too small for dictionary.
  1.3392 +#ifdef ASSERT
  1.3393 +      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
  1.3394 +#endif
  1.3395 +      return;
  1.3396 +    }
  1.3397 +    if (is_class && using_class_space()) {
  1.3398 +      class_vsm()->deallocate(ptr, word_size);
  1.3399 +    } else {
  1.3400 +      vsm()->deallocate(ptr, word_size);
  1.3401 +    }
  1.3402 +  }
  1.3403 +}
  1.3404 +
  1.3405 +
  1.3406 +MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  1.3407 +                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  1.3408 +  if (HAS_PENDING_EXCEPTION) {
  1.3409 +    assert(false, "Should not allocate with exception pending");
  1.3410 +    return NULL;  // caller does a CHECK_NULL too
  1.3411 +  }
  1.3412 +
  1.3413 +  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
  1.3414 +        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  1.3415 +
  1.3416 +  // Allocate in metaspaces without taking out a lock, because it deadlocks
  1.3417 +  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  1.3418 +  // to revisit this for application class data sharing.
  1.3419 +  if (DumpSharedSpaces) {
  1.3420 +    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
  1.3421 +    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
  1.3422 +    MetaWord* result = space->allocate(word_size, NonClassType);
  1.3423 +    if (result == NULL) {
  1.3424 +      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  1.3425 +    }
  1.3426 +
  1.3427 +    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
  1.3428 +
  1.3429 +    // Zero initialize.
  1.3430 +    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
  1.3431 +
  1.3432 +    return result;
  1.3433 +  }
  1.3434 +
  1.3435 +  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
  1.3436 +
  1.3437 +  // Try to allocate metadata.
  1.3438 +  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  1.3439 +
  1.3440 +  if (result == NULL) {
  1.3441 +    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
  1.3442 +
  1.3443 +    // Allocation failed.
  1.3444 +    if (is_init_completed()) {
  1.3445 +      // Only start a GC if the bootstrapping has completed.
  1.3446 +
  1.3447 +      // Try to clean out some memory and retry.
  1.3448 +      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
  1.3449 +          loader_data, word_size, mdtype);
  1.3450 +    }
  1.3451 +  }
  1.3452 +
  1.3453 +  if (result == NULL) {
  1.3454 +    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
  1.3455 +  }
  1.3456 +
  1.3457 +  // Zero initialize.
  1.3458 +  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
  1.3459 +
  1.3460 +  return result;
  1.3461 +}
  1.3462 +
  1.3463 +size_t Metaspace::class_chunk_size(size_t word_size) {
  1.3464 +  assert(using_class_space(), "Has to use class space");
  1.3465 +  return class_vsm()->calc_chunk_size(word_size);
  1.3466 +}
  1.3467 +
  1.3468 +void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  1.3469 +  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
  1.3470 +
  1.3471 +  // If result is still null, we are out of memory.
  1.3472 +  if (Verbose && TraceMetadataChunkAllocation) {
  1.3473 +    gclog_or_tty->print_cr("Metaspace allocation failed for size "
  1.3474 +        SIZE_FORMAT, word_size);
  1.3475 +    if (loader_data->metaspace_or_null() != NULL) {
  1.3476 +      loader_data->dump(gclog_or_tty);
  1.3477 +    }
  1.3478 +    MetaspaceAux::dump(gclog_or_tty);
  1.3479 +  }
  1.3480 +
  1.3481 +  bool out_of_compressed_class_space = false;
  1.3482 +  if (is_class_space_allocation(mdtype)) {
  1.3483 +    Metaspace* metaspace = loader_data->metaspace_non_null();
  1.3484 +    out_of_compressed_class_space =
  1.3485 +      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
  1.3486 +      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
  1.3487 +      CompressedClassSpaceSize;
  1.3488 +  }
  1.3489 +
  1.3490 +  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  1.3491 +  const char* space_string = out_of_compressed_class_space ?
  1.3492 +    "Compressed class space" : "Metaspace";
  1.3493 +
  1.3494 +  report_java_out_of_memory(space_string);
  1.3495 +
  1.3496 +  if (JvmtiExport::should_post_resource_exhausted()) {
  1.3497 +    JvmtiExport::post_resource_exhausted(
  1.3498 +        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
  1.3499 +        space_string);
  1.3500 +  }
  1.3501 +
  1.3502 +  if (!is_init_completed()) {
  1.3503 +    vm_exit_during_initialization("OutOfMemoryError", space_string);
  1.3504 +  }
  1.3505 +
  1.3506 +  if (out_of_compressed_class_space) {
  1.3507 +    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  1.3508 +  } else {
  1.3509 +    THROW_OOP(Universe::out_of_memory_error_metaspace());
  1.3510 +  }
  1.3511 +}
  1.3512 +
  1.3513 +const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  1.3514 +  switch (mdtype) {
  1.3515 +    case Metaspace::ClassType: return "Class";
  1.3516 +    case Metaspace::NonClassType: return "Metadata";
  1.3517 +    default:
  1.3518 +      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
  1.3519 +      return NULL;
  1.3520 +  }
  1.3521 +}
  1.3522 +
  1.3523 +void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  1.3524 +  assert(DumpSharedSpaces, "sanity");
  1.3525 +
  1.3526 +  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
  1.3527 +  if (_alloc_record_head == NULL) {
  1.3528 +    _alloc_record_head = _alloc_record_tail = rec;
  1.3529 +  } else {
  1.3530 +    _alloc_record_tail->_next = rec;
  1.3531 +    _alloc_record_tail = rec;
  1.3532 +  }
  1.3533 +}
  1.3534 +
  1.3535 +void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  1.3536 +  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
  1.3537 +
  1.3538 +  address last_addr = (address)bottom();
  1.3539 +
  1.3540 +  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
  1.3541 +    address ptr = rec->_ptr;
  1.3542 +    if (last_addr < ptr) {
  1.3543 +      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
  1.3544 +    }
  1.3545 +    closure->doit(ptr, rec->_type, rec->_byte_size);
  1.3546 +    last_addr = ptr + rec->_byte_size;
  1.3547 +  }
  1.3548 +
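         +  // Report the tail between the last recorded allocation and the
         +  // high-water mark as unknown as well.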
  1.3549 +  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  1.3550 +  if (last_addr < top) {
  1.3551 +    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  1.3552 +  }
  1.3553 +}
  1.3554 +
  1.3555 +void Metaspace::purge(MetadataType mdtype) {
  1.3556 +  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
  1.3557 +}
  1.3558 +
  1.3559 +void Metaspace::purge() {
  1.3560 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.3561 +                   Mutex::_no_safepoint_check_flag);
  1.3562 +  purge(NonClassType);
  1.3563 +  if (using_class_space()) {
  1.3564 +    purge(ClassType);
  1.3565 +  }
  1.3566 +}
  1.3567 +
  1.3568 +void Metaspace::print_on(outputStream* out) const {
  1.3569 +  // Print both class virtual space counts and metaspace.
  1.3570 +  if (Verbose) {
  1.3571 +    vsm()->print_on(out);
  1.3572 +    if (using_class_space()) {
  1.3573 +      class_vsm()->print_on(out);
  1.3574 +    }
  1.3575 +  }
  1.3576 +}
  1.3577 +
  1.3578 +bool Metaspace::contains(const void* ptr) {
  1.3579 +  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
  1.3580 +    return true;
  1.3581 +  }
  1.3582 +
  1.3583 +  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
  1.3584 +     return true;
  1.3585 +  }
  1.3586 +
  1.3587 +  return get_space_list(NonClassType)->contains(ptr);
  1.3588 +}
  1.3589 +
  1.3590 +void Metaspace::verify() {
  1.3591 +  vsm()->verify();
  1.3592 +  if (using_class_space()) {
  1.3593 +    class_vsm()->verify();
  1.3594 +  }
  1.3595 +}
  1.3596 +
  1.3597 +void Metaspace::dump(outputStream* const out) const {
  1.3598 +  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  1.3599 +  vsm()->dump(out);
  1.3600 +  if (using_class_space()) {
  1.3601 +    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
  1.3602 +    class_vsm()->dump(out);
  1.3603 +  }
  1.3604 +}
  1.3605 +
  1.3606 +/////////////// Unit tests ///////////////
  1.3607 +
  1.3608 +#ifndef PRODUCT
  1.3609 +
  1.3610 +class TestMetaspaceAuxTest : AllStatic {
  1.3611 + public:
  1.3612 +  static void test_reserved() {
  1.3613 +    size_t reserved = MetaspaceAux::reserved_bytes();
  1.3614 +
  1.3615 +    assert(reserved > 0, "assert");
  1.3616 +
  1.3617 +    size_t committed  = MetaspaceAux::committed_bytes();
  1.3618 +    assert(committed <= reserved, "assert");
  1.3619 +
  1.3620 +    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  1.3621 +    assert(reserved_metadata > 0, "assert");
  1.3622 +    assert(reserved_metadata <= reserved, "assert");
  1.3623 +
  1.3624 +    if (UseCompressedClassPointers) {
  1.3625 +      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
  1.3626 +      assert(reserved_class > 0, "assert");
  1.3627 +      assert(reserved_class < reserved, "assert");
  1.3628 +    }
  1.3629 +  }
  1.3630 +
  1.3631 +  static void test_committed() {
  1.3632 +    size_t committed = MetaspaceAux::committed_bytes();
  1.3633 +
  1.3634 +    assert(committed > 0, "assert");
  1.3635 +
  1.3636 +    size_t reserved  = MetaspaceAux::reserved_bytes();
  1.3637 +    assert(committed <= reserved, "assert");
  1.3638 +
  1.3639 +    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
  1.3640 +    assert(committed_metadata > 0, "assert");
  1.3641 +    assert(committed_metadata <= committed, "assert");
  1.3642 +
  1.3643 +    if (UseCompressedClassPointers) {
  1.3644 +      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
  1.3645 +      assert(committed_class > 0, "assert");
  1.3646 +      assert(committed_class < committed, "assert");
  1.3647 +    }
  1.3648 +  }
  1.3649 +
  1.3650 +  static void test_virtual_space_list_large_chunk() {
  1.3651 +    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
  1.3652 +    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.3653 +    // A size larger than VirtualSpaceSize (256k), with extra pages added so that
  1.3654 +    // it is _not_ vm_allocation_granularity aligned on Windows.
  1.3655 +    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
  1.3656 +    large_size += (os::vm_page_size()/BytesPerWord);
  1.3657 +    vs_list->get_new_chunk(large_size, large_size, 0);
  1.3658 +  }
  1.3659 +
  1.3660 +  static void test() {
  1.3661 +    test_reserved();
  1.3662 +    test_committed();
  1.3663 +    test_virtual_space_list_large_chunk();
  1.3664 +  }
  1.3665 +};
  1.3666 +
  1.3667 +void TestMetaspaceAux_test() {
  1.3668 +  TestMetaspaceAuxTest::test();
  1.3669 +}
  1.3670 +
  1.3671 +class TestVirtualSpaceNodeTest {
  1.3672 +  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
  1.3673 +                                          size_t& num_small_chunks,
  1.3674 +                                          size_t& num_specialized_chunks) {
  1.3675 +    num_medium_chunks = words_left / MediumChunk;
  1.3676 +    words_left = words_left % MediumChunk;
  1.3677 +
  1.3678 +    num_small_chunks = words_left / SmallChunk;
  1.3679 +    words_left = words_left % SmallChunk;
  1.3680 +    // how many specialized chunks can we get?
  1.3681 +    num_specialized_chunks = words_left / SpecializedChunk;
  1.3682 +    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  1.3683 +  }
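         +  // Worked example, assuming the usual 64-bit non-class chunk sizes
         +  // (SpecializedChunk == 128, SmallChunk == 512, MediumChunk == 8K words):
         +  // words_left == 1152 gives 0 medium, 2 small and 1 specialized chunk
         +  // (1152 == 2*512 + 128).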
  1.3684 +
  1.3685 + public:
  1.3686 +  static void test() {
  1.3687 +    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.3688 +    const size_t vsn_test_size_words = MediumChunk  * 4;
  1.3689 +    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
  1.3690 +
  1.3691 +    // The chunk sizes must be multiples of each other, or this will fail
  1.3692 +    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
  1.3693 +    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
  1.3694 +
  1.3695 +    { // No committed memory in VSN
  1.3696 +      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
  1.3697 +      VirtualSpaceNode vsn(vsn_test_size_bytes);
  1.3698 +      vsn.initialize();
  1.3699 +      vsn.retire(&cm);
  1.3700 +      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
  1.3701 +    }
  1.3702 +
  1.3703 +    { // All of VSN is committed, half is used by chunks
  1.3704 +      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
  1.3705 +      VirtualSpaceNode vsn(vsn_test_size_bytes);
  1.3706 +      vsn.initialize();
  1.3707 +      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
  1.3708 +      vsn.get_chunk_vs(MediumChunk);
  1.3709 +      vsn.get_chunk_vs(MediumChunk);
  1.3710 +      vsn.retire(&cm);
  1.3711 +      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
  1.3712 +      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
  1.3713 +    }
  1.3714 +
  1.3715 +    { // 4 pages of VSN is committed, some is used by chunks
  1.3716 +      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
  1.3717 +      VirtualSpaceNode vsn(vsn_test_size_bytes);
  1.3718 +      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
  1.3719 +      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
  1.3720 +      vsn.initialize();
  1.3721 +      vsn.expand_by(page_chunks, page_chunks);
  1.3722 +      vsn.get_chunk_vs(SmallChunk);
  1.3723 +      vsn.get_chunk_vs(SpecializedChunk);
  1.3724 +      vsn.retire(&cm);
  1.3725 +
  1.3726 +      // committed - used = words left to retire
  1.3727 +      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
  1.3728 +
  1.3729 +      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
  1.3730 +      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
  1.3731 +
  1.3732 +      assert(num_medium_chunks == 0, "should not get any medium chunks");
  1.3733 +      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match the computed number of chunks");
  1.3734 +      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
  1.3735 +    }
  1.3736 +
  1.3737 +    { // Half of VSN is committed, a humongous chunk is used
  1.3738 +      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
  1.3739 +      VirtualSpaceNode vsn(vsn_test_size_bytes);
  1.3740 +      vsn.initialize();
  1.3741 +      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
  1.3742 +      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
  1.3743 +      vsn.retire(&cm);
  1.3744 +
  1.3745 +      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
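          +      // words_left == MediumChunk - SpecializedChunk, which is smaller than a
          +      // MediumChunk, so only small and specialized chunks can be carved from it.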
  1.3746 +      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
  1.3747 +      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
  1.3748 +
  1.3749 +      assert(num_medium_chunks == 0, "should not get any medium chunks");
  1.3750 +      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match the computed number of chunks");
  1.3751 +      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
  1.3752 +    }
  1.3753 +
  1.3754 +  }
  1.3755 +
  1.3756 +#define assert_is_available_positive(word_size) \
  1.3757 +  assert(vsn.is_available(word_size), \
  1.3758 +    err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
  1.3759 +            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
  1.3760 +            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
  1.3761 +
  1.3762 +#define assert_is_available_negative(word_size) \
  1.3763 +  assert(!vsn.is_available(word_size), \
  1.3764 +    err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
  1.3765 +            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
  1.3766 +            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
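          +  // Both macros expect a VirtualSpaceNode named "vsn" to be in scope at the
          +  // call site, as in the tests below.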
  1.3767 +
  1.3768 +  static void test_is_available_positive() {
  1.3769 +    // Reserve some memory.
  1.3770 +    VirtualSpaceNode vsn(os::vm_allocation_granularity());
  1.3771 +    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
  1.3772 +
  1.3773 +    // Commit some memory.
  1.3774 +    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
  1.3775 +    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
  1.3776 +    assert(expanded, "Failed to commit");
  1.3777 +
  1.3778 +    // Check that is_available accepts the committed size.
  1.3779 +    assert_is_available_positive(commit_word_size);
  1.3780 +
  1.3781 +    // Check that is_available accepts half the committed size.
  1.3782 +    size_t expand_word_size = commit_word_size / 2;
  1.3783 +    assert_is_available_positive(expand_word_size);
  1.3784 +  }
  1.3785 +
  1.3786 +  static void test_is_available_negative() {
  1.3787 +    // Reserve some memory.
  1.3788 +    VirtualSpaceNode vsn(os::vm_allocation_granularity());
  1.3789 +    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
  1.3790 +
  1.3791 +    // Commit some memory.
  1.3792 +    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
  1.3793 +    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
  1.3794 +    assert(expanded, "Failed to commit");
  1.3795 +
  1.3796 +    // Check that is_available does not accept a size that is too large.
  1.3797 +    size_t two_times_commit_word_size = commit_word_size * 2;
  1.3798 +    assert_is_available_negative(two_times_commit_word_size);
  1.3799 +  }
  1.3800 +
  1.3801 +  static void test_is_available_overflow() {
  1.3802 +    // Reserve some memory.
  1.3803 +    VirtualSpaceNode vsn(os::vm_allocation_granularity());
  1.3804 +    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
  1.3805 +
  1.3806 +    // Commit some memory.
  1.3807 +    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
  1.3808 +    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
  1.3809 +    assert(expanded, "Failed to commit");
  1.3810 +
  1.3811 +    // Calculate a size that will overflow the virtual space size.
  1.3812 +    void* virtual_space_max = (void*)(uintptr_t)-1;
  1.3813 +    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
  1.3814 +    size_t overflow_size = bottom_to_max + BytesPerWord;
  1.3815 +    size_t overflow_word_size = overflow_size / BytesPerWord;
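          +    // bottom() + overflow_word_size * BytesPerWord wraps past the top of the
          +    // address space; is_available() must detect this rather than overflow.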
  1.3816 +
  1.3817 +    // Check that is_available can handle the overflow.
  1.3818 +    assert_is_available_negative(overflow_word_size);
  1.3819 +  }
  1.3820 +
  1.3821 +  static void test_is_available() {
  1.3822 +    TestVirtualSpaceNodeTest::test_is_available_positive();
  1.3823 +    TestVirtualSpaceNodeTest::test_is_available_negative();
  1.3824 +    TestVirtualSpaceNodeTest::test_is_available_overflow();
  1.3825 +  }
  1.3826 +};
  1.3827 +
  1.3828 +void TestVirtualSpaceNode_test() {
  1.3829 +  TestVirtualSpaceNodeTest::test();
  1.3830 +  TestVirtualSpaceNodeTest::test_is_available();
  1.3831 +}
  1.3832 +#endif
