src/share/vm/memory/metaspace.cpp

changeset 4037:da91efe96a93
child 4038:03049e0e8544
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/metaspace.cpp	Sat Sep 01 13:25:18 2012 -0400
     1.3 @@ -0,0 +1,2999 @@
     1.4 +/*
     1.5 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +#include "precompiled.hpp"
    1.28 +#include "gc_interface/collectedHeap.hpp"
    1.29 +#include "memory/binaryTreeDictionary.hpp"
    1.30 +#include "memory/collectorPolicy.hpp"
    1.31 +#include "memory/filemap.hpp"
    1.32 +#include "memory/freeList.hpp"
    1.33 +#include "memory/metaspace.hpp"
    1.34 +#include "memory/metaspaceShared.hpp"
    1.35 +#include "memory/resourceArea.hpp"
    1.36 +#include "memory/universe.hpp"
    1.37 +#include "runtime/globals.hpp"
    1.38 +#include "runtime/mutex.hpp"
    1.39 +#include "services/memTracker.hpp"
    1.40 +#include "utilities/copy.hpp"
    1.41 +#include "utilities/debug.hpp"
    1.42 +
    1.43 +// Define this macro to deallocate Metablock.  If not defined,
    1.44 +// blocks are not yet deallocated and are only mangled.
    1.45 +#undef DEALLOCATE_BLOCKS
    1.46 +
    1.47 +// Easily recognizable patterns
    1.48 +// These patterns can be the same in 32bit or 64bit since
    1.49 +// they only have to be easily recognizable.
     1.50 +const void* metaspace_allocation_leader = (void*) 0x11111111;
     1.51 +const void* metaspace_allocation_trailer = (void*) 0x77777777;
    1.52 +
    1.53 +// Parameters for stress mode testing
    1.54 +const uint metadata_deallocate_a_lot_block = 10;
    1.55 +const uint metadata_deallocate_a_lock_chunk = 3;
    1.56 +size_t const allocation_from_dictionary_limit = 64 * K;
    1.57 +const size_t metadata_chunk_initialize = 0xf7f7f7f7;
    1.58 +const size_t metadata_deallocate = 0xf5f5f5f5;
    1.59 +const size_t metadata_space_manager_allocate = 0xf3f3f3f3;
    1.60 +
    1.61 +MetaWord* last_allocated = 0;
    1.62 +
    1.63 +// Used in declarations in SpaceManager and ChunkManager
    1.64 +enum ChunkIndex {
    1.65 +  SmallIndex = 0,
    1.66 +  MediumIndex = 1,
    1.67 +  HumongousIndex = 2,
    1.68 +  NumberOfFreeLists = 3
    1.69 +};
    1.70 +
    1.71 +static ChunkIndex next_chunk_index(ChunkIndex i) {
     1.72 +  assert(i < NumberOfFreeLists, "Out of bounds");
    1.73 +  return (ChunkIndex) (i+1);
    1.74 +}
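// [Editorial sketch, not part of the changeset] next_chunk_index() is
// presumably meant to support walking the free-list indices in order,
// along the lines of:
//
//   for (ChunkIndex i = SmallIndex;
//        i < NumberOfFreeLists;
//        i = next_chunk_index(i)) {
//     // visit the free list at index i
//   }
//
// The loop test keeps next_chunk_index() from ever being called with
// NumberOfFreeLists, which would trip its assert.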
    1.75 +
    1.76 +// Originally _capacity_until_GC was set to MetaspaceSize here but
    1.77 +// the default MetaspaceSize before argument processing was being
    1.78 +// used which was not the desired value.  See the code
    1.79 +// in should_expand() to see how the initialization is handled
    1.80 +// now.
    1.81 +size_t MetaspaceGC::_capacity_until_GC = 0;
    1.82 +bool MetaspaceGC::_expand_after_GC = false;
    1.83 +uint MetaspaceGC::_shrink_factor = 0;
    1.84 +bool MetaspaceGC::_should_concurrent_collect = false;
    1.85 +
    1.86 +// Blocks of space for metadata are allocated out of Metachunks.
    1.87 +//
     1.88 +// Metachunks are allocated out of MetadataVirtualspaces and once
     1.89 +// allocated there is no explicit link between a Metachunk and
     1.90 +// the MetadataVirtualspace from which it was allocated.
    1.91 +//
    1.92 +// Each SpaceManager maintains a
    1.93 +// list of the chunks it is using and the current chunk.  The current
    1.94 +// chunk is the chunk from which allocations are done.  Space freed in
    1.95 +// a chunk is placed on the free list of blocks (BlockFreelist) and
    1.96 +// reused from there.
    1.97 +//
    1.98 +// Future modification
    1.99 +//
    1.100 +// The Metachunk can conceivably be replaced by the Chunk in
    1.101 +// allocation.hpp.  Note that the latter Chunk is the space for
    1.102 +// allocation (allocations from the chunk are out of the space in
    1.103 +// the Chunk after the header for the Chunk) whereas Metachunks
   1.104 +// point to space in a VirtualSpace.  To replace Metachunks with
   1.105 +// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
   1.106 +//
   1.107 +
    1.108 +// A Metablock is the unit of allocation from a Chunk.  It contains
    1.109 +// the size of the requested allocation in a debug build.
   1.110 +// Also in a debug build it has a marker before and after the
   1.111 +// body of the block. The address of the body is the address returned
   1.112 +// by the allocation.
   1.113 +//
   1.114 +// Layout in a debug build.  In a product build only the body is present.
   1.115 +//
   1.116 +//     +-----------+-----------+------------+     +-----------+
   1.117 +//     | word size | leader    | body       | ... | trailer   |
   1.118 +//     +-----------+-----------+------------+     +-----------+
   1.119 +//
    1.120 +// A Metablock may be reused by its SpaceManager but is never moved between
    1.121 +// SpaceManagers.  There is no explicit link to the Metachunk
    1.122 +// from which it was allocated.  Metablocks are not deallocated; rather,
    1.123 +// the Metachunk a Metablock is part of will be deallocated when its
    1.124 +// associated class loader is collected.
   1.125 +//
    1.126 +// Once the word size of a block is passed in to the deallocation
    1.127 +// call, the word size no longer needs to be part of a Metablock.
   1.128 +
   1.129 +class Metablock {
   1.130 +  friend class VMStructs;
   1.131 + private:
   1.132 +  // Used to align the allocation (see below) and for debugging.
   1.133 +#ifdef ASSERT
   1.134 +  struct {
   1.135 +    size_t _word_size;
   1.136 +    void*  _leader;
   1.137 +  } _header;
   1.138 +  void* _data[1];
   1.139 +#endif
   1.140 +  static size_t _overhead;
   1.141 +
   1.142 +#ifdef ASSERT
   1.143 +  void set_word_size(size_t v) { _header._word_size = v; }
   1.144 +  void* leader() { return _header._leader; }
   1.145 +  void* trailer() {
   1.146 +    jlong index = (jlong) _header._word_size - sizeof(_header)/BytesPerWord - 1;
    1.147 +    assert(index > 0, err_msg("Bad indexing of trailer " INT64_FORMAT, index));
   1.148 +    void** ptr = &_data[index];
   1.149 +    return *ptr;
   1.150 +  }
   1.151 +  void set_leader(void* v) { _header._leader = v; }
   1.152 +  void set_trailer(void* v) {
   1.153 +    void** ptr = &_data[_header._word_size - sizeof(_header)/BytesPerWord - 1];
   1.154 +    *ptr = v;
   1.155 +  }
   1.156 + public:
   1.157 +  size_t word_size() { return _header._word_size; }
   1.158 +#endif
   1.159 + public:
   1.160 +
   1.161 +  static Metablock* initialize(MetaWord* p, size_t word_size);
   1.162 +
   1.163 +  // This places the body of the block at a 2 word boundary
   1.164 +  // because every block starts on a 2 word boundary.  Work out
   1.165 +  // how to make the body on a 2 word boundary if the block
    1.166 +  // starts on an arbitrary boundary.  JJJ
   1.167 +
   1.168 +#ifdef ASSERT
   1.169 +  MetaWord* data() { return (MetaWord*) &_data[0]; }
   1.170 +#else
   1.171 +  MetaWord* data() { return (MetaWord*) this; }
   1.172 +#endif
   1.173 +  static Metablock* metablock_from_data(MetaWord* p) {
   1.174 +#ifdef ASSERT
   1.175 +    size_t word_offset = offset_of(Metablock, _data)/BytesPerWord;
   1.176 +    Metablock* result = (Metablock*) (p - word_offset);
   1.177 +    return result;
   1.178 +#else
   1.179 +    return (Metablock*) p;
   1.180 +#endif
   1.181 +  }
   1.182 +
   1.183 +  static size_t overhead() { return _overhead; }
   1.184 +  void verify();
   1.185 +};
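// [Editorial sketch, not part of the changeset] A worked example of the
// debug layout arithmetic above, assuming a 64-bit VM where the two-field
// _header occupies two words: for a block with word_size == 8,
// set_trailer() writes at
//
//   _data[8 - sizeof(_header)/BytesPerWord - 1] == _data[5]
//
// which is the last of the block's eight words (two header words plus
// _data[0..5]), leaving _data[0..4] as the usable body between the leader
// and the trailer.  In a product build the header and trailer vanish and
// data() degenerates to `this`.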
   1.186 +
   1.187 +//  Metachunk - Quantum of allocation from a Virtualspace
   1.188 +//    Metachunks are reused (when freed are put on a global freelist) and
   1.189 +//    have no permanent association to a SpaceManager.
   1.190 +
   1.191 +//            +--------------+ <- end
   1.192 +//            |              |          --+       ---+
   1.193 +//            |              |            | free     |
   1.194 +//            |              |            |          |
   1.195 +//            |              |            |          | capacity
   1.196 +//            |              |            |          |
   1.197 +//            |              | <- top   --+          |
   1.198 +//            |              |           ---+        |
   1.199 +//            |              |              | used   |
   1.200 +//            |              |              |        |
   1.201 +//            |              |              |        |
   1.202 +//            +--------------+ <- bottom ---+     ---+
   1.203 +
   1.204 +class Metachunk VALUE_OBJ_CLASS_SPEC {
   1.205 +  // link to support lists of chunks
   1.206 +  Metachunk* _next;
   1.207 +
   1.208 +  MetaWord* _bottom;
   1.209 +  MetaWord* _end;
   1.210 +  MetaWord* _top;
   1.211 +  size_t _word_size;
   1.212 +
    1.213 +  // Metachunks are allocated out of a MetadataVirtualSpace and
    1.214 +  // use some of their space to describe themselves (plus alignment
    1.215 +  // considerations).  Metadata is allocated in the rest of the chunk.
   1.216 +  // This size is the overhead of maintaining the Metachunk within
   1.217 +  // the space.
   1.218 +  static size_t _overhead;
   1.219 +
   1.220 +  void set_bottom(MetaWord* v) { _bottom = v; }
   1.221 +  void set_end(MetaWord* v) { _end = v; }
   1.222 +  void set_top(MetaWord* v) { _top = v; }
   1.223 +  void set_word_size(size_t v) { _word_size = v; }
   1.224 + public:
   1.225 +
   1.226 +  // Used to add a Metachunk to a list of Metachunks
   1.227 +  void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
   1.228 +
   1.229 +  Metablock* allocate(size_t word_size);
   1.230 +  static Metachunk* initialize(MetaWord* ptr, size_t word_size);
   1.231 +
   1.232 +  // Accessors
   1.233 +  Metachunk* next() const { return _next; }
   1.234 +  MetaWord* bottom() const { return _bottom; }
   1.235 +  MetaWord* end() const { return _end; }
   1.236 +  MetaWord* top() const { return _top; }
   1.237 +  size_t word_size() const { return _word_size; }
   1.238 +  static size_t overhead() { return _overhead; }
   1.239 +
   1.240 +  // Reset top to bottom so chunk can be reused.
   1.241 +  void reset_empty() { _top = (_bottom + _overhead); }
   1.242 +  bool is_empty() { return _top == (_bottom + _overhead); }
   1.243 +
   1.244 +  // used (has been allocated)
   1.245 +  // free (available for future allocations)
   1.246 +  // capacity (total size of chunk)
   1.247 +  size_t used_word_size();
   1.248 +  size_t free_word_size();
   1.249 +  size_t capacity_word_size();
   1.250 +
   1.251 +#ifdef ASSERT
   1.252 +  void mangle() {
   1.253 +    // Mangle the payload of the chunk and not the links that
   1.254 +    // maintain list of chunks.
   1.255 +    HeapWord* start = (HeapWord*)(bottom() + overhead());
   1.256 +    size_t word_size = capacity_word_size() - overhead();
   1.257 +    Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
   1.258 +  }
   1.259 +#endif // ASSERT
   1.260 +
   1.261 +  void print_on(outputStream* st) const;
   1.262 +  void verify();
   1.263 +};
   1.264 +
   1.265 +
   1.266 +// Pointer to list of Metachunks.
   1.267 +class ChunkList VALUE_OBJ_CLASS_SPEC {
   1.268 +  // List of free chunks
   1.269 +  Metachunk* _head;
   1.270 +
   1.271 + public:
   1.272 +  // Constructor
   1.273 +  ChunkList() : _head(NULL) {}
   1.274 +
   1.275 +  // Accessors
   1.276 +  Metachunk* head() { return _head; }
   1.277 +  void set_head(Metachunk* v) { _head = v; }
   1.278 +
   1.279 +  // Link at head of the list
   1.280 +  void add_at_head(Metachunk* head, Metachunk* tail);
   1.281 +  void add_at_head(Metachunk* head);
   1.282 +
   1.283 +  size_t sum_list_size();
   1.284 +  size_t sum_list_count();
   1.285 +  size_t sum_list_capacity();
   1.286 +};
   1.287 +
   1.288 +// Manages the global free lists of chunks.
   1.289 +// Has three lists of free chunks, and a total size and
    1.290 +// count that includes all three.
   1.291 +
   1.292 +class ChunkManager VALUE_OBJ_CLASS_SPEC {
   1.293 +
   1.294 +  // Free list of chunks of different sizes.
   1.295 +  //   SmallChunk
   1.296 +  //   MediumChunk
   1.297 +  //   HumongousChunk
    1.298 +  ChunkList _free_chunks[NumberOfFreeLists];
   1.299 +
    1.300 +  // Total size and count of the chunks in all of the lists above
   1.301 +  size_t _free_chunks_total;
   1.302 +  size_t _free_chunks_count;
   1.303 +
   1.304 +  void dec_free_chunks_total(size_t v) {
   1.305 +    assert(_free_chunks_count > 0 &&
   1.306 +             _free_chunks_total > 0,
   1.307 +             "About to go negative");
   1.308 +    Atomic::add_ptr(-1, &_free_chunks_count);
   1.309 +    jlong minus_v = (jlong) - (jlong) v;
   1.310 +    Atomic::add_ptr(minus_v, &_free_chunks_total);
   1.311 +  }
   1.312 +
   1.313 +  // Debug support
   1.314 +
   1.315 +  size_t sum_free_chunks();
   1.316 +  size_t sum_free_chunks_count();
   1.317 +
   1.318 +  void locked_verify_free_chunks_total();
   1.319 +  void locked_verify_free_chunks_count();
   1.320 +  void verify_free_chunks_count();
   1.321 +
   1.322 + public:
   1.323 +
   1.324 +  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
   1.325 +
    1.326 +  // Remove (allocate) a chunk from, or add (return) one to, the global freelist.
   1.327 +  Metachunk* chunk_freelist_allocate(size_t word_size);
   1.328 +  void chunk_freelist_deallocate(Metachunk* chunk);
   1.329 +
   1.330 +  // Total of the space in the free chunks list
   1.331 +  size_t free_chunks_total();
   1.332 +  size_t free_chunks_total_in_bytes();
   1.333 +
   1.334 +  // Number of chunks in the free chunks list
   1.335 +  size_t free_chunks_count();
   1.336 +
   1.337 +  void inc_free_chunks_total(size_t v, size_t count = 1) {
   1.338 +    Atomic::add_ptr(count, &_free_chunks_count);
   1.339 +    Atomic::add_ptr(v, &_free_chunks_total);
   1.340 +  }
    1.341 +  ChunkList* free_medium_chunks() { return &_free_chunks[MediumIndex]; }
    1.342 +  ChunkList* free_small_chunks() { return &_free_chunks[SmallIndex]; }
    1.343 +  ChunkList* free_humongous_chunks() { return &_free_chunks[HumongousIndex]; }
   1.344 +
   1.345 +  ChunkList* free_chunks(ChunkIndex index);
   1.346 +
   1.347 +  // Returns the list for the given chunk word size.
   1.348 +  ChunkList* find_free_chunks_list(size_t word_size);
   1.349 +
   1.350 +  // Add and remove from a list by size.  Selects
   1.351 +  // list based on size of chunk.
    1.352 +  void free_chunks_put(Metachunk* chunk);
   1.353 +  Metachunk* free_chunks_get(size_t chunk_word_size);
   1.354 +
   1.355 +  // Debug support
   1.356 +  void verify();
   1.357 +  void locked_verify();
   1.358 +  void verify_free_chunks_total();
   1.359 +
   1.360 +  void locked_print_free_chunks(outputStream* st);
   1.361 +  void locked_print_sum_free_chunks(outputStream* st);
   1.362 +};
   1.363 +
   1.364 +
   1.365 +// Used to manage the free list of Metablocks (a block corresponds
   1.366 +// to the allocation of a quantum of metadata).
   1.367 +class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   1.368 +#ifdef DEALLOCATE_BLOCKS
   1.369 +  BinaryTreeDictionary<Metablock>* _dictionary;
   1.370 +#endif
   1.371 +  static Metablock* initialize_free_chunk(Metablock* block, size_t word_size);
   1.372 +
   1.373 +#ifdef DEALLOCATE_BLOCKS
   1.374 +  // Accessors
   1.375 +  BinaryTreeDictionary<Metablock>* dictionary() const { return _dictionary; }
   1.376 +#endif
   1.377 +
   1.378 + public:
   1.379 +  BlockFreelist();
   1.380 +  ~BlockFreelist();
   1.381 +
   1.382 +  // Get and return a block to the free list
   1.383 +  Metablock* get_block(size_t word_size);
   1.384 +  void return_block(Metablock* block, size_t word_size);
   1.385 +
   1.386 +  size_t totalSize() {
   1.387 +#ifdef DEALLOCATE_BLOCKS
   1.388 +    if (dictionary() == NULL) {
   1.389 +      return 0;
   1.390 +    } else {
   1.391 +      return dictionary()->totalSize();
   1.392 +    }
   1.393 +#else
   1.394 +    return 0;
   1.395 +#endif
   1.396 +  }
   1.397 +
   1.398 +  void print_on(outputStream* st) const;
   1.399 +};
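// [Editorial sketch, not part of the changeset] The intended round trip
// through a BlockFreelist when DEALLOCATE_BLOCKS is defined; `fl` and
// `block` are hypothetical:
//
//   fl->return_block(block, block->word_size());  // into the dictionary
//   Metablock* reused = fl->get_block(word_size); // NULL unless a block of
//                                                 // exactly word_size exists
//   if (reused != NULL) {
//     // get_block() re-runs Metablock::initialize(), so the block comes
//     // back zeroed with fresh debug leader/trailer markers.
//   }
//
// With DEALLOCATE_BLOCKS undefined (as above), get_block() always returns
// NULL and freed blocks are only mangled.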
   1.400 +
   1.401 +class VirtualSpaceNode : public CHeapObj<mtClass> {
   1.402 +  friend class VirtualSpaceList;
   1.403 +
   1.404 +  // Link to next VirtualSpaceNode
   1.405 +  VirtualSpaceNode* _next;
   1.406 +
   1.407 +  // total in the VirtualSpace
   1.408 +  MemRegion _reserved;
   1.409 +  ReservedSpace _rs;
   1.410 +  VirtualSpace _virtual_space;
   1.411 +  MetaWord* _top;
   1.412 +
   1.413 +  // Convenience functions for logical bottom and end
   1.414 +  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   1.415 +  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   1.416 +
   1.417 +  // Convenience functions to access the _virtual_space
   1.418 +  char* low()  const { return virtual_space()->low(); }
   1.419 +  char* high() const { return virtual_space()->high(); }
   1.420 +
   1.421 + public:
   1.422 +
   1.423 +  VirtualSpaceNode(size_t byte_size);
   1.424 +  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
   1.425 +  ~VirtualSpaceNode();
   1.426 +
    1.427 +  // _top is the address of the next available space in _virtual_space.
   1.428 +  // Accessors
   1.429 +  VirtualSpaceNode* next() { return _next; }
   1.430 +  void set_next(VirtualSpaceNode* v) { _next = v; }
   1.431 +
   1.432 +  void set_reserved(MemRegion const v) { _reserved = v; }
   1.433 +  void set_top(MetaWord* v) { _top = v; }
   1.434 +
   1.435 +  // Accessors
   1.436 +  MemRegion* reserved() { return &_reserved; }
   1.437 +  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
   1.438 +
   1.439 +  // Returns true if "word_size" is available in the virtual space
   1.440 +  bool is_available(size_t word_size) { return _top + word_size <= end(); }
   1.441 +
   1.442 +  MetaWord* top() const { return _top; }
   1.443 +  void inc_top(size_t word_size) { _top += word_size; }
   1.444 +
   1.445 +  // used and capacity in this single entry in the list
   1.446 +  size_t used_words_in_vs() const;
   1.447 +  size_t capacity_words_in_vs() const;
   1.448 +
   1.449 +  bool initialize();
   1.450 +
   1.451 +  // get space from the virtual space
   1.452 +  Metachunk* take_from_committed(size_t chunk_word_size);
   1.453 +
   1.454 +  // Allocate a chunk from the virtual space and return it.
   1.455 +  Metachunk* get_chunk_vs(size_t chunk_word_size);
   1.456 +  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
   1.457 +
   1.458 +  // Expands/shrinks the committed space in a virtual space.  Delegates
   1.459 +  // to Virtualspace
   1.460 +  bool expand_by(size_t words, bool pre_touch = false);
   1.461 +  bool shrink_by(size_t words);
   1.462 +
   1.463 +  // Debug support
   1.464 +  static void verify_virtual_space_total();
   1.465 +  static void verify_virtual_space_count();
   1.466 +  void mangle();
   1.467 +
   1.468 +  void print_on(outputStream* st) const;
   1.469 +};
   1.470 +
    1.471 +// byte_size is the size of the associated virtualspace.
   1.472 +VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
   1.473 +  // This allocates memory with mmap.  For DumpSharedspaces, allocate the
   1.474 +  // space at low memory so that other shared images don't conflict.
   1.475 +  // This is the same address as memory needed for UseCompressedOops but
   1.476 +  // compressed oops don't work with CDS (offsets in metadata are wrong), so
   1.477 +  // borrow the same address.
   1.478 +  if (DumpSharedSpaces) {
   1.479 +    char* shared_base = (char*)HeapBaseMinAddress;
   1.480 +    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
   1.481 +    if (_rs.is_reserved()) {
   1.482 +      assert(_rs.base() == shared_base, "should match");
   1.483 +    } else {
   1.484 +      // If we are dumping the heap, then allocate a wasted block of address
   1.485 +      // space in order to push the heap to a lower address.  This extra
   1.486 +      // address range allows for other (or larger) libraries to be loaded
   1.487 +      // without them occupying the space required for the shared spaces.
   1.488 +      uintx reserved = 0;
   1.489 +      uintx block_size = 64*1024*1024;
   1.490 +      while (reserved < SharedDummyBlockSize) {
   1.491 +        char* dummy = os::reserve_memory(block_size);
   1.492 +        reserved += block_size;
   1.493 +      }
   1.494 +      _rs = ReservedSpace(byte_size);
   1.495 +    }
   1.496 +    MetaspaceShared::set_shared_rs(&_rs);
   1.497 +  } else {
   1.498 +    _rs = ReservedSpace(byte_size);
   1.499 +  }
   1.500 +
   1.501 +  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   1.502 +}
   1.503 +
   1.504 +// List of VirtualSpaces for metadata allocation.
    1.505 +// It has a _next link for a singly linked list and a MemRegion
    1.506 +// for the total space in the VirtualSpace.
   1.507 +class VirtualSpaceList : public CHeapObj<mtClass> {
   1.508 +  friend class VirtualSpaceNode;
   1.509 +
   1.510 +  enum VirtualSpaceSizes {
   1.511 +    VirtualSpaceSize = 256 * K
   1.512 +  };
   1.513 +
   1.514 +  // Global list of virtual spaces
   1.515 +  // Head of the list
   1.516 +  VirtualSpaceNode* _virtual_space_list;
   1.517 +  // virtual space currently being used for allocations
   1.518 +  VirtualSpaceNode* _current_virtual_space;
   1.519 +  // Free chunk list for all other metadata
   1.520 +  ChunkManager      _chunk_manager;
   1.521 +
    1.522 +  // Can this virtual list allocate more than one space?  Also used to
    1.523 +  // determine whether to allocate unlimited small chunks in this virtual space.
   1.524 +  bool _is_class;
   1.525 +  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
   1.526 +
   1.527 +  // Sum of space in all virtual spaces and number of virtual spaces
   1.528 +  size_t _virtual_space_total;
   1.529 +  size_t _virtual_space_count;
   1.530 +
   1.531 +  ~VirtualSpaceList();
   1.532 +
   1.533 +  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
   1.534 +
   1.535 +  void set_virtual_space_list(VirtualSpaceNode* v) {
   1.536 +    _virtual_space_list = v;
   1.537 +  }
   1.538 +  void set_current_virtual_space(VirtualSpaceNode* v) {
   1.539 +    _current_virtual_space = v;
   1.540 +  }
   1.541 +
   1.542 +  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
   1.543 +
   1.544 +  // Get another virtual space and add it to the list.  This
   1.545 +  // is typically prompted by a failed attempt to allocate a chunk
   1.546 +  // and is typically followed by the allocation of a chunk.
   1.547 +  bool grow_vs(size_t vs_word_size);
   1.548 +
   1.549 + public:
   1.550 +  VirtualSpaceList(size_t word_size);
   1.551 +  VirtualSpaceList(ReservedSpace rs);
   1.552 +
   1.553 +  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
   1.554 +
   1.555 +  VirtualSpaceNode* current_virtual_space() {
   1.556 +    return _current_virtual_space;
   1.557 +  }
   1.558 +
   1.559 +  ChunkManager* chunk_manager() { return &_chunk_manager; }
   1.560 +  bool is_class() const { return _is_class; }
   1.561 +
   1.562 +  // Allocate the first virtualspace.
   1.563 +  void initialize(size_t word_size);
   1.564 +
   1.565 +  size_t virtual_space_total() { return _virtual_space_total; }
   1.566 +  void inc_virtual_space_total(size_t v) {
   1.567 +    Atomic::add_ptr(v, &_virtual_space_total);
   1.568 +  }
   1.569 +
   1.570 +  size_t virtual_space_count() { return _virtual_space_count; }
   1.571 +  void inc_virtual_space_count() {
   1.572 +    Atomic::inc_ptr(&_virtual_space_count);
   1.573 +  }
   1.574 +
   1.575 +  // Used and capacity in the entire list of virtual spaces.
   1.576 +  // These are global values shared by all Metaspaces
   1.577 +  size_t capacity_words_sum();
   1.578 +  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
   1.579 +  size_t used_words_sum();
   1.580 +  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
   1.581 +
   1.582 +  bool contains(const void *ptr);
   1.583 +
   1.584 +  void print_on(outputStream* st) const;
   1.585 +
   1.586 +  class VirtualSpaceListIterator : public StackObj {
   1.587 +    VirtualSpaceNode* _virtual_spaces;
   1.588 +   public:
   1.589 +    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
   1.590 +      _virtual_spaces(virtual_spaces) {}
   1.591 +
   1.592 +    bool repeat() {
   1.593 +      return _virtual_spaces != NULL;
   1.594 +    }
   1.595 +
   1.596 +    VirtualSpaceNode* get_next() {
   1.597 +      VirtualSpaceNode* result = _virtual_spaces;
   1.598 +      if (_virtual_spaces != NULL) {
   1.599 +        _virtual_spaces = _virtual_spaces->next();
   1.600 +      }
   1.601 +      return result;
   1.602 +    }
   1.603 +  };
   1.604 +};
   1.605 +
   1.606 +
   1.607 +class Metadebug : AllStatic {
   1.608 +  // Debugging support for Metaspaces
   1.609 +  static int _deallocate_block_a_lot_count;
   1.610 +  static int _deallocate_chunk_a_lot_count;
   1.611 +  static int _allocation_fail_alot_count;
   1.612 +
   1.613 + public:
   1.614 +  static int deallocate_block_a_lot_count() {
   1.615 +    return _deallocate_block_a_lot_count;
   1.616 +  }
   1.617 +  static void set_deallocate_block_a_lot_count(int v) {
   1.618 +    _deallocate_block_a_lot_count = v;
   1.619 +  }
   1.620 +  static void inc_deallocate_block_a_lot_count() {
   1.621 +    _deallocate_block_a_lot_count++;
   1.622 +  }
   1.623 +  static int deallocate_chunk_a_lot_count() {
   1.624 +    return _deallocate_chunk_a_lot_count;
   1.625 +  }
   1.626 +  static void reset_deallocate_chunk_a_lot_count() {
   1.627 +    _deallocate_chunk_a_lot_count = 1;
   1.628 +  }
   1.629 +  static void inc_deallocate_chunk_a_lot_count() {
   1.630 +    _deallocate_chunk_a_lot_count++;
   1.631 +  }
   1.632 +
   1.633 +  static void init_allocation_fail_alot_count();
   1.634 +#ifdef ASSERT
   1.635 +  static bool test_metadata_failure();
   1.636 +#endif
   1.637 +
   1.638 +  static void deallocate_chunk_a_lot(SpaceManager* sm,
   1.639 +                                     size_t chunk_word_size);
   1.640 +  static void deallocate_block_a_lot(SpaceManager* sm,
   1.641 +                                     size_t chunk_word_size);
   1.642 +
   1.643 +};
   1.644 +
   1.645 +int Metadebug::_deallocate_block_a_lot_count = 0;
   1.646 +int Metadebug::_deallocate_chunk_a_lot_count = 0;
   1.647 +int Metadebug::_allocation_fail_alot_count = 0;
   1.648 +
   1.649 +//  SpaceManager - used by Metaspace to handle allocations
   1.650 +class SpaceManager : public CHeapObj<mtClass> {
   1.651 +  friend class Metaspace;
   1.652 +  friend class Metadebug;
   1.653 +
   1.654 + private:
    1.655 +  // Protects allocations and contains().
   1.656 +  Mutex* const _lock;
   1.657 +
   1.658 +  // List of chunks in use by this SpaceManager.  Allocations
   1.659 +  // are done from the current chunk.  The list is used for deallocating
   1.660 +  // chunks when the SpaceManager is freed.
   1.661 +  Metachunk* _chunks_in_use[NumberOfFreeLists];
   1.662 +  Metachunk* _current_chunk;
   1.663 +
   1.664 +  // Virtual space where allocation comes from.
   1.665 +  VirtualSpaceList* _vs_list;
   1.666 +
   1.667 +  // Number of small chunks to allocate to a manager
   1.668 +  // If class space manager, small chunks are unlimited
   1.669 +  static uint const _small_chunk_limit;
   1.670 +  bool has_small_chunk_limit() { return !vs_list()->is_class(); }
   1.671 +
   1.672 +  // Sum of all space in allocated chunks
   1.673 +  size_t _allocation_total;
   1.674 +
   1.675 +  // Free lists of blocks are per SpaceManager since they
   1.676 +  // are assumed to be in chunks in use by the SpaceManager
   1.677 +  // and all chunks in use by a SpaceManager are freed when
   1.678 +  // the class loader using the SpaceManager is collected.
   1.679 +  BlockFreelist _block_freelists;
   1.680 +
   1.681 +  // protects virtualspace and chunk expansions
   1.682 +  static const char*  _expand_lock_name;
   1.683 +  static const int    _expand_lock_rank;
   1.684 +  static Mutex* const _expand_lock;
   1.685 +
   1.686 +  // Accessors
   1.687 +  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
   1.688 +  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
   1.689 +
   1.690 +  BlockFreelist* block_freelists() const {
   1.691 +    return (BlockFreelist*) &_block_freelists;
   1.692 +  }
   1.693 +
   1.694 +  VirtualSpaceList* vs_list() const    { return _vs_list; }
   1.695 +
   1.696 +  Metachunk* current_chunk() const { return _current_chunk; }
   1.697 +  void set_current_chunk(Metachunk* v) {
   1.698 +    _current_chunk = v;
   1.699 +  }
   1.700 +
   1.701 +  Metachunk* find_current_chunk(size_t word_size);
   1.702 +
   1.703 +  // Add chunk to the list of chunks in use
   1.704 +  void add_chunk(Metachunk* v, bool make_current);
   1.705 +
   1.706 +  // Debugging support
   1.707 +  void verify_chunks_in_use_index(ChunkIndex index, Metachunk* v) {
   1.708 +    switch (index) {
    1.709 +    case SmallIndex:
    1.710 +      assert(v->word_size() == SmallChunk, "Not a SmallChunk");
    1.711 +      break;
    1.712 +    case MediumIndex:
    1.713 +      assert(v->word_size() == MediumChunk, "Not a MediumChunk");
    1.714 +      break;
    1.715 +    case HumongousIndex:
   1.716 +      assert(v->word_size() > MediumChunk, "Not a HumongousChunk");
   1.717 +      break;
   1.718 +    default:
   1.719 +      assert(false, "Wrong list.");
   1.720 +    }
   1.721 +  }
   1.722 +
   1.723 + protected:
   1.724 +  Mutex* lock() const { return _lock; }
   1.725 +
   1.726 + public:
   1.727 +  SpaceManager(Mutex* lock, VirtualSpaceList* vs_list);
   1.728 +  ~SpaceManager();
   1.729 +
   1.730 +  enum ChunkSizes {    // in words.
   1.731 +    SmallChunk = 512,
   1.732 +    MediumChunk = 8 * K,
   1.733 +    MediumChunkBunch = 4 * MediumChunk
   1.734 +  };
   1.735 +
   1.736 +  // Accessors
   1.737 +  size_t allocation_total() const { return _allocation_total; }
   1.738 +  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
   1.739 +  static bool is_humongous(size_t word_size) { return word_size > MediumChunk; }
   1.740 +
   1.741 +  static Mutex* expand_lock() { return _expand_lock; }
   1.742 +
   1.743 +  size_t sum_capacity_in_chunks_in_use() const;
   1.744 +  size_t sum_used_in_chunks_in_use() const;
   1.745 +  size_t sum_free_in_chunks_in_use() const;
   1.746 +  size_t sum_waste_in_chunks_in_use() const;
   1.747 +  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
   1.748 +
   1.749 +  size_t sum_count_in_chunks_in_use();
   1.750 +  size_t sum_count_in_chunks_in_use(ChunkIndex i);
   1.751 +
   1.752 +  // Block allocation and deallocation.
   1.753 +  // Allocates a block from the current chunk
   1.754 +  MetaWord* allocate(size_t word_size);
   1.755 +
   1.756 +  // Helper for allocations
   1.757 +  Metablock* allocate_work(size_t word_size);
   1.758 +
   1.759 +  // Returns a block to the per manager freelist
   1.760 +  void deallocate(MetaWord* p);
   1.761 +
    1.762 +  // Returns the chunk size to use for expansion, based on the
    1.763 +  // allocation size and a minimum chunk size.
   1.764 +  size_t calc_chunk_size(size_t allocation_word_size);
   1.765 +
   1.766 +  // Called when an allocation from the current chunk fails.
   1.767 +  // Gets a new chunk (may require getting a new virtual space),
   1.768 +  // and allocates from that chunk.
   1.769 +  Metablock* grow_and_allocate(size_t word_size);
   1.770 +
   1.771 +  // debugging support.
   1.772 +
   1.773 +  void dump(outputStream* const out) const;
   1.774 +  void print_on(outputStream* st) const;
   1.775 +  void locked_print_chunks_in_use_on(outputStream* st) const;
   1.776 +
   1.777 +  void verify();
   1.778 +#ifdef ASSERT
   1.779 +  void mangle_freed_chunks();
   1.780 +  void verify_allocation_total();
   1.781 +#endif
   1.782 +};
   1.783 +
   1.784 +uint const SpaceManager::_small_chunk_limit = 4;
   1.785 +
   1.786 +const char* SpaceManager::_expand_lock_name =
   1.787 +  "SpaceManager chunk allocation lock";
   1.788 +const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
   1.789 +Mutex* const SpaceManager::_expand_lock =
   1.790 +  new Mutex(SpaceManager::_expand_lock_rank,
   1.791 +            SpaceManager::_expand_lock_name,
   1.792 +            Mutex::_allow_vm_block_flag);
   1.793 +
   1.794 +#ifdef ASSERT
   1.795 +size_t Metablock::_overhead =
   1.796 +  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
   1.797 +#else
   1.798 +size_t Metablock::_overhead = 0;
   1.799 +#endif
   1.800 +size_t Metachunk::_overhead =
   1.801 +  Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
   1.802 +
   1.803 +// New blocks returned by the Metaspace are zero initialized.
    1.804 +// Instead, we should fix the constructors to not assume this.
   1.805 +Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
   1.806 +  Metablock* result = (Metablock*) p;
   1.807 +
   1.808 +  // Clear the memory
   1.809 +  Copy::fill_to_aligned_words((HeapWord*)result, word_size);
   1.810 +#ifdef ASSERT
   1.811 +  result->set_word_size(word_size);
    1.812 +  // Set only after the word size is set; the trailer location depends on it.
   1.813 +  result->set_leader((void*) metaspace_allocation_leader);
   1.814 +  result->set_trailer((void*) metaspace_allocation_trailer);
   1.815 +#endif
   1.816 +  return result;
   1.817 +}
   1.818 +
   1.819 +void Metablock::verify() {
   1.820 +#ifdef ASSERT
   1.821 +  assert(leader() == metaspace_allocation_leader &&
   1.822 +         trailer() == metaspace_allocation_trailer,
   1.823 +         "block has been corrupted");
   1.824 +#endif
   1.825 +}
   1.826 +
   1.827 +// Metachunk methods
   1.828 +
   1.829 +Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
   1.830 +  // Set bottom, top, and end.  Allow space for the Metachunk itself
   1.831 +  Metachunk* chunk = (Metachunk*) ptr;
   1.832 +
   1.833 +  MetaWord* chunk_bottom = ptr + _overhead;
   1.834 +  chunk->set_bottom(ptr);
   1.835 +  chunk->set_top(chunk_bottom);
   1.836 +  MetaWord* chunk_end = ptr + word_size;
    1.837 +  assert(chunk_end > chunk_bottom, "Chunk must be large enough for its overhead");
   1.838 +  chunk->set_end(chunk_end);
   1.839 +  chunk->set_next(NULL);
   1.840 +  chunk->set_word_size(word_size);
   1.841 +#ifdef ASSERT
   1.842 +  size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
   1.843 +  Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
   1.844 +#endif
   1.845 +  return chunk;
   1.846 +}
   1.847 +
   1.848 +
   1.849 +Metablock* Metachunk::allocate(size_t word_size) {
   1.850 +  Metablock* result = NULL;
   1.851 +  // If available, bump the pointer to allocate.
   1.852 +  if (free_word_size() >= word_size) {
   1.853 +    result = Metablock::initialize(_top, word_size);
   1.854 +    _top = _top + word_size;
   1.855 +  }
   1.856 +#ifdef ASSERT
   1.857 +  assert(result == NULL ||
   1.858 +         result->word_size() == word_size,
   1.859 +         "Block size is not set correctly");
   1.860 +#endif
   1.861 +  return result;
   1.862 +}
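// [Editorial sketch, not part of the changeset] Metachunk::allocate() is a
// pure pointer bump; `base` is a hypothetical MetaWord* into committed
// virtual space:
//
//   Metachunk* c  = Metachunk::initialize(base, SpaceManager::SmallChunk);
//   Metablock* b1 = c->allocate(16);                      // top advances 16
//   Metablock* b2 = c->allocate(c->free_word_size() + 1); // NULL: no room
//
// Nothing is ever handed back to the chunk itself; freed blocks go to the
// owning SpaceManager's BlockFreelist, and whole chunks go back to the
// global ChunkManager when the class loader dies.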
   1.863 +
   1.864 +// _bottom points to the start of the chunk including the overhead.
   1.865 +size_t Metachunk::used_word_size() {
   1.866 +  return pointer_delta(_top, _bottom, sizeof(MetaWord));
   1.867 +}
   1.868 +
   1.869 +size_t Metachunk::free_word_size() {
   1.870 +  return pointer_delta(_end, _top, sizeof(MetaWord));
   1.871 +}
   1.872 +
   1.873 +size_t Metachunk::capacity_word_size() {
   1.874 +  return pointer_delta(_end, _bottom, sizeof(MetaWord));
   1.875 +}
   1.876 +
   1.877 +void Metachunk::print_on(outputStream* st) const {
   1.878 +  st->print_cr("Metachunk:"
   1.879 +               " bottom " PTR_FORMAT " top " PTR_FORMAT
   1.880 +               " end " PTR_FORMAT " size " SIZE_FORMAT,
   1.881 +               bottom(), top(), end(), word_size());
   1.882 +}
   1.883 +
   1.884 +
   1.885 +void Metachunk::verify() {
   1.886 +#ifdef ASSERT
   1.887 +  // Cannot walk through the blocks unless the blocks have
   1.888 +  // headers with sizes.
   1.889 +  MetaWord* curr = bottom() + overhead();
   1.890 +  while (curr < top()) {
   1.891 +    Metablock* block = (Metablock*) curr;
   1.892 +    size_t word_size = block->word_size();
   1.893 +    block->verify();
   1.894 +    curr = curr + word_size;
   1.895 +  }
   1.896 +#endif
   1.897 +  return;
   1.898 +}
   1.899 +
   1.900 +// BlockFreelist methods
   1.901 +
   1.902 +#ifdef DEALLOCATE_BLOCKS
   1.903 +BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
   1.904 +#else
   1.905 +BlockFreelist::BlockFreelist() {}
   1.906 +#endif
   1.907 +
   1.908 +BlockFreelist::~BlockFreelist() {
   1.909 +#ifdef DEALLOCATE_BLOCKS
   1.910 +  if (_dictionary != NULL) {
   1.911 +    if (Verbose && TraceMetadataChunkAllocation) {
   1.912 +      _dictionary->print_free_lists(gclog_or_tty);
   1.913 +    }
   1.914 +    delete _dictionary;
   1.915 +  }
   1.916 +#endif
   1.917 +}
   1.918 +
   1.919 +Metablock* BlockFreelist::initialize_free_chunk(Metablock* block, size_t word_size) {
   1.920 +#ifdef DEALLOCATE_BLOCKS
   1.921 +#ifdef ASSERT
    1.922 +  assert(word_size == block->word_size(), "Wrong chunk size");
   1.923 +#endif
   1.924 +  Metablock* result = block;
   1.925 +  result->setSize(word_size);
   1.926 +  result->linkPrev(NULL);
   1.927 +  result->linkNext(NULL);
   1.928 +
   1.929 +  return result;
   1.930 +#else
   1.931 +  ShouldNotReachHere();
   1.932 +  return block;
   1.933 +#endif
   1.934 +}
   1.935 +
   1.936 +void BlockFreelist::return_block(Metablock* block, size_t word_size) {
   1.937 +#ifdef ASSERT
    1.938 +  assert(word_size == block->word_size(), "Block size is wrong");
   1.939 +#endif
   1.940 +  Metablock* free_chunk = initialize_free_chunk(block, word_size);
   1.941 +#ifdef DEALLOCATE_BLOCKS
   1.942 +  if (dictionary() == NULL) {
   1.943 +   _dictionary = new BinaryTreeDictionary<Metablock>(false /* adaptive_freelists */);
   1.944 +  }
   1.945 +  dictionary()->returnChunk(free_chunk);
   1.946 +#endif
   1.947 +}
   1.948 +
   1.949 +Metablock* BlockFreelist::get_block(size_t word_size) {
   1.950 +#ifdef DEALLOCATE_BLOCKS
   1.951 +  if (dictionary() == NULL) {
   1.952 +    return NULL;
   1.953 +  }
   1.954 +
   1.955 +  Metablock* free_chunk =
   1.956 +    dictionary()->getChunk(word_size, FreeBlockDictionary<Metablock>::exactly);
   1.957 +#else
   1.958 +  Metablock* free_chunk = NULL;
   1.959 +#endif
   1.960 +  if (free_chunk == NULL) {
   1.961 +    return NULL;
   1.962 +  }
   1.963 +  assert(free_chunk->word_size() == word_size, "Size of chunk is incorrect");
   1.964 +  Metablock* block = Metablock::initialize((MetaWord*) free_chunk, word_size);
   1.965 +#ifdef ASSERT
   1.966 +  assert(block->word_size() == word_size, "Block size is not set correctly");
   1.967 +#endif
   1.968 +
   1.969 +  return block;
   1.970 +}
   1.971 +
   1.972 +void BlockFreelist::print_on(outputStream* st) const {
   1.973 +#ifdef DEALLOCATE_BLOCKS
   1.974 +  if (dictionary() == NULL) {
   1.975 +    return;
   1.976 +  }
   1.977 +  dictionary()->print_free_lists(st);
   1.978 +#else
   1.979 +  return;
   1.980 +#endif
   1.981 +}
   1.982 +
   1.983 +// VirtualSpaceNode methods
   1.984 +
   1.985 +VirtualSpaceNode::~VirtualSpaceNode() {
   1.986 +  _rs.release();
   1.987 +}
   1.988 +
   1.989 +size_t VirtualSpaceNode::used_words_in_vs() const {
   1.990 +  return pointer_delta(top(), bottom(), sizeof(MetaWord));
   1.991 +}
   1.992 +
   1.993 +// Space committed in the VirtualSpace
   1.994 +size_t VirtualSpaceNode::capacity_words_in_vs() const {
   1.995 +  return pointer_delta(end(), bottom(), sizeof(MetaWord));
   1.996 +}
   1.997 +
   1.998 +
   1.999 +// Allocates the chunk from the virtual space only.
  1.1000 +// This interface is also used internally for debugging.  Not all
  1.1001 +// chunks removed here are necessarily used for allocation.
  1.1002 +Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  1.1003 +  // Bottom of the new chunk
  1.1004 +  MetaWord* chunk_limit = top();
  1.1005 +  assert(chunk_limit != NULL, "Not safe to call this method");
  1.1006 +
  1.1007 +  if (!is_available(chunk_word_size)) {
  1.1008 +    if (TraceMetadataChunkAllocation) {
   1.1009 +      tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
  1.1010 +      // Dump some information about the virtual space that is nearly full
  1.1011 +      print_on(tty);
  1.1012 +    }
  1.1013 +    return NULL;
  1.1014 +  }
  1.1015 +
  1.1016 +  // Take the space  (bump top on the current virtual space).
  1.1017 +  inc_top(chunk_word_size);
  1.1018 +
  1.1019 +  // Point the chunk at the space
  1.1020 +  Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
  1.1021 +  return result;
  1.1022 +}
  1.1023 +
  1.1024 +
  1.1025 +// Expand the virtual space (commit more of the reserved space)
  1.1026 +bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  1.1027 +  size_t bytes = words * BytesPerWord;
   1.1028 +  bool result = virtual_space()->expand_by(bytes, pre_touch);
  1.1029 +  if (TraceMetavirtualspaceAllocation && !result) {
  1.1030 +    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
  1.1031 +                           "for byte size " SIZE_FORMAT, bytes);
  1.1032 +    virtual_space()->print();
  1.1033 +  }
  1.1034 +  return result;
  1.1035 +}
  1.1036 +
   1.1037 +// Shrink the virtual space (uncommit some of the committed space)
  1.1038 +bool VirtualSpaceNode::shrink_by(size_t words) {
  1.1039 +  size_t bytes = words * BytesPerWord;
  1.1040 +  virtual_space()->shrink_by(bytes);
  1.1041 +  return true;
  1.1042 +}
  1.1043 +
   1.1044 +// Allocate a chunk out of the committed space of this virtual space.
  1.1045 +
  1.1046 +Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  1.1047 +  assert_lock_strong(SpaceManager::expand_lock());
   1.1048 +  Metachunk* result = take_from_committed(chunk_word_size);
   1.1049 +
   1.1050 +  return result;
  1.1051 +}
  1.1052 +
  1.1053 +Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  1.1054 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1055 +
  1.1056 +  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
  1.1057 +
  1.1058 +  if (new_chunk == NULL) {
  1.1059 +    // Only a small part of the virtualspace is committed when first
  1.1060 +    // allocated so committing more here can be expected.
  1.1061 +    size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1.1062 +    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
  1.1063 +                                                    page_size_words);
  1.1064 +    expand_by(aligned_expand_vs_by_words, false);
  1.1065 +    new_chunk = get_chunk_vs(chunk_word_size);
  1.1066 +  }
  1.1067 +  return new_chunk;
  1.1068 +}
  1.1069 +
  1.1070 +bool VirtualSpaceNode::initialize() {
  1.1071 +
  1.1072 +  if (!_rs.is_reserved()) {
  1.1073 +    return false;
  1.1074 +  }
  1.1075 +
  1.1076 +  // Commit only 1 page instead of the whole reserved space _rs.size()
  1.1077 +  size_t committed_byte_size = os::vm_page_size();
  1.1078 +  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  1.1079 +  if (result) {
  1.1080 +    set_top((MetaWord*)virtual_space()->low());
  1.1081 +    set_reserved(MemRegion((HeapWord*)_rs.base(),
  1.1082 +                 (HeapWord*)(_rs.base() + _rs.size())));
  1.1083 +  }
  1.1084 +
  1.1085 +  assert(reserved()->start() == (HeapWord*) _rs.base(),
  1.1086 +    err_msg("Reserved start was not set properly " PTR_FORMAT
  1.1087 +      " != " PTR_FORMAT, reserved()->start(), _rs.base()));
  1.1088 +  assert(reserved()->word_size() == _rs.size() / BytesPerWord,
  1.1089 +    err_msg("Reserved size was not set properly " SIZE_FORMAT
  1.1090 +      " != " SIZE_FORMAT, reserved()->word_size(),
  1.1091 +      _rs.size() / BytesPerWord));
  1.1092 +
  1.1093 +  return result;
  1.1094 +}
  1.1095 +
  1.1096 +void VirtualSpaceNode::print_on(outputStream* st) const {
  1.1097 +  size_t used = used_words_in_vs();
  1.1098 +  size_t capacity = capacity_words_in_vs();
  1.1099 +  VirtualSpace* vs = virtual_space();
  1.1100 +  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
  1.1101 +           "[" PTR_FORMAT ", " PTR_FORMAT ", "
  1.1102 +           PTR_FORMAT ", " PTR_FORMAT ")",
  1.1103 +           vs, capacity / K, used * 100 / capacity,
  1.1104 +           bottom(), top(), end(),
  1.1105 +           vs->high_boundary());
  1.1106 +}
  1.1107 +
  1.1108 +void VirtualSpaceNode::mangle() {
  1.1109 +  size_t word_size = capacity_words_in_vs();
  1.1110 +  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
  1.1111 +}
  1.1112 +
  1.1113 +// VirtualSpaceList methods
  1.1114 +// Space allocated from the VirtualSpace
  1.1115 +
  1.1116 +VirtualSpaceList::~VirtualSpaceList() {
  1.1117 +  VirtualSpaceListIterator iter(virtual_space_list());
  1.1118 +  while (iter.repeat()) {
  1.1119 +    VirtualSpaceNode* vsl = iter.get_next();
  1.1120 +    delete vsl;
  1.1121 +  }
  1.1122 +}
  1.1123 +
  1.1124 +size_t VirtualSpaceList::used_words_sum() {
  1.1125 +  size_t allocated_by_vs = 0;
  1.1126 +  VirtualSpaceListIterator iter(virtual_space_list());
  1.1127 +  while (iter.repeat()) {
  1.1128 +    VirtualSpaceNode* vsl = iter.get_next();
  1.1129 +    // Sum used region [bottom, top) in each virtualspace
  1.1130 +    allocated_by_vs += vsl->used_words_in_vs();
  1.1131 +  }
  1.1132 +  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
  1.1133 +    err_msg("Total in free chunks " SIZE_FORMAT
  1.1134 +            " greater than total from virtual_spaces " SIZE_FORMAT,
  1.1135 +            allocated_by_vs, chunk_manager()->free_chunks_total()));
  1.1136 +  size_t used =
  1.1137 +    allocated_by_vs - chunk_manager()->free_chunks_total();
  1.1138 +  return used;
  1.1139 +}
  1.1140 +
  1.1141 +// Space available in all MetadataVirtualspaces allocated
  1.1142 +// for metadata.  This is the upper limit on the capacity
  1.1143 +// of chunks allocated out of all the MetadataVirtualspaces.
  1.1144 +size_t VirtualSpaceList::capacity_words_sum() {
  1.1145 +  size_t capacity = 0;
  1.1146 +  VirtualSpaceListIterator iter(virtual_space_list());
  1.1147 +  while (iter.repeat()) {
  1.1148 +    VirtualSpaceNode* vsl = iter.get_next();
  1.1149 +    capacity += vsl->capacity_words_in_vs();
  1.1150 +  }
  1.1151 +  return capacity;
  1.1152 +}
  1.1153 +
   1.1154 +VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  1.1155 +                                   _is_class(false),
  1.1156 +                                   _virtual_space_list(NULL),
  1.1157 +                                   _current_virtual_space(NULL),
  1.1158 +                                   _virtual_space_total(0),
  1.1159 +                                   _virtual_space_count(0) {
  1.1160 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1161 +                   Mutex::_no_safepoint_check_flag);
  1.1162 +  bool initialization_succeeded = grow_vs(word_size);
  1.1163 +
  1.1164 +  assert(initialization_succeeded,
  1.1165 +    " VirtualSpaceList initialization should not fail");
  1.1166 +}
  1.1167 +
  1.1168 +VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  1.1169 +                                   _is_class(true),
  1.1170 +                                   _virtual_space_list(NULL),
  1.1171 +                                   _current_virtual_space(NULL),
  1.1172 +                                   _virtual_space_total(0),
  1.1173 +                                   _virtual_space_count(0) {
  1.1174 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1175 +                   Mutex::_no_safepoint_check_flag);
  1.1176 +  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  1.1177 +  bool succeeded = class_entry->initialize();
  1.1178 +  assert(succeeded, " VirtualSpaceList initialization should not fail");
  1.1179 +  link_vs(class_entry, rs.size()/BytesPerWord);
  1.1180 +}
  1.1181 +
  1.1182 +// Allocate another meta virtual space and add it to the list.
  1.1183 +bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  1.1184 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1185 +  if (vs_word_size == 0) {
  1.1186 +    return false;
  1.1187 +  }
  1.1188 +  // Reserve the space
  1.1189 +  size_t vs_byte_size = vs_word_size * BytesPerWord;
  1.1190 +  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
  1.1191 +
  1.1192 +  // Allocate the meta virtual space and initialize it.
  1.1193 +  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  1.1194 +  if (!new_entry->initialize()) {
  1.1195 +    delete new_entry;
  1.1196 +    return false;
  1.1197 +  } else {
  1.1198 +    link_vs(new_entry, vs_word_size);
  1.1199 +    return true;
  1.1200 +  }
  1.1201 +}
  1.1202 +
  1.1203 +void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  1.1204 +  if (virtual_space_list() == NULL) {
   1.1205 +    set_virtual_space_list(new_entry);
  1.1206 +  } else {
  1.1207 +    current_virtual_space()->set_next(new_entry);
  1.1208 +  }
  1.1209 +  set_current_virtual_space(new_entry);
  1.1210 +  inc_virtual_space_total(vs_word_size);
  1.1211 +  inc_virtual_space_count();
  1.1212 +#ifdef ASSERT
  1.1213 +  new_entry->mangle();
  1.1214 +#endif
  1.1215 +  if (TraceMetavirtualspaceAllocation && Verbose) {
  1.1216 +    VirtualSpaceNode* vsl = current_virtual_space();
  1.1217 +    vsl->print_on(tty);
  1.1218 +  }
  1.1219 +}
  1.1220 +
  1.1221 +Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  1.1222 +                                           size_t grow_chunks_by_words) {
  1.1223 +
  1.1224 +  // Get a chunk from the chunk freelist
  1.1225 +  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  1.1226 +
  1.1227 +  // Allocate a chunk out of the current virtual space.
  1.1228 +  if (next == NULL) {
  1.1229 +    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1.1230 +  }
  1.1231 +
  1.1232 +  if (next == NULL) {
  1.1233 +    // Not enough room in current virtual space.  Try to commit
  1.1234 +    // more space.
  1.1235 +    size_t expand_vs_by_words = MAX2((size_t)SpaceManager::MediumChunkBunch,
  1.1236 +                                       grow_chunks_by_words);
  1.1237 +    size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1.1238 +    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
  1.1239 +                                                        page_size_words);
  1.1240 +    bool vs_expanded =
  1.1241 +      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
  1.1242 +    if (!vs_expanded) {
  1.1243 +      // Should the capacity of the metaspaces be expanded for
  1.1244 +      // this allocation?  If it's the virtual space for classes and is
  1.1245 +      // being used for CompressedHeaders, don't allocate a new virtualspace.
  1.1246 +      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  1.1247 +        // Get another virtual space.
   1.1248 +        size_t grow_vs_words =
   1.1249 +          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  1.1250 +        if (grow_vs(grow_vs_words)) {
  1.1251 +          // Got it.  It's on the list now.  Get a chunk from it.
  1.1252 +          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
  1.1253 +        }
  1.1254 +        if (TraceMetadataHumongousAllocation && SpaceManager::is_humongous(word_size)) {
   1.1255 +          gclog_or_tty->print_cr("  aligned_expand_vs_by_words " SIZE_FORMAT,
   1.1256 +                                 aligned_expand_vs_by_words);
   1.1257 +          gclog_or_tty->print_cr("  grow_vs_words " SIZE_FORMAT,
   1.1258 +                                 grow_vs_words);
  1.1259 +        }
  1.1260 +      } else {
  1.1261 +        // Allocation will fail and induce a GC
  1.1262 +        if (TraceMetadataChunkAllocation && Verbose) {
  1.1263 +          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
  1.1264 +            " Fail instead of expand the metaspace");
  1.1265 +        }
  1.1266 +      }
  1.1267 +    } else {
  1.1268 +      // The virtual space expanded, get a new chunk
  1.1269 +      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1.1270 +      assert(next != NULL, "Just expanded, should succeed");
  1.1271 +    }
  1.1272 +  }
  1.1273 +
  1.1274 +  return next;
  1.1275 +}
  1.1276 +
  1.1277 +void VirtualSpaceList::print_on(outputStream* st) const {
  1.1278 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.1279 +    VirtualSpaceListIterator iter(virtual_space_list());
  1.1280 +    while (iter.repeat()) {
  1.1281 +      VirtualSpaceNode* node = iter.get_next();
  1.1282 +      node->print_on(st);
  1.1283 +    }
  1.1284 +  }
  1.1285 +}
  1.1286 +
  1.1287 +#ifndef PRODUCT
  1.1288 +bool VirtualSpaceList::contains(const void *ptr) {
  1.1289 +  VirtualSpaceNode* list = virtual_space_list();
  1.1290 +  VirtualSpaceListIterator iter(list);
  1.1291 +  while (iter.repeat()) {
  1.1292 +    VirtualSpaceNode* node = iter.get_next();
  1.1293 +    if (node->reserved()->contains(ptr)) {
  1.1294 +      return true;
  1.1295 +    }
  1.1296 +  }
  1.1297 +  return false;
  1.1298 +}
  1.1299 +#endif // PRODUCT
  1.1300 +
  1.1301 +
  1.1302 +// MetaspaceGC methods
  1.1303 +
  1.1304 +// VM_CollectForMetadataAllocation is the vm operation used to GC.
  1.1305 +// Within the VM operation after the GC the attempt to allocate the metadata
  1.1306 +// should succeed.  If the GC did not free enough space for the metaspace
  1.1307 +// allocation, the HWM is increased so that another virtualspace will be
   1.1308 +// allocated for the metadata.  With the perm gen, increases in its size
   1.1309 +// were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
  1.1310 +// metaspace policy uses those as the small and large steps for the HWM.
  1.1311 +//
  1.1312 +// After the GC the compute_new_size() for MetaspaceGC is called to
  1.1313 +// resize the capacity of the metaspaces.  The current implementation
   1.1314 +// is based on the flags MinHeapFreeRatio and MaxHeapFreeRatio, which
   1.1315 +// some GCs use to resize the Java heap.  New flags can be implemented
  1.1316 +// if really needed.  MinHeapFreeRatio is used to calculate how much
  1.1317 +// free space is desirable in the metaspace capacity to decide how much
  1.1318 +// to increase the HWM.  MaxHeapFreeRatio is used to decide how much
  1.1319 +// free space is desirable in the metaspace capacity before decreasing
  1.1320 +// the HWM.
  1.1321 +
  1.1322 +// Calculate the amount to increase the high water mark (HWM).
  1.1323 +// Increase by a minimum amount (MinMetaspaceExpansion) so that
  1.1324 +// another expansion is not requested too soon.  If that is not
  1.1325 +// enough to satisfy the allocation (i.e. big enough for a word_size
  1.1326 +// allocation), increase by MaxMetaspaceExpansion.  If that is still
  1.1327 +// not enough, expand by the size of the allocation (word_size) plus
  1.1328 +// some.
  1.1329 +size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  1.1331 +  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  1.1332 +  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  1.1333 +  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1.1334 +  size_t size_delta_words = align_size_up(word_size, page_size_words);
  1.1335 +  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  1.1336 +  if (delta_words > min_delta_words) {
  1.1337 +    // Don't want to hit the high water mark on the next
  1.1338 +    // allocation so make the delta greater than just enough
  1.1339 +    // for this allocation.
  1.1340 +    delta_words = MAX2(delta_words, max_delta_words);
  1.1341 +    if (delta_words > max_delta_words) {
  1.1342 +      // This allocation is large but the next ones are probably not
  1.1343 +      // so increase by the minimum.
  1.1344 +      delta_words = delta_words + min_delta_words;
  1.1345 +    }
  1.1346 +  }
  1.1347 +  return delta_words;
  1.1348 +}
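// Worked example for delta_capacity_until_GC() (an illustrative sketch, not
// part of the original change; it assumes 8-byte words, a 4K page, and the
// typical defaults MinMetaspaceExpansion = 256K and MaxMetaspaceExpansion = 4M,
// i.e. min_delta_words = 32K words and max_delta_words = 512K words):
//
//   word_size =  1K words -> the page-aligned delta is below the minimum,
//                            so delta = min_delta_words = 32K words.
//   word_size = 40K words -> delta exceeds the minimum and is raised to
//                            MAX2(40K, 512K) = 512K words.
//   word_size =  1M words -> delta exceeds even the maximum, so the result
//                            is word_size plus the minimum step:
//                            1M + 32K words.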
  1.1349 +
  1.1350 +bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
  1.1351 +
  1.1352 +  // Class virtual space should always be expanded.  Call GC for the other
  1.1353 +  // metadata virtual space.
  1.1354 +  if (vsl == Metaspace::class_space_list()) return true;
  1.1355 +
  1.1356 +  // If the user wants a limit, impose one.
  1.1357 +  size_t max_metaspace_size_words = MaxMetaspaceSize / BytesPerWord;
  1.1358 +  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
  1.1359 +  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
  1.1360 +      vsl->capacity_words_sum() >= max_metaspace_size_words) {
  1.1361 +    return false;
  1.1362 +  }
  1.1363 +
  1.1364 +  // If this is part of an allocation after a GC, expand
  1.1365 +  // unconditionally.
   1.1366 +  if (MetaspaceGC::expand_after_GC()) {
  1.1367 +    return true;
  1.1368 +  }
  1.1369 +
  1.1370 +  // If the capacity is below the minimum capacity, allow the
  1.1371 +  // expansion.  Also set the high-water-mark (capacity_until_GC)
  1.1372 +  // to that minimum capacity so that a GC will not be induced
  1.1373 +  // until that minimum capacity is exceeded.
  1.1374 +  if (vsl->capacity_words_sum() < metaspace_size_words ||
  1.1375 +      capacity_until_GC() == 0) {
  1.1376 +    set_capacity_until_GC(metaspace_size_words);
  1.1377 +    return true;
  1.1378 +  } else {
  1.1379 +    if (vsl->capacity_words_sum() < capacity_until_GC()) {
  1.1380 +      return true;
  1.1381 +    } else {
  1.1382 +      if (TraceMetadataChunkAllocation && Verbose) {
  1.1383 +        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
  1.1384 +                        "  capacity_until_GC " SIZE_FORMAT
  1.1385 +                        "  capacity_words_sum " SIZE_FORMAT
  1.1386 +                        "  used_words_sum " SIZE_FORMAT
  1.1387 +                        "  free chunks " SIZE_FORMAT
  1.1388 +                        "  free chunks count %d",
  1.1389 +                        word_size,
  1.1390 +                        capacity_until_GC(),
  1.1391 +                        vsl->capacity_words_sum(),
  1.1392 +                        vsl->used_words_sum(),
  1.1393 +                        vsl->chunk_manager()->free_chunks_total(),
  1.1394 +                        vsl->chunk_manager()->free_chunks_count());
  1.1395 +      }
  1.1396 +      return false;
  1.1397 +    }
  1.1398 +  }
  1.1399 +}
  1.1400 +
   1.1401 +// Unless noted otherwise, the variables in compute_new_size() are in bytes.
  1.1402 +
  1.1403 +void MetaspaceGC::compute_new_size() {
  1.1404 +  assert(_shrink_factor <= 100, "invalid shrink factor");
  1.1405 +  uint current_shrink_factor = _shrink_factor;
  1.1406 +  _shrink_factor = 0;
  1.1407 +
  1.1408 +  VirtualSpaceList *vsl = Metaspace::space_list();
  1.1409 +
  1.1410 +  size_t capacity_after_gc = vsl->capacity_bytes_sum();
  1.1411 +  // Check to see if these two can be calculated without walking the CLDG
  1.1412 +  size_t used_after_gc = vsl->used_bytes_sum();
  1.1413 +  size_t capacity_until_GC = vsl->capacity_bytes_sum();
  1.1414 +  size_t free_after_gc = capacity_until_GC - used_after_gc;
  1.1415 +
  1.1416 +  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  1.1417 +  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1.1418 +
  1.1419 +  const double min_tmp = used_after_gc / maximum_used_percentage;
  1.1420 +  size_t minimum_desired_capacity =
  1.1421 +    (size_t)MIN2(min_tmp, double(max_uintx));
  1.1422 +  // Don't shrink less than the initial generation size
  1.1423 +  minimum_desired_capacity = MAX2(minimum_desired_capacity,
  1.1424 +                                  MetaspaceSize);
  1.1425 +
  1.1426 +  if (PrintGCDetails && Verbose) {
  1.1427 +    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
  1.1428 +    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
  1.1429 +    gclog_or_tty->print_cr("  "
  1.1430 +                  "  minimum_free_percentage: %6.2f"
  1.1431 +                  "  maximum_used_percentage: %6.2f",
  1.1432 +                  minimum_free_percentage,
  1.1433 +                  maximum_used_percentage);
  1.1435 +    gclog_or_tty->print_cr("  "
  1.1436 +                  "   free_after_gc       : %6.1fK"
  1.1437 +                  "   used_after_gc       : %6.1fK"
  1.1438 +                  "   capacity_after_gc   : %6.1fK"
  1.1439 +                  "   metaspace HWM     : %6.1fK",
  1.1440 +                  free_after_gc / (double) K,
  1.1441 +                  used_after_gc / (double) K,
  1.1442 +                  capacity_after_gc / (double) K,
  1.1443 +                  capacity_until_GC / (double) K);
  1.1444 +    gclog_or_tty->print_cr("  "
  1.1445 +                  "   free_percentage: %6.2f",
  1.1446 +                  free_percentage);
  1.1447 +  }
  1.1448 +
  1.1449 +
  1.1450 +  if (capacity_until_GC < minimum_desired_capacity) {
  1.1451 +    // If we have less capacity below the metaspace HWM, then
  1.1452 +    // increment the HWM.
  1.1453 +    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
  1.1454 +    // Don't expand unless it's significant
  1.1455 +    if (expand_bytes >= MinMetaspaceExpansion) {
  1.1456 +      size_t expand_words = expand_bytes / BytesPerWord;
  1.1457 +      MetaspaceGC::inc_capacity_until_GC(expand_words);
  1.1458 +    }
  1.1459 +    if (PrintGCDetails && Verbose) {
  1.1460 +      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
  1.1461 +      gclog_or_tty->print_cr("    expanding:"
  1.1462 +                    "  minimum_desired_capacity: %6.1fK"
  1.1463 +                    "  expand_words: %6.1fK"
  1.1464 +                    "  MinMetaspaceExpansion: %6.1fK"
  1.1465 +                    "  new metaspace HWM:  %6.1fK",
  1.1466 +                    minimum_desired_capacity / (double) K,
  1.1467 +                    expand_bytes / (double) K,
  1.1468 +                    MinMetaspaceExpansion / (double) K,
  1.1469 +                    new_capacity_until_GC / (double) K);
  1.1470 +    }
  1.1471 +    return;
  1.1472 +  }
  1.1473 +
  1.1474 +  // No expansion, now see if we want to shrink
  1.1475 +  size_t shrink_words = 0;
  1.1476 +  // We would never want to shrink more than this
  1.1477 +  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
   1.1478 +  assert(max_shrink_words <= capacity_until_GC,
   1.1479 +    err_msg("max_shrink_words " SIZE_FORMAT " underflowed", max_shrink_words));
  1.1480 +
  1.1481 +  // Should shrinking be considered?
  1.1482 +  if (MaxHeapFreeRatio < 100) {
  1.1483 +    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
  1.1484 +    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1.1485 +    const double max_tmp = used_after_gc / minimum_used_percentage;
  1.1486 +    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
  1.1487 +    maximum_desired_capacity = MAX2(maximum_desired_capacity,
  1.1488 +                                    MetaspaceSize);
  1.1489 +    if (PrintGC && Verbose) {
  1.1490 +      gclog_or_tty->print_cr("  "
  1.1491 +                             "  maximum_free_percentage: %6.2f"
  1.1492 +                             "  minimum_used_percentage: %6.2f",
  1.1493 +                             maximum_free_percentage,
  1.1494 +                             minimum_used_percentage);
  1.1495 +      gclog_or_tty->print_cr("  "
  1.1496 +                             "  capacity_until_GC: %6.1fK"
  1.1497 +                             "  minimum_desired_capacity: %6.1fK"
  1.1498 +                             "  maximum_desired_capacity: %6.1fK",
  1.1499 +                             capacity_until_GC / (double) K,
  1.1500 +                             minimum_desired_capacity / (double) K,
  1.1501 +                             maximum_desired_capacity / (double) K);
  1.1502 +    }
  1.1503 +
  1.1504 +    assert(minimum_desired_capacity <= maximum_desired_capacity,
  1.1505 +           "sanity check");
  1.1506 +
  1.1507 +    if (capacity_until_GC > maximum_desired_capacity) {
  1.1508 +      // Capacity too large, compute shrinking size
  1.1509 +      shrink_words = capacity_until_GC - maximum_desired_capacity;
   1.1510 +      // We don't want to shrink all the way back to initSize if people call
  1.1511 +      // System.gc(), because some programs do that between "phases" and then
  1.1512 +      // we'd just have to grow the heap up again for the next phase.  So we
  1.1513 +      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
  1.1514 +      // on the third call, and 100% by the fourth call.  But if we recompute
  1.1515 +      // size without shrinking, it goes back to 0%.
  1.1516 +      shrink_words = shrink_words / 100 * current_shrink_factor;
  1.1517 +      assert(shrink_words <= max_shrink_words,
  1.1518 +        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  1.1519 +          shrink_words, max_shrink_words));
  1.1520 +      if (current_shrink_factor == 0) {
  1.1521 +        _shrink_factor = 10;
  1.1522 +      } else {
  1.1523 +        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
  1.1524 +      }
  1.1525 +      if (PrintGCDetails && Verbose) {
  1.1526 +        gclog_or_tty->print_cr("  "
  1.1527 +                      "  shrinking:"
  1.1528 +                      "  initSize: %.1fK"
  1.1529 +                      "  maximum_desired_capacity: %.1fK",
  1.1530 +                      MetaspaceSize / (double) K,
  1.1531 +                      maximum_desired_capacity / (double) K);
  1.1532 +        gclog_or_tty->print_cr("  "
  1.1533 +                      "  shrink_words: %.1fK"
  1.1534 +                      "  current_shrink_factor: %d"
  1.1535 +                      "  new shrink factor: %d"
  1.1536 +                      "  MinMetaspaceExpansion: %.1fK",
  1.1537 +                      shrink_words / (double) K,
  1.1538 +                      current_shrink_factor,
  1.1539 +                      _shrink_factor,
  1.1540 +                      MinMetaspaceExpansion / (double) K);
  1.1541 +      }
  1.1542 +    }
  1.1543 +  }
  1.1544 +
  1.1545 +
  1.1546 +  // Don't shrink unless it's significant
  1.1547 +  if (shrink_words >= MinMetaspaceExpansion) {
  1.1548 +    VirtualSpaceNode* csp = vsl->current_virtual_space();
  1.1549 +    size_t available_to_shrink = csp->capacity_words_in_vs() -
  1.1550 +      csp->used_words_in_vs();
  1.1551 +    shrink_words = MIN2(shrink_words, available_to_shrink);
  1.1552 +    csp->shrink_by(shrink_words);
  1.1553 +    MetaspaceGC::dec_capacity_until_GC(shrink_words);
  1.1554 +    if (PrintGCDetails && Verbose) {
  1.1555 +      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
  1.1556 +      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
  1.1557 +    }
  1.1558 +  }
  1.1559 +  assert(vsl->used_bytes_sum() == used_after_gc &&
  1.1560 +         used_after_gc <= vsl->capacity_bytes_sum(),
  1.1561 +         "sanity check");
  1.1562 +
  1.1563 +}
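// Shrink damping, step by step (an illustrative sketch, not part of the
// original change).  With repeated calls that each find the capacity above
// maximum_desired_capacity, _shrink_factor evolves 0 -> 10 -> 40 -> 100, so
// successive calls release 0%, 10%, 40% and then 100% of the excess:
//
//   uint shrink_factor = 0;                                // _shrink_factor
//   for (int call = 0; call < 4; call++) {
//     size_t shrink = excess_words / 100 * shrink_factor;  // damped shrink
//     shrink_factor = (shrink_factor == 0)
//                   ? 10
//                   : MIN2(shrink_factor * 4, (uint) 100); // 10, 40, 100
//   }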
  1.1564 +
  1.1565 +// Metadebug methods
  1.1566 +
  1.1567 +void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
  1.1568 +                                       size_t chunk_word_size){
  1.1569 +#ifdef ASSERT
  1.1570 +  VirtualSpaceList* vsl = sm->vs_list();
  1.1571 +  if (MetaDataDeallocateALot &&
  1.1572 +      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  1.1573 +    Metadebug::reset_deallocate_chunk_a_lot_count();
  1.1574 +    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
  1.1575 +      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
  1.1576 +      if (dummy_chunk == NULL) {
  1.1577 +        break;
  1.1578 +      }
  1.1579 +      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  1.1580 +
  1.1581 +      if (TraceMetadataChunkAllocation && Verbose) {
  1.1582 +        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  1.1583 +                               sm->sum_count_in_chunks_in_use());
  1.1584 +        dummy_chunk->print_on(gclog_or_tty);
  1.1585 +        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  1.1586 +                               vsl->chunk_manager()->free_chunks_total(),
  1.1587 +                               vsl->chunk_manager()->free_chunks_count());
  1.1588 +      }
  1.1589 +    }
  1.1590 +  } else {
  1.1591 +    Metadebug::inc_deallocate_chunk_a_lot_count();
  1.1592 +  }
  1.1593 +#endif
  1.1594 +}
  1.1595 +
  1.1596 +void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
  1.1597 +                                       size_t raw_word_size){
  1.1598 +#ifdef ASSERT
  1.1599 +  if (MetaDataDeallocateALot &&
  1.1600 +        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  1.1601 +    Metadebug::set_deallocate_block_a_lot_count(0);
  1.1602 +    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
  1.1603 +      Metablock* dummy_block = sm->allocate_work(raw_word_size);
  1.1604 +      if (dummy_block == 0) {
  1.1605 +        break;
  1.1606 +      }
   1.1607 +      // The whole body is compiled only under ASSERT (see above), so
   1.1608 +      // assert directly without an extra #ifdef guard.
   1.1609 +      assert(dummy_block->word_size() == raw_word_size, "Block size is not set correctly");
  1.1610 +      sm->deallocate(dummy_block->data());
  1.1611 +    }
  1.1612 +  } else {
  1.1613 +    Metadebug::inc_deallocate_block_a_lot_count();
  1.1614 +  }
  1.1615 +#endif
  1.1616 +}
  1.1617 +
  1.1618 +void Metadebug::init_allocation_fail_alot_count() {
  1.1619 +  if (MetadataAllocationFailALot) {
  1.1620 +    _allocation_fail_alot_count =
   1.1621 +      1 + (long)((double)MetadataAllocationFailALotInterval * os::random() / (max_jint + 1.0));
  1.1622 +  }
  1.1623 +}
  1.1624 +
  1.1625 +#ifdef ASSERT
  1.1626 +bool Metadebug::test_metadata_failure() {
  1.1627 +  if (MetadataAllocationFailALot &&
  1.1628 +      Threads::is_vm_complete()) {
  1.1629 +    if (_allocation_fail_alot_count > 0) {
  1.1630 +      _allocation_fail_alot_count--;
  1.1631 +    } else {
  1.1632 +      if (TraceMetadataChunkAllocation && Verbose) {
  1.1633 +        gclog_or_tty->print_cr("Metadata allocation failing for "
  1.1634 +                               "MetadataAllocationFailALot");
  1.1635 +      }
  1.1636 +      init_allocation_fail_alot_count();
  1.1637 +      return true;
  1.1638 +    }
  1.1639 +  }
  1.1640 +  return false;
  1.1641 +}
  1.1642 +#endif
  1.1643 +
  1.1644 +// ChunkList methods
  1.1645 +
  1.1646 +size_t ChunkList::sum_list_size() {
  1.1647 +  size_t result = 0;
  1.1648 +  Metachunk* cur = head();
  1.1649 +  while (cur != NULL) {
  1.1650 +    result += cur->word_size();
  1.1651 +    cur = cur->next();
  1.1652 +  }
  1.1653 +  return result;
  1.1654 +}
  1.1655 +
  1.1656 +size_t ChunkList::sum_list_count() {
  1.1657 +  size_t result = 0;
  1.1658 +  Metachunk* cur = head();
  1.1659 +  while (cur != NULL) {
  1.1660 +    result++;
  1.1661 +    cur = cur->next();
  1.1662 +  }
  1.1663 +  return result;
  1.1664 +}
  1.1665 +
  1.1666 +size_t ChunkList::sum_list_capacity() {
  1.1667 +  size_t result = 0;
  1.1668 +  Metachunk* cur = head();
  1.1669 +  while (cur != NULL) {
  1.1670 +    result += cur->capacity_word_size();
  1.1671 +    cur = cur->next();
  1.1672 +  }
  1.1673 +  return result;
  1.1674 +}
  1.1675 +
  1.1676 +void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
  1.1677 +  assert_lock_strong(SpaceManager::expand_lock());
   1.1678 +  assert(tail == NULL || tail->next() == NULL, "Not the tail");
  1.1679 +
  1.1680 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.1681 +    tty->print("ChunkList::add_at_head: ");
  1.1682 +    Metachunk* cur = head;
  1.1683 +    while (cur != NULL) {
   1.1684 +      tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
  1.1685 +      cur = cur->next();
  1.1686 +    }
  1.1687 +    tty->print_cr("");
  1.1688 +  }
  1.1689 +
  1.1690 +  if (tail != NULL) {
  1.1691 +    tail->set_next(_head);
  1.1692 +  }
  1.1693 +  set_head(head);
  1.1694 +}
  1.1695 +
  1.1696 +void ChunkList::add_at_head(Metachunk* list) {
  1.1697 +  if (list == NULL) {
  1.1698 +    // Nothing to add
  1.1699 +    return;
  1.1700 +  }
  1.1701 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1702 +  Metachunk* head = list;
  1.1703 +  Metachunk* tail = list;
  1.1704 +  Metachunk* cur = head->next();
  1.1705 +  // Search for the tail since it is not passed.
  1.1706 +  while (cur != NULL) {
  1.1707 +    tail = cur;
  1.1708 +    cur = cur->next();
  1.1709 +  }
  1.1710 +  add_at_head(head, tail);
  1.1711 +}
  1.1712 +
  1.1713 +// ChunkManager methods
  1.1714 +
  1.1715 +// Verification of _free_chunks_total and _free_chunks_count does not
  1.1716 +// work with the CMS collector because its use of additional locks
   1.1717 +// complicates the mutex deadlock detection, but it can still be useful
  1.1718 +// for detecting errors in the chunk accounting with other collectors.
  1.1719 +
  1.1720 +size_t ChunkManager::free_chunks_total() {
  1.1721 +#ifdef ASSERT
  1.1722 +  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
  1.1723 +    MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1724 +                     Mutex::_no_safepoint_check_flag);
  1.1725 +    locked_verify_free_chunks_total();
  1.1726 +  }
  1.1727 +#endif
  1.1728 +  return _free_chunks_total;
  1.1729 +}
  1.1730 +
  1.1731 +size_t ChunkManager::free_chunks_total_in_bytes() {
  1.1732 +  return free_chunks_total() * BytesPerWord;
  1.1733 +}
  1.1734 +
  1.1735 +size_t ChunkManager::free_chunks_count() {
  1.1736 +#ifdef ASSERT
  1.1737 +  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
  1.1738 +    MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1739 +                     Mutex::_no_safepoint_check_flag);
  1.1740 +    // This lock is only needed in debug because the verification
  1.1741 +    // of the _free_chunks_totals walks the list of free chunks
  1.1742 +    locked_verify_free_chunks_count();
  1.1743 +  }
  1.1744 +#endif
   1.1745 +  return _free_chunks_count;
  1.1746 +}
  1.1747 +
  1.1748 +void ChunkManager::locked_verify_free_chunks_total() {
  1.1749 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1750 +  assert(sum_free_chunks() == _free_chunks_total,
  1.1751 +    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
  1.1752 +           " same as sum " SIZE_FORMAT, _free_chunks_total,
  1.1753 +           sum_free_chunks()));
  1.1754 +}
  1.1755 +
  1.1756 +void ChunkManager::verify_free_chunks_total() {
  1.1757 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1758 +                     Mutex::_no_safepoint_check_flag);
  1.1759 +  locked_verify_free_chunks_total();
  1.1760 +}
  1.1761 +
  1.1762 +void ChunkManager::locked_verify_free_chunks_count() {
  1.1763 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1764 +  assert(sum_free_chunks_count() == _free_chunks_count,
  1.1765 +    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
  1.1766 +           " same as sum " SIZE_FORMAT, _free_chunks_count,
  1.1767 +           sum_free_chunks_count()));
  1.1768 +}
  1.1769 +
  1.1770 +void ChunkManager::verify_free_chunks_count() {
  1.1771 +#ifdef ASSERT
  1.1772 +  MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1773 +                     Mutex::_no_safepoint_check_flag);
  1.1774 +  locked_verify_free_chunks_count();
  1.1775 +#endif
  1.1776 +}
  1.1777 +
  1.1778 +void ChunkManager::verify() {
  1.1779 +#ifdef ASSERT
  1.1780 +  if (!UseConcMarkSweepGC) {
  1.1781 +    MutexLockerEx cl(SpaceManager::expand_lock(),
  1.1782 +                       Mutex::_no_safepoint_check_flag);
  1.1783 +    locked_verify_free_chunks_total();
  1.1784 +    locked_verify_free_chunks_count();
  1.1785 +  }
  1.1786 +#endif
  1.1787 +}
  1.1788 +
  1.1789 +void ChunkManager::locked_verify() {
  1.1790 +  locked_verify_free_chunks_total();
  1.1791 +  locked_verify_free_chunks_count();
  1.1792 +}
  1.1793 +
  1.1794 +void ChunkManager::locked_print_free_chunks(outputStream* st) {
  1.1795 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1796 +  st->print_cr("Free chunk total 0x%x  count 0x%x",
  1.1797 +                _free_chunks_total, _free_chunks_count);
  1.1798 +}
  1.1799 +
  1.1800 +void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  1.1801 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1802 +  st->print_cr("Sum free chunk total 0x%x  count 0x%x",
  1.1803 +                sum_free_chunks(), sum_free_chunks_count());
  1.1804 +}
  1.1805 +ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  1.1806 +  return &_free_chunks[index];
  1.1807 +}
  1.1808 +
  1.1809 +
   1.1810 +// These methods, which sum the free chunk lists, are used by printing
   1.1811 +// methods that run in product builds.
  1.1812 +size_t ChunkManager::sum_free_chunks() {
  1.1813 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1814 +  size_t result = 0;
  1.1815 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.1816 +    ChunkList* list = free_chunks(i);
  1.1817 +
  1.1818 +    if (list == NULL) {
  1.1819 +      continue;
  1.1820 +    }
  1.1821 +
  1.1822 +    result = result + list->sum_list_capacity();
  1.1823 +  }
  1.1824 +  return result;
  1.1825 +}
  1.1826 +
  1.1827 +size_t ChunkManager::sum_free_chunks_count() {
  1.1828 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1829 +  size_t count = 0;
  1.1830 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.1831 +    ChunkList* list = free_chunks(i);
  1.1832 +    if (list == NULL) {
  1.1833 +      continue;
  1.1834 +    }
  1.1835 +    count = count + list->sum_list_count();
  1.1836 +  }
  1.1837 +  return count;
  1.1838 +}
  1.1839 +
  1.1840 +ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  1.1841 +  switch (word_size) {
  1.1842 +  case SpaceManager::SmallChunk :
  1.1843 +      return &_free_chunks[0];
  1.1844 +  case SpaceManager::MediumChunk :
  1.1845 +      return &_free_chunks[1];
  1.1846 +  default:
  1.1847 +    assert(word_size > SpaceManager::MediumChunk, "List inconsistency");
  1.1848 +    return &_free_chunks[2];
  1.1849 +  }
  1.1850 +}
  1.1851 +
  1.1852 +void ChunkManager::free_chunks_put(Metachunk* chunk) {
  1.1853 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1854 +  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  1.1855 +  chunk->set_next(free_list->head());
  1.1856 +  free_list->set_head(chunk);
  1.1857 +  // chunk is being returned to the chunk free list
  1.1858 +  inc_free_chunks_total(chunk->capacity_word_size());
  1.1859 +  locked_verify();
  1.1860 +}
  1.1861 +
  1.1862 +void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  1.1863 +  // The deallocation of a chunk originates in the freelist
   1.1864 +  // management code for a Metaspace and does not hold the
  1.1865 +  // lock.
  1.1866 +  assert(chunk != NULL, "Deallocating NULL");
  1.1867 +  // MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.1868 +  locked_verify();
  1.1869 +  if (TraceMetadataChunkAllocation) {
  1.1870 +    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  1.1871 +                  PTR_FORMAT "  size " SIZE_FORMAT,
  1.1872 +                  chunk, chunk->word_size());
  1.1873 +  }
  1.1874 +  free_chunks_put(chunk);
  1.1875 +}
  1.1876 +
  1.1877 +Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  1.1878 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1879 +
  1.1880 +  locked_verify();
  1.1881 +  ChunkList* free_list = find_free_chunks_list(word_size);
  1.1882 +  assert(free_list != NULL, "Sanity check");
  1.1883 +
  1.1884 +  Metachunk* chunk = free_list->head();
  1.1885 +  debug_only(Metachunk* debug_head = chunk;)
  1.1886 +
  1.1887 +  if (chunk == NULL) {
  1.1888 +    return NULL;
  1.1889 +  }
  1.1890 +
  1.1891 +  Metachunk* prev_chunk = chunk;
  1.1892 +  if (chunk->word_size() == word_size) {
  1.1893 +    // Chunk is being removed from the chunks free list.
  1.1894 +    dec_free_chunks_total(chunk->capacity_word_size());
  1.1895 +    // Remove the chunk as the head of the list.
  1.1896 +    free_list->set_head(chunk->next());
  1.1897 +    chunk->set_next(NULL);
  1.1898 +
  1.1899 +    if (TraceMetadataChunkAllocation && Verbose) {
  1.1900 +      tty->print_cr("ChunkManager::free_chunks_get: free_list "
  1.1901 +                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  1.1902 +                    free_list, chunk, chunk->word_size());
  1.1903 +    }
  1.1904 +  } else {
  1.1905 +    assert(SpaceManager::is_humongous(word_size),
  1.1906 +      "Should only need to check humongous");
  1.1907 +    // This code to find the best fit is just for purposes of
  1.1908 +    // investigating the loss due to fragmentation on a humongous
   1.1909 +    // chunk.  It will be replaced by a binaryTreeDictionary for
  1.1910 +    // the humongous chunks.
   1.1911 +    size_t count = 0;
  1.1912 +    Metachunk* best_fit = NULL;
  1.1913 +    Metachunk* best_fit_prev = NULL;
  1.1914 +    while (chunk != NULL) {
  1.1915 +      count++;
  1.1916 +      if (chunk->word_size() < word_size) {
  1.1917 +        prev_chunk = chunk;
  1.1918 +        chunk = chunk->next();
  1.1919 +      } else if (chunk->word_size() == word_size) {
  1.1920 +        break;
  1.1921 +      } else {
  1.1922 +        if (best_fit == NULL ||
  1.1923 +            best_fit->word_size() > chunk->word_size()) {
  1.1924 +          best_fit_prev = prev_chunk;
  1.1925 +          best_fit = chunk;
  1.1926 +        }
  1.1927 +        prev_chunk = chunk;
  1.1928 +        chunk = chunk->next();
  1.1929 +      }
  1.1930 +    }
   1.1931 +    if (chunk == NULL) {
   1.1932 +      prev_chunk = best_fit_prev;
   1.1933 +      chunk = best_fit;
   1.1934 +    }
   1.1935 +    if (chunk != NULL) {
   1.1936 +      if (TraceMetadataHumongousAllocation) {
   1.1937 +        size_t waste = chunk->word_size() - word_size;
   1.1938 +        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
   1.1939 +                      " for requested size " SIZE_FORMAT
   1.1940 +                      " waste " SIZE_FORMAT
   1.1941 +                      " found at " SIZE_FORMAT " of " SIZE_FORMAT,
   1.1942 +                      chunk->word_size(), word_size, waste,
   1.1943 +                      count, free_list->sum_list_count());
   1.1944 +      }
   1.1945 +      // Chunk is being removed from the chunks free list.
   1.1946 +      dec_free_chunks_total(chunk->capacity_word_size());
   1.1947 +      // Remove the chunk if it is at the head of the list.
   1.1948 +      if (chunk == free_list->head()) {
   1.1949 +        free_list->set_head(chunk->next());
   1.1950 +
   1.1951 +        if (TraceMetadataHumongousAllocation) {
   1.1952 +          tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
   1.1953 +                        PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
   1.1954 +                        " new head " PTR_FORMAT,
   1.1955 +                        free_list, chunk, chunk->word_size(),
   1.1956 +                        free_list->head());
   1.1957 +        }
   1.1958 +      } else {
   1.1959 +        // Remove a chunk in the interior of the list
   1.1960 +        prev_chunk->set_next(chunk->next());
   1.1961 +
   1.1962 +        if (TraceMetadataHumongousAllocation) {
   1.1963 +          tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
   1.1964 +                        PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
   1.1965 +                        " prev " PTR_FORMAT " next " PTR_FORMAT,
   1.1966 +                        free_list, chunk, chunk->word_size(),
   1.1967 +                        prev_chunk, chunk->next());
   1.1968 +        }
   1.1969 +      }
   1.1970 +      chunk->set_next(NULL);
   1.1971 +    } else {
   1.1972 +      if (TraceMetadataHumongousAllocation) {
   1.1973 +        tty->print_cr("ChunkManager::free_chunks_get: New humongous chunk of size "
   1.1974 +                      SIZE_FORMAT,
   1.1975 +                      word_size);
   1.1976 +      }
   1.1977 +    }
  1.1978 +  }
  1.1979 +  locked_verify();
  1.1980 +  return chunk;
  1.1981 +}
  1.1982 +
  1.1983 +Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  1.1984 +  assert_lock_strong(SpaceManager::expand_lock());
  1.1985 +  locked_verify();
  1.1986 +
  1.1987 +  // Take from the beginning of the list
  1.1988 +  Metachunk* chunk = free_chunks_get(word_size);
  1.1989 +  if (chunk == NULL) {
  1.1990 +    return NULL;
  1.1991 +  }
  1.1992 +
  1.1993 +  assert(word_size <= chunk->word_size() ||
  1.1994 +           SpaceManager::is_humongous(chunk->word_size()),
  1.1995 +           "Non-humongous variable sized chunk");
  1.1996 +  if (TraceMetadataChunkAllocation) {
  1.1997 +    tty->print("ChunkManager::chunk_freelist_allocate: chunk "
  1.1998 +               PTR_FORMAT "  size " SIZE_FORMAT " ",
  1.1999 +               chunk, chunk->word_size());
  1.2000 +    locked_print_free_chunks(tty);
  1.2001 +  }
  1.2002 +
  1.2003 +  return chunk;
  1.2004 +}
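// Usage sketch (illustrative only, not part of the original change; the
// 'chunk_manager' local is hypothetical).  Allocation must hold the expand
// lock, as the asserts above require:
//
//   MutexLockerEx ml(SpaceManager::expand_lock(),
//                    Mutex::_no_safepoint_check_flag);
//   Metachunk* c = chunk_manager->chunk_freelist_allocate(SpaceManager::SmallChunk);
//   if (c == NULL) {
//     // Free list had no suitable chunk; the caller falls back to carving
//     // a new chunk out of the current virtual space (see
//     // VirtualSpaceList::get_new_chunk()).
//   } else {
//     // ... use the chunk; it is returned to the free list later via
//     // chunk_freelist_deallocate(), e.g. from
//     // Metadebug::deallocate_chunk_a_lot().
//   }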
  1.2005 +
  1.2006 +// SpaceManager methods
  1.2007 +
  1.2008 +size_t SpaceManager::sum_free_in_chunks_in_use() const {
  1.2009 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2010 +  size_t free = 0;
  1.2011 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.2012 +    Metachunk* chunk = chunks_in_use(i);
  1.2013 +    while (chunk != NULL) {
  1.2014 +      free += chunk->free_word_size();
  1.2015 +      chunk = chunk->next();
  1.2016 +    }
  1.2017 +  }
  1.2018 +  return free;
  1.2019 +}
  1.2020 +
  1.2021 +size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  1.2022 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2023 +  size_t result = 0;
  1.2024 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
   1.2025 +    // Count the free space in all the chunks but not the
   1.2026 +    // current chunk from which allocations are still being done.
   1.2027 +    result += sum_waste_in_chunks_in_use(i);
  1.2028 +  }
  1.2029 +  return result;
  1.2030 +}
  1.2031 +
  1.2032 +size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
   1.2033 +  size_t result = 0;
   1.2034 +  Metachunk* chunk = chunks_in_use(index);
   1.2035 +  // Count the free space in all the chunks but not the
   1.2036 +  // current chunk from which allocations are still being done.
   1.2037 +  while (chunk != NULL) {
   1.2038 +    if (chunk != current_chunk()) {
   1.2039 +      result += chunk->free_word_size();
   1.2040 +    }
   1.2041 +    chunk = chunk->next();
   1.2042 +  }
   1.2043 +  return result;
   1.2044 +}
  1.2049 +
  1.2050 +size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  1.2051 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2052 +  size_t sum = 0;
  1.2053 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.2054 +    Metachunk* chunk = chunks_in_use(i);
  1.2055 +    while (chunk != NULL) {
  1.2058 +      sum += chunk->capacity_word_size();
  1.2059 +      chunk = chunk->next();
  1.2060 +    }
  1.2061 +  }
  1.2062 +  return sum;
  1.2063 +}
  1.2064 +
  1.2065 +size_t SpaceManager::sum_count_in_chunks_in_use() {
  1.2066 +  size_t count = 0;
  1.2067 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.2068 +    count = count + sum_count_in_chunks_in_use(i);
  1.2069 +  }
  1.2070 +  return count;
  1.2071 +}
  1.2072 +
  1.2073 +size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  1.2074 +  size_t count = 0;
  1.2075 +  Metachunk* chunk = chunks_in_use(i);
  1.2076 +  while (chunk != NULL) {
  1.2077 +    count++;
  1.2078 +    chunk = chunk->next();
  1.2079 +  }
  1.2080 +  return count;
  1.2081 +}
  1.2082 +
  1.2083 +
  1.2084 +size_t SpaceManager::sum_used_in_chunks_in_use() const {
  1.2085 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2086 +  size_t used = 0;
  1.2087 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.2088 +    Metachunk* chunk = chunks_in_use(i);
  1.2089 +    while (chunk != NULL) {
  1.2090 +      used += chunk->used_word_size();
  1.2091 +      chunk = chunk->next();
  1.2092 +    }
  1.2093 +  }
  1.2094 +  return used;
  1.2095 +}
  1.2096 +
  1.2097 +void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
  1.2098 +
   1.2099 +  Metachunk* small_chunk = chunks_in_use(SmallIndex);
   1.2100 +  st->print_cr("SpaceManager: small chunk " PTR_FORMAT
   1.2101 +               " free " SIZE_FORMAT,
   1.2102 +               small_chunk,
   1.2103 +               small_chunk == NULL ? 0 : small_chunk->free_word_size());
  1.2104 +
  1.2105 +  Metachunk* medium_chunk = chunks_in_use(MediumIndex);
  1.2106 +  st->print("medium chunk " PTR_FORMAT, medium_chunk);
  1.2107 +  Metachunk* tail = current_chunk();
  1.2108 +  st->print_cr(" current chunk " PTR_FORMAT, tail);
  1.2109 +
  1.2110 +  Metachunk* head = chunks_in_use(HumongousIndex);
  1.2111 +  st->print_cr("humongous chunk " PTR_FORMAT, head);
  1.2112 +
  1.2113 +  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  1.2114 +  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  1.2115 +}
  1.2116 +
  1.2117 +size_t SpaceManager::calc_chunk_size(size_t word_size) {
  1.2118 +
  1.2119 +  // Decide between a small chunk and a medium chunk.  Up to
  1.2120 +  // _small_chunk_limit small chunks can be allocated but
  1.2121 +  // once a medium chunk has been allocated, no more small
  1.2122 +  // chunks will be allocated.
  1.2123 +  size_t chunk_word_size;
  1.2124 +  if (chunks_in_use(MediumIndex) == NULL &&
  1.2125 +      (!has_small_chunk_limit() ||
  1.2126 +       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
  1.2127 +    chunk_word_size = (size_t) SpaceManager::SmallChunk;
  1.2128 +    if (word_size + Metachunk::overhead() > SpaceManager::SmallChunk) {
  1.2129 +      chunk_word_size = MediumChunk;
  1.2130 +    }
  1.2131 +  } else {
  1.2132 +    chunk_word_size = MediumChunk;
  1.2133 +  }
  1.2134 +
  1.2135 +  // Might still need a humongous chunk
  1.2136 +  chunk_word_size =
  1.2137 +    MAX2((size_t) chunk_word_size, word_size + Metachunk::overhead());
  1.2138 +
  1.2139 +  if (TraceMetadataHumongousAllocation &&
  1.2140 +      SpaceManager::is_humongous(word_size)) {
  1.2141 +    gclog_or_tty->print_cr("Metadata humongous allocation:");
  1.2142 +    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
  1.2143 +    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
  1.2144 +                           chunk_word_size);
  1.2145 +    gclog_or_tty->print_cr("    block overhead " PTR_FORMAT
  1.2146 +                           " chunk overhead " PTR_FORMAT,
  1.2147 +                           Metablock::overhead(),
  1.2148 +                           Metachunk::overhead());
  1.2149 +  }
  1.2150 +  return chunk_word_size;
  1.2151 +}
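// Decision summary for calc_chunk_size() (an illustrative recap, not part
// of the original change; SmallChunk and MediumChunk are the chunk word
// sizes named above, Metachunk::overhead() the per-chunk header):
//
//   no medium chunk yet, small-chunk count under _small_chunk_limit,
//   and word_size + overhead fits in a SmallChunk      -> SmallChunk
//   otherwise, word_size + overhead fits a MediumChunk -> MediumChunk
//   word_size + overhead larger than a MediumChunk     -> humongous chunk of
//                                                         exactly word_size +
//                                                         Metachunk::overhead()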
  1.2152 +
  1.2153 +Metablock* SpaceManager::grow_and_allocate(size_t word_size) {
  1.2154 +  assert(vs_list()->current_virtual_space() != NULL,
  1.2155 +         "Should have been set");
  1.2156 +  assert(current_chunk() == NULL ||
  1.2157 +         current_chunk()->allocate(word_size) == NULL,
  1.2158 +         "Don't need to expand");
  1.2159 +  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.2160 +
  1.2161 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2162 +    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
  1.2163 +                           " words " SIZE_FORMAT " space left",
  1.2164 +                            word_size, current_chunk() != NULL ?
  1.2165 +                              current_chunk()->free_word_size() : 0);
  1.2166 +  }
  1.2167 +
  1.2168 +  // Get another chunk out of the virtual space
  1.2169 +  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  1.2170 +  Metachunk* next = vs_list()->get_new_chunk(word_size, grow_chunks_by_words);
  1.2171 +
  1.2172 +  // If a chunk was available, add it to the in-use chunk list
  1.2173 +  // and do an allocation from it.
  1.2174 +  if (next != NULL) {
  1.2175 +    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
  1.2176 +    // Add to this manager's list of chunks in use.
  1.2177 +    add_chunk(next, false);
  1.2178 +    return next->allocate(word_size);
  1.2179 +  }
  1.2180 +  return NULL;
  1.2181 +}
  1.2182 +
  1.2183 +void SpaceManager::print_on(outputStream* st) const {
  1.2184 +
  1.2185 +  for (ChunkIndex i = SmallIndex;
  1.2186 +       i < NumberOfFreeLists ;
  1.2187 +       i = next_chunk_index(i) ) {
  1.2188 +    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
  1.2189 +                 chunks_in_use(i),
  1.2190 +                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  1.2191 +  }
  1.2192 +  st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
  1.2193 +               " Humongous " SIZE_FORMAT,
  1.2194 +               sum_waste_in_chunks_in_use(SmallIndex),
  1.2195 +               sum_waste_in_chunks_in_use(MediumIndex),
  1.2196 +               sum_waste_in_chunks_in_use(HumongousIndex));
  1.2197 +  // Nothing in them yet
  1.2198 +  // block_freelists()->print_on(st);
  1.2199 +}
  1.2200 +
  1.2201 +SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
  1.2202 +  _vs_list(vs_list),
  1.2203 +  _allocation_total(0),
  1.2204 +  _lock(lock) {
  1.2205 +  Metadebug::init_allocation_fail_alot_count();
  1.2206 +  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.2207 +    _chunks_in_use[i] = NULL;
  1.2208 +  }
  1.2209 +  _current_chunk = NULL;
  1.2210 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2211 +    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  1.2212 +  }
  1.2213 +}
  1.2214 +
  1.2215 +SpaceManager::~SpaceManager() {
  1.2216 +  MutexLockerEx fcl(SpaceManager::expand_lock(),
  1.2217 +                    Mutex::_no_safepoint_check_flag);
  1.2218 +
  1.2219 +  ChunkManager* chunk_manager = vs_list()->chunk_manager();
  1.2220 +
  1.2221 +  chunk_manager->locked_verify();
  1.2222 +
  1.2223 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2224 +    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
  1.2225 +    locked_print_chunks_in_use_on(gclog_or_tty);
  1.2226 +  }
  1.2227 +
  1.2228 +  // Have to update before the chunks_in_use lists are emptied
  1.2229 +  // below.
  1.2230 +  chunk_manager->inc_free_chunks_total(sum_capacity_in_chunks_in_use(),
  1.2231 +                                       sum_count_in_chunks_in_use());
  1.2232 +
  1.2233 +#ifdef ASSERT
  1.2234 +  // Mangle freed memory.
  1.2235 +  mangle_freed_chunks();
  1.2236 +#endif // ASSERT
  1.2237 +
  1.2238 +  // Add all the chunks in use by this space manager
  1.2239 +  // to the global list of free chunks.
  1.2240 +
  1.2241 +  // Small chunks.  There is one _current_chunk for each
  1.2242 +  // Metaspace.  It could point to a small or medium chunk.
  1.2243 +  // Rather than determine which it is, follow the list of
  1.2244 +  // small chunks to add them to the free list
  1.2245 +  Metachunk* small_chunk = chunks_in_use(SmallIndex);
  1.2246 +  chunk_manager->free_small_chunks()->add_at_head(small_chunk);
  1.2247 +  set_chunks_in_use(SmallIndex, NULL);
  1.2248 +
   1.2249 +  // After the small chunks come the medium chunks.
  1.2250 +  Metachunk* medium_chunk = chunks_in_use(MediumIndex);
  1.2251 +  assert(medium_chunk == NULL ||
  1.2252 +         medium_chunk->word_size() == MediumChunk,
  1.2253 +         "Chunk is on the wrong list");
  1.2254 +
  1.2255 +  if (medium_chunk != NULL) {
  1.2256 +    Metachunk* head = medium_chunk;
  1.2257 +    // If there is a medium chunk then the _current_chunk can only
  1.2258 +    // point to the last medium chunk.
  1.2259 +    Metachunk* tail = current_chunk();
  1.2260 +    chunk_manager->free_medium_chunks()->add_at_head(head, tail);
  1.2261 +    set_chunks_in_use(MediumIndex, NULL);
  1.2262 +  }
  1.2263 +
  1.2264 +  // Humongous chunks
  1.2265 +  // Humongous chunks are never the current chunk.
  1.2266 +  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
  1.2267 +
  1.2268 +  if (humongous_chunks != NULL) {
  1.2269 +    chunk_manager->free_humongous_chunks()->add_at_head(humongous_chunks);
  1.2270 +    set_chunks_in_use(HumongousIndex, NULL);
  1.2271 +  }
  1.2272 +  chunk_manager->locked_verify();
  1.2273 +}
  1.2274 +
  1.2275 +void SpaceManager::deallocate(MetaWord* p) {
  1.2276 +  assert_lock_strong(_lock);
   1.2277 +  ShouldNotReachHere();  // Deallocation of individual blocks is not used yet.
  1.2278 +#ifdef DEALLOCATE_BLOCKS
  1.2279 +  Metablock* block = Metablock::metablock_from_data(p);
   1.2280 +  // This is expensive but is kept until integration. JJJ
   1.2281 +  assert(contains((address)block), "Block does not belong to this metaspace");
   1.2282 +  block_freelists()->return_block(block, block->word_size());
  1.2283 +#endif
  1.2284 +}
  1.2285 +
  1.2286 +// Adds a chunk to the list of chunks in use.
  1.2287 +void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
  1.2288 +
  1.2289 +  assert(new_chunk != NULL, "Should not be NULL");
  1.2290 +  assert(new_chunk->next() == NULL, "Should not be on a list");
  1.2291 +
  1.2292 +  new_chunk->reset_empty();
  1.2293 +
   1.2294 +  // Find the correct list and set the current
  1.2295 +  // chunk for that list.
  1.2296 +  switch (new_chunk->word_size()) {
  1.2297 +  case SpaceManager::SmallChunk :
  1.2298 +    if (chunks_in_use(SmallIndex) == NULL) {
  1.2299 +      // First chunk to add to the list
  1.2300 +      set_chunks_in_use(SmallIndex, new_chunk);
  1.2301 +    } else {
  1.2302 +      assert(current_chunk()->word_size() == SpaceManager::SmallChunk,
  1.2303 +        err_msg( "Incorrect mix of sizes in chunk list "
  1.2304 +        SIZE_FORMAT " new chunk " SIZE_FORMAT,
  1.2305 +        current_chunk()->word_size(), new_chunk->word_size()));
  1.2306 +      current_chunk()->set_next(new_chunk);
  1.2307 +    }
  1.2308 +    // Make current chunk
  1.2309 +    set_current_chunk(new_chunk);
  1.2310 +    break;
  1.2311 +  case SpaceManager::MediumChunk :
  1.2312 +    if (chunks_in_use(MediumIndex) == NULL) {
   1.2313 +      // About to add the first medium chunk so terminate the
  1.2314 +      // small chunk list.  In general once medium chunks are
  1.2315 +      // being added, we're past the need for small chunks.
  1.2316 +      if (current_chunk() != NULL) {
  1.2317 +        // Only a small chunk or the initial chunk could be
  1.2318 +        // the current chunk if this is the first medium chunk.
  1.2319 +        assert(current_chunk()->word_size() == SpaceManager::SmallChunk ||
  1.2320 +          chunks_in_use(SmallIndex) == NULL,
  1.2321 +          err_msg("Should be a small chunk or initial chunk, current chunk "
  1.2322 +          SIZE_FORMAT " new chunk " SIZE_FORMAT,
  1.2323 +          current_chunk()->word_size(), new_chunk->word_size()));
  1.2324 +        current_chunk()->set_next(NULL);
  1.2325 +      }
  1.2326 +      // First chunk to add to the list
  1.2327 +      set_chunks_in_use(MediumIndex, new_chunk);
  1.2328 +
  1.2329 +    } else {
  1.2330 +      // As a minimum the first medium chunk added would
  1.2331 +      // have become the _current_chunk
  1.2332 +      // so the _current_chunk has to be non-NULL here
  1.2333 +      // (although not necessarily still the first medium chunk).
  1.2334 +      assert(current_chunk()->word_size() == SpaceManager::MediumChunk,
  1.2335 +             "A medium chunk should the current chunk");
  1.2336 +      current_chunk()->set_next(new_chunk);
  1.2337 +    }
  1.2338 +    // Make current chunk
  1.2339 +    set_current_chunk(new_chunk);
  1.2340 +    break;
  1.2341 +  default: {
  1.2342 +    // For null class loader data and DumpSharedSpaces, the first chunk isn't
  1.2343 +    // small, so small will be null.  Link this first chunk as the current
  1.2344 +    // chunk.
  1.2345 +    if (make_current) {
  1.2346 +      // Set as the current chunk but otherwise treat as a humongous chunk.
  1.2347 +      set_current_chunk(new_chunk);
  1.2348 +    }
   1.2349 +    // Link at head.  The _current_chunk only points to a humongous chunk
   1.2350 +    // for the null class loader metaspace (class and data virtual space
   1.2351 +    // managers), so it will not point to the tail of the humongous
   1.2352 +    // chunks list.
  1.2353 +    new_chunk->set_next(chunks_in_use(HumongousIndex));
  1.2354 +    set_chunks_in_use(HumongousIndex, new_chunk);
  1.2355 +
  1.2356 +    assert(new_chunk->word_size() > MediumChunk, "List inconsistency");
  1.2357 +  }
  1.2358 +  }
  1.2359 +
  1.2360 +  assert(new_chunk->is_empty(), "Not ready for reuse");
  1.2361 +  if (TraceMetadataChunkAllocation && Verbose) {
  1.2362 +    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  1.2363 +                        sum_count_in_chunks_in_use());
  1.2364 +    new_chunk->print_on(gclog_or_tty);
   1.2365 +    vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  1.2366 +  }
  1.2367 +}
  1.2368 +
  1.2369 +MetaWord* SpaceManager::allocate(size_t word_size) {
  1.2370 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2371 +
  1.2372 +  size_t block_overhead = Metablock::overhead();
  1.2373 +  // If only the dictionary is going to be used (i.e., no
  1.2374 +  // indexed free list), then there is a minimum size requirement.
  1.2375 +  // MinChunkSize is a placeholder for the real minimum size JJJ
  1.2376 +  size_t byte_size_with_overhead = (word_size + block_overhead) * BytesPerWord;
  1.2377 +#ifdef DEALLOCATE_BLOCKS
  1.2378 +  size_t raw_bytes_size = MAX2(ARENA_ALIGN(byte_size_with_overhead),
  1.2379 +                               MinChunkSize * BytesPerWord);
  1.2380 +#else
  1.2381 +  size_t raw_bytes_size = ARENA_ALIGN(byte_size_with_overhead);
  1.2382 +#endif
  1.2383 +  size_t raw_word_size = raw_bytes_size / BytesPerWord;
  1.2384 +  assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
  1.2385 +
  1.2386 +  BlockFreelist* fl =  block_freelists();
  1.2387 +  Metablock* block = NULL;
  1.2388 +  // Allocation from the dictionary is expensive in the sense that
  1.2389 +  // the dictionary has to be searched for a size.  Don't allocate
  1.2390 +  // from the dictionary until it starts to get fat.  Is this
   1.2391 +  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  1.2392 +  // for allocations.  Do some profiling.  JJJ
  1.2393 +  if (fl->totalSize() > allocation_from_dictionary_limit) {
  1.2394 +    block = fl->get_block(raw_word_size);
  1.2395 +  }
  1.2396 +  if (block == NULL) {
  1.2397 +    block = allocate_work(raw_word_size);
  1.2398 +    if (block == NULL) {
  1.2399 +      return NULL;
  1.2400 +    }
  1.2401 +  }
  1.2402 +  Metadebug::deallocate_block_a_lot(this, raw_word_size);
  1.2403 +
  1.2404 +  // Push the allocation past the word containing the size and leader.
  1.2405 +#ifdef ASSERT
  1.2406 +  MetaWord* result =  block->data();
  1.2407 +  return result;
  1.2408 +#else
  1.2409 +  return (MetaWord*) block;
  1.2410 +#endif
  1.2411 +}
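// Worked example for the raw size computation above (an illustrative
// sketch, not part of the original change; it assumes 8-byte words and a
// one-word Metablock::overhead(), which is an assumption, not a value
// taken from this file):
//
//   word_size               = 5 words
//   byte_size_with_overhead = (5 + 1) * 8 = 48 bytes
//   raw_bytes_size          = ARENA_ALIGN(48)   // arena-aligned byte size
//   raw_word_size           = raw_bytes_size / 8
//
// With DEALLOCATE_BLOCKS defined, raw_bytes_size is additionally clamped
// up to MinChunkSize words so blocks returned to the dictionary never fall
// below its minimum block size.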
  1.2412 +
   1.2413 +// Returns the address of space allocated for "word_size".
   1.2414 +// This method does not know about blocks (Metablocks)
  1.2415 +Metablock* SpaceManager::allocate_work(size_t word_size) {
  1.2416 +  assert_lock_strong(_lock);
  1.2417 +#ifdef ASSERT
  1.2418 +  if (Metadebug::test_metadata_failure()) {
  1.2419 +    return NULL;
  1.2420 +  }
  1.2421 +#endif
  1.2422 +  // Is there space in the current chunk?
  1.2423 +  Metablock* result = NULL;
  1.2424 +
  1.2425 +  // For DumpSharedSpaces, only allocate out of the current chunk which is
  1.2426 +  // never null because we gave it the size we wanted.   Caller reports out
  1.2427 +  // of memory if this returns null.
  1.2428 +  if (DumpSharedSpaces) {
  1.2429 +    assert(current_chunk() != NULL, "should never happen");
  1.2430 +    inc_allocation_total(word_size);
  1.2431 +    return current_chunk()->allocate(word_size); // caller handles null result
  1.2432 +  }
  1.2433 +  if (current_chunk() != NULL) {
  1.2434 +    result = current_chunk()->allocate(word_size);
  1.2435 +  }
  1.2436 +
  1.2437 +  if (result == NULL) {
  1.2438 +    result = grow_and_allocate(word_size);
  1.2439 +  }
   1.2440 +  if (result != NULL) {
  1.2441 +    inc_allocation_total(word_size);
  1.2442 +    assert(result != (Metablock*) chunks_in_use(MediumIndex), "Head of the list is being allocated");
  1.2443 +    assert(result->word_size() == word_size, "Size not set correctly");
  1.2444 +  }
  1.2445 +
  1.2446 +  return result;
  1.2447 +}
  1.2448 +
  1.2449 +void SpaceManager::verify() {
  1.2450 +  // If there are blocks in the dictionary, then
   1.2451 +  // verification of chunks does not work since
  1.2452 +  // being in the dictionary alters a chunk.
  1.2453 +  if (block_freelists()->totalSize() == 0) {
  1.2454 +    // Skip the small chunks because their next link points to
  1.2455 +    // medium chunks.  This is because the small chunk is the
  1.2456 +    // current chunk (for allocations) until it is full and the
   1.2457 +    // addition of the next chunk does not NULL the next
   1.2458 +    // link of the small chunk.
  1.2459 +    for (ChunkIndex i = MediumIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1.2460 +      Metachunk* curr = chunks_in_use(i);
  1.2461 +      while (curr != NULL) {
  1.2462 +        curr->verify();
  1.2463 +        curr = curr->next();
  1.2464 +      }
  1.2465 +    }
  1.2466 +  }
  1.2467 +}
  1.2468 +
  1.2469 +#ifdef ASSERT
  1.2470 +void SpaceManager::verify_allocation_total() {
  1.2471 +#if 0
  1.2472 +  // Verification is only guaranteed at a safepoint.
  1.2473 +  if (SafepointSynchronize::is_at_safepoint()) {
  1.2474 +    gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
  1.2475 +                           " sum_used_in_chunks_in_use " SIZE_FORMAT,
  1.2476 +                           this,
  1.2477 +                           allocation_total(),
  1.2478 +                           sum_used_in_chunks_in_use());
  1.2479 +  }
  1.2480 +  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1.2481 +  assert(allocation_total() == sum_used_in_chunks_in_use(),
  1.2482 +    err_msg("allocation total is not consistent %d vs %d",
  1.2483 +            allocation_total(), sum_used_in_chunks_in_use()));
  1.2484 +#endif
  1.2485 +}
  1.2486 +
  1.2487 +#endif
  1.2488 +
  1.2489 +void SpaceManager::dump(outputStream* const out) const {
  1.2490 +  size_t curr_total = 0;
  1.2491 +  size_t waste = 0;
  1.2492 +  uint i = 0;
  1.2493 +  size_t used = 0;
  1.2494 +  size_t capacity = 0;
  1.2495 +
  1.2496 +  // Add up statistics for all chunks in this SpaceManager.
  1.2497 +  for (ChunkIndex index = SmallIndex;
  1.2498 +       index < NumberOfFreeLists;
  1.2499 +       index = next_chunk_index(index)) {
  1.2500 +    for (Metachunk* curr = chunks_in_use(index);
  1.2501 +         curr != NULL;
  1.2502 +         curr = curr->next()) {
  1.2503 +      out->print("%d) ", i++);
  1.2504 +      curr->print_on(out);
  1.2505 +      if (TraceMetadataChunkAllocation && Verbose) {
  1.2506 +        block_freelists()->print_on(out);
  1.2507 +      }
  1.2508 +      curr_total += curr->word_size();
  1.2509 +      used += curr->used_word_size();
  1.2510 +      capacity += curr->capacity_word_size();
   1.2511 +      waste += curr->free_word_size() + curr->overhead();
  1.2512 +    }
  1.2513 +  }
  1.2514 +
   1.2515 +  size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  1.2516 +  // Free space isn't wasted.
  1.2517 +  waste -= free;
  1.2518 +
  1.2519 +  out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
  1.2520 +                " free " SIZE_FORMAT " capacity " SIZE_FORMAT
  1.2521 +                " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
  1.2522 +}
  1.2523 +
  1.2524 +#ifndef PRODUCT
  1.2525 +void SpaceManager::mangle_freed_chunks() {
  1.2526 +  for (ChunkIndex index = SmallIndex;
  1.2527 +       index < NumberOfFreeLists;
  1.2528 +       index = next_chunk_index(index)) {
  1.2529 +    for (Metachunk* curr = chunks_in_use(index);
  1.2530 +         curr != NULL;
  1.2531 +         curr = curr->next()) {
  1.2532 +      // Try to detect incorrectly terminated small chunk
  1.2533 +      // list.
  1.2534 +      assert(index == MediumIndex || curr != chunks_in_use(MediumIndex),
  1.2535 +             err_msg("Mangling medium chunks in small chunks? "
  1.2536 +                     "curr " PTR_FORMAT " medium list " PTR_FORMAT,
  1.2537 +                     curr, chunks_in_use(MediumIndex)));
  1.2538 +      curr->mangle();
  1.2539 +    }
  1.2540 +  }
  1.2541 +}
  1.2542 +#endif // PRODUCT
  1.2543 +
  1.2544 +
  1.2545 +// MetaspaceAux
  1.2546 +
  1.2547 +size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
  1.2548 +  size_t used = 0;
  1.2549 +#ifdef ASSERT
  1.2550 +  size_t free = 0;
  1.2551 +  size_t capacity = 0;
  1.2552 +#endif
  1.2553 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2554 +  while (iter.repeat()) {
  1.2555 +    Metaspace* msp = iter.get_next();
  1.2556 +    // Sum allocation_total for each metaspace
  1.2557 +    if (msp != NULL) {
  1.2558 +      used += msp->used_words(mdtype);
  1.2559 +#ifdef ASSERT
  1.2560 +      free += msp->free_words(mdtype);
  1.2561 +      capacity += msp->capacity_words(mdtype);
  1.2562 +      assert(used + free == capacity,
  1.2563 +        err_msg("Accounting is wrong used " SIZE_FORMAT
  1.2564 +                " free " SIZE_FORMAT " capacity " SIZE_FORMAT,
  1.2565 +                used, free, capacity));
  1.2566 +#endif
  1.2567 +    }
  1.2568 +  }
  1.2569 +  return used * BytesPerWord;
  1.2570 +}
  1.2571 +
  1.2572 +size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
  1.2573 +  size_t free = 0;
  1.2574 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2575 +  while (iter.repeat()) {
  1.2576 +    Metaspace* msp = iter.get_next();
  1.2577 +    if (msp != NULL) {
  1.2578 +      free += msp->free_words(mdtype);
  1.2579 +    }
  1.2580 +  }
  1.2581 +  return free * BytesPerWord;
  1.2582 +}
  1.2583 +
   1.2584 +// The total space available for metadata allocation, in bytes.  This
   1.2585 +// sums the free chunks with Metaspace capacity_words(), the total
   1.2586 +// words in chunks allocated to each Metaspace.
  1.2587 +size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
  1.2588 +  size_t capacity = free_chunks_total(mdtype);
  1.2589 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2590 +  while (iter.repeat()) {
  1.2591 +    Metaspace* msp = iter.get_next();
  1.2592 +    if (msp != NULL) {
  1.2593 +      capacity += msp->capacity_words(mdtype);
  1.2594 +    }
  1.2595 +  }
  1.2596 +  return capacity * BytesPerWord;
  1.2597 +}
  1.2598 +
  1.2599 +size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  1.2600 +  size_t reserved = (mdtype == Metaspace::ClassType) ?
  1.2601 +                       Metaspace::class_space_list()->virtual_space_total() :
  1.2602 +                       Metaspace::space_list()->virtual_space_total();
  1.2603 +  return reserved * BytesPerWord;
  1.2604 +}
  1.2605 +
  1.2606 +size_t MetaspaceAux::min_chunk_size() { return SpaceManager::MediumChunk; }
  1.2607 +
  1.2608 +size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
  1.2609 +  ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
  1.2610 +                            Metaspace::class_space_list()->chunk_manager() :
  1.2611 +                            Metaspace::space_list()->chunk_manager();
  1.2612 +
  1.2613 +  chunk->verify_free_chunks_total();
  1.2614 +  return chunk->free_chunks_total();
  1.2615 +}
  1.2616 +
  1.2617 +size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
  1.2618 +  return free_chunks_total(mdtype) * BytesPerWord;
  1.2619 +}
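          +// Sketch of the accounting identity these queries are expected to
          +// satisfy for a given metadata type (it is asserted in the per-type
          +// print_on() below):
          +//
          +//   capacity_in_bytes(mdtype) == used_in_bytes(mdtype)
          +//                              + free_in_bytes(mdtype)
          +//                              + free_chunks_total_in_bytes(mdtype)
          +//
          +// because capacity_in_bytes() counts both the chunks handed out to
          +// space managers and the chunks still on the free lists.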
  1.2620 +
  1.2621 +void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  1.2622 +  gclog_or_tty->print(", [Metaspace:");
  1.2623 +  if (PrintGCDetails && Verbose) {
  1.2624 +    gclog_or_tty->print(" "  SIZE_FORMAT
  1.2625 +                        "->" SIZE_FORMAT
  1.2626 +                        "("  SIZE_FORMAT "/" SIZE_FORMAT ")",
  1.2627 +                        prev_metadata_used,
  1.2628 +                        used_in_bytes(),
  1.2629 +                        capacity_in_bytes(),
  1.2630 +                        reserved_in_bytes());
  1.2631 +  } else {
  1.2632 +    gclog_or_tty->print(" "  SIZE_FORMAT "K"
  1.2633 +                        "->" SIZE_FORMAT "K"
  1.2634 +                        "("  SIZE_FORMAT "K/" SIZE_FORMAT "K)",
  1.2635 +                        prev_metadata_used / K,
   1.2636 +                        used_in_bytes() / K,
   1.2637 +                        capacity_in_bytes() / K,
   1.2638 +                        reserved_in_bytes() / K);
  1.2639 +  }
  1.2640 +
  1.2641 +  gclog_or_tty->print("]");
  1.2642 +}
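          +// For illustration only (the sizes below are made up), the fragment
          +// appended to a GC log line by print_metaspace_change() looks like
          +//   , [Metaspace: 20480K->20992K(22528K/65536K)]
          +// i.e. previous used -> current used (capacity/reserved); with
          +// PrintGCDetails and Verbose both on, the same figures print in bytes.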
  1.2643 +
   1.2644 +// This is printed when PrintGCDetails is enabled
  1.2645 +void MetaspaceAux::print_on(outputStream* out) {
  1.2646 +  Metaspace::MetadataType ct = Metaspace::ClassType;
  1.2647 +  Metaspace::MetadataType nct = Metaspace::NonClassType;
  1.2648 +
  1.2649 +  out->print_cr(" Metaspace total "
  1.2650 +                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  1.2651 +                " reserved " SIZE_FORMAT "K",
  1.2652 +                capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
  1.2653 +  out->print_cr("  data space     "
  1.2654 +                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  1.2655 +                " reserved " SIZE_FORMAT "K",
  1.2656 +                capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
  1.2657 +  out->print_cr("  class space    "
  1.2658 +                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  1.2659 +                " reserved " SIZE_FORMAT "K",
  1.2660 +                capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
  1.2661 +}
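          +// Example of the report above, with purely illustrative figures:
          +//  Metaspace total 22528K, used 20480K, reserved 65536K
          +//   data space     20480K, used 18944K, reserved 57344K
          +//   class space    2048K, used 1536K, reserved 8192K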
  1.2662 +
  1.2663 +// Print information for class space and data space separately.
  1.2664 +// This is almost the same as above.
  1.2665 +void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  1.2666 +  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
  1.2667 +  size_t capacity_bytes = capacity_in_bytes(mdtype);
  1.2668 +  size_t used_bytes = used_in_bytes(mdtype);
  1.2669 +  size_t free_bytes = free_in_bytes(mdtype);
  1.2670 +  size_t used_and_free = used_bytes + free_bytes +
  1.2671 +                           free_chunks_capacity_bytes;
   1.2672 +  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
   1.2673 +             "K + unused in chunks " SIZE_FORMAT "K + "
   1.2674 +             "capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
   1.2675 +             "K; capacity in allocated chunks " SIZE_FORMAT "K",
  1.2676 +             used_bytes / K,
  1.2677 +             free_bytes / K,
  1.2678 +             free_chunks_capacity_bytes / K,
  1.2679 +             used_and_free / K,
  1.2680 +             capacity_bytes / K);
  1.2681 +  assert(used_and_free == capacity_bytes, "Accounting is wrong");
  1.2682 +}
  1.2683 +
  1.2684 +// Print total fragmentation for class and data metaspaces separately
  1.2685 +void MetaspaceAux::print_waste(outputStream* out) {
  1.2686 +
  1.2687 +  size_t small_waste = 0, medium_waste = 0, large_waste = 0;
  1.2688 +  size_t cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
  1.2689 +
  1.2690 +  ClassLoaderDataGraphMetaspaceIterator iter;
  1.2691 +  while (iter.repeat()) {
  1.2692 +    Metaspace* msp = iter.get_next();
  1.2693 +    if (msp != NULL) {
  1.2694 +      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  1.2695 +      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  1.2696 +      large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
  1.2697 +
  1.2698 +      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  1.2699 +      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  1.2700 +      cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
  1.2701 +    }
  1.2702 +  }
   1.2703 +  out->print_cr("Total fragmentation waste (words); free space is not counted");
   1.2704 +  out->print("  data: small " SIZE_FORMAT " medium " SIZE_FORMAT " humongous " SIZE_FORMAT,
   1.2705 +             small_waste, medium_waste, large_waste);
   1.2706 +  out->print_cr(" class: small " SIZE_FORMAT " medium " SIZE_FORMAT " humongous " SIZE_FORMAT, cls_small_waste, cls_medium_waste, cls_large_waste);
  1.2707 +}
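          +// Illustrative (made-up) output of print_waste():
          +//   Total fragmentation waste (words); free space is not counted
          +//     data: small 120 medium 40 humongous 0 class: small 36 medium 8 humongous 0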
  1.2708 +
   1.2709 +// Dump global metaspace totals, gathered by walking the ClassLoaderDataGraph
  1.2710 +void MetaspaceAux::dump(outputStream* out) {
  1.2711 +  out->print_cr("All Metaspace:");
  1.2712 +  out->print("data space: "); print_on(out, Metaspace::NonClassType);
  1.2713 +  out->print("class space: "); print_on(out, Metaspace::ClassType);
  1.2714 +  print_waste(out);
  1.2715 +}
  1.2716 +
  1.2717 +// Metaspace methods
  1.2718 +
  1.2719 +size_t Metaspace::_first_chunk_word_size = 0;
  1.2720 +
  1.2721 +Metaspace::Metaspace(Mutex* lock, size_t word_size) {
  1.2722 +  initialize(lock, word_size);
  1.2723 +}
  1.2724 +
  1.2725 +Metaspace::Metaspace(Mutex* lock) {
  1.2726 +  initialize(lock);
  1.2727 +}
  1.2728 +
  1.2729 +Metaspace::~Metaspace() {
  1.2730 +  delete _vsm;
  1.2731 +  delete _class_vsm;
  1.2732 +}
  1.2733 +
  1.2734 +VirtualSpaceList* Metaspace::_space_list = NULL;
  1.2735 +VirtualSpaceList* Metaspace::_class_space_list = NULL;
  1.2736 +
  1.2737 +#define VIRTUALSPACEMULTIPLIER 2
  1.2738 +
  1.2739 +void Metaspace::global_initialize() {
  1.2740 +  // Initialize the alignment for shared spaces.
  1.2741 +  int max_alignment = os::vm_page_size();
  1.2742 +  MetaspaceShared::set_max_alignment(max_alignment);
  1.2743 +
  1.2744 +  if (DumpSharedSpaces) {
  1.2745 +    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
  1.2746 +    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
  1.2747 +    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
  1.2748 +    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
  1.2749 +
   1.2750 +    // Initialize with the sum of the shared space sizes.  The read-only
   1.2751 +    // and read-write metaspace chunks will be allocated out of this, and
   1.2752 +    // the remainder is for the misc code and data chunks.
  1.2753 +    size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
  1.2754 +                                 SharedMiscDataSize + SharedMiscCodeSize,
  1.2755 +                                 os::vm_allocation_granularity());
  1.2756 +    size_t word_size = total/wordSize;
  1.2757 +    _space_list = new VirtualSpaceList(word_size);
  1.2758 +  } else {
   1.2759 +    // If using shared space, open the file that contains the shared space
   1.2760 +    // and map in the memory before initializing the rest of metaspace (so
   1.2761 +    // the addresses don't conflict).
  1.2762 +    if (UseSharedSpaces) {
  1.2763 +      FileMapInfo* mapinfo = new FileMapInfo();
  1.2764 +      memset(mapinfo, 0, sizeof(FileMapInfo));
  1.2765 +
   1.2766 +      // Open the shared archive file and read and validate the header.  If
   1.2767 +      // initialization fails, shared spaces (UseSharedSpaces) are
   1.2768 +      // disabled and the file is closed.
   1.2769 +      // The spaces themselves are also mapped in here.
  1.2770 +      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
  1.2771 +        FileMapInfo::set_current_info(mapinfo);
  1.2772 +      } else {
  1.2773 +        assert(!mapinfo->is_open() && !UseSharedSpaces,
  1.2774 +               "archive file not closed or shared spaces not disabled.");
  1.2775 +      }
  1.2776 +    }
  1.2777 +
  1.2778 +    // Initialize this before initializing the VirtualSpaceList
  1.2779 +    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  1.2780 +    // Arbitrarily set the initial virtual space to a multiple
  1.2781 +    // of the boot class loader size.
  1.2782 +    size_t word_size = VIRTUALSPACEMULTIPLIER * Metaspace::first_chunk_word_size();
  1.2783 +    // Initialize the list of virtual spaces.
  1.2784 +    _space_list = new VirtualSpaceList(word_size);
  1.2785 +  }
  1.2786 +}
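          +// A worked example for the non-dumping path above, assuming (purely for
          +// illustration) InitialBootClassLoaderMetaspaceSize == 4M on a 64-bit VM
          +// (BytesPerWord == 8):
          +//   _first_chunk_word_size = 4M / 8         = 512K words
          +//   initial reservation    = 2 * 512K words = 1M words (8M of address space)
          +// In the DumpSharedSpaces path the reservation is instead the sum of the
          +// four Shared*Size options, rounded up to the allocation granularity.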
  1.2787 +
   1.2788 +// For UseCompressedKlassPointers the class space is reserved as a piece of
   1.2789 +// the Java heap because the same compression algorithm is used for both.
   1.2790 +// The ReservedSpace argument passed in sits at the top of the compressed space.
  1.2791 +void Metaspace::initialize_class_space(ReservedSpace rs) {
   1.2792 +  // The reserved space size may be bigger because of alignment, especially with UseLargePages
   1.2793 +  assert(rs.size() >= ClassMetaspaceSize, err_msg(SIZE_FORMAT " < " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
  1.2794 +  _class_space_list = new VirtualSpaceList(rs);
  1.2795 +}
  1.2796 +
   1.2797 +// Class space probably needs much less than the data space
  1.2798 +const int class_space_divisor = 4;
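          +// For example, initialize(lock, 4096) below requests a 4096-word initial
          +// chunk for the data space manager and a 1024-word one for the class
          +// space manager.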
  1.2799 +
  1.2800 +void Metaspace::initialize(Mutex* lock, size_t initial_size) {
   1.2801 +  // If no initial size is specified, use the SmallChunk size for both spaces; otherwise give the class space a smaller share of the requested size.
  1.2802 +  size_t word_size;
  1.2803 +  size_t class_word_size;
  1.2804 +  if (initial_size == 0) {
  1.2805 +    word_size = (size_t) SpaceManager::SmallChunk;
  1.2806 +    class_word_size = word_size;
  1.2807 +  } else {
  1.2808 +    word_size = initial_size;
  1.2809 +    class_word_size = initial_size/class_space_divisor;
  1.2810 +  }
  1.2811 +
  1.2812 +  assert(space_list() != NULL,
  1.2813 +    "Metadata VirtualSpaceList has not been initialized");
  1.2814 +
  1.2815 +  _vsm = new SpaceManager(lock, space_list());
  1.2816 +  if (_vsm == NULL) {
  1.2817 +    return;
  1.2818 +  }
  1.2819 +
  1.2820 +  assert(class_space_list() != NULL,
  1.2821 +    "Class VirtualSpaceList has not been initialized");
  1.2822 +
  1.2823 +  // Allocate SpaceManager for classes.
  1.2824 +  _class_vsm = new SpaceManager(lock, class_space_list());
  1.2825 +  if (_class_vsm == NULL) {
  1.2826 +    return;
  1.2827 +  }
  1.2828 +
  1.2829 +  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.2830 +
   1.2831 +  // Allocate the initial chunk for metadata objects
  1.2832 +  Metachunk* new_chunk =
  1.2833 +     space_list()->current_virtual_space()->get_chunk_vs_with_expand(word_size);
  1.2834 +  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  1.2835 +  if (new_chunk != NULL) {
  1.2836 +    // Add to this manager's list of chunks in use and current_chunk().
  1.2837 +    vsm()->add_chunk(new_chunk, true);
  1.2838 +  }
  1.2839 +
   1.2840 +  // Allocate the initial chunk for class metadata objects
  1.2841 +  Metachunk* class_chunk =
  1.2842 +     class_space_list()->current_virtual_space()->get_chunk_vs_with_expand(class_word_size);
  1.2843 +  if (class_chunk != NULL) {
  1.2844 +    class_vsm()->add_chunk(class_chunk, true);
  1.2845 +  }
  1.2846 +}
  1.2847 +
  1.2848 +
  1.2849 +MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   1.2850 +  // DumpSharedSpaces doesn't use the class metadata area (yet)
  1.2851 +  if (mdtype == ClassType && !DumpSharedSpaces) {
  1.2852 +    return class_vsm()->allocate(word_size);
  1.2853 +  } else {
  1.2854 +    return vsm()->allocate(word_size);
  1.2855 +  }
  1.2856 +}
  1.2857 +
   1.2858 +// The bottom of the space allocated in this Metaspace.  Only meaningful
   1.2859 +// while dumping shared spaces, when allocation comes from a single chunk.
  1.2860 +char* Metaspace::bottom() const {
  1.2861 +  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  1.2862 +  return (char*)vsm()->current_chunk()->bottom();
  1.2863 +}
  1.2864 +
  1.2865 +size_t Metaspace::used_words(MetadataType mdtype) const {
   1.2866 +  // Sum over the chunks in use rather than keeping a running allocation total.
  1.2867 +  return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
  1.2868 +                               vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  1.2869 +}
  1.2870 +
  1.2871 +size_t Metaspace::free_words(MetadataType mdtype) const {
  1.2872 +  return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
  1.2873 +                               vsm()->sum_free_in_chunks_in_use();
  1.2874 +}
  1.2875 +
   1.2876 +// Space capacity in the Metaspace.  It includes the
   1.2877 +// space in the lists of chunks from which allocations
   1.2878 +// have been made.  It does not include space in the global
   1.2879 +// chunk freelists, and the space available in each chunk's block
   1.2880 +// dictionary is already counted within its chunk.
  1.2881 +size_t Metaspace::capacity_words(MetadataType mdtype) const {
  1.2882 +  return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
  1.2883 +                               vsm()->sum_capacity_in_chunks_in_use();
  1.2884 +}
  1.2885 +
  1.2886 +void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  1.2887 +  if (SafepointSynchronize::is_at_safepoint()) {
  1.2888 +    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
  1.2889 +    // Don't take lock
  1.2890 +#ifdef DEALLOCATE_BLOCKS
  1.2891 +    if (is_class) {
  1.2892 +      class_vsm()->deallocate(ptr);
  1.2893 +    } else {
  1.2894 +      vsm()->deallocate(ptr);
  1.2895 +    }
  1.2896 +#else
  1.2897 +#ifdef ASSERT
  1.2898 +    Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
  1.2899 +#endif
  1.2900 +#endif
  1.2901 +
  1.2902 +  } else {
  1.2903 +    MutexLocker ml(vsm()->lock());
  1.2904 +
  1.2905 +#ifdef DEALLOCATE_BLOCKS
  1.2906 +    if (is_class) {
  1.2907 +      class_vsm()->deallocate(ptr);
  1.2908 +    } else {
  1.2909 +      vsm()->deallocate(ptr);
  1.2910 +    }
  1.2911 +#else
  1.2912 +#ifdef ASSERT
  1.2913 +    Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
  1.2914 +#endif
  1.2915 +#endif
  1.2916 +  }
  1.2917 +}
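          +// Unless DEALLOCATE_BLOCKS is defined, both branches above only mangle
          +// the freed words with the metadata_deallocate pattern (and only in
          +// ASSERT builds); the space is not returned to a free list.  At a
          +// safepoint the VM thread skips the lock, since no mutator can be
          +// allocating metadata concurrently.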
  1.2918 +
  1.2919 +MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  1.2920 +                              bool read_only, MetadataType mdtype, TRAPS) {
  1.2921 +  if (HAS_PENDING_EXCEPTION) {
  1.2922 +    assert(false, "Should not allocate with exception pending");
  1.2923 +    return NULL;  // caller does a CHECK_NULL too
  1.2924 +  }
  1.2925 +
   1.2926 +  // SSS: Should we align the allocations and make sure the sizes are aligned?
  1.2927 +  MetaWord* result = NULL;
  1.2928 +
  1.2929 +  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
  1.2930 +        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  1.2931 +  // Allocate in metaspaces without taking out a lock, because it deadlocks
  1.2932 +  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  1.2933 +  // to revisit this for application class data sharing.
  1.2934 +  if (DumpSharedSpaces) {
  1.2935 +    if (read_only) {
  1.2936 +      result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
  1.2937 +    } else {
  1.2938 +      result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
  1.2939 +    }
  1.2940 +    if (result == NULL) {
  1.2941 +      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  1.2942 +    }
  1.2943 +    return result;
  1.2944 +  }
  1.2945 +
  1.2946 +  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  1.2947 +
  1.2948 +  if (result == NULL) {
  1.2949 +    // Try to clean out some memory and retry.
   1.2950 +    result =
   1.2951 +      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
   1.2952 +          loader_data, word_size, mdtype);
  1.2953 +
  1.2954 +    // If result is still null, we are out of memory.
  1.2955 +    if (result == NULL) {
  1.2956 +      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  1.2957 +      report_java_out_of_memory("Metadata space");
  1.2958 +
  1.2959 +      if (JvmtiExport::should_post_resource_exhausted()) {
  1.2960 +        JvmtiExport::post_resource_exhausted(
  1.2961 +            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
  1.2962 +            "Metadata space");
  1.2963 +      }
  1.2964 +      THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
  1.2965 +    }
  1.2966 +  }
  1.2967 +  return result;
  1.2968 +}
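          +// Hypothetical call-site sketch (the names here are illustrative, not
          +// from this file): callers pass the owning loader's ClassLoaderData and
          +// let the TRAPS machinery propagate the OOM, e.g.
          +//
          +//   MetaWord* p = Metaspace::allocate(loader_data, word_size,
          +//                                     /*read_only*/ false,
          +//                                     Metaspace::NonClassType, CHECK_NULL);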
  1.2969 +
  1.2970 +void Metaspace::print_on(outputStream* out) const {
  1.2971 +  // Print both class virtual space counts and metaspace.
   1.2972 +  if (Verbose) {
   1.2973 +    vsm()->print_on(out);
   1.2974 +    class_vsm()->print_on(out);
   1.2975 +  }
  1.2976 +}
  1.2977 +
  1.2978 +#ifndef PRODUCT
  1.2979 +bool Metaspace::contains(const void * ptr) const {
  1.2980 +  if (MetaspaceShared::is_in_shared_space(ptr)) {
  1.2981 +    return true;
  1.2982 +  }
  1.2983 +  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  1.2984 +  return space_list()->contains(ptr) || class_space_list()->contains(ptr);
  1.2985 +}
  1.2986 +#endif
  1.2987 +
  1.2988 +void Metaspace::verify() {
  1.2989 +  vsm()->verify();
  1.2990 +  class_vsm()->verify();
  1.2991 +}
  1.2992 +
  1.2993 +void Metaspace::dump(outputStream* const out) const {
  1.2994 +  if (UseMallocOnly) {
  1.2995 +    // Just print usage for now
   1.2996 +    out->print_cr("usage " SIZE_FORMAT, used_words(Metaspace::NonClassType));
  1.2997 +  }
  1.2998 +  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  1.2999 +  vsm()->dump(out);
  1.3000 +  out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
  1.3001 +  class_vsm()->dump(out);
  1.3002 +}
