src/share/vm/memory/metaspace.cpp

author      stefank
date        Fri, 13 Sep 2013 22:25:27 +0200
changeset   5708:8c5e6482cbfc
parent      5707:8227700da288
child       5770:9361de86a50f
permissions -rw-r--r--

8024752: Log TraceMetadata* output to gclog_or_tty instead of tty
Reviewed-by: brutisso, mgerdin, coleenp
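
The shape of the change, as a sketch (not an exhaustive list of the touched
call sites): trace output guarded by the TraceMetadata* flags is now written
to the stream used by GC logging, so a -Xloggc: log captures it, e.g.:

    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr(...);   // previously tty->print_cr(...)
    }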

/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// Set this constant to true to enable slow integrity checking of
// the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_class_metaspace_size;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
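
// Note that the sizes above are in words, not bytes.  On a 64-bit VM
// (BytesPerWord == 8), MediumChunk is 8 * K words == 64KB and
// ClassMediumChunk is 4 * K words == 32KB, e.g.:
//
//   size_t medium_chunk_bytes = MediumChunk * BytesPerWord;  // 65536 on 64-bit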

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}
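
// Typical traversal over the in-use chunk lists (a usage sketch):
//
//   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
//     Metachunk* chunks = chunks_in_use(i);
//     // ... walk the list for index i ...
//   }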

// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspace from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk.  The current
// chunk is the chunk from which allocations are done.  Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
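
// In outline, an allocation first tries the freed-block list, then bumps a
// pointer in the current chunk, and only then grows (a sketch of the flow,
// not the exact code):
//
//   MetaWord* p = block_freelists()->get_block(word_size);  // reuse freed space
//   if (p == NULL) {
//     p = current_chunk()->allocate(word_size);             // bump pointer in chunk
//   }
//   if (p == NULL) {
//     p = grow_and_allocate(word_size);                     // new chunk/virtual space
//   }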

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
// count that includes all three

class ChunkManager VALUE_OBJ_CLASS_SPEC {

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Totals for the chunks in all lists of this ChunkManager
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}

  // Add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add and remove from a list by size.  Selects the
  // list based on the size of the chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st);
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
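
// A usage sketch (not code from this file): deallocated metadata feeds the
// freelist and later allocations may be satisfied from it.
//
//   BlockFreelist bf;
//   bf.return_block(p, freed_word_size);       // freed space goes into the dictionary
//   MetaWord* q = bf.get_block(needed_words);  // NULL if nothing suitable is free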

class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  void inc_container_count();
#ifdef ASSERT
  uint container_count_slow();
#endif

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void dec_container_count();
#ifdef ASSERT
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
  size_t free_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t words, bool pre_touch = false);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve a
  // configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
// It has a _next link for a singly linked list and a MemRegion
// for the total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager      _chunk_manager;

  // Can this virtual list allocate more than one space?  Also, used to determine
  // whether to allocate unlimited small chunks in this virtual space
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);

  // Get the first chunk for a Metaspace.  Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first virtualspace.
  void initialize(size_t word_size);

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge();

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;

//  SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations and contains.
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // Chunk related size
  size_t _medium_chunk_bunch;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }
  VirtualSpaceList* vs_list() const    { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock,
               VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return (size_t)(vs_list()->is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size() { return (size_t)(vs_list()->is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size, returns
  // the chunk size to use (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    // If only the dictionary is going to be used (i.e., no
    // indexed free list), then there is a minimum size requirement.
    // MinChunkSize is a placeholder for the real minimum size JJJ
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size,
                                 Metablock::min_block_byte_size());
    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};
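
// A worked example for SpaceManager::get_raw_word_size(), assuming a 64-bit
// VM where ARENA_ALIGN rounds up to 16 bytes and
// Metablock::min_block_byte_size() is 16 (both values are assumptions here):
//
//   word_size = 1  ->  byte_size = 8   ->  raw_bytes_size = 16  ->  2 words
//   word_size = 3  ->  byte_size = 24  ->  raw_bytes_size = 32  ->  4 words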

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
    err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}
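
// The WasteMultiplier check above refuses a best-fit block more than 4x the
// request: asking for 20 words against a smallest available block of 100
// words returns NULL (100 > 4 * 20) and puts the block back, while a 64-word
// block is taken and its 44-word tail is returned to the freelist.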

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}
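
// Bump-pointer allocation in pictures: with top() at T and a request of W
// words, the new chunk occupies [T, T + W) and top() advances to T + W.
// is_available() is just the check T + W <= end() on the committed region.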

// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result = virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print_on(gclog_or_tty);
  }
  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this VirtualSpace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->capacity_word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge() {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager());
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each virtualspace
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
    err_msg("Total in free chunks " SIZE_FORMAT
            " greater than total from virtual_spaces " SIZE_FORMAT,
            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total_words();
  return used;
}

// Space available in all MetadataVirtualspaces allocated
// for metadata.  This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
  size_t capacity = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    capacity += vsl->capacity_words_in_vs();
  }
  return capacity;
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  assert(initialization_succeeded,
    " VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  assert(succeeded, " VirtualSpaceList initialization should not fail");
  link_vs(class_entry);
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size, "Must be");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  size_t before = node->committed_words();

  bool result = node->expand_by(word_size, pre_touch);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Must be");
  inc_committed_words(after - before);

  return result;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  if (next != NULL) {
    next->container()->inc_container_count();
  } else {
    // Allocate a chunk out of the current virtual space.
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                      page_size_words);
    bool vs_expanded =
      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new virtualspace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          assert(current_virtual_space()->expanded_words() == 0,
              "New virtual space nodes should not have expanded");

          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
                                                              page_size_words);
          // We probably want to expand by aligned_expand_vs_by_words here.
          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
            " Fail instead of expand the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}

Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio,
// analogous to the ratios used to resize the Java heap by some GCs.  New
// flags can be implemented if really needed.  MinMetaspaceFreeRatio is used
// to calculate how much free space is desirable in the metaspace capacity to
// decide how much to increase the HWM.  MaxMetaspaceFreeRatio is used to
// decide how much free space is desirable in the metaspace capacity before
// decreasing the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion.  If that is still
// not enough, expand by the size of the allocation (word_size) plus
// some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}
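
// Tracing the branches above (min = min_delta_words, max = max_delta_words):
//
//   size_delta_words <= min         ->  delta = min
//   min < size_delta_words <= max   ->  delta = max
//   size_delta_words > max          ->  delta = size_delta_words + min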

bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // If the user wants a limit, impose one.
  // The reason for someone using this flag is to limit reserved space.  So
  // for non-class virtual space, compare against virtual spaces that are reserved.
  // For class virtual space, we only compare against the committed space, not
  // reserved space, because this is a larger space prereserved for compressed
  // class pointers.
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
    size_t real_allocated = Metaspace::space_list()->reserved_bytes() +
              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
    if (real_allocated >= MaxMetaspaceSize) {
      return false;
    }
  }

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (Metaspace::using_class_space() &&
      (vsl == Metaspace::class_space_list())) return true;

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  size_t metaspace_size_bytes = MetaspaceSize;
  if (committed_capacity_bytes < metaspace_size_bytes ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_bytes);
    return true;
  } else {
    if (committed_capacity_bytes < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                        "  capacity_until_GC " SIZE_FORMAT
                        "  allocated_capacity_bytes " SIZE_FORMAT,
                        word_size,
                        capacity_until_GC(),
                        MetaspaceAux::allocated_capacity_bytes());
      }
      return false;
    }
  }
}
  1393 void MetaspaceGC::compute_new_size() {
  1394   assert(_shrink_factor <= 100, "invalid shrink factor");
  1395   uint current_shrink_factor = _shrink_factor;
  1396   _shrink_factor = 0;
  1398   // Until a faster way of calculating the "used" quantity is implemented,
  1399   // use "capacity".
  1400   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  1401   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
  1403   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  1404   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1406   const double min_tmp = used_after_gc / maximum_used_percentage;
  1407   size_t minimum_desired_capacity =
  1408     (size_t)MIN2(min_tmp, double(max_uintx));
  1409   // Don't shrink less than the initial generation size
  1410   minimum_desired_capacity = MAX2(minimum_desired_capacity,
  1411                                   MetaspaceSize);
  1413   if (PrintGCDetails && Verbose) {
  1414     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
  1415     gclog_or_tty->print_cr("  "
  1416                   "  minimum_free_percentage: %6.2f"
  1417                   "  maximum_used_percentage: %6.2f",
  1418                   minimum_free_percentage,
  1419                   maximum_used_percentage);
  1420     gclog_or_tty->print_cr("  "
  1421                   "   used_after_gc       : %6.1fKB",
  1422                   used_after_gc / (double) K);
  1426   size_t shrink_bytes = 0;
  1427   if (capacity_until_GC < minimum_desired_capacity) {
  1428     // If we have less capacity below the metaspace HWM, then
  1429     // increment the HWM.
  1430     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
  1431     // Don't expand unless it's significant
  1432     if (expand_bytes >= MinMetaspaceExpansion) {
  1433       MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
  1435     if (PrintGCDetails && Verbose) {
  1436       size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
  1437       gclog_or_tty->print_cr("    expanding:"
  1438                     "  minimum_desired_capacity: %6.1fKB"
  1439                     "  expand_bytes: %6.1fKB"
  1440                     "  MinMetaspaceExpansion: %6.1fKB"
  1441                     "  new metaspace HWM:  %6.1fKB",
  1442                     minimum_desired_capacity / (double) K,
  1443                     expand_bytes / (double) K,
  1444                     MinMetaspaceExpansion / (double) K,
  1445                     new_capacity_until_GC / (double) K);
  1447     return;
  1450   // No expansion, now see if we want to shrink
  1451   // We would never want to shrink more than this
  1452   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  1453   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
  1454     max_shrink_bytes));
  1456   // Should shrinking be considered?
  1457   if (MaxMetaspaceFreeRatio < 100) {
  1458     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
  1459     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1460     const double max_tmp = used_after_gc / minimum_used_percentage;
  1461     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
  1462     maximum_desired_capacity = MAX2(maximum_desired_capacity,
  1463                                     MetaspaceSize);
  1464     if (PrintGCDetails && Verbose) {
  1465       gclog_or_tty->print_cr("  "
  1466                              "  maximum_free_percentage: %6.2f"
  1467                              "  minimum_used_percentage: %6.2f",
  1468                              maximum_free_percentage,
  1469                              minimum_used_percentage);
  1470       gclog_or_tty->print_cr("  "
  1471                              "  minimum_desired_capacity: %6.1fKB"
  1472                              "  maximum_desired_capacity: %6.1fKB",
  1473                              minimum_desired_capacity / (double) K,
  1474                              maximum_desired_capacity / (double) K);
  1477     assert(minimum_desired_capacity <= maximum_desired_capacity,
  1478            "sanity check");
  1480     if (capacity_until_GC > maximum_desired_capacity) {
  1481       // Capacity too large, compute shrinking size
  1482       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
  1483       // We don't want to shrink all the way back to initSize if people call
  1484       // System.gc(), because some programs do that between "phases" and then
  1485       // we'd just have to grow the metaspace up again for the next phase.  So we
  1486       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
  1487       // on the third call, and 100% by the fourth call.  But if we recompute
  1488       // size without shrinking, it goes back to 0%.
  1489       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
  1490       assert(shrink_bytes <= max_shrink_bytes,
  1491         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  1492           shrink_bytes, max_shrink_bytes));
  1493       if (current_shrink_factor == 0) {
  1494         _shrink_factor = 10;
  1495       } else {
  1496         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
  1498       if (PrintGCDetails && Verbose) {
  1499         gclog_or_tty->print_cr("  "
  1500                       "  shrinking:"
  1501                       "  initSize: %.1fK"
  1502                       "  maximum_desired_capacity: %.1fK",
  1503                       MetaspaceSize / (double) K,
  1504                       maximum_desired_capacity / (double) K);
  1505         gclog_or_tty->print_cr("  "
  1506                       "  shrink_bytes: %.1fK"
  1507                       "  current_shrink_factor: %d"
  1508                       "  new shrink factor: %d"
  1509                       "  MinMetaspaceExpansion: %.1fK",
  1510                       shrink_bytes / (double) K,
  1511                       current_shrink_factor,
  1512                       _shrink_factor,
  1513                       MinMetaspaceExpansion / (double) K);
  1518   // Don't shrink unless it's significant
  1519   if (shrink_bytes >= MinMetaspaceExpansion &&
  1520       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
  1521     MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
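       // A minimal sketch of the damped shrink schedule applied above:
       // successive recomputations that keep finding excess capacity step
       // the factor 0 -> 10 -> 40 -> 100 (percent of the excess released);
       // a recomputation without shrinking resets it to 0.
#if 0  // illustrative only, mirrors the _shrink_factor update above
       static uint next_shrink_factor(uint current) {
         return (current == 0) ? 10 : MIN2(current * 4, (uint) 100);
       }
#endif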
  1525 // Metadebug methods
  1527 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
  1528                                        size_t chunk_word_size) {
  1529 #ifdef ASSERT
  1530   VirtualSpaceList* vsl = sm->vs_list();
  1531   if (MetaDataDeallocateALot &&
  1532       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  1533     Metadebug::reset_deallocate_chunk_a_lot_count();
  1534     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
  1535       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
  1536       if (dummy_chunk == NULL) {
  1537         break;
  1539       vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  1541       if (TraceMetadataChunkAllocation && Verbose) {
  1542         gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: " SIZE_FORMAT ") ",
  1543                                sm->sum_count_in_chunks_in_use());
  1544         dummy_chunk->print_on(gclog_or_tty);
  1545         gclog_or_tty->print_cr("  Free chunks total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1546                                vsl->chunk_manager()->free_chunks_total_words(),
  1547                                vsl->chunk_manager()->free_chunks_count());
  1550   } else {
  1551     Metadebug::inc_deallocate_chunk_a_lot_count();
  1553 #endif
  1556 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
  1557                                        size_t raw_word_size) {
  1558 #ifdef ASSERT
  1559   if (MetaDataDeallocateALot &&
  1560         Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  1561     Metadebug::set_deallocate_block_a_lot_count(0);
  1562     for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
  1563       MetaWord* dummy_block = sm->allocate_work(raw_word_size);
  1564       if (dummy_block == 0) {
  1565         break;
  1567       sm->deallocate(dummy_block, raw_word_size);
  1569   } else {
  1570     Metadebug::inc_deallocate_block_a_lot_count();
  1572 #endif
  1575 void Metadebug::init_allocation_fail_alot_count() {
  1576   if (MetadataAllocationFailALot) {
  1577     _allocation_fail_alot_count =
  1578       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
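           // The expression above draws the next failure countdown roughly
           // uniformly from [1, MetadataAllocationFailALotInterval]:
           // os::random() yields a non-negative value below max_jint, so
           // os::random() / (max_jint + 1.0) lies in [0, 1).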
  1582 #ifdef ASSERT
  1583 bool Metadebug::test_metadata_failure() {
  1584   if (MetadataAllocationFailALot &&
  1585       Threads::is_vm_complete()) {
  1586     if (_allocation_fail_alot_count > 0) {
  1587       _allocation_fail_alot_count--;
  1588     } else {
  1589       if (TraceMetadataChunkAllocation && Verbose) {
  1590         gclog_or_tty->print_cr("Metadata allocation failing for "
  1591                                "MetadataAllocationFailALot");
  1593       init_allocation_fail_alot_count();
  1594       return true;
  1597   return false;
  1599 #endif
  1601 // ChunkManager methods
  1603 size_t ChunkManager::free_chunks_total_words() {
  1604   return _free_chunks_total;
  1607 size_t ChunkManager::free_chunks_total_bytes() {
  1608   return free_chunks_total_words() * BytesPerWord;
  1611 size_t ChunkManager::free_chunks_count() {
  1612 #ifdef ASSERT
  1613   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
  1614     MutexLockerEx cl(SpaceManager::expand_lock(),
  1615                      Mutex::_no_safepoint_check_flag);
  1616     // This lock is only needed in debug because the verification
  1617     // of the _free_chunks_totals walks the list of free chunks
  1618     slow_locked_verify_free_chunks_count();
  1620 #endif
  1621   return _free_chunks_count;
  1624 void ChunkManager::locked_verify_free_chunks_total() {
  1625   assert_lock_strong(SpaceManager::expand_lock());
  1626   assert(sum_free_chunks() == _free_chunks_total,
  1627     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
  1628            " same as sum " SIZE_FORMAT, _free_chunks_total,
  1629            sum_free_chunks()));
  1632 void ChunkManager::verify_free_chunks_total() {
  1633   MutexLockerEx cl(SpaceManager::expand_lock(),
  1634                      Mutex::_no_safepoint_check_flag);
  1635   locked_verify_free_chunks_total();
  1638 void ChunkManager::locked_verify_free_chunks_count() {
  1639   assert_lock_strong(SpaceManager::expand_lock());
  1640   assert(sum_free_chunks_count() == _free_chunks_count,
  1641     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
  1642            " same as sum " SIZE_FORMAT, _free_chunks_count,
  1643            sum_free_chunks_count()));
  1646 void ChunkManager::verify_free_chunks_count() {
  1647 #ifdef ASSERT
  1648   MutexLockerEx cl(SpaceManager::expand_lock(),
  1649                      Mutex::_no_safepoint_check_flag);
  1650   locked_verify_free_chunks_count();
  1651 #endif
  1654 void ChunkManager::verify() {
  1655   MutexLockerEx cl(SpaceManager::expand_lock(),
  1656                      Mutex::_no_safepoint_check_flag);
  1657   locked_verify();
  1660 void ChunkManager::locked_verify() {
  1661   locked_verify_free_chunks_count();
  1662   locked_verify_free_chunks_total();
  1665 void ChunkManager::locked_print_free_chunks(outputStream* st) {
  1666   assert_lock_strong(SpaceManager::expand_lock());
  1667   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1668                 _free_chunks_total, _free_chunks_count);
  1671 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  1672   assert_lock_strong(SpaceManager::expand_lock());
  1673   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1674                 sum_free_chunks(), sum_free_chunks_count());
  1676 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  1677   return &_free_chunks[index];
  1680 // These methods, which sum the free chunk lists, are used by printing
  1681 // methods that run in product builds.
  1682 size_t ChunkManager::sum_free_chunks() {
  1683   assert_lock_strong(SpaceManager::expand_lock());
  1684   size_t result = 0;
  1685   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1686     ChunkList* list = free_chunks(i);
  1688     if (list == NULL) {
  1689       continue;
  1692     result = result + list->count() * list->size();
  1694   result = result + humongous_dictionary()->total_size();
  1695   return result;
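         // For example, three free SmallChunks (512 words each) plus a single
         // 10016-word chunk held in the humongous dictionary would sum to
         // 3 * 512 + 10016 = 11552 words.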
  1698 size_t ChunkManager::sum_free_chunks_count() {
  1699   assert_lock_strong(SpaceManager::expand_lock());
  1700   size_t count = 0;
  1701   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1702     ChunkList* list = free_chunks(i);
  1703     if (list == NULL) {
  1704       continue;
  1706     count = count + list->count();
  1708   count = count + humongous_dictionary()->total_free_blocks();
  1709   return count;
  1712 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  1713   ChunkIndex index = list_index(word_size);
  1714   assert(index < HumongousIndex, "No humongous list");
  1715   return free_chunks(index);
  1718 void ChunkManager::free_chunks_put(Metachunk* chunk) {
  1719   assert_lock_strong(SpaceManager::expand_lock());
  1720   ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  1721   chunk->set_next(free_list->head());
  1722   free_list->set_head(chunk);
  1723   // chunk is being returned to the chunk free list
  1724   inc_free_chunks_total(chunk->capacity_word_size());
  1725   slow_locked_verify();
  1728 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  1729   // The deallocation of a chunk originates in the freelist
  1730   // management code for a Metaspace and does not hold the
  1731   // lock.
  1732   assert(chunk != NULL, "Deallocating NULL");
  1733   assert_lock_strong(SpaceManager::expand_lock());
  1734   slow_locked_verify();
  1735   if (TraceMetadataChunkAllocation) {
  1736     gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  1737                            PTR_FORMAT "  size " SIZE_FORMAT,
  1738                            chunk, chunk->word_size());
  1740   free_chunks_put(chunk);
  1743 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  1744   assert_lock_strong(SpaceManager::expand_lock());
  1746   slow_locked_verify();
  1748   Metachunk* chunk = NULL;
  1749   if (list_index(word_size) != HumongousIndex) {
  1750     ChunkList* free_list = find_free_chunks_list(word_size);
  1751     assert(free_list != NULL, "Sanity check");
  1753     chunk = free_list->head();
  1754     debug_only(Metachunk* debug_head = chunk;)
  1756     if (chunk == NULL) {
  1757       return NULL;
  1760     // Remove the chunk as the head of the list.
  1761     free_list->remove_chunk(chunk);
  1763     // Chunk is being removed from the chunks free list.
  1764     dec_free_chunks_total(chunk->capacity_word_size());
  1766     if (TraceMetadataChunkAllocation && Verbose) {
  1767       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  1768                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  1769                              free_list, chunk, chunk->word_size());
  1771   } else {
  1772     chunk = humongous_dictionary()->get_chunk(
  1773       word_size,
  1774       FreeBlockDictionary<Metachunk>::atLeast);
  1776     if (chunk != NULL) {
  1777       if (TraceMetadataHumongousAllocation) {
  1778         size_t waste = chunk->word_size() - word_size;
  1779         gclog_or_tty->print_cr("Free list allocate humongous chunk size "
  1780                                SIZE_FORMAT " for requested size " SIZE_FORMAT
  1781                                " waste " SIZE_FORMAT,
  1782                                chunk->word_size(), word_size, waste);
  1784       // Chunk is being removed from the chunks free list.
  1785       dec_free_chunks_total(chunk->capacity_word_size());
  1786     } else {
  1787       return NULL;
  1791   // Remove it from the links to this freelist
  1792   chunk->set_next(NULL);
  1793   chunk->set_prev(NULL);
  1794 #ifdef ASSERT
  1795   // Chunk is no longer on any freelist. Setting is_free to false makes
  1796   // container_count_slow() work.
  1797   chunk->set_is_free(false);
  1798 #endif
  1799   slow_locked_verify();
  1800   return chunk;
  1803 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  1804   assert_lock_strong(SpaceManager::expand_lock());
  1805   slow_locked_verify();
  1807   // Take from the beginning of the list
  1808   Metachunk* chunk = free_chunks_get(word_size);
  1809   if (chunk == NULL) {
  1810     return NULL;
  1813   assert((word_size <= chunk->word_size()) ||
  1814          (list_index(chunk->word_size()) == HumongousIndex),
  1815          "Non-humongous variable sized chunk");
  1816   if (TraceMetadataChunkAllocation) {
  1817     size_t list_count;
  1818     if (list_index(word_size) < HumongousIndex) {
  1819       ChunkList* list = find_free_chunks_list(word_size);
  1820       list_count = list->count();
  1821     } else {
  1822       list_count = humongous_dictionary()->total_count();
  1824     gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  1825                         PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  1826                         this, chunk, chunk->word_size(), list_count);
  1827     locked_print_free_chunks(gclog_or_tty);
  1830   return chunk;
  1833 void ChunkManager::print_on(outputStream* out) {
  1834   if (PrintFLSStatistics != 0) {
  1835     humongous_dictionary()->report_statistics();
  1839 // SpaceManager methods
  1841 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
  1842                                            size_t* chunk_word_size,
  1843                                            size_t* class_chunk_word_size) {
  1844   switch (type) {
  1845   case Metaspace::BootMetaspaceType:
  1846     *chunk_word_size = Metaspace::first_chunk_word_size();
  1847     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
  1848     break;
  1849   case Metaspace::ROMetaspaceType:
  1850     *chunk_word_size = SharedReadOnlySize / wordSize;
  1851     *class_chunk_word_size = ClassSpecializedChunk;
  1852     break;
  1853   case Metaspace::ReadWriteMetaspaceType:
  1854     *chunk_word_size = SharedReadWriteSize / wordSize;
  1855     *class_chunk_word_size = ClassSpecializedChunk;
  1856     break;
  1857   case Metaspace::AnonymousMetaspaceType:
  1858   case Metaspace::ReflectionMetaspaceType:
  1859     *chunk_word_size = SpecializedChunk;
  1860     *class_chunk_word_size = ClassSpecializedChunk;
  1861     break;
  1862   default:
  1863     *chunk_word_size = SmallChunk;
  1864     *class_chunk_word_size = ClassSmallChunk;
  1865     break;
  1867   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
  1868     err_msg("Initial chunk sizes bad: data " SIZE_FORMAT
  1869             " class " SIZE_FORMAT,
  1870             *chunk_word_size, *class_chunk_word_size));
  1873 size_t SpaceManager::sum_free_in_chunks_in_use() const {
  1874   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1875   size_t free = 0;
  1876   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1877     Metachunk* chunk = chunks_in_use(i);
  1878     while (chunk != NULL) {
  1879       free += chunk->free_word_size();
  1880       chunk = chunk->next();
  1883   return free;
  1886 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  1887   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1888   size_t result = 0;
  1889   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1890    result += sum_waste_in_chunks_in_use(i);
  1893   return result;
  1896 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  1897   size_t result = 0;
  1898   Metachunk* chunk = chunks_in_use(index);
  1899   // Count the free space in all the chunks but not the
  1900   // current chunk from which allocations are still being done.
  1901   while (chunk != NULL) {
  1902     if (chunk != current_chunk()) {
  1903       result += chunk->free_word_size();
  1905     chunk = chunk->next();
  1907   return result;
  1910 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  1911   // For CMS use "allocated_chunks_words()", which does not need the
  1912   // Metaspace lock.  For the other collectors sum over the
  1913   // chunks-in-use lists.  Summing over the lists
  1914   // (sum_capacity_in_chunks_in_use()) is the definitive answer but is
  1915   // too expensive to use in product builds, so
  1916   // allocated_chunks_words() is used there; debug builds check
  1917   // that allocated_chunks_words() returns the same value as
  1918   // sum_capacity_in_chunks_in_use().
  1919   if (UseConcMarkSweepGC) {
  1920     return allocated_chunks_words();
  1921   } else {
  1922     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1923     size_t sum = 0;
  1924     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1925       Metachunk* chunk = chunks_in_use(i);
  1926       while (chunk != NULL) {
  1927         sum += chunk->capacity_word_size();
  1928         chunk = chunk->next();
  1931     return sum;
  1935 size_t SpaceManager::sum_count_in_chunks_in_use() {
  1936   size_t count = 0;
  1937   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1938     count = count + sum_count_in_chunks_in_use(i);
  1941   return count;
  1944 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  1945   size_t count = 0;
  1946   Metachunk* chunk = chunks_in_use(i);
  1947   while (chunk != NULL) {
  1948     count++;
  1949     chunk = chunk->next();
  1951   return count;
  1955 size_t SpaceManager::sum_used_in_chunks_in_use() const {
  1956   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1957   size_t used = 0;
  1958   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1959     Metachunk* chunk = chunks_in_use(i);
  1960     while (chunk != NULL) {
  1961       used += chunk->used_word_size();
  1962       chunk = chunk->next();
  1965   return used;
  1968 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
  1970   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1971     Metachunk* chunk = chunks_in_use(i);
  1972     st->print("SpaceManager: %s " PTR_FORMAT,
  1973                  chunk_size_name(i), chunk);
  1974     if (chunk != NULL) {
  1975       st->print_cr(" free " SIZE_FORMAT,
  1976                    chunk->free_word_size());
  1977     } else {
  1978       st->print_cr("");
  1982   vs_list()->chunk_manager()->locked_print_free_chunks(st);
  1983   vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  1986 size_t SpaceManager::calc_chunk_size(size_t word_size) {
  1988   // Decide between a small chunk and a medium chunk.  Up to
  1989   // _small_chunk_limit small chunks can be allocated but
  1990   // once a medium chunk has been allocated, no more small
  1991   // chunks will be allocated.
  1992   size_t chunk_word_size;
  1993   if (chunks_in_use(MediumIndex) == NULL &&
  1994       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
  1995     chunk_word_size = (size_t) small_chunk_size();
  1996     if (word_size + Metachunk::overhead() > small_chunk_size()) {
  1997       chunk_word_size = medium_chunk_size();
  1999   } else {
  2000     chunk_word_size = medium_chunk_size();
  2003   // Might still need a humongous chunk.  Enforce an
  2004   // eight word granularity to facilitate reuse (some
  2005   // wastage but better chance of reuse).
  2006   size_t if_humongous_sized_chunk =
  2007     align_size_up(word_size + Metachunk::overhead(),
  2008                   HumongousChunkGranularity);
  2009   chunk_word_size =
  2010     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
  2012   assert(!SpaceManager::is_humongous(word_size) ||
  2013          chunk_word_size == if_humongous_sized_chunk,
  2014          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
  2015                  " chunk_word_size " SIZE_FORMAT,
  2016                  word_size, chunk_word_size));
  2017   if (TraceMetadataHumongousAllocation &&
  2018       SpaceManager::is_humongous(word_size)) {
  2019     gclog_or_tty->print_cr("Metadata humongous allocation:");
  2020     gclog_or_tty->print_cr("  word_size " SIZE_FORMAT, word_size);
  2021     gclog_or_tty->print_cr("  chunk_word_size " SIZE_FORMAT,
  2022                            chunk_word_size);
  2023     gclog_or_tty->print_cr("    chunk overhead " SIZE_FORMAT,
  2024                            Metachunk::overhead());
  2026   return chunk_word_size;
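         // Worked example of the humongous rounding above (overhead value
         // assumed for illustration): with an 8-word Metachunk overhead, a
         // 10003-word request becomes align_size_up(10011, 8) = 10016 words,
         // while a 10000-word request, at 10008 words, already sits on the
         // 8-word granularity.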
  2029 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  2030   assert(vs_list()->current_virtual_space() != NULL,
  2031          "Should have been set");
  2032   assert(current_chunk() == NULL ||
  2033          current_chunk()->allocate(word_size) == NULL,
  2034          "Don't need to expand");
  2035   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  2037   if (TraceMetadataChunkAllocation && Verbose) {
  2038     size_t words_left = 0;
  2039     size_t words_used = 0;
  2040     if (current_chunk() != NULL) {
  2041       words_left = current_chunk()->free_word_size();
  2042       words_used = current_chunk()->used_word_size();
  2044     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
  2045                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
  2046                            " words left",
  2047                             word_size, words_used, words_left);
  2050   // Get another chunk out of the virtual space
  2051   size_t grow_chunks_by_words = calc_chunk_size(word_size);
  2052   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  2054   // If a chunk was available, add it to the in-use chunk list
  2055   // and do an allocation from it.
  2056   if (next != NULL) {
  2057     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
  2058     // Add to this manager's list of chunks in use.
  2059     add_chunk(next, false);
  2060     return next->allocate(word_size);
  2062   return NULL;
  2065 void SpaceManager::print_on(outputStream* st) const {
  2067   for (ChunkIndex i = ZeroIndex;
  2068        i < NumberOfInUseLists ;
  2069        i = next_chunk_index(i) ) {
  2070     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
  2071                  chunks_in_use(i),
  2072                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  2074   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
  2075                " Humongous " SIZE_FORMAT,
  2076                sum_waste_in_chunks_in_use(SmallIndex),
  2077                sum_waste_in_chunks_in_use(MediumIndex),
  2078                sum_waste_in_chunks_in_use(HumongousIndex));
  2079   // block free lists
  2080   if (block_freelists() != NULL) {
  2081     st->print_cr("total in block free lists " SIZE_FORMAT,
  2082       block_freelists()->total_size());
  2086 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  2087                            Mutex* lock,
  2088                            VirtualSpaceList* vs_list) :
  2089   _vs_list(vs_list),
  2090   _mdtype(mdtype),
  2091   _allocated_blocks_words(0),
  2092   _allocated_chunks_words(0),
  2093   _allocated_chunks_count(0),
  2094   _lock(lock)
  2096   initialize();
  2099 void SpaceManager::inc_size_metrics(size_t words) {
  2100   assert_lock_strong(SpaceManager::expand_lock());
  2101   // Total of allocated Metachunks and allocated Metachunks count
  2102   // for each SpaceManager
  2103   _allocated_chunks_words = _allocated_chunks_words + words;
  2104   _allocated_chunks_count++;
  2105   // Global total of capacity in allocated Metachunks
  2106   MetaspaceAux::inc_capacity(mdtype(), words);
  2107   // Global total of allocated Metablocks.
  2108   // used_words_slow() includes the overhead in each
  2109   // Metachunk so include it in the used when the
  2110   // Metachunk is first added (so only added once per
  2111   // Metachunk).
  2112   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
  2115 void SpaceManager::inc_used_metrics(size_t words) {
  2116   // Add to the per SpaceManager total
  2117   Atomic::add_ptr(words, &_allocated_blocks_words);
  2118   // Add to the global total
  2119   MetaspaceAux::inc_used(mdtype(), words);
  2122 void SpaceManager::dec_total_from_size_metrics() {
  2123   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  2124   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  2125   // Also deduct the overhead per Metachunk
  2126   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
  2129 void SpaceManager::initialize() {
  2130   Metadebug::init_allocation_fail_alot_count();
  2131   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  2132     _chunks_in_use[i] = NULL;
  2134   _current_chunk = NULL;
  2135   if (TraceMetadataChunkAllocation && Verbose) {
  2136     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  2140 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
  2141   if (chunks == NULL) {
  2142     return;
  2144   ChunkList* list = free_chunks(index);
  2145   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
  2146   assert_lock_strong(SpaceManager::expand_lock());
  2147   Metachunk* cur = chunks;
  2149   // This returns chunks one at a time.  If a new
  2150   // class List can be created that is a base class
  2151   // of FreeList then something like FreeList::prepend()
  2152   // can be used in place of this loop
  2153   while (cur != NULL) {
  2154     assert(cur->container() != NULL, "Container should have been set");
  2155     cur->container()->dec_container_count();
  2156     // Capture the next link before it is changed
  2157     // by the call to return_chunk_at_head();
  2158     Metachunk* next = cur->next();
  2159     cur->set_is_free(true);
  2160     list->return_chunk_at_head(cur);
  2161     cur = next;
  2165 SpaceManager::~SpaceManager() {
  2166   // This takes this->_lock, which can't be done while holding the expand_lock()
  2167   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
  2168     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
  2169             " allocated_chunks_words() " SIZE_FORMAT,
  2170             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
  2172   MutexLockerEx fcl(SpaceManager::expand_lock(),
  2173                     Mutex::_no_safepoint_check_flag);
  2175   ChunkManager* chunk_manager = vs_list()->chunk_manager();
  2177   chunk_manager->slow_locked_verify();
  2179   dec_total_from_size_metrics();
  2181   if (TraceMetadataChunkAllocation && Verbose) {
  2182     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
  2183     locked_print_chunks_in_use_on(gclog_or_tty);
  2186   // Do not mangle freed Metachunks.  The chunk size inside a Metachunk
  2187   // is still needed during the freeing of VirtualSpaceNodes.
  2189   // Have to update before the chunks_in_use lists are emptied
  2190   // below.
  2191   chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
  2192                                        sum_count_in_chunks_in_use());
  2194   // Add all the chunks in use by this space manager
  2195   // to the global list of free chunks.
  2197   // Follow each list of chunks-in-use and add them to the
  2198   // free lists.  Each list is NULL terminated.
  2200   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
  2201     if (TraceMetadataChunkAllocation && Verbose) {
  2202       gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
  2203                              sum_count_in_chunks_in_use(i),
  2204                              chunk_size_name(i));
  2206     Metachunk* chunks = chunks_in_use(i);
  2207     chunk_manager->return_chunks(i, chunks);
  2208     set_chunks_in_use(i, NULL);
  2209     if (TraceMetadataChunkAllocation && Verbose) {
  2210       gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
  2211                              chunk_manager->free_chunks(i)->count(),
  2212                              chunk_size_name(i));
  2214     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  2217   // The medium chunk case may be optimized by passing the head and
  2218   // tail of the medium chunk list to add_at_head().  The tail is often
  2219   // the current chunk but there are probably exceptions.
  2221   // Humongous chunks
  2222   if (TraceMetadataChunkAllocation && Verbose) {
  2223     gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
  2224                             sum_count_in_chunks_in_use(HumongousIndex),
  2225                             chunk_size_name(HumongousIndex));
  2226     gclog_or_tty->print("Humongous chunk dictionary: ");
  2228   // Humongous chunks are never the current chunk.
  2229   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
  2231   while (humongous_chunks != NULL) {
  2232 #ifdef ASSERT
  2233     humongous_chunks->set_is_free(true);
  2234 #endif
  2235     if (TraceMetadataChunkAllocation && Verbose) {
  2236       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
  2237                           humongous_chunks,
  2238                           humongous_chunks->word_size());
  2240     assert(humongous_chunks->word_size() == (size_t)
  2241            align_size_up(humongous_chunks->word_size(),
  2242                              HumongousChunkGranularity),
  2243            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
  2244                    " granularity %d",
  2245                    humongous_chunks->word_size(), HumongousChunkGranularity));
  2246     Metachunk* next_humongous_chunks = humongous_chunks->next();
  2247     humongous_chunks->container()->dec_container_count();
  2248     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  2249     humongous_chunks = next_humongous_chunks;
  2251   if (TraceMetadataChunkAllocation && Verbose) {
  2252     gclog_or_tty->print_cr("");
  2253     gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
  2254                      chunk_manager->humongous_dictionary()->total_count(),
  2255                      chunk_size_name(HumongousIndex));
  2257   chunk_manager->slow_locked_verify();
  2260 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  2261   switch (index) {
  2262     case SpecializedIndex:
  2263       return "Specialized";
  2264     case SmallIndex:
  2265       return "Small";
  2266     case MediumIndex:
  2267       return "Medium";
  2268     case HumongousIndex:
  2269       return "Humongous";
  2270     default:
  2271       return NULL;
  2275 ChunkIndex ChunkManager::list_index(size_t size) {
  2276   switch (size) {
  2277     case SpecializedChunk:
  2278       assert(SpecializedChunk == ClassSpecializedChunk,
  2279              "Need branch for ClassSpecializedChunk");
  2280       return SpecializedIndex;
  2281     case SmallChunk:
  2282     case ClassSmallChunk:
  2283       return SmallIndex;
  2284     case MediumChunk:
  2285     case ClassMediumChunk:
  2286       return MediumIndex;
  2287     default:
  2288       assert(size > MediumChunk || size > ClassMediumChunk,
  2289              "Not a humongous chunk");
  2290       return HumongousIndex;
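           // For example (using the ChunkSizes enum): a 512-word request maps
           // to SmallIndex, 4K words (ClassMediumChunk) to MediumIndex, and a
           // size matching no case -- e.g. 10016 words -- falls through to
           // HumongousIndex.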
  2294 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  2295   assert_lock_strong(_lock);
  2296   size_t raw_word_size = get_raw_word_size(word_size);
  2297   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
  2298   assert(raw_word_size >= min_size,
  2299          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
  2300   block_freelists()->return_block(p, raw_word_size);
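         // For illustration: if TreeChunk<Metablock, FreeList>::min_size()
         // were, say, 4 words, returning a 2-word block would trip the assert
         // above -- such "dark matter" is too small for the block freelists
         // to track and is never deallocated individually.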
  2303 // Adds a chunk to the list of chunks in use.
  2304 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
  2306   assert(new_chunk != NULL, "Should not be NULL");
  2307   assert(new_chunk->next() == NULL, "Should not be on a list");
  2309   new_chunk->reset_empty();
  2311   // Find the correct list and set the current
  2312   // chunk for that list.
  2313   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
  2315   if (index != HumongousIndex) {
  2316     retire_current_chunk();
  2317     set_current_chunk(new_chunk);
  2318     new_chunk->set_next(chunks_in_use(index));
  2319     set_chunks_in_use(index, new_chunk);
  2320   } else {
  2321     // For null class loader data and DumpSharedSpaces, the first chunk isn't
  2322     // small, so small will be null.  Link this first chunk as the current
  2323     // chunk.
  2324     if (make_current) {
  2325       // Set as the current chunk but otherwise treat as a humongous chunk.
  2326       set_current_chunk(new_chunk);
  2328     // Link at head.  The _current_chunk only points to a humongous chunk
  2329     // for the null class loader metaspace (class and data virtual space
  2330     // managers), so it will not point to the tail of the humongous
  2331     // chunks list.
  2332     new_chunk->set_next(chunks_in_use(HumongousIndex));
  2333     set_chunks_in_use(HumongousIndex, new_chunk);
  2335     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  2338   // Add to the running sum of capacity
  2339   inc_size_metrics(new_chunk->word_size());
  2341   assert(new_chunk->is_empty(), "Not ready for reuse");
  2342   if (TraceMetadataChunkAllocation && Verbose) {
  2343     gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
  2344                         sum_count_in_chunks_in_use());
  2345     new_chunk->print_on(gclog_or_tty);
  2346     if (vs_list() != NULL) {
  2347       vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  2352 void SpaceManager::retire_current_chunk() {
  2353   if (current_chunk() != NULL) {
  2354     size_t remaining_words = current_chunk()->free_word_size();
  2355     if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
  2356       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
  2357       inc_used_metrics(remaining_words);
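           // Retiring hands the unused tail of the old current chunk to the
           // block freelists in one piece (provided it meets the TreeChunk
           // minimum above) and counts those words as used, so the totals
           // still match sum_used_in_chunks_in_use().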
  2362 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  2363                                        size_t grow_chunks_by_words) {
  2365   Metachunk* next = vs_list()->get_new_chunk(word_size,
  2366                                              grow_chunks_by_words,
  2367                                              medium_chunk_bunch());
  2369   if (TraceMetadataHumongousAllocation && next != NULL &&
  2370       SpaceManager::is_humongous(next->word_size())) {
  2371     gclog_or_tty->print_cr("  new humongous chunk word size "
  2372                            SIZE_FORMAT, next->word_size());
  2375   return next;
  2378 MetaWord* SpaceManager::allocate(size_t word_size) {
  2379   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  2381   size_t raw_word_size = get_raw_word_size(word_size);
  2382   BlockFreelist* fl =  block_freelists();
  2383   MetaWord* p = NULL;
  2384   // Allocation from the dictionary is expensive in the sense that
  2385   // the dictionary has to be searched for a size.  Don't allocate
  2386   // from the dictionary until it starts to get fat.  Is this
  2387   // a reasonable policy?  Maybe a skinny dictionary is fast enough
  2388   // for allocations.  Do some profiling.  JJJ
  2389   if (fl->total_size() > allocation_from_dictionary_limit) {
  2390     p = fl->get_block(raw_word_size);
  2392   if (p == NULL) {
  2393     p = allocate_work(raw_word_size);
  2395   Metadebug::deallocate_block_a_lot(this, raw_word_size);
  2397   return p;
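         // Net effect of the above: the freelist dictionary is consulted only
         // once it holds more than allocation_from_dictionary_limit; smaller
         // dictionaries are bypassed in favor of bump allocation from the
         // current chunk via allocate_work().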
  2400 // Returns the address of space allocated for "word_size".
  2401 // This method does not know about blocks (Metablocks)
  2402 MetaWord* SpaceManager::allocate_work(size_t word_size) {
  2403   assert_lock_strong(_lock);
  2404 #ifdef ASSERT
  2405   if (Metadebug::test_metadata_failure()) {
  2406     return NULL;
  2408 #endif
  2409   // Is there space in the current chunk?
  2410   MetaWord* result = NULL;
  2412   // For DumpSharedSpaces, only allocate out of the current chunk which is
  2413   // never null because we gave it the size we wanted.   Caller reports out
  2414   // of memory if this returns null.
  2415   if (DumpSharedSpaces) {
  2416     assert(current_chunk() != NULL, "should never happen");
  2417     inc_used_metrics(word_size);
  2418     return current_chunk()->allocate(word_size); // caller handles null result
  2420   if (current_chunk() != NULL) {
  2421     result = current_chunk()->allocate(word_size);
  2424   if (result == NULL) {
  2425     result = grow_and_allocate(word_size);
  2427   if (result != NULL) {
  2428     inc_used_metrics(word_size);
  2429     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
  2430            "Head of the list is being allocated");
  2433   return result;
  2436 void SpaceManager::verify() {
  2437   // If there are blocks in the dictionary, then
  2438   // verification of chunks does not work since
  2439   // being in the dictionary alters a chunk.
  2440   if (block_freelists()->total_size() == 0) {
  2441     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  2442       Metachunk* curr = chunks_in_use(i);
  2443       while (curr != NULL) {
  2444         curr->verify();
  2445         verify_chunk_size(curr);
  2446         curr = curr->next();
  2452 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  2453   assert(is_humongous(chunk->word_size()) ||
  2454          chunk->word_size() == medium_chunk_size() ||
  2455          chunk->word_size() == small_chunk_size() ||
  2456          chunk->word_size() == specialized_chunk_size(),
  2457          "Chunk size is wrong");
  2458   return;
  2461 #ifdef ASSERT
  2462 void SpaceManager::verify_allocated_blocks_words() {
  2463   // Verification is only guaranteed at a safepoint.
  2464   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
  2465     "Verification can fail if the application is running");
  2466   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
  2467     err_msg("allocation total is not consistent " SIZE_FORMAT
  2468             " vs " SIZE_FORMAT,
  2469             allocated_blocks_words(), sum_used_in_chunks_in_use()));
  2472 #endif
  2474 void SpaceManager::dump(outputStream* const out) const {
  2475   size_t curr_total = 0;
  2476   size_t waste = 0;
  2477   uint i = 0;
  2478   size_t used = 0;
  2479   size_t capacity = 0;
  2481   // Add up statistics for all chunks in this SpaceManager.
  2482   for (ChunkIndex index = ZeroIndex;
  2483        index < NumberOfInUseLists;
  2484        index = next_chunk_index(index)) {
  2485     for (Metachunk* curr = chunks_in_use(index);
  2486          curr != NULL;
  2487          curr = curr->next()) {
  2488       out->print("%d) ", i++);
  2489       curr->print_on(out);
  2490       curr_total += curr->word_size();
  2491       used += curr->used_word_size();
  2492       capacity += curr->capacity_word_size();
  2493       waste += curr->free_word_size() + curr->overhead();
  2497   if (TraceMetadataChunkAllocation && Verbose) {
  2498     block_freelists()->print_on(out);
  2501   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  2502   // Free space isn't wasted.
  2503   waste -= free;
  2505   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
  2506                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
  2507                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
  2510 #ifndef PRODUCT
  2511 void SpaceManager::mangle_freed_chunks() {
  2512   for (ChunkIndex index = ZeroIndex;
  2513        index < NumberOfInUseLists;
  2514        index = next_chunk_index(index)) {
  2515     for (Metachunk* curr = chunks_in_use(index);
  2516          curr != NULL;
  2517          curr = curr->next()) {
  2518       curr->mangle();
  2522 #endif // PRODUCT
  2524 // MetaspaceAux
  2527 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
  2528 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
  2530 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
  2531   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2532   return list == NULL ? 0 : list->free_bytes();
  2535 size_t MetaspaceAux::free_bytes() {
  2536   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
  2539 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  2540   assert_lock_strong(SpaceManager::expand_lock());
  2541   assert(words <= allocated_capacity_words(mdtype),
  2542     err_msg("About to decrement below 0: words " SIZE_FORMAT
  2543             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
  2544             words, mdtype, allocated_capacity_words(mdtype)));
  2545   _allocated_capacity_words[mdtype] -= words;
  2548 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  2549   assert_lock_strong(SpaceManager::expand_lock());
  2550   // Needs to be atomic
  2551   _allocated_capacity_words[mdtype] += words;
  2554 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  2555   assert(words <= allocated_used_words(mdtype),
  2556     err_msg("About to decrement below 0: words " SIZE_FORMAT
  2557             " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
  2558             words, mdtype, allocated_used_words(mdtype)));
  2559   // For CMS deallocation of the Metaspaces occurs during the
  2560   // sweep which is a concurrent phase.  Protection by the expand_lock()
  2561   // is not enough since allocation is on a per Metaspace basis
  2562   // and protected by the Metaspace lock.
  2563   jlong minus_words = (jlong) - (jlong) words;
  2564   Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
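         // There is no atomic subtract here, so the decrement is expressed as
         // an atomic add of the negated value: e.g. freeing 16 words adds
         // (jlong)-16 to _allocated_used_words[mdtype].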
  2567 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  2568   // _allocated_used_words tracks allocations for
  2569   // each piece of metadata.  Those allocations are
  2570   // generally done concurrently by different application
  2571   // threads so must be done atomically.
  2572   Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
  2575 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  2576   size_t used = 0;
  2577   ClassLoaderDataGraphMetaspaceIterator iter;
  2578   while (iter.repeat()) {
  2579     Metaspace* msp = iter.get_next();
  2580     // Sum allocated_blocks_words for each metaspace
  2581     if (msp != NULL) {
  2582       used += msp->used_words_slow(mdtype);
  2585   return used * BytesPerWord;
  2588 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  2589   size_t free = 0;
  2590   ClassLoaderDataGraphMetaspaceIterator iter;
  2591   while (iter.repeat()) {
  2592     Metaspace* msp = iter.get_next();
  2593     if (msp != NULL) {
  2594       free += msp->free_words_slow(mdtype);
  2597   return free * BytesPerWord;
  2600 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
  2601   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
  2602     return 0;
  2604   // Don't count the space in the freelists.  That space will be
  2605   // added to the capacity calculation as needed.
  2606   size_t capacity = 0;
  2607   ClassLoaderDataGraphMetaspaceIterator iter;
  2608   while (iter.repeat()) {
  2609     Metaspace* msp = iter.get_next();
  2610     if (msp != NULL) {
  2611       capacity += msp->capacity_words_slow(mdtype);
  2614   return capacity * BytesPerWord;
  2617 size_t MetaspaceAux::capacity_bytes_slow() {
  2618 #ifdef PRODUCT
  2619   // Use allocated_capacity_bytes() in PRODUCT instead of this function.
  2620   guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
  2621 #endif
  2622   size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  2623   size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  2624   assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
  2625       err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
  2626         " class_capacity + non_class_capacity " SIZE_FORMAT
  2627         " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
  2628         allocated_capacity_bytes(), class_capacity + non_class_capacity,
  2629         class_capacity, non_class_capacity));
  2631   return class_capacity + non_class_capacity;
  2634 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
  2635   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2636   return list == NULL ? 0 : list->reserved_bytes();
  2639 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
  2640   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2641   return list == NULL ? 0 : list->committed_bytes();
  2644 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  2646 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  2647   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2648   if (list == NULL) {
  2649     return 0;
  2651   ChunkManager* chunk = list->chunk_manager();
  2652   chunk->slow_verify();
  2653   return chunk->free_chunks_total_words();
  2656 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  2657   return free_chunks_total_words(mdtype) * BytesPerWord;
  2660 size_t MetaspaceAux::free_chunks_total_words() {
  2661   return free_chunks_total_words(Metaspace::ClassType) +
  2662          free_chunks_total_words(Metaspace::NonClassType);
  2665 size_t MetaspaceAux::free_chunks_total_bytes() {
  2666   return free_chunks_total_words() * BytesPerWord;
  2669 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  2670   gclog_or_tty->print(", [Metaspace:");
  2671   if (PrintGCDetails && Verbose) {
  2672     gclog_or_tty->print(" "  SIZE_FORMAT
  2673                         "->" SIZE_FORMAT
  2674                         "("  SIZE_FORMAT ")",
  2675                         prev_metadata_used,
  2676                         allocated_used_bytes(),
  2677                         reserved_bytes());
  2678   } else {
  2679     gclog_or_tty->print(" "  SIZE_FORMAT "K"
  2680                         "->" SIZE_FORMAT "K"
  2681                         "("  SIZE_FORMAT "K)",
  2682                         prev_metadata_used/K,
  2683                         allocated_used_bytes()/K,
  2684                         reserved_bytes()/K);
  2687   gclog_or_tty->print("]");
  2690 // This is printed when PrintGCDetails is enabled
  2691 void MetaspaceAux::print_on(outputStream* out) {
  2692   Metaspace::MetadataType nct = Metaspace::NonClassType;
  2694   out->print_cr(" Metaspace total "
  2695                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2696                 " reserved " SIZE_FORMAT "K",
  2697                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
  2699   out->print_cr("  data space     "
  2700                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2701                 " reserved " SIZE_FORMAT "K",
  2702                 allocated_capacity_bytes(nct)/K,
  2703                 allocated_used_bytes(nct)/K,
  2704                 reserved_bytes(nct)/K);
  2705   if (Metaspace::using_class_space()) {
  2706     Metaspace::MetadataType ct = Metaspace::ClassType;
  2707     out->print_cr("  class space    "
  2708                   SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2709                   " reserved " SIZE_FORMAT "K",
  2710                   allocated_capacity_bytes(ct)/K,
  2711                   allocated_used_bytes(ct)/K,
  2712                   reserved_bytes(ct)/K);
  2716 // Print information for class space and data space separately.
  2717 // This is almost the same as above.
  2718 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  2719   size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
  2720   size_t capacity_bytes = capacity_bytes_slow(mdtype);
  2721   size_t used_bytes = used_bytes_slow(mdtype);
  2722   size_t free_bytes = free_bytes_slow(mdtype);
  2723   size_t used_and_free = used_bytes + free_bytes +
  2724                            free_chunks_capacity_bytes;
  2725   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
  2726              "K + unused in chunks " SIZE_FORMAT "K + "
  2727              "capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
  2728              "K  capacity in allocated chunks " SIZE_FORMAT "K",
  2729              used_bytes / K,
  2730              free_bytes / K,
  2731              free_chunks_capacity_bytes / K,
  2732              used_and_free / K,
  2733              capacity_bytes / K);
  2734   // Accounting can only be correct if we got the values during a safepoint
  2735   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
  2738 // Print total fragmentation for class metaspaces
  2739 void MetaspaceAux::print_class_waste(outputStream* out) {
  2740   assert(Metaspace::using_class_space(), "class metaspace not used");
  2741   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  2742   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  2743   ClassLoaderDataGraphMetaspaceIterator iter;
  2744   while (iter.repeat()) {
  2745     Metaspace* msp = iter.get_next();
  2746     if (msp != NULL) {
  2747       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  2748       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  2749       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  2750       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
  2751       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  2752       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
  2753       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  2756   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  2757                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  2758                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  2759                 "large count " SIZE_FORMAT,
  2760                 cls_specialized_count, cls_specialized_waste,
  2761                 cls_small_count, cls_small_waste,
  2762                 cls_medium_count, cls_medium_waste, cls_humongous_count);
  2765 // Print total fragmentation for data and class metaspaces separately
  2766 void MetaspaceAux::print_waste(outputStream* out) {
  2767   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  2768   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
  2770   ClassLoaderDataGraphMetaspaceIterator iter;
  2771   while (iter.repeat()) {
  2772     Metaspace* msp = iter.get_next();
  2773     if (msp != NULL) {
  2774       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  2775       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  2776       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  2777       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
  2778       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  2779       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
  2780       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  2783   out->print_cr("Total fragmentation waste (words) doesn't count free space");
  2784   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  2785                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  2786                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  2787                         "large count " SIZE_FORMAT,
  2788              specialized_count, specialized_waste, small_count,
  2789              small_waste, medium_count, medium_waste, humongous_count);
  2790   if (Metaspace::using_class_space()) {
  2791     print_class_waste(out);
  2795 // Dump global metaspace things from the end of ClassLoaderDataGraph
  2796 void MetaspaceAux::dump(outputStream* out) {
  2797   out->print_cr("All Metaspace:");
  2798   out->print("data space: "); print_on(out, Metaspace::NonClassType);
  2799   out->print("class space: "); print_on(out, Metaspace::ClassType);
  2800   print_waste(out);
  2803 void MetaspaceAux::verify_free_chunks() {
  2804   Metaspace::space_list()->chunk_manager()->verify();
  2805   if (Metaspace::using_class_space()) {
  2806     Metaspace::class_space_list()->chunk_manager()->verify();
void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
  // For purposes of the running sum of capacity, verify against capacity
  size_t capacity_in_use_bytes = capacity_bytes_slow();
  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
            " capacity_bytes_slow() " SIZE_FORMAT,
            running_sum_capacity_bytes, capacity_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
              " capacity_bytes_slow(%u) " SIZE_FORMAT,
              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_used() {
#ifdef ASSERT
  size_t running_sum_used_bytes = allocated_used_bytes();
  // For purposes of the running sum of used, verify against used
  size_t used_in_use_bytes = used_bytes_slow();
  assert(allocated_used_bytes() == used_in_use_bytes,
    err_msg("allocated_used_bytes() " SIZE_FORMAT
            " used_bytes_slow() " SIZE_FORMAT,
            allocated_used_bytes(), used_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t used_in_use_bytes = used_bytes_slow(i);
    assert(allocated_used_bytes(i) == used_in_use_bytes,
      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
              " used_bytes_slow(%u) " SIZE_FORMAT,
              i, allocated_used_bytes(i), i, used_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}

// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}

Metaspace::~Metaspace() {
  delete _vsm;
  if (using_class_space()) {
    delete _class_vsm;
  }
}

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + class_metaspace_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else {
    higher_address = metaspace_base + class_metaspace_size();
    lower_base = metaspace_base;
  }
  Universe::set_narrow_klass_base(lower_base);
  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
}
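
// Worked example of the shift selection above (illustrative numbers, not
// from a real run): with CDS off and a 1G class metaspace reserved at
// 0x800000000, higher_address - lower_base is 1G, which is below max_juint
// (about 4G), so the shift stays 0 and a compressed klass pointer decodes
// simply as
//   Klass* k = (Klass*)(Universe::narrow_klass_base() + narrow_klass);
// Only when the span exceeds 4G does the shift become
// LogKlassAlignmentInBytes, widening the encodable range to 4G << shift at
// the cost of aligning Klass structures.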

// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                (address)(metaspace_base + class_metaspace_size()));
  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
}
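
// For example (hypothetical addresses): with cds_base at 0x800000000 and a
// 100M shared region, and metaspace_base at 0x808000000 with a 1G class
// metaspace, the span is (0x808000000 + 1G) - 0x800000000, roughly 1.1G,
// which is below max_juint, so this address combination still permits
// compressed klass pointers together with CDS.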

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");

  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
                                             os::vm_allocation_granularity(),
                                             false, requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
    if (UseSharedSpaces) {
      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
        addr = addr + 1*G;
        metaspace_rs = ReservedSpace(class_metaspace_size(),
                                     os::vm_allocation_granularity(), false, addr, 0);
      }
    }

    // If no allocation succeeded, try to allocate the space anywhere.  If
    // that also fails, we are out of memory.  At this point we cannot fall
    // back to allocating the metaspace as if UseCompressedClassPointers were
    // off, because too much initialization that depends on
    // UseCompressedClassPointers has already happened; the flag cannot be
    // turned off any more.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(class_metaspace_size(),
                                   os::vm_allocation_granularity(), false);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              class_metaspace_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }

  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
}

#endif

void Metaspace::global_initialize() {
  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  size_t cds_total = 0;

  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
                                         os::vm_allocation_granularity()));

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    _space_list = new VirtualSpaceList(cds_total/wordSize);

#ifdef _LP64
    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
      "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    // Set the shift to zero.
    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
           "CDS region is too large");
    Universe::set_narrow_klass_shift(0);
#endif

  } else {
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict).
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();
      memset(mapinfo, 0, sizeof(FileMapInfo));

      // Open the shared archive file, read and validate the header.  If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in the spaces now, too.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
      cds_total = FileMapInfo::shared_spaces_size();
      cds_address = (address)mapinfo->region_base(0);
    }

#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
      } else {
        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
      }
    }
#endif

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined by running with -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                       (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
  }
}
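
// A worked example of the first-class-chunk sizing above, assuming common
// LP64 defaults (BytesPerWord == 8, MediumChunk == 8K words,
// CompressedClassSpaceSize == 1G):
//   MediumChunk * 6                           == 48K words (384K bytes)
//   (CompressedClassSpaceSize/BytesPerWord)*2 == 256M words
// so MIN2 selects 48K words, which is comfortably larger than a class
// medium chunk and therefore keeps the boot loader's first class chunk off
// the medium chunk free list.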

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

  assert(space_list() != NULL,
    "Metadata VirtualSpaceList has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock, space_list());
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
      "Class VirtualSpaceList has not been initialized");

    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  Metachunk* new_chunk =
     space_list()->get_initialization_chunk(word_size,
                                            vsm()->medium_chunk_bunch());
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects
  if (using_class_space()) {
    Metachunk* class_chunk =
       class_space_list()->get_initialization_chunk(class_word_size,
                                                    class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}
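
// For example (the granularity is platform-dependent): on a 64-bit platform
// with wordSize == 8 and a 4K allocation alignment, align_word_size_up(100)
// converts 100 words to 800 bytes, rounds up to 4096 bytes, and returns
// 512 words.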

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use the class metadata area (yet).
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (mdtype == ClassType && using_class_space()) {
    return  class_vsm()->allocate(word_size);
  } else {
    return  vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  MetaWord* result;
  MetaspaceGC::set_expand_after_GC(true);
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  }

  result = allocate(word_size, mdtype);

  return result;
}

// Space allocated in the Metaspace.  This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  It does not include space in the global
// freelist, nor space available in the dictionary, since the
// latter is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
    // Don't take Heap_lock
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for the dictionary: the block is abandoned
      // (in debug builds it is filled with a recognizable bit pattern so
      // that stale uses stand out).
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  } else {
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
      // Dark matter.  Too small for the dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  }
}

Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // SSS: Should we align the allocations and make sure the sizes are aligned?
  MetaWord* result = NULL;

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    } else {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }
    return Metablock::initialize(result, word_size);
  }

  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    // Try to clean out some memory and retry.
    result =
      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
        loader_data, word_size, mdtype);

    // If result is still null, we are out of memory.
    if (result == NULL) {
      if (Verbose && TraceMetadataChunkAllocation) {
        gclog_or_tty->print_cr("Metaspace allocation failed for size "
          SIZE_FORMAT, word_size);
        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
        MetaspaceAux::dump(gclog_or_tty);
      }
      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
                                                         "Metadata space";
      report_java_out_of_memory(space_string);

      if (JvmtiExport::should_post_resource_exhausted()) {
        JvmtiExport::post_resource_exhausted(
            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
            space_string);
      }
      if (mdtype == ClassType) {
        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
      } else {
        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
      }
    }
  }
  return Metablock::initialize(result, word_size);
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  }
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      // Report any gap between the previous record and this one as
      // unknown metadata.
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}
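
// A minimal caller-side sketch (hypothetical, not part of this file) of a
// closure consuming the iteration above, e.g. to tally dumped bytes per
// object type:
//
//   class SumByTypeClosure : public Metaspace::AllocRecordClosure {
//    public:
//     size_t _bytes[MetaspaceObj::_number_of_types];
//     SumByTypeClosure() { memset(_bytes, 0, sizeof(_bytes)); }
//     void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       _bytes[type] += byte_size;   // gaps arrive as UnknownType
//     }
//   };
//
//   SumByTypeClosure cl;
//   msp->iterate(&cl);   // msp: a Metaspace* during DumpSharedSpaces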

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  space_list()->purge();
  if (using_class_space()) {
    class_space_list()->purge();
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void * ptr) {
  if (MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  // This is checked while unlocked.  As long as the virtualspaces are added
  // at the end, the pointer will be in one of them.  The virtual spaces
  // aren't deleted presently.  When they are, some sort of locking might
  // be needed.  Note that locking here can cause lock inversion problems
  // with the caller, MetaspaceObj::is_metadata().
  return space_list()->contains(ptr) ||
         (using_class_space() && class_space_list()->contains(ptr));
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class MetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed  = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved  = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test() {
    test_reserved();
    test_committed();
  }
};

void MetaspaceAux_test() {
  MetaspaceAuxTest::test();
}

#endif
