src/share/vm/memory/metaspace.cpp

author       dholmes
date         Wed, 11 Sep 2013 00:38:18 -0400
changeset    5689 de88570fabfc
parent       5578 4c84d351cca9
child        5694 7944aba7ba41
permissions  -rw-r--r--

8024256: Minimal VM build is broken with PCH disabled
Reviewed-by: coleenp, twisti

     1 /*
     2  * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    24 #include "precompiled.hpp"
    25 #include "gc_interface/collectedHeap.hpp"
    26 #include "memory/binaryTreeDictionary.hpp"
    27 #include "memory/freeList.hpp"
    28 #include "memory/collectorPolicy.hpp"
    29 #include "memory/filemap.hpp"
    31 #include "memory/metablock.hpp"
    32 #include "memory/metachunk.hpp"
    33 #include "memory/metaspace.hpp"
    34 #include "memory/metaspaceShared.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.hpp"
    37 #include "runtime/globals.hpp"
    38 #include "runtime/java.hpp"
    39 #include "runtime/mutex.hpp"
    40 #include "runtime/orderAccess.hpp"
    41 #include "services/memTracker.hpp"
    42 #include "utilities/copy.hpp"
    43 #include "utilities/debug.hpp"
    45 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
    46 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
    47 // Set this constant to true to enable slow integrity checking of
    48 // the free chunk lists
    49 const bool metaspace_slow_verify = false;
    51 // Parameters for stress mode testing
    52 const uint metadata_deallocate_a_lot_block = 10;
    53 const uint metadata_deallocate_a_lock_chunk = 3;
    54 size_t const allocation_from_dictionary_limit = 64 * K;
    56 MetaWord* last_allocated = 0;
    58 size_t Metaspace::_class_metaspace_size;
    60 // Used in declarations in SpaceManager and ChunkManager
    61 enum ChunkIndex {
    62   ZeroIndex = 0,
    63   SpecializedIndex = ZeroIndex,
    64   SmallIndex = SpecializedIndex + 1,
    65   MediumIndex = SmallIndex + 1,
    66   HumongousIndex = MediumIndex + 1,
    67   NumberOfFreeLists = 3,
    68   NumberOfInUseLists = 4
    69 };
    71 enum ChunkSizes {    // in words.
    72   ClassSpecializedChunk = 128,
    73   SpecializedChunk = 128,
    74   ClassSmallChunk = 256,
    75   SmallChunk = 512,
    76   ClassMediumChunk = 4 * K,
    77   MediumChunk = 8 * K,
    78   HumongousChunkGranularity = 8
    79 };
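// For scale (illustrative arithmetic, assuming 8-byte words on a 64-bit VM):
// SpecializedChunk is 1K bytes, SmallChunk is 4K and MediumChunk is 64K, while
// the class-space variants ClassSmallChunk and ClassMediumChunk are 2K and 32K.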
    81 static ChunkIndex next_chunk_index(ChunkIndex i) {
    82   assert(i < NumberOfInUseLists, "Out of bound");
    83   return (ChunkIndex) (i+1);
    84 }
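// Example (illustrative sketch): next_chunk_index() is the idiom for stepping
// through the fixed-size in-use lists, e.g. to visit every chunk owned by a
// SpaceManager 'sm' (a hypothetical variable; SpaceManager is declared later
// in this file).
#if 0
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    for (Metachunk* chunk = sm->chunks_in_use(i); chunk != NULL; chunk = chunk->next()) {
      // visit chunk
    }
  }
#endif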
    86 // Originally _capacity_until_GC was set to MetaspaceSize here but
    87 // the default MetaspaceSize before argument processing was being
    88 // used which was not the desired value.  See the code
    89 // in should_expand() to see how the initialization is handled
    90 // now.
    91 size_t MetaspaceGC::_capacity_until_GC = 0;
    92 bool MetaspaceGC::_expand_after_GC = false;
    93 uint MetaspaceGC::_shrink_factor = 0;
    94 bool MetaspaceGC::_should_concurrent_collect = false;
    96 // Blocks of space for metadata are allocated out of Metachunks.
    97 //
    98 // Metachunks are allocated out of MetadataVirtualspaces and, once
    99 // allocated, there is no explicit link between a Metachunk and
   100 // the MetadataVirtualspace from which it was allocated.
   101 //
   102 // Each SpaceManager maintains a
   103 // list of the chunks it is using and the current chunk.  The current
   104 // chunk is the chunk from which allocations are done.  Space freed in
   105 // a chunk is placed on the free list of blocks (BlockFreelist) and
   106 // reused from there.
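//
// A simplified sketch of that allocation path (illustrative only; the real
// SpaceManager::allocate() below also takes the SpaceManager lock and adjusts
// the requested size for block overhead):
#if 0
  MetaWord* allocate_sketch(SpaceManager* sm, size_t word_size) {
    MetaWord* p = sm->block_freelists()->get_block(word_size);  // reuse freed space first
    if (p == NULL && sm->current_chunk() != NULL) {
      p = sm->current_chunk()->allocate(word_size);              // bump allocation in the current chunk
    }
    if (p == NULL) {
      p = sm->grow_and_allocate(word_size);                      // get a new chunk and retry
    }
    return p;
  }
#endif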
   108 typedef class FreeList<Metachunk> ChunkList;
   110 // Manages the global free lists of chunks.
   111 // Has three lists of free chunks, and a total size and
   112 // count that includes all three
   114 class ChunkManager VALUE_OBJ_CLASS_SPEC {
   116   // Free list of chunks of different sizes.
   117   //   SpecializedChunk
   118   //   SmallChunk
   119   //   MediumChunk
   120   //   HumongousChunk
   121   ChunkList _free_chunks[NumberOfFreeLists];
   124   //   HumongousChunk
   125   ChunkTreeDictionary _humongous_dictionary;
   127   // Totals for the chunks in all the free lists of this ChunkManager
   128   size_t _free_chunks_total;
   129   size_t _free_chunks_count;
   131   void dec_free_chunks_total(size_t v) {
   132     assert(_free_chunks_count > 0 &&
   133              _free_chunks_total > 0,
   134              "About to go negative");
   135     Atomic::add_ptr(-1, &_free_chunks_count);
   136     jlong minus_v = (jlong) - (jlong) v;
   137     Atomic::add_ptr(minus_v, &_free_chunks_total);
   138   }
   140   // Debug support
   142   size_t sum_free_chunks();
   143   size_t sum_free_chunks_count();
   145   void locked_verify_free_chunks_total();
   146   void slow_locked_verify_free_chunks_total() {
   147     if (metaspace_slow_verify) {
   148       locked_verify_free_chunks_total();
   149     }
   150   }
   151   void locked_verify_free_chunks_count();
   152   void slow_locked_verify_free_chunks_count() {
   153     if (metaspace_slow_verify) {
   154       locked_verify_free_chunks_count();
   155     }
   156   }
   157   void verify_free_chunks_count();
   159  public:
   161   ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
   163   // Take a chunk from, or return (deallocate) a chunk to, the global freelist.
   164   Metachunk* chunk_freelist_allocate(size_t word_size);
   165   void chunk_freelist_deallocate(Metachunk* chunk);
   167   // Map a size to a list index assuming that there are lists
   168   // for special, small, medium, and humongous chunks.
   169   static ChunkIndex list_index(size_t size);
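   // For example: list_index(SpecializedChunk) is SpecializedIndex,
   // list_index(SmallChunk) and list_index(ClassSmallChunk) are SmallIndex,
   // list_index(MediumChunk) and list_index(ClassMediumChunk) are MediumIndex,
   // and any other size falls into HumongousIndex.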
   171   // Remove the chunk from its freelist.  It is
   172   // expected to be on one of the _free_chunks[] lists.
   173   void remove_chunk(Metachunk* chunk);
   175   // Add the simple linked list of chunks to the freelist of chunks
   176   // of type index.
   177   void return_chunks(ChunkIndex index, Metachunk* chunks);
   179   // Total of the space in the free chunks list
   180   size_t free_chunks_total();
   181   size_t free_chunks_total_in_bytes();
   183   // Number of chunks in the free chunks list
   184   size_t free_chunks_count();
   186   void inc_free_chunks_total(size_t v, size_t count = 1) {
   187     Atomic::add_ptr(count, &_free_chunks_count);
   188     Atomic::add_ptr(v, &_free_chunks_total);
   189   }
   190   ChunkTreeDictionary* humongous_dictionary() {
   191     return &_humongous_dictionary;
   192   }
   194   ChunkList* free_chunks(ChunkIndex index);
   196   // Returns the list for the given chunk word size.
   197   ChunkList* find_free_chunks_list(size_t word_size);
   199   // Add and remove from a list by size.  Selects
   200   // list based on size of chunk.
   201   void free_chunks_put(Metachunk* chunk);
   202   Metachunk* free_chunks_get(size_t chunk_word_size);
   204   // Debug support
   205   void verify();
   206   void slow_verify() {
   207     if (metaspace_slow_verify) {
   208       verify();
   209     }
   210   }
   211   void locked_verify();
   212   void slow_locked_verify() {
   213     if (metaspace_slow_verify) {
   214       locked_verify();
   215     }
   216   }
   217   void verify_free_chunks_total();
   219   void locked_print_free_chunks(outputStream* st);
   220   void locked_print_sum_free_chunks(outputStream* st);
   222   void print_on(outputStream* st);
   223 };
   225 // Used to manage the free list of Metablocks (a block corresponds
   226 // to the allocation of a quantum of metadata).
   227 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   228   BlockTreeDictionary* _dictionary;
   229   static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
   231   // Accessors
   232   BlockTreeDictionary* dictionary() const { return _dictionary; }
   234  public:
   235   BlockFreelist();
   236   ~BlockFreelist();
   238   // Get and return a block to the free list
   239   MetaWord* get_block(size_t word_size);
   240   void return_block(MetaWord* p, size_t word_size);
   242   size_t total_size() {
   243     if (dictionary() == NULL) {
   244       return 0;
   245     } else {
   246       return dictionary()->total_size();
   247     }
   248   }
   250   void print_on(outputStream* st) const;
   251 };
   253 class VirtualSpaceNode : public CHeapObj<mtClass> {
   254   friend class VirtualSpaceList;
   256   // Link to next VirtualSpaceNode
   257   VirtualSpaceNode* _next;
   259   // total in the VirtualSpace
   260   MemRegion _reserved;
   261   ReservedSpace _rs;
   262   VirtualSpace _virtual_space;
   263   MetaWord* _top;
   264   // count of chunks contained in this VirtualSpace
   265   uintx _container_count;
   267   // Convenience functions to access the _virtual_space
   268   char* low()  const { return virtual_space()->low(); }
   269   char* high() const { return virtual_space()->high(); }
   271   // The first Metachunk will be allocated at the bottom of the
   272   // VirtualSpace
   273   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
   275   void inc_container_count();
   276 #ifdef ASSERT
   277   uint container_count_slow();
   278 #endif
   280  public:
   282   VirtualSpaceNode(size_t byte_size);
   283   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   284   ~VirtualSpaceNode();
   286   // Convenience functions for logical bottom and end
   287   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   288   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   290   // address of next available space in _virtual_space;
   291   // Accessors
   292   VirtualSpaceNode* next() { return _next; }
   293   void set_next(VirtualSpaceNode* v) { _next = v; }
   295   void set_reserved(MemRegion const v) { _reserved = v; }
   296   void set_top(MetaWord* v) { _top = v; }
   298   // Accessors
   299   MemRegion* reserved() { return &_reserved; }
   300   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
   302   // Returns true if "word_size" is available in the VirtualSpace
   303   bool is_available(size_t word_size) { return _top + word_size <= end(); }
   305   MetaWord* top() const { return _top; }
   306   void inc_top(size_t word_size) { _top += word_size; }
   308   uintx container_count() { return _container_count; }
   309   void dec_container_count();
   310 #ifdef ASSERT
   311   void verify_container_count();
   312 #endif
   314   // used and capacity in this single entry in the list
   315   size_t used_words_in_vs() const;
   316   size_t capacity_words_in_vs() const;
   317   size_t free_words_in_vs() const;
   319   bool initialize();
   321   // get space from the virtual space
   322   Metachunk* take_from_committed(size_t chunk_word_size);
   324   // Allocate a chunk from the virtual space and return it.
   325   Metachunk* get_chunk_vs(size_t chunk_word_size);
   326   Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
   328   // Expands/shrinks the committed space in a virtual space.  Delegates
   329   // to VirtualSpace
   330   bool expand_by(size_t words, bool pre_touch = false);
   331   bool shrink_by(size_t words);
   333   // In preparation for deleting this node, remove all the chunks
   334   // in the node from any freelist.
   335   void purge(ChunkManager* chunk_manager);
   337 #ifdef ASSERT
   338   // Debug support
   339   static void verify_virtual_space_total();
   340   static void verify_virtual_space_count();
   341   void mangle();
   342 #endif
   344   void print_on(outputStream* st) const;
   345 };
   347   // byte_size is the size of the associated virtualspace.
   348 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   349   // align up to vm allocation granularity
   350   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
   352   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
   353   // a configurable address, generally at the top of the Java heap so other
   354   // memory addresses don't conflict.
   355   if (DumpSharedSpaces) {
   356     char* shared_base = (char*)SharedBaseAddress;
   357     _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
   358     if (_rs.is_reserved()) {
   359       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
   360     } else {
   361       // Get a mmap region anywhere if the SharedBaseAddress fails.
   362       _rs = ReservedSpace(byte_size);
   363     }
   364     MetaspaceShared::set_shared_rs(&_rs);
   365   } else {
   366     _rs = ReservedSpace(byte_size);
   367   }
   369   MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
   370 }
   372 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   373   Metachunk* chunk = first_chunk();
   374   Metachunk* invalid_chunk = (Metachunk*) top();
   375   while (chunk < invalid_chunk ) {
   376     assert(chunk->is_free(), "Should be marked free");
   377     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
   378     chunk_manager->remove_chunk(chunk);
   379     assert(chunk->next() == NULL &&
   380            chunk->prev() == NULL,
   381            "Was not removed from its list");
   382     chunk = (Metachunk*) next;
   383   }
   384 }
   386 #ifdef ASSERT
   387 uint VirtualSpaceNode::container_count_slow() {
   388   uint count = 0;
   389   Metachunk* chunk = first_chunk();
   390   Metachunk* invalid_chunk = (Metachunk*) top();
   391   while (chunk < invalid_chunk ) {
   392     MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
   393     // Don't count the chunks on the free lists.  Those are
   394     // still part of the VirtualSpaceNode but not currently
   395     // counted.
   396     if (!chunk->is_free()) {
   397       count++;
   398     }
   399     chunk = (Metachunk*) next;
   400   }
   401   return count;
   402 }
   403 #endif
   405 // List of VirtualSpaces for metadata allocation.
   406 // Each node has a _next link for the singly linked list and a MemRegion
   407 // recording the total space in its VirtualSpace.
   408 class VirtualSpaceList : public CHeapObj<mtClass> {
   409   friend class VirtualSpaceNode;
   411   enum VirtualSpaceSizes {
   412     VirtualSpaceSize = 256 * K
   413   };
   415   // Global list of virtual spaces
   416   // Head of the list
   417   VirtualSpaceNode* _virtual_space_list;
   418   // virtual space currently being used for allocations
   419   VirtualSpaceNode* _current_virtual_space;
   420   // Free chunk list for all other metadata
   421   ChunkManager      _chunk_manager;
   423   // Can this virtual space list allocate more than one space?  Also used to determine
   424   // whether to allocate unlimited small chunks in this virtual space
   425   bool _is_class;
   426   bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
   428   // Sum of space in all virtual spaces and number of virtual spaces
   429   size_t _virtual_space_total;
   430   size_t _virtual_space_count;
   432   ~VirtualSpaceList();
   434   VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
   436   void set_virtual_space_list(VirtualSpaceNode* v) {
   437     _virtual_space_list = v;
   438   }
   439   void set_current_virtual_space(VirtualSpaceNode* v) {
   440     _current_virtual_space = v;
   441   }
   443   void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
   445   // Get another virtual space and add it to the list.  This
   446   // is typically prompted by a failed attempt to allocate a chunk
   447   // and is typically followed by the allocation of a chunk.
   448   bool grow_vs(size_t vs_word_size);
   450  public:
   451   VirtualSpaceList(size_t word_size);
   452   VirtualSpaceList(ReservedSpace rs);
   454   size_t free_bytes();
   456   Metachunk* get_new_chunk(size_t word_size,
   457                            size_t grow_chunks_by_words,
   458                            size_t medium_chunk_bunch);
   460   // Get the first chunk for a Metaspace.  Used for
   461   // special cases such as the boot class loader, reflection
   462   // class loader and anonymous class loader.
   463   Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
   465   VirtualSpaceNode* current_virtual_space() {
   466     return _current_virtual_space;
   467   }
   469   ChunkManager* chunk_manager() { return &_chunk_manager; }
   470   bool is_class() const { return _is_class; }
   472   // Allocate the first virtualspace.
   473   void initialize(size_t word_size);
   475   size_t virtual_space_total() { return _virtual_space_total; }
   477   void inc_virtual_space_total(size_t v);
   478   void dec_virtual_space_total(size_t v);
   479   void inc_virtual_space_count();
   480   void dec_virtual_space_count();
   482   // Unlink empty VirtualSpaceNodes and free them.
   483   void purge();
   485   // Used and capacity in the entire list of virtual spaces.
   486   // These are global values shared by all Metaspaces
   487   size_t capacity_words_sum();
   488   size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
   489   size_t used_words_sum();
   490   size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
   492   bool contains(const void *ptr);
   494   void print_on(outputStream* st) const;
   496   class VirtualSpaceListIterator : public StackObj {
   497     VirtualSpaceNode* _virtual_spaces;
   498    public:
   499     VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
   500       _virtual_spaces(virtual_spaces) {}
   502     bool repeat() {
   503       return _virtual_spaces != NULL;
   504     }
   506     VirtualSpaceNode* get_next() {
   507       VirtualSpaceNode* result = _virtual_spaces;
   508       if (_virtual_spaces != NULL) {
   509         _virtual_spaces = _virtual_spaces->next();
   510       }
   511       return result;
   512     }
   513   };
   514 };
   516 class Metadebug : AllStatic {
   517   // Debugging support for Metaspaces
   518   static int _deallocate_block_a_lot_count;
   519   static int _deallocate_chunk_a_lot_count;
   520   static int _allocation_fail_alot_count;
   522  public:
   523   static int deallocate_block_a_lot_count() {
   524     return _deallocate_block_a_lot_count;
   525   }
   526   static void set_deallocate_block_a_lot_count(int v) {
   527     _deallocate_block_a_lot_count = v;
   528   }
   529   static void inc_deallocate_block_a_lot_count() {
   530     _deallocate_block_a_lot_count++;
   531   }
   532   static int deallocate_chunk_a_lot_count() {
   533     return _deallocate_chunk_a_lot_count;
   534   }
   535   static void reset_deallocate_chunk_a_lot_count() {
   536     _deallocate_chunk_a_lot_count = 1;
   537   }
   538   static void inc_deallocate_chunk_a_lot_count() {
   539     _deallocate_chunk_a_lot_count++;
   540   }
   542   static void init_allocation_fail_alot_count();
   543 #ifdef ASSERT
   544   static bool test_metadata_failure();
   545 #endif
   547   static void deallocate_chunk_a_lot(SpaceManager* sm,
   548                                      size_t chunk_word_size);
   549   static void deallocate_block_a_lot(SpaceManager* sm,
   550                                      size_t chunk_word_size);
   552 };
   554 int Metadebug::_deallocate_block_a_lot_count = 0;
   555 int Metadebug::_deallocate_chunk_a_lot_count = 0;
   556 int Metadebug::_allocation_fail_alot_count = 0;
   558 //  SpaceManager - used by Metaspace to handle allocations
   559 class SpaceManager : public CHeapObj<mtClass> {
   560   friend class Metaspace;
   561   friend class Metadebug;
   563  private:
   565   // protects allocations and contains.
   566   Mutex* const _lock;
   568   // Type of metadata allocated.
   569   Metaspace::MetadataType _mdtype;
   571   // Chunk related size
   572   size_t _medium_chunk_bunch;
   574   // List of chunks in use by this SpaceManager.  Allocations
   575   // are done from the current chunk.  The list is used for deallocating
   576   // chunks when the SpaceManager is freed.
   577   Metachunk* _chunks_in_use[NumberOfInUseLists];
   578   Metachunk* _current_chunk;
   580   // Virtual space where allocation comes from.
   581   VirtualSpaceList* _vs_list;
   583   // Number of small chunks to allocate to a manager
   584   // If class space manager, small chunks are unlimited
   585   static uint const _small_chunk_limit;
   587   // Sum of all space in allocated chunks
   588   size_t _allocated_blocks_words;
   590   // Sum of all allocated chunks
   591   size_t _allocated_chunks_words;
   592   size_t _allocated_chunks_count;
   594   // Free lists of blocks are per SpaceManager since they
   595   // are assumed to be in chunks in use by the SpaceManager
   596   // and all chunks in use by a SpaceManager are freed when
   597   // the class loader using the SpaceManager is collected.
   598   BlockFreelist _block_freelists;
   600   // protects virtualspace and chunk expansions
   601   static const char*  _expand_lock_name;
   602   static const int    _expand_lock_rank;
   603   static Mutex* const _expand_lock;
   605  private:
   606   // Accessors
   607   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
   608   void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
   610   BlockFreelist* block_freelists() const {
   611     return (BlockFreelist*) &_block_freelists;
   612   }
   614   Metaspace::MetadataType mdtype() { return _mdtype; }
   615   VirtualSpaceList* vs_list() const    { return _vs_list; }
   617   Metachunk* current_chunk() const { return _current_chunk; }
   618   void set_current_chunk(Metachunk* v) {
   619     _current_chunk = v;
   620   }
   622   Metachunk* find_current_chunk(size_t word_size);
   624   // Add chunk to the list of chunks in use
   625   void add_chunk(Metachunk* v, bool make_current);
   627   Mutex* lock() const { return _lock; }
   629   const char* chunk_size_name(ChunkIndex index) const;
   631  protected:
   632   void initialize();
   634  public:
   635   SpaceManager(Metaspace::MetadataType mdtype,
   636                Mutex* lock,
   637                VirtualSpaceList* vs_list);
   638   ~SpaceManager();
   640   enum ChunkMultiples {
   641     MediumChunkMultiple = 4
   642   };
   644   // Accessors
   645   size_t specialized_chunk_size() { return SpecializedChunk; }
   646   size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
   647   size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
   648   size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
   650   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   651   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
   652   size_t allocated_chunks_words() const { return _allocated_chunks_words; }
   653   size_t allocated_chunks_count() const { return _allocated_chunks_count; }
   655   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
   657   static Mutex* expand_lock() { return _expand_lock; }
   659   // Increment the per Metaspace and global running sums for Metachunks
   660   // by the given size.  This is used when a Metachunk is added to
   661   // the in-use list.
   662   void inc_size_metrics(size_t words);
   663   // Increment the per Metaspace and global running sums for Metablocks by the given
   664   // size.  This is used when a Metablock is allocated.
   665   void inc_used_metrics(size_t words);
   666   // Delete the portion of the running sums for this SpaceManager. That is,
   667   // the global running sums for the Metachunks and Metablocks are
   668   // decremented for all the Metachunks in-use by this SpaceManager.
   669   void dec_total_from_size_metrics();
   671   // Set the sizes for the initial chunks.
   672   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
   673                                size_t* chunk_word_size,
   674                                size_t* class_chunk_word_size);
   676   size_t sum_capacity_in_chunks_in_use() const;
   677   size_t sum_used_in_chunks_in_use() const;
   678   size_t sum_free_in_chunks_in_use() const;
   679   size_t sum_waste_in_chunks_in_use() const;
   680   size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
   682   size_t sum_count_in_chunks_in_use();
   683   size_t sum_count_in_chunks_in_use(ChunkIndex i);
   685   Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
   687   // Block allocation and deallocation.
   688   // Allocates a block from the current chunk
   689   MetaWord* allocate(size_t word_size);
   691   // Helper for allocations
   692   MetaWord* allocate_work(size_t word_size);
   694   // Returns a block to the per manager freelist
   695   void deallocate(MetaWord* p, size_t word_size);
   697   // Based on the allocation size and a minimum chunk size,
   698   // calculate the chunk size to allocate (for expanding space for chunk allocation).
   699   size_t calc_chunk_size(size_t allocation_word_size);
   701   // Called when an allocation from the current chunk fails.
   702   // Gets a new chunk (may require getting a new virtual space),
   703   // and allocates from that chunk.
   704   MetaWord* grow_and_allocate(size_t word_size);
   706   // debugging support.
   708   void dump(outputStream* const out) const;
   709   void print_on(outputStream* st) const;
   710   void locked_print_chunks_in_use_on(outputStream* st) const;
   712   void verify();
   713   void verify_chunk_size(Metachunk* chunk);
   714   NOT_PRODUCT(void mangle_freed_chunks();)
   715 #ifdef ASSERT
   716   void verify_allocated_blocks_words();
   717 #endif
   719   size_t get_raw_word_size(size_t word_size) {
   720     // If only the dictionary is going to be used (i.e., no
   721     // indexed free list), then there is a minimum size requirement.
   722     // MinChunkSize is a placeholder for the real minimum size JJJ
   723     size_t byte_size = word_size * BytesPerWord;
   725     size_t byte_size_with_overhead = byte_size + Metablock::overhead();
   727     size_t raw_bytes_size = MAX2(byte_size_with_overhead,
   728                                  Metablock::min_block_byte_size());
   729     raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
   730     size_t raw_word_size = raw_bytes_size / BytesPerWord;
   731     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
   733     return raw_word_size;
   734   }
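   // Worked example (illustrative, assuming 8-byte words, a one-word Metablock
   // overhead and 16-byte arena alignment): a 3-word request is 24 bytes,
   // 32 bytes with overhead, still 32 bytes after alignment, so 4 words are charged.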
   735 };
   737 uint const SpaceManager::_small_chunk_limit = 4;
   739 const char* SpaceManager::_expand_lock_name =
   740   "SpaceManager chunk allocation lock";
   741 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
   742 Mutex* const SpaceManager::_expand_lock =
   743   new Mutex(SpaceManager::_expand_lock_rank,
   744             SpaceManager::_expand_lock_name,
   745             Mutex::_allow_vm_block_flag);
   747 void VirtualSpaceNode::inc_container_count() {
   748   assert_lock_strong(SpaceManager::expand_lock());
   749   _container_count++;
   750   assert(_container_count == container_count_slow(),
   751          err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
   752                  " container_count_slow() " SIZE_FORMAT,
   753                  _container_count, container_count_slow()));
   754 }
   756 void VirtualSpaceNode::dec_container_count() {
   757   assert_lock_strong(SpaceManager::expand_lock());
   758   _container_count--;
   759 }
   761 #ifdef ASSERT
   762 void VirtualSpaceNode::verify_container_count() {
   763   assert(_container_count == container_count_slow(),
   764     err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
   765             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
   766 }
   767 #endif
   769 // BlockFreelist methods
   771 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
   773 BlockFreelist::~BlockFreelist() {
   774   if (_dictionary != NULL) {
   775     if (Verbose && TraceMetadataChunkAllocation) {
   776       _dictionary->print_free_lists(gclog_or_tty);
   777     }
   778     delete _dictionary;
   779   }
   780 }
   782 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
   783   Metablock* block = (Metablock*) p;
   784   block->set_word_size(word_size);
   785   block->set_prev(NULL);
   786   block->set_next(NULL);
   788   return block;
   789 }
   791 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
   792   Metablock* free_chunk = initialize_free_chunk(p, word_size);
   793   if (dictionary() == NULL) {
   794    _dictionary = new BlockTreeDictionary();
   795   }
   796   dictionary()->return_chunk(free_chunk);
   797 }
   799 MetaWord* BlockFreelist::get_block(size_t word_size) {
   800   if (dictionary() == NULL) {
   801     return NULL;
   802   }
   804   if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
   805     // Dark matter.  Too small for dictionary.
   806     return NULL;
   807   }
   809   Metablock* free_block =
   810     dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
   811   if (free_block == NULL) {
   812     return NULL;
   813   }
   815   return (MetaWord*) free_block;
   816 }
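// Example round trip (illustrative sketch; assumes 'bf' is a BlockFreelist, 'p'
// points to word_size freed words, and word_size is at least
// TreeChunk<Metablock, FreeList>::min_size()):
#if 0
  bf.return_block(p, word_size);          // the freed space is wrapped in a Metablock and kept in the dictionary
  MetaWord* q = bf.get_block(word_size);  // a later request of the same size may get that space back
#endif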
   818 void BlockFreelist::print_on(outputStream* st) const {
   819   if (dictionary() == NULL) {
   820     return;
   821   }
   822   dictionary()->print_free_lists(st);
   823 }
   825 // VirtualSpaceNode methods
   827 VirtualSpaceNode::~VirtualSpaceNode() {
   828   _rs.release();
   829 #ifdef ASSERT
   830   size_t word_size = sizeof(*this) / BytesPerWord;
   831   Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
   832 #endif
   833 }
   835 size_t VirtualSpaceNode::used_words_in_vs() const {
   836   return pointer_delta(top(), bottom(), sizeof(MetaWord));
   837 }
   839 // Space committed in the VirtualSpace
   840 size_t VirtualSpaceNode::capacity_words_in_vs() const {
   841   return pointer_delta(end(), bottom(), sizeof(MetaWord));
   842 }
   844 size_t VirtualSpaceNode::free_words_in_vs() const {
   845   return pointer_delta(end(), top(), sizeof(MetaWord));
   846 }
   848 // Allocates the chunk from the virtual space only.
   849 // This interface is also used internally for debugging.  Not all
   850 // chunks removed here are necessarily used for allocation.
   851 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
   852   // Bottom of the new chunk
   853   MetaWord* chunk_limit = top();
   854   assert(chunk_limit != NULL, "Not safe to call this method");
   856   if (!is_available(chunk_word_size)) {
   857     if (TraceMetadataChunkAllocation) {
   858       tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
   859       // Dump some information about the virtual space that is nearly full
   860       print_on(tty);
   861     }
   862     return NULL;
   863   }
   865   // Take the space  (bump top on the current virtual space).
   866   inc_top(chunk_word_size);
   868   // Initialize the chunk
   869   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
   870   return result;
   871 }
   874 // Expand the virtual space (commit more of the reserved space)
   875 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
   876   size_t bytes = words * BytesPerWord;
   877   bool result =  virtual_space()->expand_by(bytes, pre_touch);
   878   if (TraceMetavirtualspaceAllocation && !result) {
   879     gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
   880                            "for byte size " SIZE_FORMAT, bytes);
   881     virtual_space()->print();
   882   }
   883   return result;
   884 }
   886 // Shrink the virtual space (uncommit part of the committed space)
   887 bool VirtualSpaceNode::shrink_by(size_t words) {
   888   size_t bytes = words * BytesPerWord;
   889   virtual_space()->shrink_by(bytes);
   890   return true;
   891 }
   893 // Allocate another chunk from this virtual space node.
   895 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   896   assert_lock_strong(SpaceManager::expand_lock());
   897   Metachunk* result = take_from_committed(chunk_word_size);
   898   if (result != NULL) {
   899     inc_container_count();
   900   }
   901   return result;
   902 }
   904 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
   905   assert_lock_strong(SpaceManager::expand_lock());
   907   Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
   909   if (new_chunk == NULL) {
   910     // Only a small part of the virtualspace is committed when first
   911     // allocated so committing more here can be expected.
   912     size_t page_size_words = os::vm_page_size() / BytesPerWord;
   913     size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
   914                                                     page_size_words);
   915     expand_by(aligned_expand_vs_by_words, false);
   916     new_chunk = get_chunk_vs(chunk_word_size);
   917   }
   918   return new_chunk;
   919 }
   921 bool VirtualSpaceNode::initialize() {
   923   if (!_rs.is_reserved()) {
   924     return false;
   925   }
   927   // An allocation out of this Virtualspace that is larger
   928   // than an initial commit size can waste that initial committed
   929   // space.
   930   size_t committed_byte_size = 0;
   931   bool result = virtual_space()->initialize(_rs, committed_byte_size);
   932   if (result) {
   933     set_top((MetaWord*)virtual_space()->low());
   934     set_reserved(MemRegion((HeapWord*)_rs.base(),
   935                  (HeapWord*)(_rs.base() + _rs.size())));
   937     assert(reserved()->start() == (HeapWord*) _rs.base(),
   938       err_msg("Reserved start was not set properly " PTR_FORMAT
   939         " != " PTR_FORMAT, reserved()->start(), _rs.base()));
   940     assert(reserved()->word_size() == _rs.size() / BytesPerWord,
   941       err_msg("Reserved size was not set properly " SIZE_FORMAT
   942         " != " SIZE_FORMAT, reserved()->word_size(),
   943         _rs.size() / BytesPerWord));
   944   }
   946   return result;
   947 }
   949 void VirtualSpaceNode::print_on(outputStream* st) const {
   950   size_t used = used_words_in_vs();
   951   size_t capacity = capacity_words_in_vs();
   952   VirtualSpace* vs = virtual_space();
   953   st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
   954            "[" PTR_FORMAT ", " PTR_FORMAT ", "
   955            PTR_FORMAT ", " PTR_FORMAT ")",
   956            vs, capacity / K,
   957            capacity == 0 ? 0 : used * 100 / capacity,
   958            bottom(), top(), end(),
   959            vs->high_boundary());
   960 }
   962 #ifdef ASSERT
   963 void VirtualSpaceNode::mangle() {
   964   size_t word_size = capacity_words_in_vs();
   965   Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
   966 }
   967 #endif // ASSERT
   969 // VirtualSpaceList methods
   970 // Space allocated from the VirtualSpace
   972 VirtualSpaceList::~VirtualSpaceList() {
   973   VirtualSpaceListIterator iter(virtual_space_list());
   974   while (iter.repeat()) {
   975     VirtualSpaceNode* vsl = iter.get_next();
   976     delete vsl;
   977   }
   978 }
   980 void VirtualSpaceList::inc_virtual_space_total(size_t v) {
   981   assert_lock_strong(SpaceManager::expand_lock());
   982   _virtual_space_total = _virtual_space_total + v;
   983 }
   984 void VirtualSpaceList::dec_virtual_space_total(size_t v) {
   985   assert_lock_strong(SpaceManager::expand_lock());
   986   _virtual_space_total = _virtual_space_total - v;
   987 }
   989 void VirtualSpaceList::inc_virtual_space_count() {
   990   assert_lock_strong(SpaceManager::expand_lock());
   991   _virtual_space_count++;
   992 }
   993 void VirtualSpaceList::dec_virtual_space_count() {
   994   assert_lock_strong(SpaceManager::expand_lock());
   995   _virtual_space_count--;
   996 }
   998 void ChunkManager::remove_chunk(Metachunk* chunk) {
   999   size_t word_size = chunk->word_size();
  1000   ChunkIndex index = list_index(word_size);
  1001   if (index != HumongousIndex) {
  1002     free_chunks(index)->remove_chunk(chunk);
  1003   } else {
  1004     humongous_dictionary()->remove_chunk(chunk);
  1005   }
  1007   // Chunk is being removed from the chunks free list.
  1008   dec_free_chunks_total(chunk->capacity_word_size());
  1009 }
  1011 // Walk the list of VirtualSpaceNodes and delete
  1012 // nodes with a 0 container_count.  Remove Metachunks in
  1013 // the node from their respective freelists.
  1014 void VirtualSpaceList::purge() {
  1015   assert_lock_strong(SpaceManager::expand_lock());
  1016   // Don't use a VirtualSpaceListIterator because this
  1017   // list is being changed and a straightforward use of an iterator is not safe.
  1018   VirtualSpaceNode* purged_vsl = NULL;
  1019   VirtualSpaceNode* prev_vsl = virtual_space_list();
  1020   VirtualSpaceNode* next_vsl = prev_vsl;
  1021   while (next_vsl != NULL) {
  1022     VirtualSpaceNode* vsl = next_vsl;
  1023     next_vsl = vsl->next();
  1024     // Don't free the current virtual space since it will likely
  1025     // be needed soon.
  1026     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
  1027       // Unlink it from the list
  1028       if (prev_vsl == vsl) {
  1029         // This is the case of the current node being the first node.
  1030         assert(vsl == virtual_space_list(), "Expected to be the first node");
  1031         set_virtual_space_list(vsl->next());
  1032       } else {
  1033         prev_vsl->set_next(vsl->next());
  1034       }
  1036       vsl->purge(chunk_manager());
  1037       dec_virtual_space_total(vsl->reserved()->word_size());
  1038       dec_virtual_space_count();
  1039       purged_vsl = vsl;
  1040       delete vsl;
  1041     } else {
  1042       prev_vsl = vsl;
  1043     }
  1044   }
  1045 #ifdef ASSERT
  1046   if (purged_vsl != NULL) {
  1047   // List should be stable enough to use an iterator here.
  1048   VirtualSpaceListIterator iter(virtual_space_list());
  1049     while (iter.repeat()) {
  1050       VirtualSpaceNode* vsl = iter.get_next();
  1051       assert(vsl != purged_vsl, "Purge of vsl failed");
  1052     }
  1053   }
  1054 #endif
  1055 }
  1057 size_t VirtualSpaceList::used_words_sum() {
  1058   size_t allocated_by_vs = 0;
  1059   VirtualSpaceListIterator iter(virtual_space_list());
  1060   while (iter.repeat()) {
  1061     VirtualSpaceNode* vsl = iter.get_next();
  1062     // Sum used region [bottom, top) in each virtualspace
  1063     allocated_by_vs += vsl->used_words_in_vs();
  1064   }
  1065   assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
  1066     err_msg("Total in free chunks " SIZE_FORMAT
  1067             " greater than total from virtual_spaces " SIZE_FORMAT,
  1068             allocated_by_vs, chunk_manager()->free_chunks_total()));
  1069   size_t used =
  1070     allocated_by_vs - chunk_manager()->free_chunks_total();
  1071   return used;
  1072 }
  1074 // Space available in all MetadataVirtualspaces allocated
  1075 // for metadata.  This is the upper limit on the capacity
  1076 // of chunks allocated out of all the MetadataVirtualspaces.
  1077 size_t VirtualSpaceList::capacity_words_sum() {
  1078   size_t capacity = 0;
  1079   VirtualSpaceListIterator iter(virtual_space_list());
  1080   while (iter.repeat()) {
  1081     VirtualSpaceNode* vsl = iter.get_next();
  1082     capacity += vsl->capacity_words_in_vs();
  1083   }
  1084   return capacity;
  1085 }
  1087 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  1088                                    _is_class(false),
  1089                                    _virtual_space_list(NULL),
  1090                                    _current_virtual_space(NULL),
  1091                                    _virtual_space_total(0),
  1092                                    _virtual_space_count(0) {
  1093   MutexLockerEx cl(SpaceManager::expand_lock(),
  1094                    Mutex::_no_safepoint_check_flag);
  1095   bool initialization_succeeded = grow_vs(word_size);
  1097   _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  1098   _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  1099   _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  1100   assert(initialization_succeeded,
  1101     " VirtualSpaceList initialization should not fail");
  1102 }
  1104 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  1105                                    _is_class(true),
  1106                                    _virtual_space_list(NULL),
  1107                                    _current_virtual_space(NULL),
  1108                                    _virtual_space_total(0),
  1109                                    _virtual_space_count(0) {
  1110   MutexLockerEx cl(SpaceManager::expand_lock(),
  1111                    Mutex::_no_safepoint_check_flag);
  1112   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  1113   bool succeeded = class_entry->initialize();
  1114   _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  1115   _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  1116   _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  1117   assert(succeeded, " VirtualSpaceList initialization should not fail");
  1118   link_vs(class_entry, rs.size()/BytesPerWord);
  1119 }
  1121 size_t VirtualSpaceList::free_bytes() {
  1122   return virtual_space_list()->free_words_in_vs() * BytesPerWord;
  1123 }
  1125 // Allocate another meta virtual space and add it to the list.
  1126 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  1127   assert_lock_strong(SpaceManager::expand_lock());
  1128   if (vs_word_size == 0) {
  1129     return false;
  1130   }
  1131   // Reserve the space
  1132   size_t vs_byte_size = vs_word_size * BytesPerWord;
  1133   assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
  1135   // Allocate the meta virtual space and initialize it.
  1136   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  1137   if (!new_entry->initialize()) {
  1138     delete new_entry;
  1139     return false;
  1140   } else {
  1141     // ensure lock-free iteration sees fully initialized node
  1142     OrderAccess::storestore();
  1143     link_vs(new_entry, vs_word_size);
  1144     return true;
  1145   }
  1146 }
  1148 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  1149   if (virtual_space_list() == NULL) {
  1150       set_virtual_space_list(new_entry);
  1151   } else {
  1152     current_virtual_space()->set_next(new_entry);
  1153   }
  1154   set_current_virtual_space(new_entry);
  1155   inc_virtual_space_total(vs_word_size);
  1156   inc_virtual_space_count();
  1157 #ifdef ASSERT
  1158   new_entry->mangle();
  1159 #endif
  1160   if (TraceMetavirtualspaceAllocation && Verbose) {
  1161     VirtualSpaceNode* vsl = current_virtual_space();
  1162     vsl->print_on(tty);
  1163   }
  1164 }
  1166 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  1167                                            size_t grow_chunks_by_words,
  1168                                            size_t medium_chunk_bunch) {
  1170   // Get a chunk from the chunk freelist
  1171   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  1173   if (next != NULL) {
  1174     next->container()->inc_container_count();
  1175   } else {
  1176     // Allocate a chunk out of the current virtual space.
  1177     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1178   }
  1180   if (next == NULL) {
  1181     // Not enough room in current virtual space.  Try to commit
  1182     // more space.
  1183     size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
  1184                                      grow_chunks_by_words);
  1185     size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1186     size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
  1187                                                         page_size_words);
  1188     bool vs_expanded =
  1189       current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
  1190     if (!vs_expanded) {
  1191       // Should the capacity of the metaspaces be expanded for
  1192       // this allocation?  If it's the virtual space for classes and is
  1193       // being used for CompressedHeaders, don't allocate a new virtualspace.
  1194       if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  1195         // Get another virtual space.
  1196           size_t grow_vs_words =
  1197             MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  1198         if (grow_vs(grow_vs_words)) {
  1199           // Got it.  It's on the list now.  Get a chunk from it.
  1200           next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
  1201         }
  1202       } else {
  1203         // Allocation will fail and induce a GC
  1204         if (TraceMetadataChunkAllocation && Verbose) {
  1205           gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
  1206             " Fail instead of expand the metaspace");
  1207         }
  1208       }
  1209     } else {
  1210       // The virtual space expanded, get a new chunk
  1211       next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  1212       assert(next != NULL, "Just expanded, should succeed");
  1213     }
  1214   }
  1216   assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
  1217          "New chunk is still on some list");
  1218   return next;
  1219 }
  1221 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
  1222                                                       size_t chunk_bunch) {
  1223   // Get a chunk from the chunk freelist
  1224   Metachunk* new_chunk = get_new_chunk(chunk_word_size,
  1225                                        chunk_word_size,
  1226                                        chunk_bunch);
  1227   return new_chunk;
  1228 }
  1230 void VirtualSpaceList::print_on(outputStream* st) const {
  1231   if (TraceMetadataChunkAllocation && Verbose) {
  1232     VirtualSpaceListIterator iter(virtual_space_list());
  1233     while (iter.repeat()) {
  1234       VirtualSpaceNode* node = iter.get_next();
  1235       node->print_on(st);
  1236     }
  1237   }
  1238 }
  1240 bool VirtualSpaceList::contains(const void *ptr) {
  1241   VirtualSpaceNode* list = virtual_space_list();
  1242   VirtualSpaceListIterator iter(list);
  1243   while (iter.repeat()) {
  1244     VirtualSpaceNode* node = iter.get_next();
  1245     if (node->reserved()->contains(ptr)) {
  1246       return true;
  1247     }
  1248   }
  1249   return false;
  1250 }
  1253 // MetaspaceGC methods
  1255 // VM_CollectForMetadataAllocation is the vm operation used to GC.
  1256 // Within the VM operation after the GC the attempt to allocate the metadata
  1257 // should succeed.  If the GC did not free enough space for the metaspace
  1258 // allocation, the HWM is increased so that another virtualspace will be
  1259 // allocated for the metadata.  With perm gen the increase in the perm
  1260 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
  1261 // metaspace policy uses those as the small and large steps for the HWM.
  1262 //
  1263 // After the GC the compute_new_size() for MetaspaceGC is called to
  1264 // resize the capacity of the metaspaces.  The current implementation
  1265 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
  1266 // to resize the Java heap by some GC's.  New flags can be implemented
  1267 // if really needed.  MinMetaspaceFreeRatio is used to calculate how much
  1268 // free space is desirable in the metaspace capacity to decide how much
  1269 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
  1270 // free space is desirable in the metaspace capacity before decreasing
  1271 // the HWM.
  1273 // Calculate the amount to increase the high water mark (HWM).
  1274 // Increase by a minimum amount (MinMetaspaceExpansion) so that
  1275 // another expansion is not requested too soon.  If that is not
  1276 // enough to satisfy the allocation (i.e. big enough for a word_size
  1277 // allocation), increase by MaxMetaspaceExpansion.  If that is still
  1278 // not enough, expand by the size of the allocation (word_size) plus
  1279 // some.
  1280 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  1281   size_t before_inc = MetaspaceGC::capacity_until_GC();
  1282   size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  1283   size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  1284   size_t page_size_words = os::vm_page_size() / BytesPerWord;
  1285   size_t size_delta_words = align_size_up(word_size, page_size_words);
  1286   size_t delta_words = MAX2(size_delta_words, min_delta_words);
  1287   if (delta_words > min_delta_words) {
  1288     // Don't want to hit the high water mark on the next
  1289     // allocation so make the delta greater than just enough
  1290     // for this allocation.
  1291     delta_words = MAX2(delta_words, max_delta_words);
  1292     if (delta_words > max_delta_words) {
  1293       // This allocation is large but the next ones are probably not
  1294       // so increase by the minimum.
  1295       delta_words = delta_words + min_delta_words;
  1296     }
  1297   }
  1298   return delta_words;
  1299 }
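// Worked example (illustrative, assuming for the sake of arithmetic that
// MinMetaspaceExpansion is 256K bytes, MaxMetaspaceExpansion is 4M bytes,
// words are 8 bytes and pages are 4K): a 1000-word request rounds up to 1024
// words and is then raised to the 32K-word (256K-byte) minimum; a 100000-word
// request exceeds the minimum and is raised to the 512K-word (4M-byte) maximum;
// a 600000-word request is larger still, so the minimum is added on top,
// giving 632832 words.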
  1301 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
  1303   // If the user wants a limit, impose one.
  1304   // The reason for someone using this flag is to limit reserved space.  So
  1305   // for non-class virtual space, compare against virtual spaces that are reserved.
  1306   // For class virtual space, we only compare against the committed space, not
  1307   // reserved space, because this is a larger space prereserved for compressed
  1308   // class pointers.
  1309   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  1310     size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
  1311               MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  1312     if (real_allocated >= MaxMetaspaceSize) {
  1313       return false;
  1314     }
  1315   }
  1317   // Class virtual space should always be expanded.  Call GC for the other
  1318   // metadata virtual space.
  1319   if (Metaspace::using_class_space() &&
  1320       (vsl == Metaspace::class_space_list())) return true;
  1322   // If this is part of an allocation after a GC, expand
  1323   // unconditionally.
  1324   if (MetaspaceGC::expand_after_GC()) {
  1325     return true;
  1326   }
  1329   // If the capacity is below the minimum capacity, allow the
  1330   // expansion.  Also set the high-water-mark (capacity_until_GC)
  1331   // to that minimum capacity so that a GC will not be induced
  1332   // until that minimum capacity is exceeded.
  1333   size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  1334   size_t metaspace_size_bytes = MetaspaceSize;
  1335   if (committed_capacity_bytes < metaspace_size_bytes ||
  1336       capacity_until_GC() == 0) {
  1337     set_capacity_until_GC(metaspace_size_bytes);
  1338     return true;
  1339   } else {
  1340     if (committed_capacity_bytes < capacity_until_GC()) {
  1341       return true;
  1342     } else {
  1343       if (TraceMetadataChunkAllocation && Verbose) {
  1344         gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
  1345                         "  capacity_until_GC " SIZE_FORMAT
  1346                         "  allocated_capacity_bytes " SIZE_FORMAT,
  1347                         word_size,
  1348                         capacity_until_GC(),
  1349                         MetaspaceAux::allocated_capacity_bytes());
  1350       }
  1351       return false;
  1352     }
  1353   }
  1354 }
  1358 void MetaspaceGC::compute_new_size() {
  1359   assert(_shrink_factor <= 100, "invalid shrink factor");
  1360   uint current_shrink_factor = _shrink_factor;
  1361   _shrink_factor = 0;
  1363   // Until a faster way of calculating the "used" quantity is implemented,
  1364   // use "capacity".
  1365   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  1366   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
  1368   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  1369   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1371   const double min_tmp = used_after_gc / maximum_used_percentage;
  1372   size_t minimum_desired_capacity =
  1373     (size_t)MIN2(min_tmp, double(max_uintx));
  1374   // Don't shrink less than the initial generation size
  1375   minimum_desired_capacity = MAX2(minimum_desired_capacity,
  1376                                   MetaspaceSize);
  1378   if (PrintGCDetails && Verbose) {
  1379     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
  1380     gclog_or_tty->print_cr("  "
  1381                   "  minimum_free_percentage: %6.2f"
  1382                   "  maximum_used_percentage: %6.2f",
  1383                   minimum_free_percentage,
  1384                   maximum_used_percentage);
  1385     gclog_or_tty->print_cr("  "
  1386                   "   used_after_gc       : %6.1fKB",
  1387                   used_after_gc / (double) K);
  1388   }
  1391   size_t shrink_bytes = 0;
  1392   if (capacity_until_GC < minimum_desired_capacity) {
  1393     // If we have less capacity below the metaspace HWM, then
  1394     // increment the HWM.
  1395     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
  1396     // Don't expand unless it's significant
  1397     if (expand_bytes >= MinMetaspaceExpansion) {
  1398       MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
  1399     }
  1400     if (PrintGCDetails && Verbose) {
  1401       size_t new_capacity_until_GC = capacity_until_GC;
  1402       gclog_or_tty->print_cr("    expanding:"
  1403                     "  minimum_desired_capacity: %6.1fKB"
  1404                     "  expand_bytes: %6.1fKB"
  1405                     "  MinMetaspaceExpansion: %6.1fKB"
  1406                     "  new metaspace HWM:  %6.1fKB",
  1407                     minimum_desired_capacity / (double) K,
  1408                     expand_bytes / (double) K,
  1409                     MinMetaspaceExpansion / (double) K,
  1410                     new_capacity_until_GC / (double) K);
  1411     }
  1412     return;
  1413   }
  1415   // No expansion, now see if we want to shrink
  1416   // We would never want to shrink more than this
  1417   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  1418   assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
  1419     max_shrink_bytes));
  1421   // Should shrinking be considered?
  1422   if (MaxMetaspaceFreeRatio < 100) {
  1423     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
  1424     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1425     const double max_tmp = used_after_gc / minimum_used_percentage;
  1426     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
  1427     maximum_desired_capacity = MAX2(maximum_desired_capacity,
  1428                                     MetaspaceSize);
  1429     if (PrintGCDetails && Verbose) {
  1430       gclog_or_tty->print_cr("  "
  1431                              "  maximum_free_percentage: %6.2f"
  1432                              "  minimum_used_percentage: %6.2f",
  1433                              maximum_free_percentage,
  1434                              minimum_used_percentage);
  1435       gclog_or_tty->print_cr("  "
  1436                              "  minimum_desired_capacity: %6.1fKB"
  1437                              "  maximum_desired_capacity: %6.1fKB",
  1438                              minimum_desired_capacity / (double) K,
  1439                              maximum_desired_capacity / (double) K);
  1442     assert(minimum_desired_capacity <= maximum_desired_capacity,
  1443            "sanity check");
  1445     if (capacity_until_GC > maximum_desired_capacity) {
  1446       // Capacity too large, compute shrinking size
  1447       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
  1448       // We don't want to shrink all the way back to initSize if people call
  1449       // System.gc(), because some programs do that between "phases" and then
  1450       // we'd just have to grow the metaspace again for the next phase.  So we
  1451       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
  1452       // on the third call, and 100% by the fourth call.  But if we recompute
  1453       // size without shrinking, it goes back to 0%.
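      // For example (illustrative numbers only): if every recomputation
      // finds capacity_until_GC 40*M above maximum_desired_capacity,
      // successive calls shrink by 0*M, 4*M, 16*M and finally 40*M as
      // current_shrink_factor steps through 0, 10, 40 and 100.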
  1454       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
  1455       assert(shrink_bytes <= max_shrink_bytes,
  1456         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  1457           shrink_bytes, max_shrink_bytes));
  1458       if (current_shrink_factor == 0) {
  1459         _shrink_factor = 10;
  1460       } else {
  1461         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
  1463       if (PrintGCDetails && Verbose) {
  1464         gclog_or_tty->print_cr("  "
  1465                       "  shrinking:"
  1466                       "  initSize: %.1fK"
  1467                       "  maximum_desired_capacity: %.1fK",
  1468                       MetaspaceSize / (double) K,
  1469                       maximum_desired_capacity / (double) K);
  1470         gclog_or_tty->print_cr("  "
  1471                       "  shrink_bytes: %.1fK"
  1472                       "  current_shrink_factor: %d"
  1473                       "  new shrink factor: %d"
  1474                       "  MinMetaspaceExpansion: %.1fK",
  1475                       shrink_bytes / (double) K,
  1476                       current_shrink_factor,
  1477                       _shrink_factor,
  1478                       MinMetaspaceExpansion / (double) K);
  1483   // Don't shrink unless it's significant
  1484   if (shrink_bytes >= MinMetaspaceExpansion &&
  1485       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
  1486     MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  1490 // Metadebug methods
  1492 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
  1493                                        size_t chunk_word_size){
  1494 #ifdef ASSERT
  1495   VirtualSpaceList* vsl = sm->vs_list();
  1496   if (MetaDataDeallocateALot &&
  1497       Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  1498     Metadebug::reset_deallocate_chunk_a_lot_count();
  1499     for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
  1500       Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
  1501       if (dummy_chunk == NULL) {
  1502         break;
  1504       vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  1506       if (TraceMetadataChunkAllocation && Verbose) {
  1507         gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  1508                                sm->sum_count_in_chunks_in_use());
  1509         dummy_chunk->print_on(gclog_or_tty);
  1510         gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  1511                                vsl->chunk_manager()->free_chunks_total(),
  1512                                vsl->chunk_manager()->free_chunks_count());
  1515   } else {
  1516     Metadebug::inc_deallocate_chunk_a_lot_count();
  1518 #endif
  1521 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
  1522                                        size_t raw_word_size){
  1523 #ifdef ASSERT
  1524   if (MetaDataDeallocateALot &&
  1525         Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  1526     Metadebug::set_deallocate_block_a_lot_count(0);
  1527     for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
  1528       MetaWord* dummy_block = sm->allocate_work(raw_word_size);
  1529       if (dummy_block == 0) {
  1530         break;
  1532       sm->deallocate(dummy_block, raw_word_size);
  1534   } else {
  1535     Metadebug::inc_deallocate_block_a_lot_count();
  1537 #endif
  1540 void Metadebug::init_allocation_fail_alot_count() {
  1541   if (MetadataAllocationFailALot) {
  1542     _allocation_fail_alot_count =
  1543       1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
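    // The expression above draws a count in roughly
    // [1, MetadataAllocationFailALotInterval]: os::random() yields a value
    // in [0, max_jint], so random()/(max_jint+1.0) lies in [0, 1).  For
    // example (illustrative), an interval of 1000 with random() near
    // max_jint/2 schedules the next injected failure about 501 allocations
    // out.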
  1547 #ifdef ASSERT
  1548 bool Metadebug::test_metadata_failure() {
  1549   if (MetadataAllocationFailALot &&
  1550       Threads::is_vm_complete()) {
  1551     if (_allocation_fail_alot_count > 0) {
  1552       _allocation_fail_alot_count--;
  1553     } else {
  1554       if (TraceMetadataChunkAllocation && Verbose) {
  1555         gclog_or_tty->print_cr("Metadata allocation failing for "
  1556                                "MetadataAllocationFailALot");
  1558       init_allocation_fail_alot_count();
  1559       return true;
  1562   return false;
  1564 #endif
  1566 // ChunkManager methods
  1568 size_t ChunkManager::free_chunks_total() {
  1569   return _free_chunks_total;
  1572 size_t ChunkManager::free_chunks_total_in_bytes() {
  1573   return free_chunks_total() * BytesPerWord;
  1576 size_t ChunkManager::free_chunks_count() {
  1577 #ifdef ASSERT
  1578   if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
  1579     MutexLockerEx cl(SpaceManager::expand_lock(),
  1580                      Mutex::_no_safepoint_check_flag);
  1581     // This lock is only needed in debug because the verification
  1582     // of _free_chunks_count walks the lists of free chunks
  1583     slow_locked_verify_free_chunks_count();
  1585 #endif
  1586   return _free_chunks_count;
  1589 void ChunkManager::locked_verify_free_chunks_total() {
  1590   assert_lock_strong(SpaceManager::expand_lock());
  1591   assert(sum_free_chunks() == _free_chunks_total,
  1592     err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
  1593            " same as sum " SIZE_FORMAT, _free_chunks_total,
  1594            sum_free_chunks()));
  1597 void ChunkManager::verify_free_chunks_total() {
  1598   MutexLockerEx cl(SpaceManager::expand_lock(),
  1599                      Mutex::_no_safepoint_check_flag);
  1600   locked_verify_free_chunks_total();
  1603 void ChunkManager::locked_verify_free_chunks_count() {
  1604   assert_lock_strong(SpaceManager::expand_lock());
  1605   assert(sum_free_chunks_count() == _free_chunks_count,
  1606     err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
  1607            " same as sum " SIZE_FORMAT, _free_chunks_count,
  1608            sum_free_chunks_count()));
  1611 void ChunkManager::verify_free_chunks_count() {
  1612 #ifdef ASSERT
  1613   MutexLockerEx cl(SpaceManager::expand_lock(),
  1614                      Mutex::_no_safepoint_check_flag);
  1615   locked_verify_free_chunks_count();
  1616 #endif
  1619 void ChunkManager::verify() {
  1620   MutexLockerEx cl(SpaceManager::expand_lock(),
  1621                      Mutex::_no_safepoint_check_flag);
  1622   locked_verify();
  1625 void ChunkManager::locked_verify() {
  1626   locked_verify_free_chunks_count();
  1627   locked_verify_free_chunks_total();
  1630 void ChunkManager::locked_print_free_chunks(outputStream* st) {
  1631   assert_lock_strong(SpaceManager::expand_lock());
  1632   st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1633                 _free_chunks_total, _free_chunks_count);
  1636 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  1637   assert_lock_strong(SpaceManager::expand_lock());
  1638   st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
  1639                 sum_free_chunks(), sum_free_chunks_count());
  1641 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  1642   return &_free_chunks[index];
  1645 // These methods, which sum over the free chunk lists, are used by
  1646 // printing methods that run in product builds.
  1647 size_t ChunkManager::sum_free_chunks() {
  1648   assert_lock_strong(SpaceManager::expand_lock());
  1649   size_t result = 0;
  1650   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1651     ChunkList* list = free_chunks(i);
  1653     if (list == NULL) {
  1654       continue;
  1657     result = result + list->count() * list->size();
  1659   result = result + humongous_dictionary()->total_size();
  1660   return result;
  1663 size_t ChunkManager::sum_free_chunks_count() {
  1664   assert_lock_strong(SpaceManager::expand_lock());
  1665   size_t count = 0;
  1666   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  1667     ChunkList* list = free_chunks(i);
  1668     if (list == NULL) {
  1669       continue;
  1671     count = count + list->count();
  1673   count = count + humongous_dictionary()->total_free_blocks();
  1674   return count;
  1677 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  1678   ChunkIndex index = list_index(word_size);
  1679   assert(index < HumongousIndex, "No humongous list");
  1680   return free_chunks(index);
  1683 void ChunkManager::free_chunks_put(Metachunk* chunk) {
  1684   assert_lock_strong(SpaceManager::expand_lock());
  1685   ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  1686   chunk->set_next(free_list->head());
  1687   free_list->set_head(chunk);
  1688   // chunk is being returned to the chunk free list
  1689   inc_free_chunks_total(chunk->capacity_word_size());
  1690   slow_locked_verify();
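// A sketch of the LIFO push above (hypothetical chunks A, B, C): if the
// free list for this size currently holds A -> B, free_chunks_put(C)
// links C -> A -> B, so the next free_chunks_get() of this size class
// hands back C first.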
  1693 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  1694   // The deallocation of a chunk originates in the freelist
  1695   // management code for a Metaspace and does not hold the
  1696   // lock.
  1697   assert(chunk != NULL, "Deallocating NULL");
  1698   assert_lock_strong(SpaceManager::expand_lock());
  1699   slow_locked_verify();
  1700   if (TraceMetadataChunkAllocation) {
  1701     tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  1702                   PTR_FORMAT "  size " SIZE_FORMAT,
  1703                   chunk, chunk->word_size());
  1705   free_chunks_put(chunk);
  1708 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  1709   assert_lock_strong(SpaceManager::expand_lock());
  1711   slow_locked_verify();
  1713   Metachunk* chunk = NULL;
  1714   if (list_index(word_size) != HumongousIndex) {
  1715     ChunkList* free_list = find_free_chunks_list(word_size);
  1716     assert(free_list != NULL, "Sanity check");
  1718     chunk = free_list->head();
  1719     debug_only(Metachunk* debug_head = chunk;)
  1721     if (chunk == NULL) {
  1722       return NULL;
  1725     // Remove the chunk as the head of the list.
  1726     free_list->remove_chunk(chunk);
  1728     // Chunk is being removed from the chunks free list.
  1729     dec_free_chunks_total(chunk->capacity_word_size());
  1731     if (TraceMetadataChunkAllocation && Verbose) {
  1732       tty->print_cr("ChunkManager::free_chunks_get: free_list "
  1733                     PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  1734                     free_list, chunk, chunk->word_size());
  1736   } else {
  1737     chunk = humongous_dictionary()->get_chunk(
  1738       word_size,
  1739       FreeBlockDictionary<Metachunk>::atLeast);
  1741     if (chunk != NULL) {
  1742       if (TraceMetadataHumongousAllocation) {
  1743         size_t waste = chunk->word_size() - word_size;
  1744         tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
  1745                       " for requested size " SIZE_FORMAT
  1746                       " waste " SIZE_FORMAT,
  1747                       chunk->word_size(), word_size, waste);
  1749       // Chunk is being removed from the chunks free list.
  1750       dec_free_chunks_total(chunk->capacity_word_size());
  1751     } else {
  1752       return NULL;
  1756   // Remove it from the links to this freelist
  1757   chunk->set_next(NULL);
  1758   chunk->set_prev(NULL);
  1759 #ifdef ASSERT
  1760   // Chunk is no longer on any freelist. Setting to false makes
  1761   // container_count_slow() work.
  1762   chunk->set_is_free(false);
  1763 #endif
  1764   slow_locked_verify();
  1765   return chunk;
  1768 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  1769   assert_lock_strong(SpaceManager::expand_lock());
  1770   slow_locked_verify();
  1772   // Take from the beginning of the list
  1773   Metachunk* chunk = free_chunks_get(word_size);
  1774   if (chunk == NULL) {
  1775     return NULL;
  1778   assert((word_size <= chunk->word_size()) ||
  1779          list_index(chunk->word_size()) == HumongousIndex,
  1780          "Non-humongous variable sized chunk");
  1781   if (TraceMetadataChunkAllocation) {
  1782     size_t list_count;
  1783     if (list_index(word_size) < HumongousIndex) {
  1784       ChunkList* list = find_free_chunks_list(word_size);
  1785       list_count = list->count();
  1786     } else {
  1787       list_count = humongous_dictionary()->total_count();
  1789     tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  1790                PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  1791                this, chunk, chunk->word_size(), list_count);
  1792     locked_print_free_chunks(tty);
  1795   return chunk;
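// Hypothetical caller sketch (not taken from this file) showing the
// locking the assert above demands; chunk_manager here is an assumed
// local variable:
//   MutexLockerEx ml(SpaceManager::expand_lock(),
//                    Mutex::_no_safepoint_check_flag);
//   Metachunk* chunk = chunk_manager->chunk_freelist_allocate(SmallChunk);
//   if (chunk == NULL) { /* fall back to growing the virtual space */ }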
  1798 void ChunkManager::print_on(outputStream* out) {
  1799   if (PrintFLSStatistics != 0) {
  1800     humongous_dictionary()->report_statistics();
  1804 // SpaceManager methods
  1806 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
  1807                                            size_t* chunk_word_size,
  1808                                            size_t* class_chunk_word_size) {
  1809   switch (type) {
  1810   case Metaspace::BootMetaspaceType:
  1811     *chunk_word_size = Metaspace::first_chunk_word_size();
  1812     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
  1813     break;
  1814   case Metaspace::ROMetaspaceType:
  1815     *chunk_word_size = SharedReadOnlySize / wordSize;
  1816     *class_chunk_word_size = ClassSpecializedChunk;
  1817     break;
  1818   case Metaspace::ReadWriteMetaspaceType:
  1819     *chunk_word_size = SharedReadWriteSize / wordSize;
  1820     *class_chunk_word_size = ClassSpecializedChunk;
  1821     break;
  1822   case Metaspace::AnonymousMetaspaceType:
  1823   case Metaspace::ReflectionMetaspaceType:
  1824     *chunk_word_size = SpecializedChunk;
  1825     *class_chunk_word_size = ClassSpecializedChunk;
  1826     break;
  1827   default:
  1828     *chunk_word_size = SmallChunk;
  1829     *class_chunk_word_size = ClassSmallChunk;
  1830     break;
  1832   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
  1833     err_msg("Initial chunks sizes bad: data  " SIZE_FORMAT
  1834             " class " SIZE_FORMAT,
  1835             *chunk_word_size, *class_chunk_word_size));
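// For instance, per the switch above, a ReflectionMetaspaceType metaspace
// starts from a SpecializedChunk for data and a ClassSpecializedChunk for
// class metadata, while BootMetaspaceType starts from the much larger
// first_chunk_word_size() / first_class_chunk_word_size() values.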
  1838 size_t SpaceManager::sum_free_in_chunks_in_use() const {
  1839   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1840   size_t free = 0;
  1841   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1842     Metachunk* chunk = chunks_in_use(i);
  1843     while (chunk != NULL) {
  1844       free += chunk->free_word_size();
  1845       chunk = chunk->next();
  1848   return free;
  1851 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  1852   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1853   size_t result = 0;
  1854   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1855    result += sum_waste_in_chunks_in_use(i);
  1858   return result;
  1861 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  1862   size_t result = 0;
  1863   Metachunk* chunk = chunks_in_use(index);
  1864   // Count the free space in all the chunks but not the
  1865   // current chunk, from which allocations are still being done.
  1866   while (chunk != NULL) {
  1867     if (chunk != current_chunk()) {
  1868       result += chunk->free_word_size();
  1870     chunk = chunk->next();
  1872   return result;
  1875 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  1876   // For CMS use "allocated_chunks_words()", which does not need the
  1877   // Metaspace lock.  For the other collectors, sum over the
  1878   // lists.  Use both methods as a check that "allocated_chunks_words()"
  1879   // is correct.  That is, sum_capacity_in_chunks_in_use() is too
  1880   // expensive to use in the product, so allocated_chunks_words()
  1881   // should be used, but allow for checking that it returns the same
  1882   // value as sum_capacity_in_chunks_in_use(), which is the definitive
  1883   // answer.
  1884   if (UseConcMarkSweepGC) {
  1885     return allocated_chunks_words();
  1886   } else {
  1887     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1888     size_t sum = 0;
  1889     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1890       Metachunk* chunk = chunks_in_use(i);
  1891       while (chunk != NULL) {
  1892         sum += chunk->capacity_word_size();
  1893         chunk = chunk->next();
  1896   return sum;
  1900 size_t SpaceManager::sum_count_in_chunks_in_use() {
  1901   size_t count = 0;
  1902   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1903     count = count + sum_count_in_chunks_in_use(i);
  1906   return count;
  1909 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
  1910   size_t count = 0;
  1911   Metachunk* chunk = chunks_in_use(i);
  1912   while (chunk != NULL) {
  1913     count++;
  1914     chunk = chunk->next();
  1916   return count;
  1920 size_t SpaceManager::sum_used_in_chunks_in_use() const {
  1921   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  1922   size_t used = 0;
  1923   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1924     Metachunk* chunk = chunks_in_use(i);
  1925     while (chunk != NULL) {
  1926       used += chunk->used_word_size();
  1927       chunk = chunk->next();
  1930   return used;
  1933 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
  1935   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  1936     Metachunk* chunk = chunks_in_use(i);
  1937     st->print("SpaceManager: %s " PTR_FORMAT,
  1938                  chunk_size_name(i), chunk);
  1939     if (chunk != NULL) {
  1940       st->print_cr(" free " SIZE_FORMAT,
  1941                    chunk->free_word_size());
  1942     } else {
  1943       st->print_cr("");
  1947   vs_list()->chunk_manager()->locked_print_free_chunks(st);
  1948   vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  1951 size_t SpaceManager::calc_chunk_size(size_t word_size) {
  1953   // Decide between a small chunk and a medium chunk.  Up to
  1954   // _small_chunk_limit small chunks can be allocated but
  1955   // once a medium chunk has been allocated, no more small
  1956   // chunks will be allocated.
  1957   size_t chunk_word_size;
  1958   if (chunks_in_use(MediumIndex) == NULL &&
  1959       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
  1960     chunk_word_size = (size_t) small_chunk_size();
  1961     if (word_size + Metachunk::overhead() > small_chunk_size()) {
  1962       chunk_word_size = medium_chunk_size();
  1964   } else {
  1965     chunk_word_size = medium_chunk_size();
  1968   // Might still need a humongous chunk.  Enforce an
  1969   // eight word granularity to facilitate reuse (some
  1970   // wastage but better chance of reuse).
  1971   size_t if_humongous_sized_chunk =
  1972     align_size_up(word_size + Metachunk::overhead(),
  1973                   HumongousChunkGranularity);
  1974   chunk_word_size =
  1975     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
  1977   assert(!SpaceManager::is_humongous(word_size) ||
  1978          chunk_word_size == if_humongous_sized_chunk,
  1979          err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
  1980                  " chunk_word_size " SIZE_FORMAT,
  1981                  word_size, chunk_word_size));
  1982   if (TraceMetadataHumongousAllocation &&
  1983       SpaceManager::is_humongous(word_size)) {
  1984     gclog_or_tty->print_cr("Metadata humongous allocation:");
  1985     gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
  1986     gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
  1987                            chunk_word_size);
  1988     gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
  1989                            Metachunk::overhead());
  1991   return chunk_word_size;
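// Worked example of the humongous path above (illustrative, and assuming a
// hypothetical Metachunk::overhead() of 8 words): a 10001-word request
// becomes 10009 words, which align_size_up() rounds to 10016, the next
// multiple of the eight-word HumongousChunkGranularity.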
  1994 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  1995   assert(vs_list()->current_virtual_space() != NULL,
  1996          "Should have been set");
  1997   assert(current_chunk() == NULL ||
  1998          current_chunk()->allocate(word_size) == NULL,
  1999          "Don't need to expand");
  2000   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  2002   if (TraceMetadataChunkAllocation && Verbose) {
  2003     size_t words_left = 0;
  2004     size_t words_used = 0;
  2005     if (current_chunk() != NULL) {
  2006       words_left = current_chunk()->free_word_size();
  2007       words_used = current_chunk()->used_word_size();
  2009     gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
  2010                            " words " SIZE_FORMAT " words used " SIZE_FORMAT
  2011                            " words left",
  2012                             word_size, words_used, words_left);
  2015   // Get another chunk out of the virtual space
  2016   size_t grow_chunks_by_words = calc_chunk_size(word_size);
  2017   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  2019   // If a chunk was available, add it to the in-use chunk list
  2020   // and do an allocation from it.
  2021   if (next != NULL) {
  2022     Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
  2023     // Add to this manager's list of chunks in use.
  2024     add_chunk(next, false);
  2025     return next->allocate(word_size);
  2027   return NULL;
  2030 void SpaceManager::print_on(outputStream* st) const {
  2032   for (ChunkIndex i = ZeroIndex;
  2033        i < NumberOfInUseLists ;
  2034        i = next_chunk_index(i) ) {
  2035     st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
  2036                  chunks_in_use(i),
  2037                  chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
  2039   st->print_cr("    waste:  Small " SIZE_FORMAT " Medium " SIZE_FORMAT
  2040                " Humongous " SIZE_FORMAT,
  2041                sum_waste_in_chunks_in_use(SmallIndex),
  2042                sum_waste_in_chunks_in_use(MediumIndex),
  2043                sum_waste_in_chunks_in_use(HumongousIndex));
  2044   // block free lists
  2045   if (block_freelists() != NULL) {
  2046     st->print_cr("total in block free lists " SIZE_FORMAT,
  2047       block_freelists()->total_size());
  2051 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  2052                            Mutex* lock,
  2053                            VirtualSpaceList* vs_list) :
  2054   _vs_list(vs_list),
  2055   _mdtype(mdtype),
  2056   _allocated_blocks_words(0),
  2057   _allocated_chunks_words(0),
  2058   _allocated_chunks_count(0),
  2059   _lock(lock)
  2061   initialize();
  2064 void SpaceManager::inc_size_metrics(size_t words) {
  2065   assert_lock_strong(SpaceManager::expand_lock());
  2066   // Running totals of allocated Metachunk words and of the allocated
  2067   // Metachunk count for this SpaceManager
  2068   _allocated_chunks_words = _allocated_chunks_words + words;
  2069   _allocated_chunks_count++;
  2070   // Global total of capacity in allocated Metachunks
  2071   MetaspaceAux::inc_capacity(mdtype(), words);
  2072   // Global total of allocated Metablocks.
  2073   // used_words_slow() includes the overhead in each
  2074   // Metachunk, so include it in the used total when the
  2075   // Metachunk is first added (it is added only once per
  2076   // Metachunk).
  2077   MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
  2080 void SpaceManager::inc_used_metrics(size_t words) {
  2081   // Add to the per SpaceManager total
  2082   Atomic::add_ptr(words, &_allocated_blocks_words);
  2083   // Add to the global total
  2084   MetaspaceAux::inc_used(mdtype(), words);
  2087 void SpaceManager::dec_total_from_size_metrics() {
  2088   MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  2089   MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  2090   // Also deduct the overhead per Metachunk
  2091   MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
  2094 void SpaceManager::initialize() {
  2095   Metadebug::init_allocation_fail_alot_count();
  2096   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  2097     _chunks_in_use[i] = NULL;
  2099   _current_chunk = NULL;
  2100   if (TraceMetadataChunkAllocation && Verbose) {
  2101     gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
  2105 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
  2106   if (chunks == NULL) {
  2107     return;
  2109   ChunkList* list = free_chunks(index);
  2110   assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
  2111   assert_lock_strong(SpaceManager::expand_lock());
  2112   Metachunk* cur = chunks;
  2114   // This returns chunks one at a time.  If a new
  2115   // List class is created that is a base class
  2116   // of FreeList, then something like FreeList::prepend()
  2117   // could be used in place of this loop.
  2118   while (cur != NULL) {
  2119     assert(cur->container() != NULL, "Container should have been set");
  2120     cur->container()->dec_container_count();
  2121     // Capture the next link before it is changed
  2122     // by the call to return_chunk_at_head();
  2123     Metachunk* next = cur->next();
  2124     cur->set_is_free(true);
  2125     list->return_chunk_at_head(cur);
  2126     cur = next;
  2130 SpaceManager::~SpaceManager() {
  2131   // This call takes this->_lock, which can't be done while holding expand_lock()
  2132   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
  2133     err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
  2134             " allocated_chunks_words() " SIZE_FORMAT,
  2135             sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
  2137   MutexLockerEx fcl(SpaceManager::expand_lock(),
  2138                     Mutex::_no_safepoint_check_flag);
  2140   ChunkManager* chunk_manager = vs_list()->chunk_manager();
  2142   chunk_manager->slow_locked_verify();
  2144   dec_total_from_size_metrics();
  2146   if (TraceMetadataChunkAllocation && Verbose) {
  2147     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
  2148     locked_print_chunks_in_use_on(gclog_or_tty);
  2151   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
  2152   // is used during the freeing of VirtualSpaceNodes.
  2154   // Have to update before the chunks_in_use lists are emptied
  2155   // below.
  2156   chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
  2157                                        sum_count_in_chunks_in_use());
  2159   // Add all the chunks in use by this space manager
  2160   // to the global list of free chunks.
  2162   // Follow each list of chunks-in-use and add them to the
  2163   // free lists.  Each list is NULL terminated.
  2165   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
  2166     if (TraceMetadataChunkAllocation && Verbose) {
  2167       gclog_or_tty->print_cr("returned %d %s chunks to freelist",
  2168                              sum_count_in_chunks_in_use(i),
  2169                              chunk_size_name(i));
  2171     Metachunk* chunks = chunks_in_use(i);
  2172     chunk_manager->return_chunks(i, chunks);
  2173     set_chunks_in_use(i, NULL);
  2174     if (TraceMetadataChunkAllocation && Verbose) {
  2175       gclog_or_tty->print_cr("updated freelist count %d %s",
  2176                              chunk_manager->free_chunks(i)->count(),
  2177                              chunk_size_name(i));
  2179     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  2182   // The medium chunk case may be optimized by passing the head and
  2183   // tail of the medium chunk list to add_at_head().  The tail is often
  2184   // the current chunk but there are probably exceptions.
  2186   // Humongous chunks
  2187   if (TraceMetadataChunkAllocation && Verbose) {
  2188     gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
  2189                             sum_count_in_chunks_in_use(HumongousIndex),
  2190                             chunk_size_name(HumongousIndex));
  2191     gclog_or_tty->print("Humongous chunk dictionary: ");
  2193   // Humongous chunks are never the current chunk.
  2194   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
  2196   while (humongous_chunks != NULL) {
  2197 #ifdef ASSERT
  2198     humongous_chunks->set_is_free(true);
  2199 #endif
  2200     if (TraceMetadataChunkAllocation && Verbose) {
  2201       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
  2202                           humongous_chunks,
  2203                           humongous_chunks->word_size());
  2205     assert(humongous_chunks->word_size() == (size_t)
  2206            align_size_up(humongous_chunks->word_size(),
  2207                              HumongousChunkGranularity),
  2208            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
  2209                    " granularity %d",
  2210                    humongous_chunks->word_size(), HumongousChunkGranularity));
  2211     Metachunk* next_humongous_chunks = humongous_chunks->next();
  2212     humongous_chunks->container()->dec_container_count();
  2213     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  2214     humongous_chunks = next_humongous_chunks;
  2216   if (TraceMetadataChunkAllocation && Verbose) {
  2217     gclog_or_tty->print_cr("");
  2218     gclog_or_tty->print_cr("updated dictionary count %d %s",
  2219                      chunk_manager->humongous_dictionary()->total_count(),
  2220                      chunk_size_name(HumongousIndex));
  2222   chunk_manager->slow_locked_verify();
  2225 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  2226   switch (index) {
  2227     case SpecializedIndex:
  2228       return "Specialized";
  2229     case SmallIndex:
  2230       return "Small";
  2231     case MediumIndex:
  2232       return "Medium";
  2233     case HumongousIndex:
  2234       return "Humongous";
  2235     default:
  2236       return NULL;
  2240 ChunkIndex ChunkManager::list_index(size_t size) {
  2241   switch (size) {
  2242     case SpecializedChunk:
  2243       assert(SpecializedChunk == ClassSpecializedChunk,
  2244              "Need branch for ClassSpecializedChunk");
  2245       return SpecializedIndex;
  2246     case SmallChunk:
  2247     case ClassSmallChunk:
  2248       return SmallIndex;
  2249     case MediumChunk:
  2250     case ClassMediumChunk:
  2251       return MediumIndex;
  2252     default:
  2253       assert(size > MediumChunk || size > ClassMediumChunk,
  2254              "Not a humongous chunk");
  2255       return HumongousIndex;
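// E.g. list_index(SpecializedChunk) == SpecializedIndex and
// list_index(ClassMediumChunk) == MediumIndex, while any other size,
// such as one produced by the humongous alignment in calc_chunk_size(),
// falls through to HumongousIndex.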
  2259 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  2260   assert_lock_strong(_lock);
  2261   size_t raw_word_size = get_raw_word_size(word_size);
  2262   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
  2263   assert(raw_word_size >= min_size,
  2264          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
  2265   block_freelists()->return_block(p, raw_word_size);
  2268 // Adds a chunk to the list of chunks in use.
  2269 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
  2271   assert(new_chunk != NULL, "Should not be NULL");
  2272   assert(new_chunk->next() == NULL, "Should not be on a list");
  2274   new_chunk->reset_empty();
  2276   // Find the correct list and set the current
  2277   // chunk for that list.
  2278   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
  2280   if (index != HumongousIndex) {
  2281     set_current_chunk(new_chunk);
  2282     new_chunk->set_next(chunks_in_use(index));
  2283     set_chunks_in_use(index, new_chunk);
  2284   } else {
  2285     // For null class loader data and DumpSharedSpaces, the first chunk isn't
  2286     // small, so small will be null.  Link this first chunk as the current
  2287     // chunk.
  2288     if (make_current) {
  2289       // Set as the current chunk but otherwise treat as a humongous chunk.
  2290       set_current_chunk(new_chunk);
  2292     // Link at head.  The _current_chunk only points to a humongous chunk
  2293     // for the null class loader metaspace (class and data virtual space
  2294     // managers), so it will not point to the tail of the humongous
  2295     // chunks list.
  2296     new_chunk->set_next(chunks_in_use(HumongousIndex));
  2297     set_chunks_in_use(HumongousIndex, new_chunk);
  2299     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  2302   // Add to the running sum of capacity
  2303   inc_size_metrics(new_chunk->word_size());
  2305   assert(new_chunk->is_empty(), "Not ready for reuse");
  2306   if (TraceMetadataChunkAllocation && Verbose) {
  2307     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  2308                         sum_count_in_chunks_in_use());
  2309     new_chunk->print_on(gclog_or_tty);
  2310     if (vs_list() != NULL) {
  2311       vs_list()->chunk_manager()->locked_print_free_chunks(tty);
  2316 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  2317                                        size_t grow_chunks_by_words) {
  2319   Metachunk* next = vs_list()->get_new_chunk(word_size,
  2320                                              grow_chunks_by_words,
  2321                                              medium_chunk_bunch());
  2323   if (TraceMetadataHumongousAllocation &&
  2324       SpaceManager::is_humongous(next->word_size())) {
  2325     gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
  2326                            next->word_size());
  2329   return next;
  2332 MetaWord* SpaceManager::allocate(size_t word_size) {
  2333   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  2335   size_t raw_word_size = get_raw_word_size(word_size);
  2336   BlockFreelist* fl =  block_freelists();
  2337   MetaWord* p = NULL;
  2338   // Allocation from the dictionary is expensive in the sense that
  2339   // the dictionary has to be searched for a size.  Don't allocate
  2340   // from the dictionary until it starts to get fat.  Is this
  2341   // a reasonable policy?  Maybe a skinny dictionary is fast enough
  2342   // for allocations.  Do some profiling.  JJJ
  2343   if (fl->total_size() > allocation_from_dictionary_limit) {
  2344     p = fl->get_block(raw_word_size);
  2346   if (p == NULL) {
  2347     p = allocate_work(raw_word_size);
  2349   Metadebug::deallocate_block_a_lot(this, raw_word_size);
  2351   return p;
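// To summarize the policy above: once the deallocated-block dictionary is
// "fat" enough (total size over allocation_from_dictionary_limit) it is
// searched first; otherwise, or on a miss, allocate_work() carves the
// request out of the current chunk, growing via grow_and_allocate() when
// that chunk is exhausted.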
  2354 // Returns the address of space allocated for "word_size".
  2355 // This method does not know about blocks (Metablocks).
  2356 MetaWord* SpaceManager::allocate_work(size_t word_size) {
  2357   assert_lock_strong(_lock);
  2358 #ifdef ASSERT
  2359   if (Metadebug::test_metadata_failure()) {
  2360     return NULL;
  2362 #endif
  2363   // Is there space in the current chunk?
  2364   MetaWord* result = NULL;
  2366   // For DumpSharedSpaces, only allocate out of the current chunk which is
  2367   // never null because we gave it the size we wanted.   Caller reports out
  2368   // of memory if this returns null.
  2369   if (DumpSharedSpaces) {
  2370     assert(current_chunk() != NULL, "should never happen");
  2371     inc_used_metrics(word_size);
  2372     return current_chunk()->allocate(word_size); // caller handles null result
  2374   if (current_chunk() != NULL) {
  2375     result = current_chunk()->allocate(word_size);
  2378   if (result == NULL) {
  2379     result = grow_and_allocate(word_size);
  2381   if (result != 0) {
  2382     inc_used_metrics(word_size);
  2383     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
  2384            "Head of the list is being allocated");
  2387   return result;
  2390 void SpaceManager::verify() {
  2391   // If there are blocks in the dictionary, then
  2392   // verification of chunks does not work since
  2393   // being in the dictionary alters a chunk.
  2394   if (block_freelists()->total_size() == 0) {
  2395     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  2396       Metachunk* curr = chunks_in_use(i);
  2397       while (curr != NULL) {
  2398         curr->verify();
  2399         verify_chunk_size(curr);
  2400         curr = curr->next();
  2406 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
  2407   assert(is_humongous(chunk->word_size()) ||
  2408          chunk->word_size() == medium_chunk_size() ||
  2409          chunk->word_size() == small_chunk_size() ||
  2410          chunk->word_size() == specialized_chunk_size(),
  2411          "Chunk size is wrong");
  2412   return;
  2415 #ifdef ASSERT
  2416 void SpaceManager::verify_allocated_blocks_words() {
  2417   // Verification is only guaranteed at a safepoint.
  2418   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
  2419     "Verification can fail if the applications is running");
  2420   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
  2421     err_msg("allocation total is not consistent " SIZE_FORMAT
  2422             " vs " SIZE_FORMAT,
  2423             allocated_blocks_words(), sum_used_in_chunks_in_use()));
  2426 #endif
  2428 void SpaceManager::dump(outputStream* const out) const {
  2429   size_t curr_total = 0;
  2430   size_t waste = 0;
  2431   uint i = 0;
  2432   size_t used = 0;
  2433   size_t capacity = 0;
  2435   // Add up statistics for all chunks in this SpaceManager.
  2436   for (ChunkIndex index = ZeroIndex;
  2437        index < NumberOfInUseLists;
  2438        index = next_chunk_index(index)) {
  2439     for (Metachunk* curr = chunks_in_use(index);
  2440          curr != NULL;
  2441          curr = curr->next()) {
  2442       out->print("%d) ", i++);
  2443       curr->print_on(out);
  2444       if (TraceMetadataChunkAllocation && Verbose) {
  2445         block_freelists()->print_on(out);
  2447       curr_total += curr->word_size();
  2448       used += curr->used_word_size();
  2449       capacity += curr->capacity_word_size();
  2450       waste += curr->free_word_size() + curr->overhead();
  2454   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  2455   // Free space isn't wasted.
  2456   waste -= free;
  2458   out->print_cr("total of all chunks "  SIZE_FORMAT " used " SIZE_FORMAT
  2459                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
  2460                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
  2463 #ifndef PRODUCT
  2464 void SpaceManager::mangle_freed_chunks() {
  2465   for (ChunkIndex index = ZeroIndex;
  2466        index < NumberOfInUseLists;
  2467        index = next_chunk_index(index)) {
  2468     for (Metachunk* curr = chunks_in_use(index);
  2469          curr != NULL;
  2470          curr = curr->next()) {
  2471       curr->mangle();
  2475 #endif // PRODUCT
  2477 // MetaspaceAux
  2480 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
  2481 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
  2483 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
  2484   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2485   return list == NULL ? 0 : list->free_bytes();
  2488 size_t MetaspaceAux::free_bytes() {
  2489   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
  2492 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  2493   assert_lock_strong(SpaceManager::expand_lock());
  2494   assert(words <= allocated_capacity_words(mdtype),
  2495     err_msg("About to decrement below 0: words " SIZE_FORMAT
  2496             " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
  2497             words, mdtype, allocated_capacity_words(mdtype)));
  2498   _allocated_capacity_words[mdtype] -= words;
  2501 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  2502   assert_lock_strong(SpaceManager::expand_lock());
  2503   // Needs to be atomic
  2504   _allocated_capacity_words[mdtype] += words;
  2507 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  2508   assert(words <= allocated_used_words(mdtype),
  2509     err_msg("About to decrement below 0: words " SIZE_FORMAT
  2510             " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
  2511             words, mdtype, allocated_used_words(mdtype)));
  2512   // For CMS deallocation of the Metaspaces occurs during the
  2513   // sweep which is a concurrent phase.  Protection by the expand_lock()
  2514   // is not enough since allocation is on a per Metaspace basis
  2515   // and protected by the Metaspace lock.
  2516   jlong minus_words = (jlong) - (jlong) words;
  2517   Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
  2520 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  2521   // _allocated_used_words tracks allocations for
  2522   // each piece of metadata.  Those allocations are
  2523   // generally done concurrently by different application
  2524   // threads so must be done atomically.
  2525   Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
  2528 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  2529   size_t used = 0;
  2530   ClassLoaderDataGraphMetaspaceIterator iter;
  2531   while (iter.repeat()) {
  2532     Metaspace* msp = iter.get_next();
  2533     // Sum allocated_blocks_words for each metaspace
  2534     if (msp != NULL) {
  2535       used += msp->used_words_slow(mdtype);
  2538   return used * BytesPerWord;
  2541 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
  2542   size_t free = 0;
  2543   ClassLoaderDataGraphMetaspaceIterator iter;
  2544   while (iter.repeat()) {
  2545     Metaspace* msp = iter.get_next();
  2546     if (msp != NULL) {
  2547       free += msp->free_words(mdtype);
  2550   return free * BytesPerWord;
  2553 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
  2554   if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
  2555     return 0;
  2557   // Don't count the space in the freelists.  That space will be
  2558   // added to the capacity calculation as needed.
  2559   size_t capacity = 0;
  2560   ClassLoaderDataGraphMetaspaceIterator iter;
  2561   while (iter.repeat()) {
  2562     Metaspace* msp = iter.get_next();
  2563     if (msp != NULL) {
  2564       capacity += msp->capacity_words_slow(mdtype);
  2567   return capacity * BytesPerWord;
  2570 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  2571   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2572   return list == NULL ? 0 : list->virtual_space_total();
  2575 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
  2577 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
  2578   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  2579   if (list == NULL) {
  2580     return 0;
  2582   ChunkManager* chunk = list->chunk_manager();
  2583   chunk->slow_verify();
  2584   return chunk->free_chunks_total();
  2587 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
  2588   return free_chunks_total(mdtype) * BytesPerWord;
  2591 size_t MetaspaceAux::free_chunks_total() {
  2592   return free_chunks_total(Metaspace::ClassType) +
  2593          free_chunks_total(Metaspace::NonClassType);
  2596 size_t MetaspaceAux::free_chunks_total_in_bytes() {
  2597   return free_chunks_total() * BytesPerWord;
  2600 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  2601   gclog_or_tty->print(", [Metaspace:");
  2602   if (PrintGCDetails && Verbose) {
  2603     gclog_or_tty->print(" "  SIZE_FORMAT
  2604                         "->" SIZE_FORMAT
  2605                         "("  SIZE_FORMAT ")",
  2606                         prev_metadata_used,
  2607                         allocated_used_bytes(),
  2608                         reserved_in_bytes());
  2609   } else {
  2610     gclog_or_tty->print(" "  SIZE_FORMAT "K"
  2611                         "->" SIZE_FORMAT "K"
  2612                         "("  SIZE_FORMAT "K)",
  2613                         prev_metadata_used / K,
  2614                         allocated_used_bytes() / K,
  2615                         reserved_in_bytes()/ K);
  2618   gclog_or_tty->print("]");
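// Example of the non-verbose form above (illustrative values):
//   ", [Metaspace: 4096K->4232K(110592K)]"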
  2621 // This is printed when PrintGCDetails is enabled
  2622 void MetaspaceAux::print_on(outputStream* out) {
  2623   Metaspace::MetadataType nct = Metaspace::NonClassType;
  2625   out->print_cr(" Metaspace total "
  2626                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2627                 " reserved " SIZE_FORMAT "K",
  2628                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
  2630   out->print_cr("  data space     "
  2631                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2632                 " reserved " SIZE_FORMAT "K",
  2633                 allocated_capacity_bytes(nct)/K,
  2634                 allocated_used_bytes(nct)/K,
  2635                 reserved_in_bytes(nct)/K);
  2636   if (Metaspace::using_class_space()) {
  2637     Metaspace::MetadataType ct = Metaspace::ClassType;
  2638     out->print_cr("  class space    "
  2639                   SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  2640                   " reserved " SIZE_FORMAT "K",
  2641                   allocated_capacity_bytes(ct)/K,
  2642                   allocated_used_bytes(ct)/K,
  2643                   reserved_in_bytes(ct)/K);
  2647 // Print information for class space and data space separately.
  2648 // This is almost the same as above.
  2649 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  2650   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
  2651   size_t capacity_bytes = capacity_bytes_slow(mdtype);
  2652   size_t used_bytes = used_bytes_slow(mdtype);
  2653   size_t free_bytes = free_in_bytes(mdtype);
  2654   size_t used_and_free = used_bytes + free_bytes +
  2655                            free_chunks_capacity_bytes;
  2656   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
  2657              "K + unused in chunks " SIZE_FORMAT "K  + "
  2658              " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
  2659              "K  capacity in allocated chunks " SIZE_FORMAT "K",
  2660              used_bytes / K,
  2661              free_bytes / K,
  2662              free_chunks_capacity_bytes / K,
  2663              used_and_free / K,
  2664              capacity_bytes / K);
  2665   // Accounting can only be correct if we got the values during a safepoint
  2666   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
  2669 // Print total fragmentation for class metaspaces
  2670 void MetaspaceAux::print_class_waste(outputStream* out) {
  2671   assert(Metaspace::using_class_space(), "class metaspace not used");
  2672   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  2673   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  2674   ClassLoaderDataGraphMetaspaceIterator iter;
  2675   while (iter.repeat()) {
  2676     Metaspace* msp = iter.get_next();
  2677     if (msp != NULL) {
  2678       cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  2679       cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  2680       cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  2681       cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
  2682       cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  2683       cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
  2684       cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  2687   out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  2688                 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  2689                 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  2690                 "large count " SIZE_FORMAT,
  2691                 cls_specialized_count, cls_specialized_waste,
  2692                 cls_small_count, cls_small_waste,
  2693                 cls_medium_count, cls_medium_waste, cls_humongous_count);
  2696 // Print total fragmentation for data and class metaspaces separately
  2697 void MetaspaceAux::print_waste(outputStream* out) {
  2698   size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  2699   size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
  2701   ClassLoaderDataGraphMetaspaceIterator iter;
  2702   while (iter.repeat()) {
  2703     Metaspace* msp = iter.get_next();
  2704     if (msp != NULL) {
  2705       specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  2706       specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  2707       small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  2708       small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
  2709       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  2710       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
  2711       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  2714   out->print_cr("Total fragmentation waste (words) doesn't count free space");
  2715   out->print_cr("  data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  2716                         SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  2717                         SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  2718                         "large count " SIZE_FORMAT,
  2719              specialized_count, specialized_waste, small_count,
  2720              small_waste, medium_count, medium_waste, humongous_count);
  2721   if (Metaspace::using_class_space()) {
  2722     print_class_waste(out);
  2726 // Dump global metaspace things from the end of ClassLoaderDataGraph
  2727 void MetaspaceAux::dump(outputStream* out) {
  2728   out->print_cr("All Metaspace:");
  2729   out->print("data space: "); print_on(out, Metaspace::NonClassType);
  2730   out->print("class space: "); print_on(out, Metaspace::ClassType);
  2731   print_waste(out);
  2734 void MetaspaceAux::verify_free_chunks() {
  2735   Metaspace::space_list()->chunk_manager()->verify();
  2736   if (Metaspace::using_class_space()) {
  2737     Metaspace::class_space_list()->chunk_manager()->verify();
  2741 void MetaspaceAux::verify_capacity() {
  2742 #ifdef ASSERT
  2743   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
  2744   // For purposes of the running sum of capacity, verify against capacity
  2745   size_t capacity_in_use_bytes = capacity_bytes_slow();
  2746   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
  2747     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
  2748             " capacity_bytes_slow()" SIZE_FORMAT,
  2749             running_sum_capacity_bytes, capacity_in_use_bytes));
  2750   for (Metaspace::MetadataType i = Metaspace::ClassType;
  2751        i < Metaspace::MetadataTypeCount;
  2752        i = (Metaspace::MetadataType)(i + 1)) {
  2753     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
  2754     assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
  2755       err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
  2756               " capacity_bytes_slow(%u)" SIZE_FORMAT,
  2757               i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
  2759 #endif
  2762 void MetaspaceAux::verify_used() {
  2763 #ifdef ASSERT
  2764   size_t running_sum_used_bytes = allocated_used_bytes();
  2765   // For purposes of the running sum of used, verify against used
  2766   size_t used_in_use_bytes = used_bytes_slow();
  2767   assert(allocated_used_bytes() == used_in_use_bytes,
  2768     err_msg("allocated_used_bytes() " SIZE_FORMAT
  2769             " used_bytes_slow()" SIZE_FORMAT,
  2770             allocated_used_bytes(), used_in_use_bytes));
  2771   for (Metaspace::MetadataType i = Metaspace::ClassType;
  2772        i < Metaspace::MetadataTypeCount;
  2773        i = (Metaspace::MetadataType)(i + 1)) {
  2774     size_t used_in_use_bytes = used_bytes_slow(i);
  2775     assert(allocated_used_bytes(i) == used_in_use_bytes,
  2776       err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
  2777               " used_bytes_slow(%u)" SIZE_FORMAT,
  2778               i, allocated_used_bytes(i), i, used_in_use_bytes));
  2780 #endif
  2783 void MetaspaceAux::verify_metrics() {
  2784   verify_capacity();
  2785   verify_used();
  2789 // Metaspace methods
  2791 size_t Metaspace::_first_chunk_word_size = 0;
  2792 size_t Metaspace::_first_class_chunk_word_size = 0;
  2794 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  2795   initialize(lock, type);
  2798 Metaspace::~Metaspace() {
  2799   delete _vsm;
  2800   if (using_class_space()) {
  2801     delete _class_vsm;
  2805 VirtualSpaceList* Metaspace::_space_list = NULL;
  2806 VirtualSpaceList* Metaspace::_class_space_list = NULL;
  2808 #define VIRTUALSPACEMULTIPLIER 2
  2810 #ifdef _LP64
  2811 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  2812   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  2813   // narrow_klass_base is the lower of the metaspace base and the cds base
  2814   // (if cds is enabled).  The narrow_klass_shift depends on the distance
  2815   // between the lower base and higher address.
  2816   address lower_base;
  2817   address higher_address;
  2818   if (UseSharedSpaces) {
  2819     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  2820                           (address)(metaspace_base + class_metaspace_size()));
  2821     lower_base = MIN2(metaspace_base, cds_base);
  2822   } else {
  2823     higher_address = metaspace_base + class_metaspace_size();
  2824     lower_base = metaspace_base;
  2826   Universe::set_narrow_klass_base(lower_base);
  2827   if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
  2828     Universe::set_narrow_klass_shift(0);
  2829   } else {
  2830     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
  2831     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  2832   }
  2833 }
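// A worked example of the shift selection above (editorial sketch; the spans
// are made-up numbers, not values taken from this file):
//
//   span = higher_address - lower_base
//   span =  3*G  -> fits below max_juint, so narrow_klass_shift = 0 and a
//                   narrow klass pointer is simply (addr - lower_base)
//   span = 40*G  -> too large for an unshifted 32-bit offset, so the shift
//                   becomes LogKlassAlignmentInBytes (3 for 8-byte aligned
//                   Klass structures) and the encoding is
//                   (addr - lower_base) >> LogKlassAlignmentInBytes
//
// The 40*G case is only legal without CDS, which is what the assert in the
// else branch enforces.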
  2835 // Return TRUE if the specified metaspace_base and cds_base are close enough
  2836 // to work with compressed klass pointers.
  2837 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  2838   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  2839   assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  2840   address lower_base = MIN2((address)metaspace_base, cds_base);
  2841   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  2842                                 (address)(metaspace_base + class_metaspace_size()));
  2843   return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
  2844 }
  2846 // Try to allocate the metaspace at the requested addr.
  2847 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  2848   assert(using_class_space(), "called improperly");
  2849   assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  2850   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
  2851          "Metaspace size is too big");
  2853   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
  2854                                              os::vm_allocation_granularity(),
  2855                                              false, requested_addr, 0);
  2856   if (!metaspace_rs.is_reserved()) {
  2857     if (UseSharedSpaces) {
  2858       // Keep trying to allocate the metaspace, increasing the requested_addr
  2859       // by 1GB each time, until we reach an address that will no longer allow
  2860       // use of CDS with compressed klass pointers.
  2861       char *addr = requested_addr;
  2862       while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
  2863              can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
  2864         addr = addr + 1*G;
  2865         metaspace_rs = ReservedSpace(class_metaspace_size(),
  2866                                      os::vm_allocation_granularity(), false, addr, 0);
  2867       }
  2868     }
  2870     // If no successful allocation then try to allocate the space anywhere.  If
  2871     // that fails then OOM doom.  At this point we cannot try allocating the
  2872     // metaspace as if UseCompressedKlassPointers is off because too much
  2873     // initialization has happened that depends on UseCompressedKlassPointers.
  2874     // So, UseCompressedKlassPointers cannot be turned off at this point.
  2875     if (!metaspace_rs.is_reserved()) {
  2876       metaspace_rs = ReservedSpace(class_metaspace_size(),
  2877                                    os::vm_allocation_granularity(), false);
  2878       if (!metaspace_rs.is_reserved()) {
  2879         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
  2880                                               class_metaspace_size()));
  2881       }
  2882     }
  2883   }
  2885   // If we got here then the metaspace got allocated.
  2886   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
  2888   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  2889   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
  2890     FileMapInfo::stop_sharing_and_unmap(
  2891         "Could not allocate metaspace at a compatible address");
  2892   }
  2894   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
  2895                                   UseSharedSpaces ? (address)cds_base : 0);
  2897   initialize_class_space(metaspace_rs);
  2899   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
  2900     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
  2901                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
  2902     gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
  2903                            class_metaspace_size(), metaspace_rs.base(), requested_addr);
  2904   }
  2905 }
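// A worked example of the probing fallback above (editorial sketch; the
// addresses and the 1 GB archive size are invented for illustration):
//
//   cds_base       = (address)0x0000000800000000
//   cds_total      = 1*G
//   requested_addr = cds_base + cds_total     // just above the CDS archive
//
//   attempt 1: reserve class space at requested_addr        -> fails (in use)
//   attempt 2: reserve at requested_addr + 1*G              -> the whole
//              CDS-plus-class-space span is still below 4 GB, so CDS remains
//              usable; the reservation succeeds and probing stops.
//
// Only when every such step fails does the code reserve anywhere, and if that
// lands out of range the shared archive is unmapped instead.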
  2907 // For UseCompressedKlassPointers the class space is reserved above the top of
  2908 // the Java heap.  The argument passed in is at the base of the compressed space.
  2909 void Metaspace::initialize_class_space(ReservedSpace rs) {
  2910   // The reserved space size may be bigger because of alignment, esp with UseLargePages
  2911   assert(rs.size() >= ClassMetaspaceSize,
  2912          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
  2913   assert(using_class_space(), "Must be using class space");
  2914   _class_space_list = new VirtualSpaceList(rs);
  2915 }
  2917 #endif
  2919 void Metaspace::global_initialize() {
  2920   // Initialize the alignment for shared spaces.
  2921   int max_alignment = os::vm_page_size();
  2922   size_t cds_total = 0;
  2924   set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
  2925                                          os::vm_allocation_granularity()));
  2927   MetaspaceShared::set_max_alignment(max_alignment);
  2929   if (DumpSharedSpaces) {
  2930     SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
  2931     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
  2932     SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
  2933     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
  2935     // Initialize with the sum of the shared space sizes.  The read-only
  2936     // and read write metaspace chunks will be allocated out of this and the
  2937     // remainder is the misc code and data chunks.
  2938     cds_total = FileMapInfo::shared_spaces_size();
  2939     _space_list = new VirtualSpaceList(cds_total/wordSize);
  2941 #ifdef _LP64
  2942     // Set the compressed klass pointer base so that decoding of these pointers works
  2943     // properly when creating the shared archive.
  2944     assert(UseCompressedOops && UseCompressedKlassPointers,
  2945       "UseCompressedOops and UseCompressedKlassPointers must be set");
  2946     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
  2947     if (TraceMetavirtualspaceAllocation && Verbose) {
  2948       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
  2949                              _space_list->current_virtual_space()->bottom());
  2950     }
  2952     // Set the shift to zero.
  2953     assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
  2954            "CDS region is too large");
  2955     Universe::set_narrow_klass_shift(0);
  2956 #endif
  2958   } else {
  2959     // If using shared space, open the file that contains the shared space
  2960     // and map in the memory before initializing the rest of metaspace (so
  2961     // the addresses don't conflict)
  2962     address cds_address = NULL;
  2963     if (UseSharedSpaces) {
  2964       FileMapInfo* mapinfo = new FileMapInfo();
  2965       memset(mapinfo, 0, sizeof(FileMapInfo));
  2967       // Open the shared archive file, read and validate the header. If
  2968       // initialization fails, shared spaces [UseSharedSpaces] are
  2969       // disabled and the file is closed.
  2970       // Map in spaces now also
  2971       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
  2972         FileMapInfo::set_current_info(mapinfo);
  2973       } else {
  2974         assert(!mapinfo->is_open() && !UseSharedSpaces,
  2975                "archive file not closed or shared spaces not disabled.");
  2976       }
  2977       cds_total = FileMapInfo::shared_spaces_size();
  2978       cds_address = (address)mapinfo->region_base(0);
  2979     }
  2981 #ifdef _LP64
  2982     // If UseCompressedKlassPointers is set then allocate the metaspace area
  2983     // above the heap and above the CDS area (if it exists).
  2984     if (using_class_space()) {
  2985       if (UseSharedSpaces) {
  2986         allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
  2987       } else {
  2988         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
  2989       }
  2990     }
  2991 #endif
  2993     // Initialize these before initializing the VirtualSpaceList
  2994     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  2995     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  2996     // Make the first class chunk bigger than a medium chunk so it's not put
  2997     // on the medium chunk list.   The next chunk will be small and progress
  2998     // from there.  This size calculated by -version.
  2999     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
  3000                                        (ClassMetaspaceSize/BytesPerWord)*2);
  3001     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  3002     // Arbitrarily set the initial virtual space to a multiple
  3003     // of the boot class loader size.
  3004     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
  3005     // Initialize the list of virtual spaces.
  3006     _space_list = new VirtualSpaceList(word_size);
  3007   }
  3008 }
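// A worked example of the chunk sizing above (editorial sketch, assuming a
// 64-bit VM where BytesPerWord is 8 and the usual default
// InitialBootClassLoaderMetaspaceSize of 4*M; actual values depend on flags):
//
//   _first_chunk_word_size       = 4*M / 8            = 512*K words
//   _first_class_chunk_word_size = MIN2(MediumChunk * 6, ...)
//                                = 8*K * 6            = 48*K words
//                                  (whenever ClassMetaspaceSize is large enough)
//   initial virtual space        = VIRTUALSPACEMULTIPLIER * 512*K
//                                = 1*M words, i.e. 8 MB reserved up front
//
// Both chunk sizes are then rounded up by align_word_size_up().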
  3010 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  3012   assert(space_list() != NULL,
  3013     "Metadata VirtualSpaceList has not been initialized");
  3015   _vsm = new SpaceManager(NonClassType, lock, space_list());
  3016   if (_vsm == NULL) {
  3017     return;
  3018   }
  3019   size_t word_size;
  3020   size_t class_word_size;
  3021   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
  3023   if (using_class_space()) {
  3024     assert(class_space_list() != NULL,
  3025       "Class VirtualSpaceList has not been initialized");
  3027     // Allocate SpaceManager for classes.
  3028     _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
  3029     if (_class_vsm == NULL) {
  3030       return;
  3031     }
  3032   }
  3034   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  3036   // Allocate chunk for metadata objects
  3037   Metachunk* new_chunk =
  3038      space_list()->get_initialization_chunk(word_size,
  3039                                             vsm()->medium_chunk_bunch());
  3040   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  3041   if (new_chunk != NULL) {
  3042     // Add to this manager's list of chunks in use and current_chunk().
  3043     vsm()->add_chunk(new_chunk, true);
  3044   }
  3046   // Allocate chunk for class metadata objects
  3047   if (using_class_space()) {
  3048     Metachunk* class_chunk =
  3049        class_space_list()->get_initialization_chunk(class_word_size,
  3050                                                     class_vsm()->medium_chunk_bunch());
  3051     if (class_chunk != NULL) {
  3052       class_vsm()->add_chunk(class_chunk, true);
  3053     }
  3054   }
  3056   _alloc_record_head = NULL;
  3057   _alloc_record_tail = NULL;
  3058 }
  3060 size_t Metaspace::align_word_size_up(size_t word_size) {
  3061   size_t byte_size = word_size * wordSize;
  3062   return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
  3063 }
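// A worked example of the word/byte round trip above (editorial sketch,
// assuming a 64-bit VM and a 4 KB allocation alignment; the real alignment
// comes from ReservedSpace::allocation_align_size_up and is platform
// dependent):
//
//   word_size = 700  -> byte_size = 700 * 8 = 5600 bytes
//                    -> aligned up to 8192 bytes
//                    -> returned word size = 8192 / 8 = 1024 words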
  3065 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  3066   // DumpSharedSpaces doesn't use class metadata area (yet)
  3067   // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
  3068   if (mdtype == ClassType && using_class_space()) {
  3069     return  class_vsm()->allocate(word_size);
  3070   } else {
  3071     return  vsm()->allocate(word_size);
  3072   }
  3073 }
  3075 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  3076   MetaWord* result;
  3077   MetaspaceGC::set_expand_after_GC(true);
  3078   size_t before_inc = MetaspaceGC::capacity_until_GC();
  3079   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
  3080   MetaspaceGC::inc_capacity_until_GC(delta_bytes);
  3081   if (PrintGCDetails && Verbose) {
  3082     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
  3083       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  3084   }
  3086   result = allocate(word_size, mdtype);
  3088   return result;
  3089 }
  3091 // Space allocated in the Metaspace.  This may
  3092 // be across several metadata virtual spaces.
  3093 char* Metaspace::bottom() const {
  3094   assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  3095   return (char*)vsm()->current_chunk()->bottom();
  3096 }
  3098 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  3099   if (mdtype == ClassType) {
  3100     return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  3101   } else {
  3102     return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  3103   }
  3104 }
  3106 size_t Metaspace::free_words(MetadataType mdtype) const {
  3107   if (mdtype == ClassType) {
  3108     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  3109   } else {
  3110     return vsm()->sum_free_in_chunks_in_use();
  3111   }
  3112 }
  3114 // Space capacity in the Metaspace.  It includes
  3115 // space in the list of chunks from which allocations
  3116 // have been made. Don't include space in the global freelist and
  3117 // in the space available in the dictionary which
  3118 // is already counted in some chunk.
  3119 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  3120   if (mdtype == ClassType) {
  3121     return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  3122   } else {
  3123     return vsm()->sum_capacity_in_chunks_in_use();
  3124   }
  3125 }
  3127 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  3128   return used_words_slow(mdtype) * BytesPerWord;
  3129 }
  3131 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  3132   return capacity_words_slow(mdtype) * BytesPerWord;
  3133 }
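// Taken together, the slow accounting methods above are intended to satisfy,
// for each metadata type (editorial sketch of the invariant, not asserted
// here):
//
//   capacity_words_slow(mdtype) ~= used_words_slow(mdtype) + free_words(mdtype)
//
// i.e. the capacity of the in-use chunks splits into the portion already
// handed out (including per-chunk overhead) and the portion still free inside
// those chunks; chunks sitting on the global free lists are not counted.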
  3135 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  3136   if (SafepointSynchronize::is_at_safepoint()) {
  3137     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
  3138     // Don't take Heap_lock
  3139     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
  3140     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
  3141       // Dark matter.  Too small for dictionary.
  3142 #ifdef ASSERT
  3143       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
  3144 #endif
  3145       return;
  3146     }
  3147     if (is_class && using_class_space()) {
  3148       class_vsm()->deallocate(ptr, word_size);
  3149     } else {
  3150       vsm()->deallocate(ptr, word_size);
  3151     }
  3152   } else {
  3153     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
  3155     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
  3156       // Dark matter.  Too small for dictionary.
  3157 #ifdef ASSERT
  3158       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
  3159 #endif
  3160       return;
  3161     }
  3162     if (is_class && using_class_space()) {
  3163       class_vsm()->deallocate(ptr, word_size);
  3164     } else {
  3165       vsm()->deallocate(ptr, word_size);
  3166     }
  3167   }
  3168 }
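// The "dark matter" branches above implement a simple size policy; a minimal
// sketch of the two cases (editorial illustration, not additional code):
//
//   if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
//     // too small to track: the block is abandoned (poisoned with 0xf5 in
//     // debug builds) and is only reclaimed when its whole chunk is freed
//   } else {
//     // large enough: the block is handed back to the owning SpaceManager's
//     // free lists so later allocations can reuse it
//   }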
  3170 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  3171                               bool read_only, MetaspaceObj::Type type, TRAPS) {
  3172   if (HAS_PENDING_EXCEPTION) {
  3173     assert(false, "Should not allocate with exception pending");
  3174     return NULL;  // caller does a CHECK_NULL too
  3175   }
  3177   MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
  3179   // SSS: Should we align the allocations and make sure the sizes are aligned.
  3180   MetaWord* result = NULL;
  3182   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
  3183         "ClassLoaderData::the_null_class_loader_data() should have been used.");
  3184   // Allocate in metaspaces without taking out a lock, because it deadlocks
  3185   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  3186   // to revisit this for application class data sharing.
  3187   if (DumpSharedSpaces) {
  3188     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
  3189     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
  3190     result = space->allocate(word_size, NonClassType);
  3191     if (result == NULL) {
  3192       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  3193     } else {
  3194       space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
  3195     }
  3196     return Metablock::initialize(result, word_size);
  3197   }
  3199   result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
  3201   if (result == NULL) {
  3202     // Try to clean out some memory and retry.
  3203     result =
  3204       Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
  3205         loader_data, word_size, mdtype);
  3207     // If result is still null, we are out of memory.
  3208     if (result == NULL) {
  3209       if (Verbose && TraceMetadataChunkAllocation) {
  3210         gclog_or_tty->print_cr("Metaspace allocation failed for size "
  3211           SIZE_FORMAT, word_size);
  3212         if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
  3213         MetaspaceAux::dump(gclog_or_tty);
  3214       }
  3215       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  3216       const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
  3217                                                          "Metadata space";
  3218       report_java_out_of_memory(space_string);
  3220       if (JvmtiExport::should_post_resource_exhausted()) {
  3221         JvmtiExport::post_resource_exhausted(
  3222             JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
  3223             space_string);
  3224       }
  3225       if (mdtype == ClassType) {
  3226         THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
  3227       } else {
  3228         THROW_OOP_0(Universe::out_of_memory_error_metaspace());
  3229       }
  3230     }
  3231   }
  3232   return Metablock::initialize(result, word_size);
  3233 }
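// Failure handling in the allocator above, in outline (editorial sketch of
// the control flow already shown, with the steps compressed):
//
//   result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
//   if (result == NULL)   // GC, then retry/expand
//     result = ...->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
//   if (result == NULL) { // genuinely out of metaspace
//     report_java_out_of_memory(...); post the JVMTI resource-exhausted event;
//     throw the per-type OutOfMemoryError (class metaspace vs. metaspace);
//   }
//   return Metablock::initialize(result, word_size);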
  3235 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  3236   assert(DumpSharedSpaces, "sanity");
  3238   AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
  3239   if (_alloc_record_head == NULL) {
  3240     _alloc_record_head = _alloc_record_tail = rec;
  3241   } else {
  3242     _alloc_record_tail->_next = rec;
  3243     _alloc_record_tail = rec;
  3244   }
  3245 }
  3247 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  3248   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
  3250   address last_addr = (address)bottom();
  3252   for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
  3253     address ptr = rec->_ptr;
  3254     if (last_addr < ptr) {
  3255       closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
  3256     }
  3257     closure->doit(ptr, rec->_type, rec->_byte_size);
  3258     last_addr = ptr + rec->_byte_size;
  3259   }
  3261   address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  3262   if (last_addr < top) {
  3263     closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  3264   }
  3265 }
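// Example use of the iteration above (editorial sketch; DumpStats is a
// hypothetical closure, not a type that exists in HotSpot):
//
//   class DumpStats : public Metaspace::AllocRecordClosure {
//    public:
//     size_t _bytes[MetaspaceObj::_number_of_types];
//     DumpStats() {
//       for (int i = 0; i < MetaspaceObj::_number_of_types; i++) _bytes[i] = 0;
//     }
//     virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       _bytes[type] += byte_size;   // accumulate bytes per metadata type
//     }
//   };
//
// Run against the read-only or read-write dump-time metaspace, such a closure
// reports how each region was filled during -Xshare:dump, with the gaps
// surfacing as MetaspaceObj::UnknownType.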
  3267 void Metaspace::purge() {
  3268   MutexLockerEx cl(SpaceManager::expand_lock(),
  3269                    Mutex::_no_safepoint_check_flag);
  3270   space_list()->purge();
  3271   if (using_class_space()) {
  3272     class_space_list()->purge();
  3273   }
  3274 }
  3276 void Metaspace::print_on(outputStream* out) const {
  3277   // Print both class virtual space counts and metaspace.
  3278   if (Verbose) {
  3279     vsm()->print_on(out);
  3280     if (using_class_space()) {
  3281       class_vsm()->print_on(out);
  3282     }
  3283   }
  3284 }
  3286 bool Metaspace::contains(const void * ptr) {
  3287   if (MetaspaceShared::is_in_shared_space(ptr)) {
  3288     return true;
  3289   }
  3290   // This is checked while unlocked.  As long as the virtualspaces are added
  3291   // at the end, the pointer will be in one of them.  The virtual spaces
  3292   // aren't deleted presently.  When they are, some sort of locking might
  3293   // be needed.  Note, locking this can cause inversion problems with the
  3294   // caller in MetaspaceObj::is_metadata() function.
  3295   return space_list()->contains(ptr) ||
  3296          (using_class_space() && class_space_list()->contains(ptr));
  3297 }
  3299 void Metaspace::verify() {
  3300   vsm()->verify();
  3301   if (using_class_space()) {
  3302     class_vsm()->verify();
  3303   }
  3304 }
  3306 void Metaspace::dump(outputStream* const out) const {
  3307   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  3308   vsm()->dump(out);
  3309   if (using_class_space()) {
  3310     out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
  3311     class_vsm()->dump(out);
  3312   }
  3313 }
