Wed, 20 Mar 2013 08:04:54 -0400
8008217: CDS: Class data sharing limits the malloc heap on Solaris
Summary: In the 64-bit VM, move the CDS archive address to 32G on all platforms using the new flag SharedBaseAddress. In the 32-bit VM, set the CDS archive address to 3G on Linux and let other OSes pick the address.
Reviewed-by: kvn, dcubed, zgu, hseigel
/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;

// Set this constant to true to enable slow integrity checking of
// the free chunk lists.
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lot_chunk = 3;
size_t const allocation_from_dictionary_limit = 64 * K;

MetaWord* last_allocated = 0;
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 1 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};
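
// Example (assuming a 64-bit VM, BytesPerWord == 8): a SpecializedChunk of
// 128 words spans 1K of memory, a SmallChunk of 512 words spans 4K, and a
// MediumChunk of 8K words spans 64K.  The Class* variants are the sizes
// used for class metadata.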
static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}
// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;
// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspace from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk.  The current
// chunk is the chunk from which allocations are done.  Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
// A list of Metachunks.
class ChunkList VALUE_OBJ_CLASS_SPEC {
  // List of free chunks
  Metachunk* _head;

 public:
  // Constructor
  ChunkList() : _head(NULL) {}

  // Accessors
  Metachunk* head() { return _head; }
  void set_head(Metachunk* v) { _head = v; }

  // Link at head of the list
  void add_at_head(Metachunk* head, Metachunk* tail);
  void add_at_head(Metachunk* head);

  size_t sum_list_size();
  size_t sum_list_count();
  size_t sum_list_capacity();
};
// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
// count that includes all three.

class ChunkManager VALUE_OBJ_CLASS_SPEC {

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Dictionary of humongous (oversized) chunks
  ChunkTreeDictionary _humongous_dictionary;

  // Total size and count of the free chunks held by this
  // ChunkManager across all of the lists above.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}

  // Add or return a chunk to/from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Total of the space in the free chunks lists
  size_t free_chunks_total();
  size_t free_chunks_total_in_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add and remove from a list by size.  Selects the
  // list based on the size of the chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st);
};
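
// Sketch of the expected calling convention (illustrative; the concrete
// callers appear later in this file): the free lists are only touched
// under SpaceManager::expand_lock(), e.g.
//
//   {
//     MutexLockerEx ml(SpaceManager::expand_lock(),
//                      Mutex::_no_safepoint_check_flag);
//     Metachunk* chunk = chunk_manager->chunk_freelist_allocate(word_size);
//     if (chunk == NULL) {
//       // fall back to carving a new chunk out of a VirtualSpaceNode
//     }
//   }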
// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, and return a block to, the free list.
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
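
// Illustrative round trip (a sketch, not a caller from this file): freed
// metadata comes back through return_block() and may satisfy a later
// allocation of exactly the same size:
//
//   BlockFreelist* bf = ...;
//   bf->return_block(p, word_size);          // deallocation path
//   MetaWord* q = bf->get_block(word_size);  // may hand back the same block,
//                                            // or NULL if no exact-size fit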
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  // Convenience functions to access the _virtual_space
  char* low() const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
  ~VirtualSpaceNode();

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the virtual space
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);
  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t words, bool pre_touch = false);
  bool shrink_by(size_t words);

#ifdef ASSERT
  // Debug support
  static void verify_virtual_space_total();
  static void verify_virtual_space_count();
  void mangle();
#endif

  void print_on(outputStream* st) const;
};
// byte_size is the size of the associated VirtualSpace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so
  // other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}
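
// Per the change summary above, the default SharedBaseAddress in a 64-bit VM
// is 32G.  It can be overridden when dumping the archive, e.g. (the address
// below is purely illustrative):
//
//   java -Xshare:dump -XX:SharedBaseAddress=0x810000000
//
// Any value that does not collide with the Java heap works; the constructor
// above falls back to an unconstrained reservation if the request fails.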
// List of VirtualSpaces for metadata allocation.
// Each node has a _next link for the singly linked list and a MemRegion
// for the total space in its VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager _chunk_manager;

  // Can this virtual list allocate more than one space?  Also used to
  // determine whether to allocate unlimited small chunks in this virtual space.
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }

  // Sum of space in all virtual spaces and number of virtual spaces
  size_t _virtual_space_total;
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  // Get the first chunk for a Metaspace.  Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first VirtualSpace.
  void initialize(size_t word_size);

  size_t virtual_space_total() { return _virtual_space_total; }
  void inc_virtual_space_total(size_t v) {
    Atomic::add_ptr(v, &_virtual_space_total);
  }

  size_t virtual_space_count() { return _virtual_space_count; }
  void inc_virtual_space_count() {
    Atomic::inc_ptr(&_virtual_space_count);
  }

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
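
// Canonical iteration pattern, as used by the list methods below:
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* node = iter.get_next();
//     // ... visit node ...
//   }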
class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;
// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations and contains()
  Mutex* const _lock;

  // Chunk related size
  size_t _medium_chunk_bunch;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager.
  // If class space manager, small chunks are unlimited.
  static uint const _small_chunk_limit;
  bool has_small_chunk_limit() { return !vs_list()->is_class(); }

  // Sum of all space in allocated chunks
  size_t _allocation_total;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects VirtualSpace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  VirtualSpaceList* vs_list() const { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Mutex* lock,
               VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return vs_list()->is_class() ? (size_t) ClassSmallChunk : (size_t) SmallChunk; }
  size_t medium_chunk_size() { return vs_list()->is_class() ? (size_t) ClassMediumChunk : (size_t) MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocation_total() const { return _allocation_total; }
  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk.
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // calculate the chunk size to return (for expanding space
  // for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocation_total();
#endif
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);
// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  if (free_block == NULL) {
    return NULL;
  }

  return (MetaWord*) free_block;
}
void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}
// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      tty->print("VirtualSpaceNode::take_from_committed() not available "
                 SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Point the chunk at the space
  Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
  return result;
}
// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result = virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print();
  }
  return result;
}

// Shrink the virtual space (decommit some of the committed space)
bool VirtualSpaceNode::shrink_by(size_t words) {
  size_t bytes = words * BytesPerWord;
  virtual_space()->shrink_by(bytes);
  return true;
}
// Allocate a chunk out of the committed part of the virtual space.

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  return take_from_committed(chunk_word_size);
}

Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);

  if (new_chunk == NULL) {
    // Only a small part of the VirtualSpace is committed when first
    // allocated so committing more here can be expected.
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
                                                      page_size_words);
    expand_by(aligned_expand_vs_by_words, false);
    new_chunk = get_chunk_vs(chunk_word_size);
  }
  return new_chunk;
}
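
// Commit-size arithmetic (a sketch, assuming a 4K page and
// BytesPerWord == 8): page_size_words is 4096 / 8 = 512, so a request for
// 1000 words expands the committed region by align_size_up(1000, 512) =
// 1024 words, i.e. exactly two pages.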
bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this VirtualSpace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT
// VirtualSpaceList methods

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

// Space allocated from the VirtualSpaces: the sum of the used regions
// [bottom, top) minus the space held in free chunks.
size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each VirtualSpace
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
         err_msg("Total in free chunks " SIZE_FORMAT
                 " greater than total from virtual_spaces " SIZE_FORMAT,
                 allocated_by_vs, chunk_manager()->free_chunks_total()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total();
  return used;
}

// Space available in all MetadataVirtualspaces allocated
// for metadata.  This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
  size_t capacity = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    capacity += vsl->capacity_words_in_vs();
  }
  return capacity;
}
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  assert(initialization_succeeded,
         "VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  assert(succeeded, "VirtualSpaceList initialization should not fail");
  link_vs(class_entry, rs.size()/BytesPerWord);
}
// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry, vs_word_size);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_virtual_space_total(vs_word_size);
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(tty);
  }
}
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  // Allocate a chunk out of the current virtual space.
  if (next == NULL) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                      page_size_words);
    bool vs_expanded =
      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new VirtualSpace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
                                 " Fail instead of expanding the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}
Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}
// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinHeapFreeRatio and MaxHeapFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion.  If that is still
// not enough, expand by the size of the allocation (word_size) plus
// some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}
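
// Worked example of the cases above: a request no larger than
// MinMetaspaceExpansion grows the HWM by exactly MinMetaspaceExpansion; a
// request between the two bounds grows it by MaxMetaspaceExpansion; a
// request larger than MaxMetaspaceExpansion grows it by its own
// (page-aligned) size plus MinMetaspaceExpansion.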
bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (vsl == Metaspace::class_space_list()) return true;

  // If the user wants a limit, impose one.
  size_t max_metaspace_size_words = MaxMetaspaceSize / BytesPerWord;
  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
      vsl->capacity_words_sum() >= max_metaspace_size_words) {
    return false;
  }

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  if (vsl->capacity_words_sum() < metaspace_size_words ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_words);
    return true;
  } else {
    if (vsl->capacity_words_sum() < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                               " capacity_until_GC " SIZE_FORMAT
                               " capacity_words_sum " SIZE_FORMAT
                               " used_words_sum " SIZE_FORMAT
                               " free chunks " SIZE_FORMAT
                               " free chunks count " SIZE_FORMAT,
                               word_size,
                               capacity_until_GC(),
                               vsl->capacity_words_sum(),
                               vsl->used_words_sum(),
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
      return false;
    }
  }
}
// Variables are in bytes

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  VirtualSpaceList *vsl = Metaspace::space_list();

  size_t capacity_after_gc = vsl->capacity_bytes_sum();
  // Check to see if these two can be calculated without walking the CLDG
  size_t used_after_gc = vsl->used_bytes_sum();
  size_t capacity_until_GC = vsl->capacity_bytes_sum();
  size_t free_after_gc = capacity_until_GC - used_after_gc;

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "  free_after_gc     : %6.1fK"
                           "  used_after_gc     : %6.1fK"
                           "  capacity_after_gc : %6.1fK"
                           "  metaspace HWM     : %6.1fK",
                           free_after_gc / (double) K,
                           used_after_gc / (double) K,
                           capacity_after_gc / (double) K,
                           capacity_until_GC / (double) K);
    gclog_or_tty->print_cr("  "
                           "  free_percentage: %6.2f",
                           free_percentage);
  }

  if (capacity_until_GC < minimum_desired_capacity) {
    // If the capacity is below the minimum desired, increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t expand_words = expand_bytes / BytesPerWord;
      MetaspaceGC::inc_capacity_until_GC(expand_words);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
      gclog_or_tty->print_cr("    expanding:"
                             "  minimum_desired_capacity: %6.1fK"
                             "  expand_bytes: %6.1fK"
                             "  MinMetaspaceExpansion: %6.1fK"
                             "  new metaspace HWM: %6.1fK",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K,
                             MinMetaspaceExpansion / (double) K,
                             new_capacity_until_GC / (double) K);
    }
    return;
  }
  // No expansion, now see if we want to shrink
  size_t shrink_words = 0;
  // We would never want to shrink more than this.  The earlier return
  // guarantees capacity_until_GC >= minimum_desired_capacity here.
  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  capacity_until_GC: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             capacity_until_GC / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_words = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_words = shrink_words / 100 * current_shrink_factor;
      assert(shrink_words <= max_shrink_words,
             err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                     shrink_words, max_shrink_words));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                               "  shrinking:"
                               "  initSize: %.1fK"
                               "  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K,
                               maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                               "  shrink_words: %.1fK"
                               "  current_shrink_factor: %d"
                               "  new shrink factor: %d"
                               "  MinMetaspaceExpansion: %.1fK",
                               shrink_words / (double) K,
                               current_shrink_factor,
                               _shrink_factor,
                               MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_words >= MinMetaspaceExpansion) {
    VirtualSpaceNode* csp = vsl->current_virtual_space();
    size_t available_to_shrink = csp->capacity_words_in_vs() -
      csp->used_words_in_vs();
    shrink_words = MIN2(shrink_words, available_to_shrink);
    csp->shrink_by(shrink_words);
    MetaspaceGC::dec_capacity_until_GC(shrink_words);
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
    }
  }
  assert(vsl->used_bytes_sum() == used_after_gc &&
         used_after_gc <= vsl->capacity_bytes_sum(),
         "sanity check");
}
// Metadebug methods

void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size) {
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lot_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        break;
      }
      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: " SIZE_FORMAT ") ",
                            sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total " SIZE_FORMAT
                               " count " SIZE_FORMAT,
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}
void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size) {
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}
#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif
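
// Sketch of intended use: in debug builds, running with
// -XX:+MetadataAllocationFailALot (and optionally
// -XX:MetadataAllocationFailALotInterval=<n>, both used above) makes
// allocation sites that call test_metadata_failure() periodically behave
// as if the allocation failed, exercising the out-of-memory paths.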
// ChunkList methods

size_t ChunkList::sum_list_size() {
  size_t result = 0;
  Metachunk* cur = head();
  while (cur != NULL) {
    result += cur->word_size();
    cur = cur->next();
  }
  return result;
}

size_t ChunkList::sum_list_count() {
  size_t result = 0;
  Metachunk* cur = head();
  while (cur != NULL) {
    result++;
    cur = cur->next();
  }
  return result;
}

size_t ChunkList::sum_list_capacity() {
  size_t result = 0;
  Metachunk* cur = head();
  while (cur != NULL) {
    result += cur->capacity_word_size();
    cur = cur->next();
  }
  return result;
}

void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(head == tail || tail->next() == NULL,
         "Either not the tail, or the tail has already been added to a list");

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("ChunkList::add_at_head(head, tail): ");
    Metachunk* cur = head;
    while (cur != NULL) {
      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
      cur = cur->next();
    }
    gclog_or_tty->print_cr("");
  }

  if (tail != NULL) {
    tail->set_next(_head);
  }
  set_head(head);
}

void ChunkList::add_at_head(Metachunk* list) {
  if (list == NULL) {
    // Nothing to add
    return;
  }
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* head = list;
  Metachunk* tail = list;
  Metachunk* cur = head->next();
  // Search for the tail since it is not passed.
  while (cur != NULL) {
    tail = cur;
    cur = cur->next();
  }
  add_at_head(head, tail);
}
// ChunkManager methods

// Verification of _free_chunks_total and _free_chunks_count does not
// work with the CMS collector because its use of additional locks
// complicates the mutex deadlock detection, but it can still be useful
// for detecting errors in the chunk accounting with other collectors.

size_t ChunkManager::free_chunks_total() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    slow_locked_verify_free_chunks_total();
  }
#endif
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_in_bytes() {
  return free_chunks_total() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}
void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
         err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_total,
                 sum_free_chunks()));
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
         err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_count,
                 sum_free_chunks_count()));
}

void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}

void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
               sum_free_chunks(), sum_free_chunks_count());
}
ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  return &_free_chunks[index];
}

// These methods that sum the free chunk lists are used in printing
// methods that are used in product builds.
size_t ChunkManager::sum_free_chunks() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);

    if (list == NULL) {
      continue;
    }

    result = result + list->sum_list_capacity();
  }
  result = result + humongous_dictionary()->total_size();
  return result;
}

size_t ChunkManager::sum_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  size_t count = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
    ChunkList* list = free_chunks(i);
    if (list == NULL) {
      continue;
    }
    count = count + list->sum_list_count();
  }
  count = count + humongous_dictionary()->total_free_blocks();
  return count;
}

ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
  ChunkIndex index = list_index(word_size);
  assert(index < HumongousIndex, "No humongous list");
  return free_chunks(index);
}
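
// Example mapping (a sketch; list_index() is declared in ChunkManager
// above): in the non-class metaspace a 128-word request maps to
// SpecializedIndex, a 512-word request to SmallIndex, an 8K-word request to
// MediumIndex, and anything larger is HumongousIndex, which is served from
// the dictionary rather than from one of these lists.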
void ChunkManager::free_chunks_put(Metachunk* chunk) {
  assert_lock_strong(SpaceManager::expand_lock());
  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  chunk->set_next(free_list->head());
  free_list->set_head(chunk);
  // chunk is being returned to the chunk free list
  inc_free_chunks_total(chunk->capacity_word_size());
  slow_locked_verify();
}

void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  // The deallocation of a chunk originates in the freelist
  // management code for a Metaspace and does not hold the
  // lock.
  assert(chunk != NULL, "Deallocating NULL");
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();
  if (TraceMetadataChunkAllocation) {
    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
                  PTR_FORMAT " size " SIZE_FORMAT,
                  chunk, chunk->word_size());
  }
  free_chunks_put(chunk);
}
Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  slow_locked_verify();

  Metachunk* chunk = NULL;
  if (list_index(word_size) != HumongousIndex) {
    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();
    debug_only(Metachunk* debug_head = chunk;)

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->set_head(chunk->next());

    // Chunk is being removed from the chunks free list.
    dec_free_chunks_total(chunk->capacity_word_size());

    if (TraceMetadataChunkAllocation && Verbose) {
      tty->print_cr("ChunkManager::free_chunks_get: free_list "
                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                    free_list, chunk, chunk->word_size());
    }
  } else {
    chunk = humongous_dictionary()->get_chunk(
      word_size,
      FreeBlockDictionary<Metachunk>::atLeast);

    if (chunk != NULL) {
      if (TraceMetadataHumongousAllocation) {
        size_t waste = chunk->word_size() - word_size;
        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
                      " for requested size " SIZE_FORMAT
                      " waste " SIZE_FORMAT,
                      chunk->word_size(), word_size, waste);
      }
      // Chunk is being removed from the chunks free list.
      dec_free_chunks_total(chunk->capacity_word_size());
#ifdef ASSERT
      chunk->set_is_free(false);
#endif
    } else {
      return NULL;
    }
  }

  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);
  slow_locked_verify();
  return chunk;
}
Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  if (TraceMetadataChunkAllocation) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->sum_list_count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
               PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
               this, chunk, chunk->word_size(), list_count);
    locked_print_free_chunks(tty);
  }

  return chunk;
}
1697 void ChunkManager::print_on(outputStream* out) {
1698 if (PrintFLSStatistics != 0) {
1699 humongous_dictionary()->report_statistics();
1700 }
1701 }
1703 // SpaceManager methods
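// Picks the first data and class chunk sizes for a new space manager based
// on the metaspace type: the boot metaspace starts with the large
// precomputed first chunks, the shared read-only/read-write metaspaces size
// their data chunk to hold the whole shared region, and short-lived
// anonymous and reflection metaspaces start with the smallest
// (specialized) chunks.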
1705 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1706 size_t* chunk_word_size,
1707 size_t* class_chunk_word_size) {
1708 switch (type) {
1709 case Metaspace::BootMetaspaceType:
1710 *chunk_word_size = Metaspace::first_chunk_word_size();
1711 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1712 break;
1713 case Metaspace::ROMetaspaceType:
1714 *chunk_word_size = SharedReadOnlySize / wordSize;
1715 *class_chunk_word_size = ClassSpecializedChunk;
1716 break;
1717 case Metaspace::ReadWriteMetaspaceType:
1718 *chunk_word_size = SharedReadWriteSize / wordSize;
1719 *class_chunk_word_size = ClassSpecializedChunk;
1720 break;
1721 case Metaspace::AnonymousMetaspaceType:
1722 case Metaspace::ReflectionMetaspaceType:
1723 *chunk_word_size = SpecializedChunk;
1724 *class_chunk_word_size = ClassSpecializedChunk;
1725 break;
1726 default:
1727 *chunk_word_size = SmallChunk;
1728 *class_chunk_word_size = ClassSmallChunk;
1729 break;
1730 }
1731 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1732 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1733 " class " SIZE_FORMAT,
1734 *chunk_word_size, *class_chunk_word_size));
1735 }
1737 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1738 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1739 size_t free = 0;
1740 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1741 Metachunk* chunk = chunks_in_use(i);
1742 while (chunk != NULL) {
1743 free += chunk->free_word_size();
1744 chunk = chunk->next();
1745 }
1746 }
1747 return free;
1748 }
1750 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1751 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1752 size_t result = 0;
1753 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1754 result += sum_waste_in_chunks_in_use(i);
1755 }
1757 return result;
1758 }
1760 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1761 size_t result = 0;
1762 Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
1764 // current chunk from which allocations are still being done.
  if (chunk != NULL) {
    while (chunk != NULL && chunk != current_chunk()) {
      result += chunk->free_word_size();
      chunk = chunk->next();
    }
  }
1773 return result;
1774 }
1776 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1777 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1778 size_t sum = 0;
1779 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1780 Metachunk* chunk = chunks_in_use(i);
1781 while (chunk != NULL) {
1784 sum += chunk->capacity_word_size();
1785 chunk = chunk->next();
1786 }
1787 }
1788 return sum;
1789 }
1791 size_t SpaceManager::sum_count_in_chunks_in_use() {
1792 size_t count = 0;
1793 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1794 count = count + sum_count_in_chunks_in_use(i);
1795 }
1797 return count;
1798 }
1800 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1801 size_t count = 0;
1802 Metachunk* chunk = chunks_in_use(i);
1803 while (chunk != NULL) {
1804 count++;
1805 chunk = chunk->next();
1806 }
1807 return count;
1808 }
1811 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1812 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1813 size_t used = 0;
1814 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1815 Metachunk* chunk = chunks_in_use(i);
1816 while (chunk != NULL) {
1817 used += chunk->used_word_size();
1818 chunk = chunk->next();
1819 }
1820 }
1821 return used;
1822 }
1824 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1826 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1827 Metachunk* chunk = chunks_in_use(i);
1828 st->print("SpaceManager: %s " PTR_FORMAT,
1829 chunk_size_name(i), chunk);
1830 if (chunk != NULL) {
1831 st->print_cr(" free " SIZE_FORMAT,
1832 chunk->free_word_size());
1833 } else {
1834 st->print_cr("");
1835 }
1836 }
1838 vs_list()->chunk_manager()->locked_print_free_chunks(st);
1839 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1840 }
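// Illustrative walk-through of the sizing policy below (not normative): as
// long as no medium chunk has been allocated and fewer than
// _small_chunk_limit small chunks are in use, a request that fits in a
// small chunk (including Metachunk overhead) gets a small chunk; the first
// request that doesn't fit, or that arrives after the limit, promotes the
// manager to medium chunks. A request larger than a medium chunk becomes
// humongous and is rounded up to the eight-word granularity.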
1842 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1844 // Decide between a small chunk and a medium chunk. Up to
1845 // _small_chunk_limit small chunks can be allocated but
1846 // once a medium chunk has been allocated, no more small
1847 // chunks will be allocated.
1848 size_t chunk_word_size;
1849 if (chunks_in_use(MediumIndex) == NULL &&
1850 (!has_small_chunk_limit() ||
1851 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
1852 chunk_word_size = (size_t) small_chunk_size();
1853 if (word_size + Metachunk::overhead() > small_chunk_size()) {
1854 chunk_word_size = medium_chunk_size();
1855 }
1856 } else {
1857 chunk_word_size = medium_chunk_size();
1858 }
1860 // Might still need a humongous chunk. Enforce an
1861 // eight word granularity to facilitate reuse (some
1862 // wastage but better chance of reuse).
1863 size_t if_humongous_sized_chunk =
1864 align_size_up(word_size + Metachunk::overhead(),
1865 HumongousChunkGranularity);
1866 chunk_word_size =
1867 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1869 assert(!SpaceManager::is_humongous(word_size) ||
1870 chunk_word_size == if_humongous_sized_chunk,
1871 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1872 " chunk_word_size " SIZE_FORMAT,
1873 word_size, chunk_word_size));
1874 if (TraceMetadataHumongousAllocation &&
1875 SpaceManager::is_humongous(word_size)) {
1876 gclog_or_tty->print_cr("Metadata humongous allocation:");
    gclog_or_tty->print_cr("  word_size " SIZE_FORMAT, word_size);
    gclog_or_tty->print_cr("  chunk_word_size " SIZE_FORMAT,
                           chunk_word_size);
    gclog_or_tty->print_cr("    chunk overhead " SIZE_FORMAT,
                           Metachunk::overhead());
1882 }
1883 return chunk_word_size;
1884 }
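// Slow path for allocate_work(): called only when the current chunk cannot
// satisfy the request, it sizes a new chunk, gets it from the virtual space
// list (possibly via the chunk free lists), links it in as the current
// chunk, and allocates from it.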
1886 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1887 assert(vs_list()->current_virtual_space() != NULL,
1888 "Should have been set");
1889 assert(current_chunk() == NULL ||
1890 current_chunk()->allocate(word_size) == NULL,
1891 "Don't need to expand");
1892 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1894 if (TraceMetadataChunkAllocation && Verbose) {
1895 size_t words_left = 0;
1896 size_t words_used = 0;
1897 if (current_chunk() != NULL) {
1898 words_left = current_chunk()->free_word_size();
1899 words_used = current_chunk()->used_word_size();
1900 }
1901 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
1902 " words " SIZE_FORMAT " words used " SIZE_FORMAT
1903 " words left",
1904 word_size, words_used, words_left);
1905 }
1907 // Get another chunk out of the virtual space
1908 size_t grow_chunks_by_words = calc_chunk_size(word_size);
1909 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
1911 // If a chunk was available, add it to the in-use chunk list
1912 // and do an allocation from it.
1913 if (next != NULL) {
1914 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
1915 // Add to this manager's list of chunks in use.
1916 add_chunk(next, false);
1917 return next->allocate(word_size);
1918 }
1919 return NULL;
1920 }
1922 void SpaceManager::print_on(outputStream* st) const {
1924 for (ChunkIndex i = ZeroIndex;
1925 i < NumberOfInUseLists ;
1926 i = next_chunk_index(i) ) {
    st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
                 chunks_in_use(i),
                 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
1930 }
1931 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
1932 " Humongous " SIZE_FORMAT,
1933 sum_waste_in_chunks_in_use(SmallIndex),
1934 sum_waste_in_chunks_in_use(MediumIndex),
1935 sum_waste_in_chunks_in_use(HumongousIndex));
1936 // block free lists
1937 if (block_freelists() != NULL) {
1938 st->print_cr("total in block free lists " SIZE_FORMAT,
1939 block_freelists()->total_size());
1940 }
1941 }
1943 SpaceManager::SpaceManager(Mutex* lock,
1944 VirtualSpaceList* vs_list) :
1945 _vs_list(vs_list),
1946 _allocation_total(0),
1947 _lock(lock)
1948 {
1949 initialize();
1950 }
1952 void SpaceManager::initialize() {
1953 Metadebug::init_allocation_fail_alot_count();
1954 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1955 _chunks_in_use[i] = NULL;
1956 }
1957 _current_chunk = NULL;
1958 if (TraceMetadataChunkAllocation && Verbose) {
1959 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
1960 }
1961 }
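// The destructor returns every chunk this manager holds to the global
// ChunkManager: the free-chunk totals are updated first (while the in-use
// lists still describe the chunks), the specialized/small/medium lists are
// then spliced onto the matching free lists, and the humongous chunks are
// returned to the dictionary one at a time.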
1963 SpaceManager::~SpaceManager() {
1964 MutexLockerEx fcl(SpaceManager::expand_lock(),
1965 Mutex::_no_safepoint_check_flag);
1967 ChunkManager* chunk_manager = vs_list()->chunk_manager();
1969 chunk_manager->slow_locked_verify();
1971 if (TraceMetadataChunkAllocation && Verbose) {
1972 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
1973 locked_print_chunks_in_use_on(gclog_or_tty);
1974 }
1976 // Mangle freed memory.
1977 NOT_PRODUCT(mangle_freed_chunks();)
1979 // Have to update before the chunks_in_use lists are emptied
1980 // below.
1981 chunk_manager->inc_free_chunks_total(sum_capacity_in_chunks_in_use(),
1982 sum_count_in_chunks_in_use());
1984 // Add all the chunks in use by this space manager
1985 // to the global list of free chunks.
1987 // Follow each list of chunks-in-use and add them to the
1988 // free lists. Each list is NULL terminated.
1990 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
1991 if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
                             sum_count_in_chunks_in_use(i),
                             chunk_size_name(i));
1995 }
1996 Metachunk* chunks = chunks_in_use(i);
1997 chunk_manager->free_chunks(i)->add_at_head(chunks);
1998 set_chunks_in_use(i, NULL);
1999 if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("updated freelist count " SIZE_FORMAT " %s",
                             chunk_manager->free_chunks(i)->sum_list_count(),
                             chunk_size_name(i));
2003 }
2004 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2005 }
2007 // The medium chunk case may be optimized by passing the head and
2008 // tail of the medium chunk list to add_at_head(). The tail is often
2009 // the current chunk but there are probably exceptions.
2011 // Humongous chunks
2012 if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
                           sum_count_in_chunks_in_use(HumongousIndex),
                           chunk_size_name(HumongousIndex));
2016 gclog_or_tty->print("Humongous chunk dictionary: ");
2017 }
2018 // Humongous chunks are never the current chunk.
2019 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2021 while (humongous_chunks != NULL) {
2022 #ifdef ASSERT
2023 humongous_chunks->set_is_free(true);
2024 #endif
2025 if (TraceMetadataChunkAllocation && Verbose) {
2026 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2027 humongous_chunks,
2028 humongous_chunks->word_size());
2029 }
2030 assert(humongous_chunks->word_size() == (size_t)
2031 align_size_up(humongous_chunks->word_size(),
2032 HumongousChunkGranularity),
2033 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2034 " granularity %d",
2035 humongous_chunks->word_size(), HumongousChunkGranularity));
2036 Metachunk* next_humongous_chunks = humongous_chunks->next();
2037 chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
2038 humongous_chunks = next_humongous_chunks;
2039 }
2040 if (TraceMetadataChunkAllocation && Verbose) {
2041 gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
                           chunk_manager->humongous_dictionary()->total_count(),
                           chunk_size_name(HumongousIndex));
2045 }
2046 set_chunks_in_use(HumongousIndex, NULL);
2047 chunk_manager->slow_locked_verify();
2048 }
2050 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2051 switch (index) {
2052 case SpecializedIndex:
2053 return "Specialized";
2054 case SmallIndex:
2055 return "Small";
2056 case MediumIndex:
2057 return "Medium";
2058 case HumongousIndex:
2059 return "Humongous";
2060 default:
2061 return NULL;
2062 }
2063 }
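// Maps an exact chunk size back to its free-list index. Only the fixed
// specialized/small/medium sizes (and their class-space counterparts) have
// an indexed list; any other size is asserted to be larger than a medium
// chunk and treated as humongous.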
2065 ChunkIndex ChunkManager::list_index(size_t size) {
2066 switch (size) {
2067 case SpecializedChunk:
2068 assert(SpecializedChunk == ClassSpecializedChunk,
2069 "Need branch for ClassSpecializedChunk");
2070 return SpecializedIndex;
2071 case SmallChunk:
2072 case ClassSmallChunk:
2073 return SmallIndex;
2074 case MediumChunk:
2075 case ClassMediumChunk:
2076 return MediumIndex;
2077 default:
2078 assert(size > MediumChunk || size > ClassMediumChunk,
2079 "Not a humongous chunk");
2080 return HumongousIndex;
2081 }
2082 }
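// Returns a block to this manager's block free lists for later reuse.
// Blocks smaller than the tree-dictionary minimum ("dark matter") must not
// reach this point; Metaspace::deallocate() filters them out first.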
2084 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2085 assert_lock_strong(_lock);
2086 size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2087 assert(word_size >= min_size,
2088 err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2089 block_freelists()->return_block(p, word_size);
2090 }
2092 // Adds a chunk to the list of chunks in use.
2093 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2095 assert(new_chunk != NULL, "Should not be NULL");
2096 assert(new_chunk->next() == NULL, "Should not be on a list");
2098 new_chunk->reset_empty();
  // Find the correct list and set the current
2101 // chunk for that list.
2102 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2104 if (index != HumongousIndex) {
2105 set_current_chunk(new_chunk);
2106 new_chunk->set_next(chunks_in_use(index));
2107 set_chunks_in_use(index, new_chunk);
2108 } else {
2109 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2110 // small, so small will be null. Link this first chunk as the current
2111 // chunk.
2112 if (make_current) {
2113 // Set as the current chunk but otherwise treat as a humongous chunk.
2114 set_current_chunk(new_chunk);
2115 }
  // Link at head. The _current_chunk only points to a humongous chunk for
  // the null class loader metaspace (class and data virtual space managers),
  // so it will not point to the tail of the humongous chunks list.
2120 new_chunk->set_next(chunks_in_use(HumongousIndex));
2121 set_chunks_in_use(HumongousIndex, new_chunk);
2123 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2124 }
2126 assert(new_chunk->is_empty(), "Not ready for reuse");
2127 if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
                        sum_count_in_chunks_in_use());
2130 new_chunk->print_on(gclog_or_tty);
    vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2132 }
2133 }
2135 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2136 size_t grow_chunks_by_words) {
2138 Metachunk* next = vs_list()->get_new_chunk(word_size,
2139 grow_chunks_by_words,
2140 medium_chunk_bunch());
2142 if (TraceMetadataHumongousAllocation &&
2143 SpaceManager::is_humongous(next->word_size())) {
    gclog_or_tty->print_cr("  new humongous chunk word size " SIZE_FORMAT,
                           next->word_size());
2146 }
2148 return next;
2149 }
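// Illustrative raw-size computation for allocate() below, assuming 8-byte
// words (64-bit): a request for 10 words becomes 80 bytes, Metablock
// overhead is added, the result is raised to at least
// Metablock::min_block_byte_size(), ARENA_ALIGNed, and converted back to a
// raw word count before the block free lists or chunks are tried.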
2151 MetaWord* SpaceManager::allocate(size_t word_size) {
2152 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2154 // If only the dictionary is going to be used (i.e., no
2155 // indexed free list), then there is a minimum size requirement.
2156 // MinChunkSize is a placeholder for the real minimum size JJJ
2157 size_t byte_size = word_size * BytesPerWord;
2159 size_t byte_size_with_overhead = byte_size + Metablock::overhead();
2161 size_t raw_bytes_size = MAX2(byte_size_with_overhead,
2162 Metablock::min_block_byte_size());
2163 raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
2164 size_t raw_word_size = raw_bytes_size / BytesPerWord;
2165 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
2167 BlockFreelist* fl = block_freelists();
2168 MetaWord* p = NULL;
2169 // Allocation from the dictionary is expensive in the sense that
2170 // the dictionary has to be searched for a size. Don't allocate
2171 // from the dictionary until it starts to get fat. Is this
  // a reasonable policy? Maybe a skinny dictionary is fast enough
2173 // for allocations. Do some profiling. JJJ
2174 if (fl->total_size() > allocation_from_dictionary_limit) {
2175 p = fl->get_block(raw_word_size);
2176 }
2177 if (p == NULL) {
2178 p = allocate_work(raw_word_size);
2179 }
2180 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2182 return p;
2183 }
// Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks).
2187 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2188 assert_lock_strong(_lock);
2189 #ifdef ASSERT
2190 if (Metadebug::test_metadata_failure()) {
2191 return NULL;
2192 }
2193 #endif
2194 // Is there space in the current chunk?
2195 MetaWord* result = NULL;
2197 // For DumpSharedSpaces, only allocate out of the current chunk which is
2198 // never null because we gave it the size we wanted. Caller reports out
2199 // of memory if this returns null.
2200 if (DumpSharedSpaces) {
2201 assert(current_chunk() != NULL, "should never happen");
2202 inc_allocation_total(word_size);
2203 return current_chunk()->allocate(word_size); // caller handles null result
2204 }
2205 if (current_chunk() != NULL) {
2206 result = current_chunk()->allocate(word_size);
2207 }
2209 if (result == NULL) {
2210 result = grow_and_allocate(word_size);
2211 }
  if (result != NULL) {
2213 inc_allocation_total(word_size);
2214 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2215 "Head of the list is being allocated");
2216 }
2218 return result;
2219 }
2221 void SpaceManager::verify() {
2222 // If there are blocks in the dictionary, then
  // verification of chunks does not work since
2224 // being in the dictionary alters a chunk.
2225 if (block_freelists()->total_size() == 0) {
2226 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2227 Metachunk* curr = chunks_in_use(i);
2228 while (curr != NULL) {
2229 curr->verify();
2230 verify_chunk_size(curr);
2231 curr = curr->next();
2232 }
2233 }
2234 }
2235 }
2237 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2238 assert(is_humongous(chunk->word_size()) ||
2239 chunk->word_size() == medium_chunk_size() ||
2240 chunk->word_size() == small_chunk_size() ||
2241 chunk->word_size() == specialized_chunk_size(),
2242 "Chunk size is wrong");
2243 return;
2244 }
2246 #ifdef ASSERT
2247 void SpaceManager::verify_allocation_total() {
2248 // Verification is only guaranteed at a safepoint.
2249 if (SafepointSynchronize::is_at_safepoint()) {
    gclog_or_tty->print_cr("SpaceManager " PTR_FORMAT " allocation_total " SIZE_FORMAT
2251 " sum_used_in_chunks_in_use " SIZE_FORMAT,
2252 this,
2253 allocation_total(),
2254 sum_used_in_chunks_in_use());
2255 }
2256 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2257 assert(allocation_total() == sum_used_in_chunks_in_use(),
2258 err_msg("allocation total is not consistent " SIZE_FORMAT
2259 " vs " SIZE_FORMAT,
2260 allocation_total(), sum_used_in_chunks_in_use()));
2261 }
2263 #endif
2265 void SpaceManager::dump(outputStream* const out) const {
2266 size_t curr_total = 0;
2267 size_t waste = 0;
2268 uint i = 0;
2269 size_t used = 0;
2270 size_t capacity = 0;
2272 // Add up statistics for all chunks in this SpaceManager.
2273 for (ChunkIndex index = ZeroIndex;
2274 index < NumberOfInUseLists;
2275 index = next_chunk_index(index)) {
2276 for (Metachunk* curr = chunks_in_use(index);
2277 curr != NULL;
2278 curr = curr->next()) {
2279 out->print("%d) ", i++);
2280 curr->print_on(out);
2281 if (TraceMetadataChunkAllocation && Verbose) {
2282 block_freelists()->print_on(out);
2283 }
2284 curr_total += curr->word_size();
2285 used += curr->used_word_size();
2286 capacity += curr->capacity_word_size();
      waste += curr->free_word_size() + curr->overhead();
2288 }
2289 }
2291 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2292 // Free space isn't wasted.
2293 waste -= free;
2295 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2296 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2297 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2298 }
2300 #ifndef PRODUCT
2301 void SpaceManager::mangle_freed_chunks() {
2302 for (ChunkIndex index = ZeroIndex;
2303 index < NumberOfInUseLists;
2304 index = next_chunk_index(index)) {
2305 for (Metachunk* curr = chunks_in_use(index);
2306 curr != NULL;
2307 curr = curr->next()) {
2308 curr->mangle();
2309 }
2310 }
2311 }
2312 #endif // PRODUCT
2314 // MetaspaceAux
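// MetaspaceAux sums statistics across all loaders by walking the
// ClassLoaderDataGraph, so its figures are only guaranteed to be
// consistent when gathered at a safepoint.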
2316 size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
2317 size_t used = 0;
2318 ClassLoaderDataGraphMetaspaceIterator iter;
2319 while (iter.repeat()) {
2320 Metaspace* msp = iter.get_next();
2321 // Sum allocation_total for each metaspace
2322 if (msp != NULL) {
2323 used += msp->used_words(mdtype);
2324 }
2325 }
2326 return used * BytesPerWord;
2327 }
2329 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2330 size_t free = 0;
2331 ClassLoaderDataGraphMetaspaceIterator iter;
2332 while (iter.repeat()) {
2333 Metaspace* msp = iter.get_next();
2334 if (msp != NULL) {
2335 free += msp->free_words(mdtype);
2336 }
2337 }
2338 return free * BytesPerWord;
2339 }
2341 size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
2342 size_t capacity = free_chunks_total(mdtype);
2343 ClassLoaderDataGraphMetaspaceIterator iter;
2344 while (iter.repeat()) {
2345 Metaspace* msp = iter.get_next();
2346 if (msp != NULL) {
2347 capacity += msp->capacity_words(mdtype);
2348 }
2349 }
2350 return capacity * BytesPerWord;
2351 }
2353 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2354 size_t reserved = (mdtype == Metaspace::ClassType) ?
2355 Metaspace::class_space_list()->virtual_space_total() :
2356 Metaspace::space_list()->virtual_space_total();
2357 return reserved * BytesPerWord;
2358 }
2360 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2362 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2363 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2364 Metaspace::class_space_list()->chunk_manager() :
2365 Metaspace::space_list()->chunk_manager();
2366 chunk->slow_verify();
2367 return chunk->free_chunks_total();
2368 }
2370 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2371 return free_chunks_total(mdtype) * BytesPerWord;
2372 }
2374 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2375 gclog_or_tty->print(", [Metaspace:");
2376 if (PrintGCDetails && Verbose) {
2377 gclog_or_tty->print(" " SIZE_FORMAT
2378 "->" SIZE_FORMAT
2379 "(" SIZE_FORMAT "/" SIZE_FORMAT ")",
2380 prev_metadata_used,
2381 used_in_bytes(),
2382 capacity_in_bytes(),
2383 reserved_in_bytes());
2384 } else {
2385 gclog_or_tty->print(" " SIZE_FORMAT "K"
2386 "->" SIZE_FORMAT "K"
2387 "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)",
2388 prev_metadata_used / K,
                        used_in_bytes() / K,
                        capacity_in_bytes() / K,
                        reserved_in_bytes() / K);
2392 }
2394 gclog_or_tty->print("]");
2395 }
// This is printed when PrintGCDetails is enabled.
2398 void MetaspaceAux::print_on(outputStream* out) {
2399 Metaspace::MetadataType ct = Metaspace::ClassType;
2400 Metaspace::MetadataType nct = Metaspace::NonClassType;
2402 out->print_cr(" Metaspace total "
2403 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2404 " reserved " SIZE_FORMAT "K",
2405 capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
2406 out->print_cr(" data space "
2407 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2408 " reserved " SIZE_FORMAT "K",
2409 capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
2410 out->print_cr(" class space "
2411 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2412 " reserved " SIZE_FORMAT "K",
2413 capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
2414 }
2416 // Print information for class space and data space separately.
2417 // This is almost the same as above.
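// The identity checked below: words used in chunks + words still free in
// in-use chunks + capacity of free chunks must equal capacity_in_bytes(),
// which itself counts both allocated-chunk and free-chunk capacity.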
2418 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2419 size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2420 size_t capacity_bytes = capacity_in_bytes(mdtype);
2421 size_t used_bytes = used_in_bytes(mdtype);
2422 size_t free_bytes = free_in_bytes(mdtype);
2423 size_t used_and_free = used_bytes + free_bytes +
2424 free_chunks_capacity_bytes;
2425 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2426 "K + unused in chunks " SIZE_FORMAT "K + "
2427 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2428 "K capacity in allocated chunks " SIZE_FORMAT "K",
2429 used_bytes / K,
2430 free_bytes / K,
2431 free_chunks_capacity_bytes / K,
2432 used_and_free / K,
2433 capacity_bytes / K);
2434 // Accounting can only be correct if we got the values during a safepoint
2435 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2436 }
2438 // Print total fragmentation for class and data metaspaces separately
2439 void MetaspaceAux::print_waste(outputStream* out) {
2441 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
2442 size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
2443 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
2444 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
2446 ClassLoaderDataGraphMetaspaceIterator iter;
2447 while (iter.repeat()) {
2448 Metaspace* msp = iter.get_next();
2449 if (msp != NULL) {
2450 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2451 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2452 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2453 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2454 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2455 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2456 large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2457 large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2459 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2460 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2461 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2462 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2463 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2464 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2465 cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2466 cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2467 }
2468 }
2469 out->print_cr("Total fragmentation waste (words) doesn't count free space");
  out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " humongous(s) " SIZE_FORMAT,
                specialized_count, specialized_waste, small_count,
                small_waste, medium_count, medium_waste,
                large_count, large_waste);
  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
                SIZE_FORMAT " humongous(s) " SIZE_FORMAT,
                cls_specialized_count, cls_specialized_waste,
                cls_small_count, cls_small_waste,
                cls_medium_count, cls_medium_waste,
                cls_large_count, cls_large_waste);
2479 }
2481 // Dump global metaspace things from the end of ClassLoaderDataGraph
2482 void MetaspaceAux::dump(outputStream* out) {
2483 out->print_cr("All Metaspace:");
2484 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2485 out->print("class space: "); print_on(out, Metaspace::ClassType);
2486 print_waste(out);
2487 }
2489 void MetaspaceAux::verify_free_chunks() {
2490 Metaspace::space_list()->chunk_manager()->verify();
2491 Metaspace::class_space_list()->chunk_manager()->verify();
2492 }
2494 // Metaspace methods
2496 size_t Metaspace::_first_chunk_word_size = 0;
2497 size_t Metaspace::_first_class_chunk_word_size = 0;
2499 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2500 initialize(lock, type);
2501 }
2503 Metaspace::~Metaspace() {
2504 delete _vsm;
2505 delete _class_vsm;
2506 }
2508 VirtualSpaceList* Metaspace::_space_list = NULL;
2509 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2511 #define VIRTUALSPACEMULTIPLIER 2
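// At CDS dump time the initial virtual space must hold all four shared
// regions: the read-only and read-write metaspace chunks are carved out of
// it and the remainder backs the misc data and code regions. In a normal
// run an existing archive is mapped first, so later reservations cannot
// collide with it, and the initial virtual space is a small multiple
// (VIRTUALSPACEMULTIPLIER) of the boot class loader's first chunk.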
2513 void Metaspace::global_initialize() {
2514 // Initialize the alignment for shared spaces.
2515 int max_alignment = os::vm_page_size();
2516 MetaspaceShared::set_max_alignment(max_alignment);
2518 if (DumpSharedSpaces) {
2519 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2520 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2521 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2522 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2524 // Initialize with the sum of the shared space sizes. The read-only
2525 // and read write metaspace chunks will be allocated out of this and the
2526 // remainder is the misc code and data chunks.
2527 size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2528 SharedMiscDataSize + SharedMiscCodeSize,
2529 os::vm_allocation_granularity());
2530 size_t word_size = total/wordSize;
2531 _space_list = new VirtualSpaceList(word_size);
2532 } else {
2533 // If using shared space, open the file that contains the shared space
2534 // and map in the memory before initializing the rest of metaspace (so
2535 // the addresses don't conflict)
2536 if (UseSharedSpaces) {
2537 FileMapInfo* mapinfo = new FileMapInfo();
2538 memset(mapinfo, 0, sizeof(FileMapInfo));
2540 // Open the shared archive file, read and validate the header. If
2541 // initialization fails, shared spaces [UseSharedSpaces] are
2542 // disabled and the file is closed.
2543 // Map in spaces now also
2544 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2545 FileMapInfo::set_current_info(mapinfo);
2546 } else {
2547 assert(!mapinfo->is_open() && !UseSharedSpaces,
2548 "archive file not closed or shared spaces not disabled.");
2549 }
2550 }
2552 // Initialize these before initializing the VirtualSpaceList
2553 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2554 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2555 // Make the first class chunk bigger than a medium chunk so it's not put
2556 // on the medium chunk list. The next chunk will be small and progress
  // from there.  This size was calibrated by running -version.
2558 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2559 (ClassMetaspaceSize/BytesPerWord)*2);
2560 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2561 // Arbitrarily set the initial virtual space to a multiple
2562 // of the boot class loader size.
2563 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2564 // Initialize the list of virtual spaces.
2565 _space_list = new VirtualSpaceList(word_size);
2566 }
2567 }
2569 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2570 // Java heap because the compression algorithm is the same for each. The
2571 // argument passed in is at the top of the compressed space
2572 void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
2574 assert(rs.size() >= ClassMetaspaceSize,
2575 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2576 _class_space_list = new VirtualSpaceList(rs);
2577 }
2579 void Metaspace::initialize(Mutex* lock,
2580 MetaspaceType type) {
2582 assert(space_list() != NULL,
2583 "Metadata VirtualSpaceList has not been initialized");
2585 _vsm = new SpaceManager(lock, space_list());
2586 if (_vsm == NULL) {
2587 return;
2588 }
2589 size_t word_size;
2590 size_t class_word_size;
2591 vsm()->get_initial_chunk_sizes(type,
2592 &word_size,
2593 &class_word_size);
2595 assert(class_space_list() != NULL,
2596 "Class VirtualSpaceList has not been initialized");
2598 // Allocate SpaceManager for classes.
2599 _class_vsm = new SpaceManager(lock, class_space_list());
2600 if (_class_vsm == NULL) {
2601 return;
2602 }
2604 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2606 // Allocate chunk for metadata objects
2607 Metachunk* new_chunk =
2608 space_list()->get_initialization_chunk(word_size,
2609 vsm()->medium_chunk_bunch());
2610 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2611 if (new_chunk != NULL) {
2612 // Add to this manager's list of chunks in use and current_chunk().
2613 vsm()->add_chunk(new_chunk, true);
2614 }
2616 // Allocate chunk for class metadata objects
2617 Metachunk* class_chunk =
2618 class_space_list()->get_initialization_chunk(class_word_size,
2619 class_vsm()->medium_chunk_bunch());
2620 if (class_chunk != NULL) {
2621 class_vsm()->add_chunk(class_chunk, true);
2622 }
2623 }
2625 size_t Metaspace::align_word_size_up(size_t word_size) {
2626 size_t byte_size = word_size * wordSize;
2627 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2628 }
2630 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2631 // DumpSharedSpaces doesn't use class metadata area (yet)
2632 if (mdtype == ClassType && !DumpSharedSpaces) {
2633 return class_vsm()->allocate(word_size);
2634 } else {
2635 return vsm()->allocate(word_size);
2636 }
2637 }
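// Failed-allocation path: bump the high-water mark that triggers a
// metadata GC (_capacity_until_GC) by enough to cover word_size, note that
// the expansion happened after a GC decision, then retry the allocation.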
2639 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2640 MetaWord* result;
2641 MetaspaceGC::set_expand_after_GC(true);
2642 size_t before_inc = MetaspaceGC::capacity_until_GC();
2643 size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
2644 MetaspaceGC::inc_capacity_until_GC(delta_words);
2645 if (PrintGCDetails && Verbose) {
2646 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2647 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2648 }
2650 result = allocate(word_size, mdtype);
2652 return result;
2653 }
2655 // Space allocated in the Metaspace. This may
2656 // be across several metadata virtual spaces.
2657 char* Metaspace::bottom() const {
2658 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2659 return (char*)vsm()->current_chunk()->bottom();
2660 }
2662 size_t Metaspace::used_words(MetadataType mdtype) const {
2664 return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2665 vsm()->sum_used_in_chunks_in_use(); // includes overhead!
2666 }
2668 size_t Metaspace::free_words(MetadataType mdtype) const {
2669 return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2670 vsm()->sum_free_in_chunks_in_use();
2671 }
2673 // Space capacity in the Metaspace. It includes
2674 // space in the list of chunks from which allocations
2675 // have been made. Don't include space in the global freelist and
2676 // in the space available in the dictionary which
2677 // is already counted in some chunk.
2678 size_t Metaspace::capacity_words(MetadataType mdtype) const {
2679 return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2680 vsm()->sum_capacity_in_chunks_in_use();
2681 }
2683 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2684 if (SafepointSynchronize::is_at_safepoint()) {
2685 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2686 // Don't take Heap_lock
2687 MutexLocker ml(vsm()->lock());
2688 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2689 // Dark matter. Too small for dictionary.
2690 #ifdef ASSERT
2691 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2692 #endif
2693 return;
2694 }
2695 if (is_class) {
2696 class_vsm()->deallocate(ptr, word_size);
2697 } else {
2698 vsm()->deallocate(ptr, word_size);
2699 }
2700 } else {
2701 MutexLocker ml(vsm()->lock());
2703 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2704 // Dark matter. Too small for dictionary.
2705 #ifdef ASSERT
2706 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2707 #endif
2708 return;
2709 }
2710 if (is_class) {
2711 class_vsm()->deallocate(ptr, word_size);
2712 } else {
2713 vsm()->deallocate(ptr, word_size);
2714 }
2715 }
2716 }
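// Top-level metadata allocation. At dump time the request goes straight to
// the loader's shared read-only or read-write metaspace without locking
// (dumping is single threaded). Otherwise a failed allocation first asks
// the collector policy to reclaim metadata and retry; if that also fails,
// a "Metadata space" out-of-memory condition is reported, the JVMTI
// resource-exhausted event is posted when requested, and an OOM is thrown.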
2718 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
2719 bool read_only, MetadataType mdtype, TRAPS) {
2720 if (HAS_PENDING_EXCEPTION) {
2721 assert(false, "Should not allocate with exception pending");
2722 return NULL; // caller does a CHECK_NULL too
2723 }
  // SSS: Should we align the allocations and make sure the sizes are aligned?
2726 MetaWord* result = NULL;
2728 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
2729 "ClassLoaderData::the_null_class_loader_data() should have been used.");
2730 // Allocate in metaspaces without taking out a lock, because it deadlocks
2731 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
2732 // to revisit this for application class data sharing.
2733 if (DumpSharedSpaces) {
2734 if (read_only) {
2735 result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
2736 } else {
2737 result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
2738 }
2739 if (result == NULL) {
2740 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
2741 }
2742 return Metablock::initialize(result, word_size);
2743 }
2745 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
2747 if (result == NULL) {
2748 // Try to clean out some memory and retry.
2749 result =
2750 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
2751 loader_data, word_size, mdtype);
2753 // If result is still null, we are out of memory.
2754 if (result == NULL) {
2755 if (Verbose && TraceMetadataChunkAllocation) {
2756 gclog_or_tty->print_cr("Metaspace allocation failed for size "
2757 SIZE_FORMAT, word_size);
      if (loader_data->metaspace_or_null() != NULL) {
        loader_data->metaspace_or_null()->dump(gclog_or_tty);
      }
2759 MetaspaceAux::dump(gclog_or_tty);
2760 }
2761 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
2762 report_java_out_of_memory("Metadata space");
2764 if (JvmtiExport::should_post_resource_exhausted()) {
2765 JvmtiExport::post_resource_exhausted(
2766 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
2767 "Metadata space");
2768 }
2769 THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
2770 }
2771 }
2772 return Metablock::initialize(result, word_size);
2773 }
2775 void Metaspace::print_on(outputStream* out) const {
2776 // Print both class virtual space counts and metaspace.
2777 if (Verbose) {
2778 vsm()->print_on(out);
2779 class_vsm()->print_on(out);
2780 }
2781 }
2783 bool Metaspace::contains(const void * ptr) {
2784 if (MetaspaceShared::is_in_shared_space(ptr)) {
2785 return true;
2786 }
2787 // This is checked while unlocked. As long as the virtualspaces are added
2788 // at the end, the pointer will be in one of them. The virtual spaces
2789 // aren't deleted presently. When they are, some sort of locking might
2790 // be needed. Note, locking this can cause inversion problems with the
2791 // caller in MetaspaceObj::is_metadata() function.
2792 return space_list()->contains(ptr) || class_space_list()->contains(ptr);
2793 }
2795 void Metaspace::verify() {
2796 vsm()->verify();
2797 class_vsm()->verify();
2798 }
2800 void Metaspace::dump(outputStream* const out) const {
2801 if (UseMallocOnly) {
2802 // Just print usage for now
    out->print_cr("usage " SIZE_FORMAT, used_words(Metaspace::NonClassType));
2804 }
2805 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
2806 vsm()->dump(out);
2807 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
2808 class_vsm()->dump(out);
2809 }