Fri, 19 Apr 2013 11:08:52 -0700
8010992: Remove calls to global ::operator new[] and new
Summary: Disable use of the global operator new and operator new[], which could cause unexpected exceptions and allow allocations to escape NMT tracking.
Reviewed-by: coleenp, dholmes, zgu
Contributed-by: yumin.qi@oracle.com
1 /*
2 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "gc_interface/collectedHeap.hpp"
26 #include "memory/binaryTreeDictionary.hpp"
27 #include "memory/freeList.hpp"
28 #include "memory/collectorPolicy.hpp"
29 #include "memory/filemap.hpp"
30 #include "memory/freeList.hpp"
31 #include "memory/metablock.hpp"
32 #include "memory/metachunk.hpp"
33 #include "memory/metaspace.hpp"
34 #include "memory/metaspaceShared.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp"
37 #include "runtime/globals.hpp"
38 #include "runtime/mutex.hpp"
39 #include "runtime/orderAccess.hpp"
40 #include "services/memTracker.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/debug.hpp"
44 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
45 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
46 // Set this constant to true to enable slow integrity checking of
47 // the free chunk lists
48 const bool metaspace_slow_verify = false;
51 // Parameters for stress mode testing
52 const uint metadata_deallocate_a_lot_block = 10;
53 const uint metadata_deallocate_a_lock_chunk = 3;
54 size_t const allocation_from_dictionary_limit = 64 * K;
56 MetaWord* last_allocated = 0;
58 // Used in declarations in SpaceManager and ChunkManager
59 enum ChunkIndex {
60 ZeroIndex = 0,
61 SpecializedIndex = ZeroIndex,
62 SmallIndex = SpecializedIndex + 1,
63 MediumIndex = SmallIndex + 1,
64 HumongousIndex = MediumIndex + 1,
65 NumberOfFreeLists = 3,
66 NumberOfInUseLists = 4
67 };
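// The first three indices (specialized, small, medium) each have a dedicated
// free list in the ChunkManager below; humongous chunks are kept in a
// dictionary instead, so NumberOfFreeLists excludes HumongousIndex while
// NumberOfInUseLists (used by SpaceManager) includes it.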
69 enum ChunkSizes { // in words.
70 ClassSpecializedChunk = 128,
71 SpecializedChunk = 128,
72 ClassSmallChunk = 256,
73 SmallChunk = 512,
74 ClassMediumChunk = 1 * K,
75 MediumChunk = 8 * K,
76 HumongousChunkGranularity = 8
77 };
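// The sizes above are in words, so on a 64-bit VM (8-byte words, assumed here
// only for illustration) a MediumChunk of 8 K words is 64 KB and a
// ClassMediumChunk of 1 K words is 8 KB.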
79 static ChunkIndex next_chunk_index(ChunkIndex i) {
80 assert(i < NumberOfInUseLists, "Out of bound");
81 return (ChunkIndex) (i+1);
82 }
84 // Originally _capacity_until_GC was set to MetaspaceSize here but
85 // the default MetaspaceSize before argument processing was being
86 // used which was not the desired value. See the code
87 // in should_expand() to see how the initialization is handled
88 // now.
89 size_t MetaspaceGC::_capacity_until_GC = 0;
90 bool MetaspaceGC::_expand_after_GC = false;
91 uint MetaspaceGC::_shrink_factor = 0;
92 bool MetaspaceGC::_should_concurrent_collect = false;
94 // Blocks of space for metadata are allocated out of Metachunks.
95 //
96 // Metachunks are allocated out of MetadataVirtualspaces and once
97 // allocated there is no explicit link between a Metachunk and
98 // the MetadataVirtualspace from which it was allocated.
99 //
100 // Each SpaceManager maintains a
101 // list of the chunks it is using and the current chunk. The current
102 // chunk is the chunk from which allocations are done. Space freed in
103 // a chunk is placed on the free list of blocks (BlockFreelist) and
104 // reused from there.
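//
// A rough sketch of how the classes defined below fit together (summary only):
//
//   VirtualSpaceList - list of VirtualSpaceNodes (reserved/committed memory)
//   ChunkManager     - global free lists / dictionary of unused Metachunks
//   SpaceManager     - used by a Metaspace: its in-use Metachunks plus a
//                      BlockFreelist of freed Metablocks within those chunks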
106 // Pointer to list of Metachunks.
107 class ChunkList VALUE_OBJ_CLASS_SPEC {
108 // List of free chunks
109 Metachunk* _head;
111 public:
112 // Constructor
113 ChunkList() : _head(NULL) {}
115 // Accessors
116 Metachunk* head() { return _head; }
117 void set_head(Metachunk* v) { _head = v; }
119 // Link at head of the list
120 void add_at_head(Metachunk* head, Metachunk* tail);
121 void add_at_head(Metachunk* head);
123 size_t sum_list_size();
124 size_t sum_list_count();
125 size_t sum_list_capacity();
126 };
128 // Manages the global free lists of chunks.
129 // Has three lists of free chunks, and a total size and
130 // count that includes all three
132 class ChunkManager VALUE_OBJ_CLASS_SPEC {
134 // Free list of chunks of different sizes.
135 // SpecializedChunk
136 // SmallChunk
137 // MediumChunk
138 ChunkList _free_chunks[NumberOfFreeLists];
141 // HumongousChunk
142 ChunkTreeDictionary _humongous_dictionary;
144 // Total size and count of the free chunks held by this ChunkManager, across all lists
145 size_t _free_chunks_total;
146 size_t _free_chunks_count;
148 void dec_free_chunks_total(size_t v) {
149 assert(_free_chunks_count > 0 &&
150 _free_chunks_total > 0,
151 "About to go negative");
152 Atomic::add_ptr(-1, &_free_chunks_count);
153 jlong minus_v = (jlong) - (jlong) v;
154 Atomic::add_ptr(minus_v, &_free_chunks_total);
155 }
157 // Debug support
159 size_t sum_free_chunks();
160 size_t sum_free_chunks_count();
162 void locked_verify_free_chunks_total();
163 void slow_locked_verify_free_chunks_total() {
164 if (metaspace_slow_verify) {
165 locked_verify_free_chunks_total();
166 }
167 }
168 void locked_verify_free_chunks_count();
169 void slow_locked_verify_free_chunks_count() {
170 if (metaspace_slow_verify) {
171 locked_verify_free_chunks_count();
172 }
173 }
174 void verify_free_chunks_count();
176 public:
178 ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
180 // Take a chunk from, or return (deallocate) a chunk to, the global freelist.
181 Metachunk* chunk_freelist_allocate(size_t word_size);
182 void chunk_freelist_deallocate(Metachunk* chunk);
184 // Map a size to a list index assuming that there are lists
185 // for specialized, small, medium, and humongous chunks.
186 static ChunkIndex list_index(size_t size);
188 // Total of the space in the free chunks list
189 size_t free_chunks_total();
190 size_t free_chunks_total_in_bytes();
192 // Number of chunks in the free chunks list
193 size_t free_chunks_count();
195 void inc_free_chunks_total(size_t v, size_t count = 1) {
196 Atomic::add_ptr(count, &_free_chunks_count);
197 Atomic::add_ptr(v, &_free_chunks_total);
198 }
199 ChunkTreeDictionary* humongous_dictionary() {
200 return &_humongous_dictionary;
201 }
203 ChunkList* free_chunks(ChunkIndex index);
205 // Returns the list for the given chunk word size.
206 ChunkList* find_free_chunks_list(size_t word_size);
208 // Add and remove from a list by size. Selects
209 // list based on size of chunk.
210 void free_chunks_put(Metachunk* chunk);
211 Metachunk* free_chunks_get(size_t chunk_word_size);
213 // Debug support
214 void verify();
215 void slow_verify() {
216 if (metaspace_slow_verify) {
217 verify();
218 }
219 }
220 void locked_verify();
221 void slow_locked_verify() {
222 if (metaspace_slow_verify) {
223 locked_verify();
224 }
225 }
226 void verify_free_chunks_total();
228 void locked_print_free_chunks(outputStream* st);
229 void locked_print_sum_free_chunks(outputStream* st);
231 void print_on(outputStream* st);
232 };
235 // Used to manage the free list of Metablocks (a block corresponds
236 // to the allocation of a quantum of metadata).
237 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
238 BlockTreeDictionary* _dictionary;
239 static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
241 // Accessors
242 BlockTreeDictionary* dictionary() const { return _dictionary; }
244 public:
245 BlockFreelist();
246 ~BlockFreelist();
248 // Get a block from, or return a block to, the free list
249 MetaWord* get_block(size_t word_size);
250 void return_block(MetaWord* p, size_t word_size);
252 size_t total_size() {
253 if (dictionary() == NULL) {
254 return 0;
255 } else {
256 return dictionary()->total_size();
257 }
258 }
260 void print_on(outputStream* st) const;
261 };
263 class VirtualSpaceNode : public CHeapObj<mtClass> {
264 friend class VirtualSpaceList;
266 // Link to next VirtualSpaceNode
267 VirtualSpaceNode* _next;
269 // total in the VirtualSpace
270 MemRegion _reserved;
271 ReservedSpace _rs;
272 VirtualSpace _virtual_space;
273 MetaWord* _top;
275 // Convenience functions for logical bottom and end
276 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
277 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
279 // Convenience functions to access the _virtual_space
280 char* low() const { return virtual_space()->low(); }
281 char* high() const { return virtual_space()->high(); }
283 public:
285 VirtualSpaceNode(size_t byte_size);
286 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
287 ~VirtualSpaceNode();
289 // address of next available space in _virtual_space;
290 // Accessors
291 VirtualSpaceNode* next() { return _next; }
292 void set_next(VirtualSpaceNode* v) { _next = v; }
294 void set_reserved(MemRegion const v) { _reserved = v; }
295 void set_top(MetaWord* v) { _top = v; }
297 // Accessors
298 MemRegion* reserved() { return &_reserved; }
299 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
301 // Returns true if "word_size" is available in the virtual space
302 bool is_available(size_t word_size) { return _top + word_size <= end(); }
304 MetaWord* top() const { return _top; }
305 void inc_top(size_t word_size) { _top += word_size; }
307 // used and capacity in this single entry in the list
308 size_t used_words_in_vs() const;
309 size_t capacity_words_in_vs() const;
311 bool initialize();
313 // get space from the virtual space
314 Metachunk* take_from_committed(size_t chunk_word_size);
316 // Allocate a chunk from the virtual space and return it.
317 Metachunk* get_chunk_vs(size_t chunk_word_size);
318 Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
320 // Expands/shrinks the committed space in a virtual space. Delegates
321 // to Virtualspace
322 bool expand_by(size_t words, bool pre_touch = false);
323 bool shrink_by(size_t words);
325 #ifdef ASSERT
326 // Debug support
327 static void verify_virtual_space_total();
328 static void verify_virtual_space_count();
329 void mangle();
330 #endif
332 void print_on(outputStream* st) const;
333 };
335 // byte_size is the size of the associated virtualspace.
336 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
337 // align up to vm allocation granularity
338 byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
340 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
341 // at a configurable address, generally at the top of the Java heap so other
342 // memory addresses don't conflict.
343 if (DumpSharedSpaces) {
344 char* shared_base = (char*)SharedBaseAddress;
345 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
346 if (_rs.is_reserved()) {
347 assert(shared_base == 0 || _rs.base() == shared_base, "should match");
348 } else {
349 // Get a mmap region anywhere if the SharedBaseAddress fails.
350 _rs = ReservedSpace(byte_size);
351 }
352 MetaspaceShared::set_shared_rs(&_rs);
353 } else {
354 _rs = ReservedSpace(byte_size);
355 }
357 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
358 }
360 // List of VirtualSpaces for metadata allocation.
361 // It has a _next link for singly linked list and a MemRegion
362 // for total space in the VirtualSpace.
363 class VirtualSpaceList : public CHeapObj<mtClass> {
364 friend class VirtualSpaceNode;
366 enum VirtualSpaceSizes {
367 VirtualSpaceSize = 256 * K
368 };
370 // Global list of virtual spaces
371 // Head of the list
372 VirtualSpaceNode* _virtual_space_list;
373 // virtual space currently being used for allocations
374 VirtualSpaceNode* _current_virtual_space;
375 // Free chunk list for all other metadata
376 ChunkManager _chunk_manager;
378 // Can this virtual space list allocate more than one virtual space? Also used to determine
379 // whether to allocate unlimited small chunks in this virtual space
380 bool _is_class;
381 bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
383 // Sum of space in all virtual spaces and number of virtual spaces
384 size_t _virtual_space_total;
385 size_t _virtual_space_count;
387 ~VirtualSpaceList();
389 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
391 void set_virtual_space_list(VirtualSpaceNode* v) {
392 _virtual_space_list = v;
393 }
394 void set_current_virtual_space(VirtualSpaceNode* v) {
395 _current_virtual_space = v;
396 }
398 void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
400 // Get another virtual space and add it to the list. This
401 // is typically prompted by a failed attempt to allocate a chunk
402 // and is typically followed by the allocation of a chunk.
403 bool grow_vs(size_t vs_word_size);
405 public:
406 VirtualSpaceList(size_t word_size);
407 VirtualSpaceList(ReservedSpace rs);
409 Metachunk* get_new_chunk(size_t word_size,
410 size_t grow_chunks_by_words,
411 size_t medium_chunk_bunch);
413 // Get the first chunk for a Metaspace. Used for
414 // special cases such as the boot class loader, reflection
415 // class loader and anonymous class loader.
416 Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
418 VirtualSpaceNode* current_virtual_space() {
419 return _current_virtual_space;
420 }
422 ChunkManager* chunk_manager() { return &_chunk_manager; }
423 bool is_class() const { return _is_class; }
425 // Allocate the first virtualspace.
426 void initialize(size_t word_size);
428 size_t virtual_space_total() { return _virtual_space_total; }
429 void inc_virtual_space_total(size_t v) {
430 Atomic::add_ptr(v, &_virtual_space_total);
431 }
433 size_t virtual_space_count() { return _virtual_space_count; }
434 void inc_virtual_space_count() {
435 Atomic::inc_ptr(&_virtual_space_count);
436 }
438 // Used and capacity in the entire list of virtual spaces.
439 // These are global values shared by all Metaspaces
440 size_t capacity_words_sum();
441 size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
442 size_t used_words_sum();
443 size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
445 bool contains(const void *ptr);
447 void print_on(outputStream* st) const;
449 class VirtualSpaceListIterator : public StackObj {
450 VirtualSpaceNode* _virtual_spaces;
451 public:
452 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
453 _virtual_spaces(virtual_spaces) {}
455 bool repeat() {
456 return _virtual_spaces != NULL;
457 }
459 VirtualSpaceNode* get_next() {
460 VirtualSpaceNode* result = _virtual_spaces;
461 if (_virtual_spaces != NULL) {
462 _virtual_spaces = _virtual_spaces->next();
463 }
464 return result;
465 }
466 };
467 };
469 class Metadebug : AllStatic {
470 // Debugging support for Metaspaces
471 static int _deallocate_block_a_lot_count;
472 static int _deallocate_chunk_a_lot_count;
473 static int _allocation_fail_alot_count;
475 public:
476 static int deallocate_block_a_lot_count() {
477 return _deallocate_block_a_lot_count;
478 }
479 static void set_deallocate_block_a_lot_count(int v) {
480 _deallocate_block_a_lot_count = v;
481 }
482 static void inc_deallocate_block_a_lot_count() {
483 _deallocate_block_a_lot_count++;
484 }
485 static int deallocate_chunk_a_lot_count() {
486 return _deallocate_chunk_a_lot_count;
487 }
488 static void reset_deallocate_chunk_a_lot_count() {
489 _deallocate_chunk_a_lot_count = 1;
490 }
491 static void inc_deallocate_chunk_a_lot_count() {
492 _deallocate_chunk_a_lot_count++;
493 }
495 static void init_allocation_fail_alot_count();
496 #ifdef ASSERT
497 static bool test_metadata_failure();
498 #endif
500 static void deallocate_chunk_a_lot(SpaceManager* sm,
501 size_t chunk_word_size);
502 static void deallocate_block_a_lot(SpaceManager* sm,
503 size_t chunk_word_size);
505 };
507 int Metadebug::_deallocate_block_a_lot_count = 0;
508 int Metadebug::_deallocate_chunk_a_lot_count = 0;
509 int Metadebug::_allocation_fail_alot_count = 0;
511 // SpaceManager - used by Metaspace to handle allocations
512 class SpaceManager : public CHeapObj<mtClass> {
513 friend class Metaspace;
514 friend class Metadebug;
516 private:
518 // protects allocations and contains.
519 Mutex* const _lock;
521 // Chunk related size
522 size_t _medium_chunk_bunch;
524 // List of chunks in use by this SpaceManager. Allocations
525 // are done from the current chunk. The list is used for deallocating
526 // chunks when the SpaceManager is freed.
527 Metachunk* _chunks_in_use[NumberOfInUseLists];
528 Metachunk* _current_chunk;
530 // Virtual space where allocation comes from.
531 VirtualSpaceList* _vs_list;
533 // Number of small chunks to allocate to a manager
534 // If class space manager, small chunks are unlimited
535 static uint const _small_chunk_limit;
536 bool has_small_chunk_limit() { return !vs_list()->is_class(); }
538 // Sum of all space in allocated chunks
539 size_t _allocation_total;
541 // Free lists of blocks are per SpaceManager since they
542 // are assumed to be in chunks in use by the SpaceManager
543 // and all chunks in use by a SpaceManager are freed when
544 // the class loader using the SpaceManager is collected.
545 BlockFreelist _block_freelists;
547 // protects virtualspace and chunk expansions
548 static const char* _expand_lock_name;
549 static const int _expand_lock_rank;
550 static Mutex* const _expand_lock;
552 private:
553 // Accessors
554 Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
555 void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
557 BlockFreelist* block_freelists() const {
558 return (BlockFreelist*) &_block_freelists;
559 }
561 VirtualSpaceList* vs_list() const { return _vs_list; }
563 Metachunk* current_chunk() const { return _current_chunk; }
564 void set_current_chunk(Metachunk* v) {
565 _current_chunk = v;
566 }
568 Metachunk* find_current_chunk(size_t word_size);
570 // Add chunk to the list of chunks in use
571 void add_chunk(Metachunk* v, bool make_current);
573 Mutex* lock() const { return _lock; }
575 const char* chunk_size_name(ChunkIndex index) const;
577 protected:
578 void initialize();
580 public:
581 SpaceManager(Mutex* lock,
582 VirtualSpaceList* vs_list);
583 ~SpaceManager();
585 enum ChunkMultiples {
586 MediumChunkMultiple = 4
587 };
589 // Accessors
590 size_t specialized_chunk_size() { return SpecializedChunk; }
591 size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
592 size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
593 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
595 size_t allocation_total() const { return _allocation_total; }
596 void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
597 bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
599 static Mutex* expand_lock() { return _expand_lock; }
601 // Set the sizes for the initial chunks.
602 void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
603 size_t* chunk_word_size,
604 size_t* class_chunk_word_size);
606 size_t sum_capacity_in_chunks_in_use() const;
607 size_t sum_used_in_chunks_in_use() const;
608 size_t sum_free_in_chunks_in_use() const;
609 size_t sum_waste_in_chunks_in_use() const;
610 size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
612 size_t sum_count_in_chunks_in_use();
613 size_t sum_count_in_chunks_in_use(ChunkIndex i);
615 Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
617 // Block allocation and deallocation.
618 // Allocates a block from the current chunk
619 MetaWord* allocate(size_t word_size);
621 // Helper for allocations
622 MetaWord* allocate_work(size_t word_size);
624 // Returns a block to the per manager freelist
625 void deallocate(MetaWord* p, size_t word_size);
627 // Based on the allocation size and a minimum chunk size,
628 // returns the chunk size to use (for expanding space for chunk allocation).
629 size_t calc_chunk_size(size_t allocation_word_size);
631 // Called when an allocation from the current chunk fails.
632 // Gets a new chunk (may require getting a new virtual space),
633 // and allocates from that chunk.
634 MetaWord* grow_and_allocate(size_t word_size);
636 // debugging support.
638 void dump(outputStream* const out) const;
639 void print_on(outputStream* st) const;
640 void locked_print_chunks_in_use_on(outputStream* st) const;
642 void verify();
643 void verify_chunk_size(Metachunk* chunk);
644 NOT_PRODUCT(void mangle_freed_chunks();)
645 #ifdef ASSERT
646 void verify_allocation_total();
647 #endif
648 };
650 uint const SpaceManager::_small_chunk_limit = 4;
652 const char* SpaceManager::_expand_lock_name =
653 "SpaceManager chunk allocation lock";
654 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
655 Mutex* const SpaceManager::_expand_lock =
656 new Mutex(SpaceManager::_expand_lock_rank,
657 SpaceManager::_expand_lock_name,
658 Mutex::_allow_vm_block_flag);
660 // BlockFreelist methods
662 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
664 BlockFreelist::~BlockFreelist() {
665 if (_dictionary != NULL) {
666 if (Verbose && TraceMetadataChunkAllocation) {
667 _dictionary->print_free_lists(gclog_or_tty);
668 }
669 delete _dictionary;
670 }
671 }
673 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
674 Metablock* block = (Metablock*) p;
675 block->set_word_size(word_size);
676 block->set_prev(NULL);
677 block->set_next(NULL);
679 return block;
680 }
682 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
683 Metablock* free_chunk = initialize_free_chunk(p, word_size);
684 if (dictionary() == NULL) {
685 _dictionary = new BlockTreeDictionary();
686 }
687 dictionary()->return_chunk(free_chunk);
688 }
690 MetaWord* BlockFreelist::get_block(size_t word_size) {
691 if (dictionary() == NULL) {
692 return NULL;
693 }
695 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
696 // Dark matter. Too small for dictionary.
697 return NULL;
698 }
700 Metablock* free_block =
701 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
702 if (free_block == NULL) {
703 return NULL;
704 }
706 return (MetaWord*) free_block;
707 }
709 void BlockFreelist::print_on(outputStream* st) const {
710 if (dictionary() == NULL) {
711 return;
712 }
713 dictionary()->print_free_lists(st);
714 }
716 // VirtualSpaceNode methods
718 VirtualSpaceNode::~VirtualSpaceNode() {
719 _rs.release();
720 }
722 size_t VirtualSpaceNode::used_words_in_vs() const {
723 return pointer_delta(top(), bottom(), sizeof(MetaWord));
724 }
726 // Space committed in the VirtualSpace
727 size_t VirtualSpaceNode::capacity_words_in_vs() const {
728 return pointer_delta(end(), bottom(), sizeof(MetaWord));
729 }
732 // Allocates the chunk from the virtual space only.
733 // This interface is also used internally for debugging. Not all
734 // chunks removed here are necessarily used for allocation.
735 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
736 // Bottom of the new chunk
737 MetaWord* chunk_limit = top();
738 assert(chunk_limit != NULL, "Not safe to call this method");
740 if (!is_available(chunk_word_size)) {
741 if (TraceMetadataChunkAllocation) {
742 tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
743 // Dump some information about the virtual space that is nearly full
744 print_on(tty);
745 }
746 return NULL;
747 }
749 // Take the space (bump top on the current virtual space).
750 inc_top(chunk_word_size);
752 // Point the chunk at the space
753 Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
754 return result;
755 }
758 // Expand the virtual space (commit more of the reserved space)
759 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
760 size_t bytes = words * BytesPerWord;
761 bool result = virtual_space()->expand_by(bytes, pre_touch);
762 if (TraceMetavirtualspaceAllocation && !result) {
763 gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
764 "for byte size " SIZE_FORMAT, bytes);
765 virtual_space()->print();
766 }
767 return result;
768 }
770 // Shrink the virtual space (uncommit some of the committed space)
771 bool VirtualSpaceNode::shrink_by(size_t words) {
772 size_t bytes = words * BytesPerWord;
773 virtual_space()->shrink_by(bytes);
774 return true;
775 }
777 // Add another chunk to the chunk list.
779 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
780 assert_lock_strong(SpaceManager::expand_lock());
781 Metachunk* result = NULL;
783 return take_from_committed(chunk_word_size);
784 }
786 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
787 assert_lock_strong(SpaceManager::expand_lock());
789 Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
791 if (new_chunk == NULL) {
792 // Only a small part of the virtualspace is committed when first
793 // allocated so committing more here can be expected.
794 size_t page_size_words = os::vm_page_size() / BytesPerWord;
795 size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
796 page_size_words);
797 expand_by(aligned_expand_vs_by_words, false);
798 new_chunk = get_chunk_vs(chunk_word_size);
799 }
800 return new_chunk;
801 }
803 bool VirtualSpaceNode::initialize() {
805 if (!_rs.is_reserved()) {
806 return false;
807 }
809 // An allocation out of this Virtualspace that is larger
810 // than an initial commit size can waste that initial committed
811 // space.
812 size_t committed_byte_size = 0;
813 bool result = virtual_space()->initialize(_rs, committed_byte_size);
814 if (result) {
815 set_top((MetaWord*)virtual_space()->low());
816 set_reserved(MemRegion((HeapWord*)_rs.base(),
817 (HeapWord*)(_rs.base() + _rs.size())));
819 assert(reserved()->start() == (HeapWord*) _rs.base(),
820 err_msg("Reserved start was not set properly " PTR_FORMAT
821 " != " PTR_FORMAT, reserved()->start(), _rs.base()));
822 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
823 err_msg("Reserved size was not set properly " SIZE_FORMAT
824 " != " SIZE_FORMAT, reserved()->word_size(),
825 _rs.size() / BytesPerWord));
826 }
828 return result;
829 }
831 void VirtualSpaceNode::print_on(outputStream* st) const {
832 size_t used = used_words_in_vs();
833 size_t capacity = capacity_words_in_vs();
834 VirtualSpace* vs = virtual_space();
835 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
836 "[" PTR_FORMAT ", " PTR_FORMAT ", "
837 PTR_FORMAT ", " PTR_FORMAT ")",
838 vs, capacity / K,
839 capacity == 0 ? 0 : used * 100 / capacity,
840 bottom(), top(), end(),
841 vs->high_boundary());
842 }
844 #ifdef ASSERT
845 void VirtualSpaceNode::mangle() {
846 size_t word_size = capacity_words_in_vs();
847 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
848 }
849 #endif // ASSERT
851 // VirtualSpaceList methods
852 // Space allocated from the VirtualSpace
854 VirtualSpaceList::~VirtualSpaceList() {
855 VirtualSpaceListIterator iter(virtual_space_list());
856 while (iter.repeat()) {
857 VirtualSpaceNode* vsl = iter.get_next();
858 delete vsl;
859 }
860 }
862 size_t VirtualSpaceList::used_words_sum() {
863 size_t allocated_by_vs = 0;
864 VirtualSpaceListIterator iter(virtual_space_list());
865 while (iter.repeat()) {
866 VirtualSpaceNode* vsl = iter.get_next();
867 // Sum used region [bottom, top) in each virtualspace
868 allocated_by_vs += vsl->used_words_in_vs();
869 }
870 assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
871 err_msg("Total in free chunks " SIZE_FORMAT
872 " greater than total from virtual_spaces " SIZE_FORMAT,
873 allocated_by_vs, chunk_manager()->free_chunks_total()));
874 size_t used =
875 allocated_by_vs - chunk_manager()->free_chunks_total();
876 return used;
877 }
879 // Space available in all MetadataVirtualspaces allocated
880 // for metadata. This is the upper limit on the capacity
881 // of chunks allocated out of all the MetadataVirtualspaces.
882 size_t VirtualSpaceList::capacity_words_sum() {
883 size_t capacity = 0;
884 VirtualSpaceListIterator iter(virtual_space_list());
885 while (iter.repeat()) {
886 VirtualSpaceNode* vsl = iter.get_next();
887 capacity += vsl->capacity_words_in_vs();
888 }
889 return capacity;
890 }
892 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
893 _is_class(false),
894 _virtual_space_list(NULL),
895 _current_virtual_space(NULL),
896 _virtual_space_total(0),
897 _virtual_space_count(0) {
898 MutexLockerEx cl(SpaceManager::expand_lock(),
899 Mutex::_no_safepoint_check_flag);
900 bool initialization_succeeded = grow_vs(word_size);
902 assert(initialization_succeeded,
903 " VirtualSpaceList initialization should not fail");
904 }
906 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
907 _is_class(true),
908 _virtual_space_list(NULL),
909 _current_virtual_space(NULL),
910 _virtual_space_total(0),
911 _virtual_space_count(0) {
912 MutexLockerEx cl(SpaceManager::expand_lock(),
913 Mutex::_no_safepoint_check_flag);
914 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
915 bool succeeded = class_entry->initialize();
916 assert(succeeded, " VirtualSpaceList initialization should not fail");
917 link_vs(class_entry, rs.size()/BytesPerWord);
918 }
920 // Allocate another meta virtual space and add it to the list.
921 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
922 assert_lock_strong(SpaceManager::expand_lock());
923 if (vs_word_size == 0) {
924 return false;
925 }
926 // Reserve the space
927 size_t vs_byte_size = vs_word_size * BytesPerWord;
928 assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
930 // Allocate the meta virtual space and initialize it.
931 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
932 if (!new_entry->initialize()) {
933 delete new_entry;
934 return false;
935 } else {
936 // ensure lock-free iteration sees fully initialized node
937 OrderAccess::storestore();
938 link_vs(new_entry, vs_word_size);
939 return true;
940 }
941 }
943 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
944 if (virtual_space_list() == NULL) {
945 set_virtual_space_list(new_entry);
946 } else {
947 current_virtual_space()->set_next(new_entry);
948 }
949 set_current_virtual_space(new_entry);
950 inc_virtual_space_total(vs_word_size);
951 inc_virtual_space_count();
952 #ifdef ASSERT
953 new_entry->mangle();
954 #endif
955 if (TraceMetavirtualspaceAllocation && Verbose) {
956 VirtualSpaceNode* vsl = current_virtual_space();
957 vsl->print_on(tty);
958 }
959 }
961 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
962 size_t grow_chunks_by_words,
963 size_t medium_chunk_bunch) {
965 // Get a chunk from the chunk freelist
966 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
968 // Allocate a chunk out of the current virtual space.
969 if (next == NULL) {
970 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
971 }
973 if (next == NULL) {
974 // Not enough room in current virtual space. Try to commit
975 // more space.
976 size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
977 grow_chunks_by_words);
978 size_t page_size_words = os::vm_page_size() / BytesPerWord;
979 size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
980 page_size_words);
981 bool vs_expanded =
982 current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
983 if (!vs_expanded) {
984 // Should the capacity of the metaspaces be expanded for
985 // this allocation? If it's the virtual space for classes and is
986 // being used for CompressedHeaders, don't allocate a new virtualspace.
987 if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
988 // Get another virtual space.
989 size_t grow_vs_words =
990 MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
991 if (grow_vs(grow_vs_words)) {
992 // Got it. It's on the list now. Get a chunk from it.
993 next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
994 }
995 } else {
996 // Allocation will fail and induce a GC
997 if (TraceMetadataChunkAllocation && Verbose) {
998 gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
999 " Fail instead of expand the metaspace");
1000 }
1001 }
1002 } else {
1003 // The virtual space expanded, get a new chunk
1004 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1005 assert(next != NULL, "Just expanded, should succeed");
1006 }
1007 }
1009 assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
1010 "New chunk is still on some list");
1011 return next;
1012 }
1014 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1015 size_t chunk_bunch) {
1016 // Get a chunk from the chunk freelist
1017 Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1018 chunk_word_size,
1019 chunk_bunch);
1020 return new_chunk;
1021 }
1023 void VirtualSpaceList::print_on(outputStream* st) const {
1024 if (TraceMetadataChunkAllocation && Verbose) {
1025 VirtualSpaceListIterator iter(virtual_space_list());
1026 while (iter.repeat()) {
1027 VirtualSpaceNode* node = iter.get_next();
1028 node->print_on(st);
1029 }
1030 }
1031 }
1033 bool VirtualSpaceList::contains(const void *ptr) {
1034 VirtualSpaceNode* list = virtual_space_list();
1035 VirtualSpaceListIterator iter(list);
1036 while (iter.repeat()) {
1037 VirtualSpaceNode* node = iter.get_next();
1038 if (node->reserved()->contains(ptr)) {
1039 return true;
1040 }
1041 }
1042 return false;
1043 }
1046 // MetaspaceGC methods
1048 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1049 // Within the VM operation after the GC the attempt to allocate the metadata
1050 // should succeed. If the GC did not free enough space for the metaspace
1051 // allocation, the HWM is increased so that another virtualspace will be
1052 // allocated for the metadata. With perm gen the increase in the perm
1053 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
1054 // metaspace policy uses those as the small and large steps for the HWM.
1055 //
1056 // After the GC the compute_new_size() for MetaspaceGC is called to
1057 // resize the capacity of the metaspaces. The current implementation
1058 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio,
1059 // analogous to the flags used by some GCs to resize the Java heap. New
1060 // flags can be implemented if really needed. MinMetaspaceFreeRatio is used
1061 // to calculate how much free space is desirable in the metaspace capacity
1062 // to decide how much to increase the HWM. MaxMetaspaceFreeRatio is used
1063 // to decide how much free space is desirable in the metaspace capacity
1064 // before decreasing the HWM.
1066 // Calculate the amount to increase the high water mark (HWM).
1067 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1068 // another expansion is not requested too soon. If that is not
1069 // enough to satisfy the allocation (i.e. big enough for a word_size
1070 // allocation), increase by MaxMetaspaceExpansion. If that is still
1071 // not enough, expand by the size of the allocation (word_size) plus
1072 // some.
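// For example (purely illustrative numbers, assuming 8-byte words with
// MinMetaspaceExpansion taken as 256K bytes and MaxMetaspaceExpansion as
// 4M bytes): a 1000-word request is covered by the 32K-word minimum step;
// a 100K-word request exceeds the minimum, so the HWM grows by the
// 512K-word maximum step; a 1M-word request exceeds even that, so the HWM
// grows by the request size plus the 32K-word minimum.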
1073 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
1074 size_t before_inc = MetaspaceGC::capacity_until_GC();
1075 size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
1076 size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
1077 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1078 size_t size_delta_words = align_size_up(word_size, page_size_words);
1079 size_t delta_words = MAX2(size_delta_words, min_delta_words);
1080 if (delta_words > min_delta_words) {
1081 // Don't want to hit the high water mark on the next
1082 // allocation so make the delta greater than just enough
1083 // for this allocation.
1084 delta_words = MAX2(delta_words, max_delta_words);
1085 if (delta_words > max_delta_words) {
1086 // This allocation is large but the next ones are probably not
1087 // so increase by the minimum.
1088 delta_words = delta_words + min_delta_words;
1089 }
1090 }
1091 return delta_words;
1092 }
1094 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
1095 // If the user wants a limit, impose one.
1096 if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
1097 MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
1098 return false;
1099 }
1101 // Class virtual space should always be expanded. Call GC for the other
1102 // metadata virtual space.
1103 if (vsl == Metaspace::class_space_list()) return true;
1105 // If this is part of an allocation after a GC, expand
1106 // unconditionally.
1107 if(MetaspaceGC::expand_after_GC()) {
1108 return true;
1109 }
1111 size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
1113 // If the capacity is below the minimum capacity, allow the
1114 // expansion. Also set the high-water-mark (capacity_until_GC)
1115 // to that minimum capacity so that a GC will not be induced
1116 // until that minimum capacity is exceeded.
1117 if (vsl->capacity_words_sum() < metaspace_size_words ||
1118 capacity_until_GC() == 0) {
1119 set_capacity_until_GC(metaspace_size_words);
1120 return true;
1121 } else {
1122 if (vsl->capacity_words_sum() < capacity_until_GC()) {
1123 return true;
1124 } else {
1125 if (TraceMetadataChunkAllocation && Verbose) {
1126 gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT
1127 " capacity_until_GC " SIZE_FORMAT
1128 " capacity_words_sum " SIZE_FORMAT
1129 " used_words_sum " SIZE_FORMAT
1130 " free chunks " SIZE_FORMAT
1131 " free chunks count %d",
1132 word_size,
1133 capacity_until_GC(),
1134 vsl->capacity_words_sum(),
1135 vsl->used_words_sum(),
1136 vsl->chunk_manager()->free_chunks_total(),
1137 vsl->chunk_manager()->free_chunks_count());
1138 }
1139 return false;
1140 }
1141 }
1142 }
1144 // Variables are in bytes
1146 void MetaspaceGC::compute_new_size() {
1147 assert(_shrink_factor <= 100, "invalid shrink factor");
1148 uint current_shrink_factor = _shrink_factor;
1149 _shrink_factor = 0;
1151 VirtualSpaceList *vsl = Metaspace::space_list();
1153 size_t capacity_after_gc = vsl->capacity_bytes_sum();
1154 // Check to see if these two can be calculated without walking the CLDG
1155 size_t used_after_gc = vsl->used_bytes_sum();
1156 size_t capacity_until_GC = vsl->capacity_bytes_sum();
1157 size_t free_after_gc = capacity_until_GC - used_after_gc;
1159 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1160 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1162 const double min_tmp = used_after_gc / maximum_used_percentage;
1163 size_t minimum_desired_capacity =
1164 (size_t)MIN2(min_tmp, double(max_uintx));
1165 // Don't shrink less than the initial generation size
1166 minimum_desired_capacity = MAX2(minimum_desired_capacity,
1167 MetaspaceSize);
1169 if (PrintGCDetails && Verbose) {
1170 const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
1171 gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1172 gclog_or_tty->print_cr(" "
1173 " minimum_free_percentage: %6.2f"
1174 " maximum_used_percentage: %6.2f",
1175 minimum_free_percentage,
1176 maximum_used_percentage);
1177 double d_free_after_gc = free_after_gc / (double) K;
1178 gclog_or_tty->print_cr(" "
1179 " free_after_gc : %6.1fK"
1180 " used_after_gc : %6.1fK"
1181 " capacity_after_gc : %6.1fK"
1182 " metaspace HWM : %6.1fK",
1183 free_after_gc / (double) K,
1184 used_after_gc / (double) K,
1185 capacity_after_gc / (double) K,
1186 capacity_until_GC / (double) K);
1187 gclog_or_tty->print_cr(" "
1188 " free_percentage: %6.2f",
1189 free_percentage);
1190 }
1193 if (capacity_until_GC < minimum_desired_capacity) {
1194 // If we have less capacity below the metaspace HWM, then
1195 // increment the HWM.
1196 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1197 // Don't expand unless it's significant
1198 if (expand_bytes >= MinMetaspaceExpansion) {
1199 size_t expand_words = expand_bytes / BytesPerWord;
1200 MetaspaceGC::inc_capacity_until_GC(expand_words);
1201 }
1202 if (PrintGCDetails && Verbose) {
1203 size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
1204 gclog_or_tty->print_cr(" expanding:"
1205 " minimum_desired_capacity: %6.1fK"
1206 " expand_words: %6.1fK"
1207 " MinMetaspaceExpansion: %6.1fK"
1208 " new metaspace HWM: %6.1fK",
1209 minimum_desired_capacity / (double) K,
1210 expand_bytes / (double) K,
1211 MinMetaspaceExpansion / (double) K,
1212 new_capacity_until_GC / (double) K);
1213 }
1214 return;
1215 }
1217 // No expansion, now see if we want to shrink
1218 size_t shrink_words = 0;
1219 // We would never want to shrink more than this
1220 size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
1221 assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
1222 max_shrink_words));
1224 // Should shrinking be considered?
1225 if (MaxMetaspaceFreeRatio < 100) {
1226 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1227 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1228 const double max_tmp = used_after_gc / minimum_used_percentage;
1229 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1230 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1231 MetaspaceSize);
1232 if (PrintGC && Verbose) {
1233 gclog_or_tty->print_cr(" "
1234 " maximum_free_percentage: %6.2f"
1235 " minimum_used_percentage: %6.2f",
1236 maximum_free_percentage,
1237 minimum_used_percentage);
1238 gclog_or_tty->print_cr(" "
1239 " capacity_until_GC: %6.1fK"
1240 " minimum_desired_capacity: %6.1fK"
1241 " maximum_desired_capacity: %6.1fK",
1242 capacity_until_GC / (double) K,
1243 minimum_desired_capacity / (double) K,
1244 maximum_desired_capacity / (double) K);
1245 }
1247 assert(minimum_desired_capacity <= maximum_desired_capacity,
1248 "sanity check");
1250 if (capacity_until_GC > maximum_desired_capacity) {
1251 // Capacity too large, compute shrinking size
1252 shrink_words = capacity_until_GC - maximum_desired_capacity;
1253 // We don't want to shrink all the way back to initSize if people call
1254 // System.gc(), because some programs do that between "phases" and then
1255 // we'd just have to grow the heap up again for the next phase. So we
1256 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1257 // on the third call, and 100% by the fourth call. But if we recompute
1258 // size without shrinking, it goes back to 0%.
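// For instance, with 1000K words of excess capacity, successive
// recomputations would shrink by 0K words, then 100K, then 400K, and then
// the full 1000K (and never by more than max_shrink_words above).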
1259 shrink_words = shrink_words / 100 * current_shrink_factor;
1260 assert(shrink_words <= max_shrink_words,
1261 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1262 shrink_words, max_shrink_words));
1263 if (current_shrink_factor == 0) {
1264 _shrink_factor = 10;
1265 } else {
1266 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1267 }
1268 if (PrintGCDetails && Verbose) {
1269 gclog_or_tty->print_cr(" "
1270 " shrinking:"
1271 " initSize: %.1fK"
1272 " maximum_desired_capacity: %.1fK",
1273 MetaspaceSize / (double) K,
1274 maximum_desired_capacity / (double) K);
1275 gclog_or_tty->print_cr(" "
1276 " shrink_words: %.1fK"
1277 " current_shrink_factor: %d"
1278 " new shrink factor: %d"
1279 " MinMetaspaceExpansion: %.1fK",
1280 shrink_words / (double) K,
1281 current_shrink_factor,
1282 _shrink_factor,
1283 MinMetaspaceExpansion / (double) K);
1284 }
1285 }
1286 }
1289 // Don't shrink unless it's significant
1290 if (shrink_words >= MinMetaspaceExpansion) {
1291 VirtualSpaceNode* csp = vsl->current_virtual_space();
1292 size_t available_to_shrink = csp->capacity_words_in_vs() -
1293 csp->used_words_in_vs();
1294 shrink_words = MIN2(shrink_words, available_to_shrink);
1295 csp->shrink_by(shrink_words);
1296 MetaspaceGC::dec_capacity_until_GC(shrink_words);
1297 if (PrintGCDetails && Verbose) {
1298 size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
1299 gclog_or_tty->print_cr(" metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
1300 }
1301 }
1302 assert(used_after_gc <= vsl->capacity_bytes_sum(),
1303 "sanity check");
1305 }
1307 // Metadebug methods
1309 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1310 size_t chunk_word_size){
1311 #ifdef ASSERT
1312 VirtualSpaceList* vsl = sm->vs_list();
1313 if (MetaDataDeallocateALot &&
1314 Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1315 Metadebug::reset_deallocate_chunk_a_lot_count();
1316 for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
1317 Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1318 if (dummy_chunk == NULL) {
1319 break;
1320 }
1321 vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1323 if (TraceMetadataChunkAllocation && Verbose) {
1324 gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
1325 sm->sum_count_in_chunks_in_use());
1326 dummy_chunk->print_on(gclog_or_tty);
1327 gclog_or_tty->print_cr(" Free chunks total %d count %d",
1328 vsl->chunk_manager()->free_chunks_total(),
1329 vsl->chunk_manager()->free_chunks_count());
1330 }
1331 }
1332 } else {
1333 Metadebug::inc_deallocate_chunk_a_lot_count();
1334 }
1335 #endif
1336 }
1338 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1339 size_t raw_word_size){
1340 #ifdef ASSERT
1341 if (MetaDataDeallocateALot &&
1342 Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1343 Metadebug::set_deallocate_block_a_lot_count(0);
1344 for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1345 MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1346 if (dummy_block == 0) {
1347 break;
1348 }
1349 sm->deallocate(dummy_block, raw_word_size);
1350 }
1351 } else {
1352 Metadebug::inc_deallocate_block_a_lot_count();
1353 }
1354 #endif
1355 }
1357 void Metadebug::init_allocation_fail_alot_count() {
1358 if (MetadataAllocationFailALot) {
1359 _allocation_fail_alot_count =
1360 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1361 }
1362 }
1364 #ifdef ASSERT
1365 bool Metadebug::test_metadata_failure() {
1366 if (MetadataAllocationFailALot &&
1367 Threads::is_vm_complete()) {
1368 if (_allocation_fail_alot_count > 0) {
1369 _allocation_fail_alot_count--;
1370 } else {
1371 if (TraceMetadataChunkAllocation && Verbose) {
1372 gclog_or_tty->print_cr("Metadata allocation failing for "
1373 "MetadataAllocationFailALot");
1374 }
1375 init_allocation_fail_alot_count();
1376 return true;
1377 }
1378 }
1379 return false;
1380 }
1381 #endif
1383 // ChunkList methods
1385 size_t ChunkList::sum_list_size() {
1386 size_t result = 0;
1387 Metachunk* cur = head();
1388 while (cur != NULL) {
1389 result += cur->word_size();
1390 cur = cur->next();
1391 }
1392 return result;
1393 }
1395 size_t ChunkList::sum_list_count() {
1396 size_t result = 0;
1397 Metachunk* cur = head();
1398 while (cur != NULL) {
1399 result++;
1400 cur = cur->next();
1401 }
1402 return result;
1403 }
1405 size_t ChunkList::sum_list_capacity() {
1406 size_t result = 0;
1407 Metachunk* cur = head();
1408 while (cur != NULL) {
1409 result += cur->capacity_word_size();
1410 cur = cur->next();
1411 }
1412 return result;
1413 }
1415 void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
1416 assert_lock_strong(SpaceManager::expand_lock());
1417 assert(head == tail || tail->next() == NULL,
1418 "Not the tail or the head has already been added to a list");
1420 if (TraceMetadataChunkAllocation && Verbose) {
1421 gclog_or_tty->print("ChunkList::add_at_head(head, tail): ");
1422 Metachunk* cur = head;
1423 while (cur != NULL) {
1424 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
1425 cur = cur->next();
1426 }
1427 gclog_or_tty->print_cr("");
1428 }
1430 if (tail != NULL) {
1431 tail->set_next(_head);
1432 }
1433 set_head(head);
1434 }
1436 void ChunkList::add_at_head(Metachunk* list) {
1437 if (list == NULL) {
1438 // Nothing to add
1439 return;
1440 }
1441 assert_lock_strong(SpaceManager::expand_lock());
1442 Metachunk* head = list;
1443 Metachunk* tail = list;
1444 Metachunk* cur = head->next();
1445 // Search for the tail since it is not passed.
1446 while (cur != NULL) {
1447 tail = cur;
1448 cur = cur->next();
1449 }
1450 add_at_head(head, tail);
1451 }
1453 // ChunkManager methods
1455 // Verification of _free_chunks_total and _free_chunks_count does not
1456 // work with the CMS collector because its use of additional locks
1457 // complicates the mutex deadlock detection, but it can still be useful
1458 // for detecting errors in the chunk accounting with other collectors.
1460 size_t ChunkManager::free_chunks_total() {
1461 #ifdef ASSERT
1462 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1463 MutexLockerEx cl(SpaceManager::expand_lock(),
1464 Mutex::_no_safepoint_check_flag);
1465 slow_locked_verify_free_chunks_total();
1466 }
1467 #endif
1468 return _free_chunks_total;
1469 }
1471 size_t ChunkManager::free_chunks_total_in_bytes() {
1472 return free_chunks_total() * BytesPerWord;
1473 }
1475 size_t ChunkManager::free_chunks_count() {
1476 #ifdef ASSERT
1477 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1478 MutexLockerEx cl(SpaceManager::expand_lock(),
1479 Mutex::_no_safepoint_check_flag);
1480 // This lock is only needed in debug because the verification
1481 // of the _free_chunks_totals walks the list of free chunks
1482 slow_locked_verify_free_chunks_count();
1483 }
1484 #endif
1485 return _free_chunks_count;
1486 }
1488 void ChunkManager::locked_verify_free_chunks_total() {
1489 assert_lock_strong(SpaceManager::expand_lock());
1490 assert(sum_free_chunks() == _free_chunks_total,
1491 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1492 " same as sum " SIZE_FORMAT, _free_chunks_total,
1493 sum_free_chunks()));
1494 }
1496 void ChunkManager::verify_free_chunks_total() {
1497 MutexLockerEx cl(SpaceManager::expand_lock(),
1498 Mutex::_no_safepoint_check_flag);
1499 locked_verify_free_chunks_total();
1500 }
1502 void ChunkManager::locked_verify_free_chunks_count() {
1503 assert_lock_strong(SpaceManager::expand_lock());
1504 assert(sum_free_chunks_count() == _free_chunks_count,
1505 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1506 " same as sum " SIZE_FORMAT, _free_chunks_count,
1507 sum_free_chunks_count()));
1508 }
1510 void ChunkManager::verify_free_chunks_count() {
1511 #ifdef ASSERT
1512 MutexLockerEx cl(SpaceManager::expand_lock(),
1513 Mutex::_no_safepoint_check_flag);
1514 locked_verify_free_chunks_count();
1515 #endif
1516 }
1518 void ChunkManager::verify() {
1519 MutexLockerEx cl(SpaceManager::expand_lock(),
1520 Mutex::_no_safepoint_check_flag);
1521 locked_verify();
1522 }
1524 void ChunkManager::locked_verify() {
1525 locked_verify_free_chunks_count();
1526 locked_verify_free_chunks_total();
1527 }
1529 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1530 assert_lock_strong(SpaceManager::expand_lock());
1531 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1532 _free_chunks_total, _free_chunks_count);
1533 }
1535 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1536 assert_lock_strong(SpaceManager::expand_lock());
1537 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1538 sum_free_chunks(), sum_free_chunks_count());
1539 }
1540 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1541 return &_free_chunks[index];
1542 }
1544 // These methods that sum the free chunk lists are used in printing
1545 // methods that are used in product builds.
1546 size_t ChunkManager::sum_free_chunks() {
1547 assert_lock_strong(SpaceManager::expand_lock());
1548 size_t result = 0;
1549 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1550 ChunkList* list = free_chunks(i);
1552 if (list == NULL) {
1553 continue;
1554 }
1556 result = result + list->sum_list_capacity();
1557 }
1558 result = result + humongous_dictionary()->total_size();
1559 return result;
1560 }
1562 size_t ChunkManager::sum_free_chunks_count() {
1563 assert_lock_strong(SpaceManager::expand_lock());
1564 size_t count = 0;
1565 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1566 ChunkList* list = free_chunks(i);
1567 if (list == NULL) {
1568 continue;
1569 }
1570 count = count + list->sum_list_count();
1571 }
1572 count = count + humongous_dictionary()->total_free_blocks();
1573 return count;
1574 }
1576 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1577 ChunkIndex index = list_index(word_size);
1578 assert(index < HumongousIndex, "No humongous list");
1579 return free_chunks(index);
1580 }
1582 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1583 assert_lock_strong(SpaceManager::expand_lock());
1584 ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1585 chunk->set_next(free_list->head());
1586 free_list->set_head(chunk);
1587 // chunk is being returned to the chunk free list
1588 inc_free_chunks_total(chunk->capacity_word_size());
1589 slow_locked_verify();
1590 }
1592 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1593 // The deallocation of a chunk originates in the freelist
1594 // management code for a Metaspace and does not hold the
1595 // lock.
1596 assert(chunk != NULL, "Deallocating NULL");
1597 assert_lock_strong(SpaceManager::expand_lock());
1598 slow_locked_verify();
1599 if (TraceMetadataChunkAllocation) {
1600 tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1601 PTR_FORMAT " size " SIZE_FORMAT,
1602 chunk, chunk->word_size());
1603 }
1604 free_chunks_put(chunk);
1605 }
1607 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1608 assert_lock_strong(SpaceManager::expand_lock());
1610 slow_locked_verify();
1612 Metachunk* chunk = NULL;
1613 if (list_index(word_size) != HumongousIndex) {
1614 ChunkList* free_list = find_free_chunks_list(word_size);
1615 assert(free_list != NULL, "Sanity check");
1617 chunk = free_list->head();
1618 debug_only(Metachunk* debug_head = chunk;)
1620 if (chunk == NULL) {
1621 return NULL;
1622 }
1624 // Remove the chunk as the head of the list.
1625 free_list->set_head(chunk->next());
1627 // Chunk is being removed from the chunks free list.
1628 dec_free_chunks_total(chunk->capacity_word_size());
1630 if (TraceMetadataChunkAllocation && Verbose) {
1631 tty->print_cr("ChunkManager::free_chunks_get: free_list "
1632 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1633 free_list, chunk, chunk->word_size());
1634 }
1635 } else {
1636 chunk = humongous_dictionary()->get_chunk(
1637 word_size,
1638 FreeBlockDictionary<Metachunk>::atLeast);
1640 if (chunk != NULL) {
1641 if (TraceMetadataHumongousAllocation) {
1642 size_t waste = chunk->word_size() - word_size;
1643 tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
1644 " for requested size " SIZE_FORMAT
1645 " waste " SIZE_FORMAT,
1646 chunk->word_size(), word_size, waste);
1647 }
1648 // Chunk is being removed from the chunks free list.
1649 dec_free_chunks_total(chunk->capacity_word_size());
1650 #ifdef ASSERT
1651 chunk->set_is_free(false);
1652 #endif
1653 } else {
1654 return NULL;
1655 }
1656 }
1658 // Remove it from the links to this freelist
1659 chunk->set_next(NULL);
1660 chunk->set_prev(NULL);
1661 slow_locked_verify();
1662 return chunk;
1663 }
1665 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1666 assert_lock_strong(SpaceManager::expand_lock());
1667 slow_locked_verify();
1669 // Take from the beginning of the list
1670 Metachunk* chunk = free_chunks_get(word_size);
1671 if (chunk == NULL) {
1672 return NULL;
1673 }
1675 assert((word_size <= chunk->word_size()) ||
1676 (list_index(chunk->word_size()) == HumongousIndex),
1677 "Non-humongous variable sized chunk");
1678 if (TraceMetadataChunkAllocation) {
1679 size_t list_count;
1680 if (list_index(word_size) < HumongousIndex) {
1681 ChunkList* list = find_free_chunks_list(word_size);
1682 list_count = list->sum_list_count();
1683 } else {
1684 list_count = humongous_dictionary()->total_count();
1685 }
1686 tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1687 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1688 this, chunk, chunk->word_size(), list_count);
1689 locked_print_free_chunks(tty);
1690 }
1692 return chunk;
1693 }
1695 void ChunkManager::print_on(outputStream* out) {
1696 if (PrintFLSStatistics != 0) {
1697 humongous_dictionary()->report_statistics();
1698 }
1699 }
1701 // SpaceManager methods
1703 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1704 size_t* chunk_word_size,
1705 size_t* class_chunk_word_size) {
1706 switch (type) {
1707 case Metaspace::BootMetaspaceType:
1708 *chunk_word_size = Metaspace::first_chunk_word_size();
1709 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1710 break;
1711 case Metaspace::ROMetaspaceType:
1712 *chunk_word_size = SharedReadOnlySize / wordSize;
1713 *class_chunk_word_size = ClassSpecializedChunk;
1714 break;
1715 case Metaspace::ReadWriteMetaspaceType:
1716 *chunk_word_size = SharedReadWriteSize / wordSize;
1717 *class_chunk_word_size = ClassSpecializedChunk;
1718 break;
1719 case Metaspace::AnonymousMetaspaceType:
1720 case Metaspace::ReflectionMetaspaceType:
1721 *chunk_word_size = SpecializedChunk;
1722 *class_chunk_word_size = ClassSpecializedChunk;
1723 break;
1724 default:
1725 *chunk_word_size = SmallChunk;
1726 *class_chunk_word_size = ClassSmallChunk;
1727 break;
1728 }
1729 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1730          err_msg("Initial chunk sizes bad: data " SIZE_FORMAT
1731 " class " SIZE_FORMAT,
1732 *chunk_word_size, *class_chunk_word_size));
1733 }
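// For example (illustrative, using the chunk size constants defined at the top
// of this file): an anonymous or reflection metaspace starts with a
// SpecializedChunk for data and a ClassSpecializedChunk for class metadata,
// while the boot metaspace starts with the larger first_chunk_word_size() /
// first_class_chunk_word_size() values computed in global_initialize().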
1735 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1736 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1737 size_t free = 0;
1738 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1739 Metachunk* chunk = chunks_in_use(i);
1740 while (chunk != NULL) {
1741 free += chunk->free_word_size();
1742 chunk = chunk->next();
1743 }
1744 }
1745 return free;
1746 }
1748 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1749 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1750 size_t result = 0;
1751 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1752 result += sum_waste_in_chunks_in_use(i);
1753 }
1755 return result;
1756 }
1758 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1759 size_t result = 0;
1760 Metachunk* chunk = chunks_in_use(index);
1761   // Count the free space in all the chunks but not the
1762 // current chunk from which allocations are still being done.
1763 if (chunk != NULL) {
1764 Metachunk* prev = chunk;
1765 while (chunk != NULL && chunk != current_chunk()) {
1766 result += chunk->free_word_size();
1767 prev = chunk;
1768 chunk = chunk->next();
1769 }
1770 }
1771 return result;
1772 }
1774 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1775 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1776 size_t sum = 0;
1777 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1778 Metachunk* chunk = chunks_in_use(i);
1779 while (chunk != NULL) {
1780       // Sum the chunk capacities. This was previously computed as
1781       // sum += chunk->word_size() - Metachunk::overhead();
1782 sum += chunk->capacity_word_size();
1783 chunk = chunk->next();
1784 }
1785 }
1786 return sum;
1787 }
1789 size_t SpaceManager::sum_count_in_chunks_in_use() {
1790 size_t count = 0;
1791 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1792 count = count + sum_count_in_chunks_in_use(i);
1793 }
1795 return count;
1796 }
1798 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1799 size_t count = 0;
1800 Metachunk* chunk = chunks_in_use(i);
1801 while (chunk != NULL) {
1802 count++;
1803 chunk = chunk->next();
1804 }
1805 return count;
1806 }
1809 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1810 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1811 size_t used = 0;
1812 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1813 Metachunk* chunk = chunks_in_use(i);
1814 while (chunk != NULL) {
1815 used += chunk->used_word_size();
1816 chunk = chunk->next();
1817 }
1818 }
1819 return used;
1820 }
1822 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1824 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1825 Metachunk* chunk = chunks_in_use(i);
1826 st->print("SpaceManager: %s " PTR_FORMAT,
1827 chunk_size_name(i), chunk);
1828 if (chunk != NULL) {
1829 st->print_cr(" free " SIZE_FORMAT,
1830 chunk->free_word_size());
1831 } else {
1832 st->print_cr("");
1833 }
1834 }
1836 vs_list()->chunk_manager()->locked_print_free_chunks(st);
1837 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1838 }
1840 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1842 // Decide between a small chunk and a medium chunk. Up to
1843 // _small_chunk_limit small chunks can be allocated but
1844 // once a medium chunk has been allocated, no more small
1845 // chunks will be allocated.
1846 size_t chunk_word_size;
1847 if (chunks_in_use(MediumIndex) == NULL &&
1848 (!has_small_chunk_limit() ||
1849 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
1850 chunk_word_size = (size_t) small_chunk_size();
1851 if (word_size + Metachunk::overhead() > small_chunk_size()) {
1852 chunk_word_size = medium_chunk_size();
1853 }
1854 } else {
1855 chunk_word_size = medium_chunk_size();
1856 }
1858 // Might still need a humongous chunk. Enforce an
1859 // eight word granularity to facilitate reuse (some
1860 // wastage but better chance of reuse).
1861 size_t if_humongous_sized_chunk =
1862 align_size_up(word_size + Metachunk::overhead(),
1863 HumongousChunkGranularity);
1864 chunk_word_size =
1865 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1867 assert(!SpaceManager::is_humongous(word_size) ||
1868 chunk_word_size == if_humongous_sized_chunk,
1869 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1870 " chunk_word_size " SIZE_FORMAT,
1871 word_size, chunk_word_size));
1872 if (TraceMetadataHumongousAllocation &&
1873 SpaceManager::is_humongous(word_size)) {
1874 gclog_or_tty->print_cr("Metadata humongous allocation:");
1875 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
1876 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
1877 chunk_word_size);
1878 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
1879 Metachunk::overhead());
1880 }
1881 return chunk_word_size;
1882 }
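// Worked example (illustrative, assuming the default chunk sizes above): while
// no medium chunk exists and fewer than _small_chunk_limit small chunks have
// been allocated, a request that fits within small_chunk_size() (including the
// Metachunk overhead) gets a small chunk; otherwise a medium chunk is used.
// A humongous request is rounded up to an eight-word boundary, roughly
//   align_size_up(word_size + Metachunk::overhead(), HumongousChunkGranularity)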
1884 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1885 assert(vs_list()->current_virtual_space() != NULL,
1886 "Should have been set");
1887 assert(current_chunk() == NULL ||
1888 current_chunk()->allocate(word_size) == NULL,
1889 "Don't need to expand");
1890 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1892 if (TraceMetadataChunkAllocation && Verbose) {
1893 size_t words_left = 0;
1894 size_t words_used = 0;
1895 if (current_chunk() != NULL) {
1896 words_left = current_chunk()->free_word_size();
1897 words_used = current_chunk()->used_word_size();
1898 }
1899 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
1900 " words " SIZE_FORMAT " words used " SIZE_FORMAT
1901 " words left",
1902 word_size, words_used, words_left);
1903 }
1905 // Get another chunk out of the virtual space
1906 size_t grow_chunks_by_words = calc_chunk_size(word_size);
1907 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
1909 // If a chunk was available, add it to the in-use chunk list
1910 // and do an allocation from it.
1911 if (next != NULL) {
1912 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
1913 // Add to this manager's list of chunks in use.
1914 add_chunk(next, false);
1915 return next->allocate(word_size);
1916 }
1917 return NULL;
1918 }
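// Illustrative flow of grow_and_allocate(): calc_chunk_size() picks the new
// chunk size, the chunk is taken from (or carved out of) the virtual space
// list, added to this manager's in-use lists as the current chunk, and the
// original request is then satisfied from that new chunk.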
1920 void SpaceManager::print_on(outputStream* st) const {
1922 for (ChunkIndex i = ZeroIndex;
1923 i < NumberOfInUseLists ;
1924 i = next_chunk_index(i) ) {
1925 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
1926 chunks_in_use(i),
1927 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
1928 }
1929 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
1930 " Humongous " SIZE_FORMAT,
1931 sum_waste_in_chunks_in_use(SmallIndex),
1932 sum_waste_in_chunks_in_use(MediumIndex),
1933 sum_waste_in_chunks_in_use(HumongousIndex));
1934 // block free lists
1935 if (block_freelists() != NULL) {
1936 st->print_cr("total in block free lists " SIZE_FORMAT,
1937 block_freelists()->total_size());
1938 }
1939 }
1941 SpaceManager::SpaceManager(Mutex* lock,
1942 VirtualSpaceList* vs_list) :
1943 _vs_list(vs_list),
1944 _allocation_total(0),
1945 _lock(lock)
1946 {
1947 initialize();
1948 }
1950 void SpaceManager::initialize() {
1951 Metadebug::init_allocation_fail_alot_count();
1952 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1953 _chunks_in_use[i] = NULL;
1954 }
1955 _current_chunk = NULL;
1956 if (TraceMetadataChunkAllocation && Verbose) {
1957 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
1958 }
1959 }
1961 SpaceManager::~SpaceManager() {
1962   // This call takes this->_lock, which can't be done while holding the expand_lock()
1963 const size_t in_use_before = sum_capacity_in_chunks_in_use();
1965 MutexLockerEx fcl(SpaceManager::expand_lock(),
1966 Mutex::_no_safepoint_check_flag);
1968 ChunkManager* chunk_manager = vs_list()->chunk_manager();
1970 chunk_manager->slow_locked_verify();
1972 if (TraceMetadataChunkAllocation && Verbose) {
1973 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
1974 locked_print_chunks_in_use_on(gclog_or_tty);
1975 }
1977 // Mangle freed memory.
1978 NOT_PRODUCT(mangle_freed_chunks();)
1980 // Have to update before the chunks_in_use lists are emptied
1981 // below.
1982 chunk_manager->inc_free_chunks_total(in_use_before,
1983 sum_count_in_chunks_in_use());
1985 // Add all the chunks in use by this space manager
1986 // to the global list of free chunks.
1988 // Follow each list of chunks-in-use and add them to the
1989 // free lists. Each list is NULL terminated.
1991 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
1992 if (TraceMetadataChunkAllocation && Verbose) {
1993 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
1994 sum_count_in_chunks_in_use(i),
1995 chunk_size_name(i));
1996 }
1997 Metachunk* chunks = chunks_in_use(i);
1998 chunk_manager->free_chunks(i)->add_at_head(chunks);
1999 set_chunks_in_use(i, NULL);
2000 if (TraceMetadataChunkAllocation && Verbose) {
2001 gclog_or_tty->print_cr("updated freelist count %d %s",
2002 chunk_manager->free_chunks(i)->sum_list_count(),
2003 chunk_size_name(i));
2004 }
2005 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2006 }
2008 // The medium chunk case may be optimized by passing the head and
2009 // tail of the medium chunk list to add_at_head(). The tail is often
2010 // the current chunk but there are probably exceptions.
2012 // Humongous chunks
2013 if (TraceMetadataChunkAllocation && Verbose) {
2014 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2015 sum_count_in_chunks_in_use(HumongousIndex),
2016 chunk_size_name(HumongousIndex));
2017 gclog_or_tty->print("Humongous chunk dictionary: ");
2018 }
2019 // Humongous chunks are never the current chunk.
2020 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2022 while (humongous_chunks != NULL) {
2023 #ifdef ASSERT
2024 humongous_chunks->set_is_free(true);
2025 #endif
2026 if (TraceMetadataChunkAllocation && Verbose) {
2027 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2028 humongous_chunks,
2029 humongous_chunks->word_size());
2030 }
2031 assert(humongous_chunks->word_size() == (size_t)
2032 align_size_up(humongous_chunks->word_size(),
2033 HumongousChunkGranularity),
2034 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2035 " granularity %d",
2036 humongous_chunks->word_size(), HumongousChunkGranularity));
2037 Metachunk* next_humongous_chunks = humongous_chunks->next();
2038 chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
2039 humongous_chunks = next_humongous_chunks;
2040 }
2041 if (TraceMetadataChunkAllocation && Verbose) {
2042 gclog_or_tty->print_cr("");
2043 gclog_or_tty->print_cr("updated dictionary count %d %s",
2044 chunk_manager->humongous_dictionary()->total_count(),
2045 chunk_size_name(HumongousIndex));
2046 }
2047 set_chunks_in_use(HumongousIndex, NULL);
2048 chunk_manager->slow_locked_verify();
2049 }
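// Destructor summary (illustrative): the indexed chunk lists are spliced onto
// the ChunkManager's free lists via add_at_head(), humongous chunks are
// returned one at a time to the humongous dictionary, and the free-chunk
// totals are bumped up front using the capacity summed before the
// expand_lock() was taken.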
2051 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2052 switch (index) {
2053 case SpecializedIndex:
2054 return "Specialized";
2055 case SmallIndex:
2056 return "Small";
2057 case MediumIndex:
2058 return "Medium";
2059 case HumongousIndex:
2060 return "Humongous";
2061 default:
2062 return NULL;
2063 }
2064 }
2066 ChunkIndex ChunkManager::list_index(size_t size) {
2067 switch (size) {
2068 case SpecializedChunk:
2069 assert(SpecializedChunk == ClassSpecializedChunk,
2070 "Need branch for ClassSpecializedChunk");
2071 return SpecializedIndex;
2072 case SmallChunk:
2073 case ClassSmallChunk:
2074 return SmallIndex;
2075 case MediumChunk:
2076 case ClassMediumChunk:
2077 return MediumIndex;
2078 default:
2079 assert(size > MediumChunk || size > ClassMediumChunk,
2080 "Not a humongous chunk");
2081 return HumongousIndex;
2082 }
2083 }
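// Note: this mapping assumes chunks are only created in the exact sizes listed
// above; every other size (including all humongous chunks) falls through to
// HumongousIndex. E.g. (illustrative):
//   list_index(SmallChunk)      == SmallIndex
//   list_index(MediumChunk + 8) == HumongousIndex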
2085 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2086 assert_lock_strong(_lock);
2087 size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2088 assert(word_size >= min_size,
2089 err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2090 block_freelists()->return_block(p, word_size);
2091 }
2093 // Adds a chunk to the list of chunks in use.
2094 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2096 assert(new_chunk != NULL, "Should not be NULL");
2097 assert(new_chunk->next() == NULL, "Should not be on a list");
2099 new_chunk->reset_empty();
2101   // Find the correct list and set the current
2102 // chunk for that list.
2103 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2105 if (index != HumongousIndex) {
2106 set_current_chunk(new_chunk);
2107 new_chunk->set_next(chunks_in_use(index));
2108 set_chunks_in_use(index, new_chunk);
2109 } else {
2110 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2111 // small, so small will be null. Link this first chunk as the current
2112 // chunk.
2113 if (make_current) {
2114 // Set as the current chunk but otherwise treat as a humongous chunk.
2115 set_current_chunk(new_chunk);
2116 }
2117     // Link at head. Only the null class loader metaspace (class and data
2118     // virtual space managers) ever has _current_chunk pointing to a humongous
2119     // chunk; other metaspaces never make a humongous chunk current, so
2120     // _current_chunk will not point to the tail of the humongous chunks list.
2121 new_chunk->set_next(chunks_in_use(HumongousIndex));
2122 set_chunks_in_use(HumongousIndex, new_chunk);
2124 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2125 }
2127 assert(new_chunk->is_empty(), "Not ready for reuse");
2128 if (TraceMetadataChunkAllocation && Verbose) {
2129 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2130 sum_count_in_chunks_in_use());
2131 new_chunk->print_on(gclog_or_tty);
2132     vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2133 }
2134 }
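// Illustrative summary of add_chunk(): non-humongous chunks always become the
// current chunk and are pushed onto the head of their indexed list; humongous
// chunks are pushed onto the HumongousIndex list and only become the current
// chunk when make_current is true (e.g. the very first chunk of a metaspace).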
2136 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2137 size_t grow_chunks_by_words) {
2139 Metachunk* next = vs_list()->get_new_chunk(word_size,
2140 grow_chunks_by_words,
2141 medium_chunk_bunch());
2143 if (TraceMetadataHumongousAllocation &&
2144 SpaceManager::is_humongous(next->word_size())) {
2145 gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT,
2146 next->word_size());
2147 }
2149 return next;
2150 }
2152 MetaWord* SpaceManager::allocate(size_t word_size) {
2153 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2155 // If only the dictionary is going to be used (i.e., no
2156 // indexed free list), then there is a minimum size requirement.
2157 // MinChunkSize is a placeholder for the real minimum size JJJ
2158 size_t byte_size = word_size * BytesPerWord;
2160 size_t byte_size_with_overhead = byte_size + Metablock::overhead();
2162 size_t raw_bytes_size = MAX2(byte_size_with_overhead,
2163 Metablock::min_block_byte_size());
2164 raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
2165 size_t raw_word_size = raw_bytes_size / BytesPerWord;
2166 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
2168 BlockFreelist* fl = block_freelists();
2169 MetaWord* p = NULL;
2170 // Allocation from the dictionary is expensive in the sense that
2171 // the dictionary has to be searched for a size. Don't allocate
2172 // from the dictionary until it starts to get fat. Is this
2173   // a reasonable policy? Maybe a skinny dictionary is fast enough
2174 // for allocations. Do some profiling. JJJ
2175 if (fl->total_size() > allocation_from_dictionary_limit) {
2176 p = fl->get_block(raw_word_size);
2177 }
2178 if (p == NULL) {
2179 p = allocate_work(raw_word_size);
2180 }
2181 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2183 return p;
2184 }
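// The size computation above, in one expression (illustrative):
//   raw_word_size = ARENA_ALIGN(MAX2(word_size * BytesPerWord + Metablock::overhead(),
//                                    Metablock::min_block_byte_size())) / BytesPerWord;
// The block freelists are only consulted once their total_size() exceeds
// allocation_from_dictionary_limit; otherwise allocate_work() is used directly.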
2186 // Returns the address of space allocated for "word_size".
2187 // This method does not know about blocks (Metablocks).
2188 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2189 assert_lock_strong(_lock);
2190 #ifdef ASSERT
2191 if (Metadebug::test_metadata_failure()) {
2192 return NULL;
2193 }
2194 #endif
2195 // Is there space in the current chunk?
2196 MetaWord* result = NULL;
2198 // For DumpSharedSpaces, only allocate out of the current chunk which is
2199 // never null because we gave it the size we wanted. Caller reports out
2200 // of memory if this returns null.
2201 if (DumpSharedSpaces) {
2202 assert(current_chunk() != NULL, "should never happen");
2203 inc_allocation_total(word_size);
2204 return current_chunk()->allocate(word_size); // caller handles null result
2205 }
2206 if (current_chunk() != NULL) {
2207 result = current_chunk()->allocate(word_size);
2208 }
2210 if (result == NULL) {
2211 result = grow_and_allocate(word_size);
2212 }
2213   if (result != NULL) {
2214 inc_allocation_total(word_size);
2215 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2216 "Head of the list is being allocated");
2217 }
2219 return result;
2220 }
2222 void SpaceManager::verify() {
2223 // If there are blocks in the dictionary, then
2224   // verification of chunks does not work since
2225 // being in the dictionary alters a chunk.
2226 if (block_freelists()->total_size() == 0) {
2227 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2228 Metachunk* curr = chunks_in_use(i);
2229 while (curr != NULL) {
2230 curr->verify();
2231 verify_chunk_size(curr);
2232 curr = curr->next();
2233 }
2234 }
2235 }
2236 }
2238 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2239 assert(is_humongous(chunk->word_size()) ||
2240 chunk->word_size() == medium_chunk_size() ||
2241 chunk->word_size() == small_chunk_size() ||
2242 chunk->word_size() == specialized_chunk_size(),
2243 "Chunk size is wrong");
2244 return;
2245 }
2247 #ifdef ASSERT
2248 void SpaceManager::verify_allocation_total() {
2249 // Verification is only guaranteed at a safepoint.
2250 if (SafepointSynchronize::is_at_safepoint()) {
2251 gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
2252 " sum_used_in_chunks_in_use " SIZE_FORMAT,
2253 this,
2254 allocation_total(),
2255 sum_used_in_chunks_in_use());
2256 }
2257 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2258 assert(allocation_total() == sum_used_in_chunks_in_use(),
2259 err_msg("allocation total is not consistent " SIZE_FORMAT
2260 " vs " SIZE_FORMAT,
2261 allocation_total(), sum_used_in_chunks_in_use()));
2262 }
2264 #endif
2266 void SpaceManager::dump(outputStream* const out) const {
2267 size_t curr_total = 0;
2268 size_t waste = 0;
2269 uint i = 0;
2270 size_t used = 0;
2271 size_t capacity = 0;
2273 // Add up statistics for all chunks in this SpaceManager.
2274 for (ChunkIndex index = ZeroIndex;
2275 index < NumberOfInUseLists;
2276 index = next_chunk_index(index)) {
2277 for (Metachunk* curr = chunks_in_use(index);
2278 curr != NULL;
2279 curr = curr->next()) {
2280 out->print("%d) ", i++);
2281 curr->print_on(out);
2282 if (TraceMetadataChunkAllocation && Verbose) {
2283 block_freelists()->print_on(out);
2284 }
2285 curr_total += curr->word_size();
2286 used += curr->used_word_size();
2287 capacity += curr->capacity_word_size();
2288       waste += curr->free_word_size() + curr->overhead();
2289 }
2290 }
2292 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2293 // Free space isn't wasted.
2294 waste -= free;
2296 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2297 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2298 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2299 }
2301 #ifndef PRODUCT
2302 void SpaceManager::mangle_freed_chunks() {
2303 for (ChunkIndex index = ZeroIndex;
2304 index < NumberOfInUseLists;
2305 index = next_chunk_index(index)) {
2306 for (Metachunk* curr = chunks_in_use(index);
2307 curr != NULL;
2308 curr = curr->next()) {
2309 curr->mangle();
2310 }
2311 }
2312 }
2313 #endif // PRODUCT
2315 // MetaspaceAux
2317 size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
2318 size_t used = 0;
2319 ClassLoaderDataGraphMetaspaceIterator iter;
2320 while (iter.repeat()) {
2321 Metaspace* msp = iter.get_next();
2322 // Sum allocation_total for each metaspace
2323 if (msp != NULL) {
2324 used += msp->used_words(mdtype);
2325 }
2326 }
2327 return used * BytesPerWord;
2328 }
2330 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2331 size_t free = 0;
2332 ClassLoaderDataGraphMetaspaceIterator iter;
2333 while (iter.repeat()) {
2334 Metaspace* msp = iter.get_next();
2335 if (msp != NULL) {
2336 free += msp->free_words(mdtype);
2337 }
2338 }
2339 return free * BytesPerWord;
2340 }
2342 size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
2343 size_t capacity = free_chunks_total(mdtype);
2344 ClassLoaderDataGraphMetaspaceIterator iter;
2345 while (iter.repeat()) {
2346 Metaspace* msp = iter.get_next();
2347 if (msp != NULL) {
2348 capacity += msp->capacity_words(mdtype);
2349 }
2350 }
2351 return capacity * BytesPerWord;
2352 }
2354 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2355 size_t reserved = (mdtype == Metaspace::ClassType) ?
2356 Metaspace::class_space_list()->virtual_space_total() :
2357 Metaspace::space_list()->virtual_space_total();
2358 return reserved * BytesPerWord;
2359 }
2361 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2363 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2364 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2365 Metaspace::class_space_list()->chunk_manager() :
2366 Metaspace::space_list()->chunk_manager();
2367 chunk->slow_verify();
2368 return chunk->free_chunks_total();
2369 }
2371 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2372 return free_chunks_total(mdtype) * BytesPerWord;
2373 }
2375 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2376 gclog_or_tty->print(", [Metaspace:");
2377 if (PrintGCDetails && Verbose) {
2378 gclog_or_tty->print(" " SIZE_FORMAT
2379 "->" SIZE_FORMAT
2380 "(" SIZE_FORMAT "/" SIZE_FORMAT ")",
2381 prev_metadata_used,
2382 used_in_bytes(),
2383 capacity_in_bytes(),
2384 reserved_in_bytes());
2385 } else {
2386 gclog_or_tty->print(" " SIZE_FORMAT "K"
2387 "->" SIZE_FORMAT "K"
2388 "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)",
2389 prev_metadata_used / K,
2390 used_in_bytes()/ K,
2391 capacity_in_bytes()/K,
2392 reserved_in_bytes()/ K);
2393 }
2395 gclog_or_tty->print("]");
2396 }
2398 // This is printed when PrintGCDetails is enabled
2399 void MetaspaceAux::print_on(outputStream* out) {
2400 Metaspace::MetadataType ct = Metaspace::ClassType;
2401 Metaspace::MetadataType nct = Metaspace::NonClassType;
2403 out->print_cr(" Metaspace total "
2404 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2405 " reserved " SIZE_FORMAT "K",
2406 capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
2407 out->print_cr(" data space "
2408 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2409 " reserved " SIZE_FORMAT "K",
2410 capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
2411 out->print_cr(" class space "
2412 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2413 " reserved " SIZE_FORMAT "K",
2414 capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
2415 }
2417 // Print information for class space and data space separately.
2418 // This is almost the same as above.
2419 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2420 size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2421 size_t capacity_bytes = capacity_in_bytes(mdtype);
2422 size_t used_bytes = used_in_bytes(mdtype);
2423 size_t free_bytes = free_in_bytes(mdtype);
2424 size_t used_and_free = used_bytes + free_bytes +
2425 free_chunks_capacity_bytes;
2426 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2427 "K + unused in chunks " SIZE_FORMAT "K + "
2428 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2429 "K capacity in allocated chunks " SIZE_FORMAT "K",
2430 used_bytes / K,
2431 free_bytes / K,
2432 free_chunks_capacity_bytes / K,
2433 used_and_free / K,
2434 capacity_bytes / K);
2435 // Accounting can only be correct if we got the values during a safepoint
2436 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2437 }
2439 // Print total fragmentation for class and data metaspaces separately
2440 void MetaspaceAux::print_waste(outputStream* out) {
2442 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
2443 size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
2444 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
2445 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
2447 ClassLoaderDataGraphMetaspaceIterator iter;
2448 while (iter.repeat()) {
2449 Metaspace* msp = iter.get_next();
2450 if (msp != NULL) {
2451 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2452 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2453 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2454 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2455 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2456 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2457 large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2458 large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2460 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2461 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2462 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2463 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2464 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2465 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2466 cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2467 cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2468 }
2469 }
2470 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2471 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2472 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2473 SIZE_FORMAT " medium(s) " SIZE_FORMAT,
2474 specialized_count, specialized_waste, small_count,
2475 small_waste, medium_count, medium_waste);
2476 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2477 SIZE_FORMAT " small(s) " SIZE_FORMAT,
2478 cls_specialized_count, cls_specialized_waste,
2479 cls_small_count, cls_small_waste);
2480 }
2482 // Dump global metaspace things from the end of ClassLoaderDataGraph
2483 void MetaspaceAux::dump(outputStream* out) {
2484 out->print_cr("All Metaspace:");
2485 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2486 out->print("class space: "); print_on(out, Metaspace::ClassType);
2487 print_waste(out);
2488 }
2490 void MetaspaceAux::verify_free_chunks() {
2491 Metaspace::space_list()->chunk_manager()->verify();
2492 Metaspace::class_space_list()->chunk_manager()->verify();
2493 }
2495 // Metaspace methods
2497 size_t Metaspace::_first_chunk_word_size = 0;
2498 size_t Metaspace::_first_class_chunk_word_size = 0;
2500 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2501 initialize(lock, type);
2502 }
2504 Metaspace::~Metaspace() {
2505 delete _vsm;
2506 delete _class_vsm;
2507 }
2509 VirtualSpaceList* Metaspace::_space_list = NULL;
2510 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2512 #define VIRTUALSPACEMULTIPLIER 2
2514 void Metaspace::global_initialize() {
2515 // Initialize the alignment for shared spaces.
2516 int max_alignment = os::vm_page_size();
2517 MetaspaceShared::set_max_alignment(max_alignment);
2519 if (DumpSharedSpaces) {
2520 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2521 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2522 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2523 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2525 // Initialize with the sum of the shared space sizes. The read-only
2526 // and read write metaspace chunks will be allocated out of this and the
2527 // remainder is the misc code and data chunks.
2528 size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2529 SharedMiscDataSize + SharedMiscCodeSize,
2530 os::vm_allocation_granularity());
2531 size_t word_size = total/wordSize;
2532 _space_list = new VirtualSpaceList(word_size);
2533 } else {
2534 // If using shared space, open the file that contains the shared space
2535 // and map in the memory before initializing the rest of metaspace (so
2536 // the addresses don't conflict)
2537 if (UseSharedSpaces) {
2538 FileMapInfo* mapinfo = new FileMapInfo();
2539 memset(mapinfo, 0, sizeof(FileMapInfo));
2541 // Open the shared archive file, read and validate the header. If
2542 // initialization fails, shared spaces [UseSharedSpaces] are
2543 // disabled and the file is closed.
2544 // Map in spaces now also
2545 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2546 FileMapInfo::set_current_info(mapinfo);
2547 } else {
2548 assert(!mapinfo->is_open() && !UseSharedSpaces,
2549 "archive file not closed or shared spaces not disabled.");
2550 }
2551 }
2553 // Initialize these before initializing the VirtualSpaceList
2554 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2555 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2556 // Make the first class chunk bigger than a medium chunk so it's not put
2557 // on the medium chunk list. The next chunk will be small and progress
2558     // from there. This size was determined by running with -version.
2559 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2560 (ClassMetaspaceSize/BytesPerWord)*2);
2561 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2562 // Arbitrarily set the initial virtual space to a multiple
2563 // of the boot class loader size.
2564 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2565 // Initialize the list of virtual spaces.
2566 _space_list = new VirtualSpaceList(word_size);
2567 }
2568 }
2570 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2571 // Java heap because the compression algorithm is the same for each. The
2572 // argument passed in is at the top of the compressed space
2573 void Metaspace::initialize_class_space(ReservedSpace rs) {
2574   // The reserved space size may be bigger because of alignment, especially with UseLargePages
2575 assert(rs.size() >= ClassMetaspaceSize,
2576 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2577 _class_space_list = new VirtualSpaceList(rs);
2578 }
2580 void Metaspace::initialize(Mutex* lock,
2581 MetaspaceType type) {
2583 assert(space_list() != NULL,
2584 "Metadata VirtualSpaceList has not been initialized");
2586 _vsm = new SpaceManager(lock, space_list());
2587 if (_vsm == NULL) {
2588 return;
2589 }
2590 size_t word_size;
2591 size_t class_word_size;
2592 vsm()->get_initial_chunk_sizes(type,
2593 &word_size,
2594 &class_word_size);
2596 assert(class_space_list() != NULL,
2597 "Class VirtualSpaceList has not been initialized");
2599 // Allocate SpaceManager for classes.
2600 _class_vsm = new SpaceManager(lock, class_space_list());
2601 if (_class_vsm == NULL) {
2602 return;
2603 }
2605 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2607 // Allocate chunk for metadata objects
2608 Metachunk* new_chunk =
2609 space_list()->get_initialization_chunk(word_size,
2610 vsm()->medium_chunk_bunch());
2611 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2612 if (new_chunk != NULL) {
2613 // Add to this manager's list of chunks in use and current_chunk().
2614 vsm()->add_chunk(new_chunk, true);
2615 }
2617 // Allocate chunk for class metadata objects
2618 Metachunk* class_chunk =
2619 class_space_list()->get_initialization_chunk(class_word_size,
2620 class_vsm()->medium_chunk_bunch());
2621 if (class_chunk != NULL) {
2622 class_vsm()->add_chunk(class_chunk, true);
2623 }
2624 }
2626 size_t Metaspace::align_word_size_up(size_t word_size) {
2627 size_t byte_size = word_size * wordSize;
2628 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2629 }
2631 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2632 // DumpSharedSpaces doesn't use class metadata area (yet)
2633 if (mdtype == ClassType && !DumpSharedSpaces) {
2634 return class_vsm()->allocate(word_size);
2635 } else {
2636 return vsm()->allocate(word_size);
2637 }
2638 }
2640 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2641 MetaWord* result;
2642 MetaspaceGC::set_expand_after_GC(true);
2643 size_t before_inc = MetaspaceGC::capacity_until_GC();
2644 size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
2645 MetaspaceGC::inc_capacity_until_GC(delta_words);
2646 if (PrintGCDetails && Verbose) {
2647 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2648 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2649 }
2651 result = allocate(word_size, mdtype);
2653 return result;
2654 }
2656 // Space allocated in the Metaspace. This may
2657 // be across several metadata virtual spaces.
2658 char* Metaspace::bottom() const {
2659 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2660 return (char*)vsm()->current_chunk()->bottom();
2661 }
2663 size_t Metaspace::used_words(MetadataType mdtype) const {
2664 // return vsm()->allocation_total();
2665 return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2666 vsm()->sum_used_in_chunks_in_use(); // includes overhead!
2667 }
2669 size_t Metaspace::free_words(MetadataType mdtype) const {
2670 return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2671 vsm()->sum_free_in_chunks_in_use();
2672 }
2674 // Space capacity in the Metaspace. It includes
2675 // space in the list of chunks from which allocations
2676 // have been made. Don't include space in the global freelist and
2677 // in the space available in the dictionary which
2678 // is already counted in some chunk.
2679 size_t Metaspace::capacity_words(MetadataType mdtype) const {
2680 return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2681 vsm()->sum_capacity_in_chunks_in_use();
2682 }
2684 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2685 if (SafepointSynchronize::is_at_safepoint()) {
2686 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2687 // Don't take Heap_lock
2688 MutexLocker ml(vsm()->lock());
2689 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2690 // Dark matter. Too small for dictionary.
2691 #ifdef ASSERT
2692 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2693 #endif
2694 return;
2695 }
2696 if (is_class) {
2697 class_vsm()->deallocate(ptr, word_size);
2698 } else {
2699 vsm()->deallocate(ptr, word_size);
2700 }
2701 } else {
2702 MutexLocker ml(vsm()->lock());
2704 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2705 // Dark matter. Too small for dictionary.
2706 #ifdef ASSERT
2707 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2708 #endif
2709 return;
2710 }
2711 if (is_class) {
2712 class_vsm()->deallocate(ptr, word_size);
2713 } else {
2714 vsm()->deallocate(ptr, word_size);
2715 }
2716 }
2717 }
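// Note (illustrative): both branches above treat blocks smaller than
// TreeChunk<Metablock, FreeList>::min_size() as "dark matter" -- in debug
// builds they are filled with 0xf5f5f5f5 and then simply dropped rather than
// returned to a block freelist.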
2719 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
2720 bool read_only, MetadataType mdtype, TRAPS) {
2721 if (HAS_PENDING_EXCEPTION) {
2722 assert(false, "Should not allocate with exception pending");
2723 return NULL; // caller does a CHECK_NULL too
2724 }
2726   // SSS: Should we align the allocations and make sure the sizes are aligned?
2727 MetaWord* result = NULL;
2729 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
2730 "ClassLoaderData::the_null_class_loader_data() should have been used.");
2731 // Allocate in metaspaces without taking out a lock, because it deadlocks
2732 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
2733 // to revisit this for application class data sharing.
2734 if (DumpSharedSpaces) {
2735 if (read_only) {
2736 result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
2737 } else {
2738 result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
2739 }
2740 if (result == NULL) {
2741 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
2742 }
2743 return Metablock::initialize(result, word_size);
2744 }
2746 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
2748 if (result == NULL) {
2749 // Try to clean out some memory and retry.
2750 result =
2751 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
2752 loader_data, word_size, mdtype);
2754 // If result is still null, we are out of memory.
2755 if (result == NULL) {
2756 if (Verbose && TraceMetadataChunkAllocation) {
2757 gclog_or_tty->print_cr("Metaspace allocation failed for size "
2758 SIZE_FORMAT, word_size);
2759 if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty);
2760 MetaspaceAux::dump(gclog_or_tty);
2761 }
2762 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
2763 report_java_out_of_memory("Metadata space");
2765 if (JvmtiExport::should_post_resource_exhausted()) {
2766 JvmtiExport::post_resource_exhausted(
2767 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
2768 "Metadata space");
2769 }
2770 THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
2771 }
2772 }
2773 return Metablock::initialize(result, word_size);
2774 }
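// Allocation flow above (illustrative): try the loader's metaspace first; on
// failure let the collector policy attempt a GC and retry via
// satisfy_failed_metadata_allocation(); only if that also fails is a
// "Metadata space" out-of-memory condition reported and an OOM error thrown.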
2776 void Metaspace::print_on(outputStream* out) const {
2777 // Print both class virtual space counts and metaspace.
2778 if (Verbose) {
2779 vsm()->print_on(out);
2780 class_vsm()->print_on(out);
2781 }
2782 }
2784 bool Metaspace::contains(const void * ptr) {
2785 if (MetaspaceShared::is_in_shared_space(ptr)) {
2786 return true;
2787 }
2788 // This is checked while unlocked. As long as the virtualspaces are added
2789 // at the end, the pointer will be in one of them. The virtual spaces
2790 // aren't deleted presently. When they are, some sort of locking might
2791 // be needed. Note, locking this can cause inversion problems with the
2792 // caller in MetaspaceObj::is_metadata() function.
2793 return space_list()->contains(ptr) || class_space_list()->contains(ptr);
2794 }
2796 void Metaspace::verify() {
2797 vsm()->verify();
2798 class_vsm()->verify();
2799 }
2801 void Metaspace::dump(outputStream* const out) const {
2802 if (UseMallocOnly) {
2803 // Just print usage for now
2804 out->print_cr("usage %d", used_words(Metaspace::NonClassType));
2805 }
2806 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
2807 vsm()->dump(out);
2808 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
2809 class_vsm()->dump(out);
2810 }