Tue, 27 May 2014 08:44:23 -0700
8038422: CDS test failed: assert((size % os::vm_allocation_granularity()) == 0) failed when limiting SharedMiscDataSize
Summary: In the debug build, the assert is against os::vm_allocation_granularity(), but in initialization we use os::vm_page_size() to align the allocation size. On Windows, _vm_page_size and _vm_allocation_granularity may not be the same.
Reviewed-by: dholmes, iklam, jiangli, coleenp
Contributed-by: yumin.qi@oracle.com
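
The failure mode in outline: the CDS initialization path rounds the shared-region sizes up to os::vm_page_size(), while the debug-only assert checks divisibility by os::vm_allocation_granularity(). On Windows the page size is typically 4K but the allocation granularity is 64K, so a SharedMiscDataSize that is page-aligned but not granularity-aligned trips the assert. A minimal sketch of the corrected pattern (illustrative only; align_size_up and the os:: accessors are real HotSpot helpers, align_shared_region is a made-up name):

  static size_t align_shared_region(size_t size) {
    // Wrong on Windows: 4K page alignment does not imply 64K granularity
    // alignment, so the debug-only
    //   assert((size % os::vm_allocation_granularity()) == 0)
    // can still fire:
    //   return align_size_up(size, os::vm_page_size());
    // Right: align with the same quantity the assert checks.
    return align_size_up(size, os::vm_allocation_granularity());
  }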
1 /*
2 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "gc_interface/collectedHeap.hpp"
26 #include "memory/allocation.hpp"
27 #include "memory/binaryTreeDictionary.hpp"
28 #include "memory/freeList.hpp"
29 #include "memory/collectorPolicy.hpp"
30 #include "memory/filemap.hpp"
31 #include "memory/freeList.hpp"
32 #include "memory/gcLocker.hpp"
33 #include "memory/metachunk.hpp"
34 #include "memory/metaspace.hpp"
35 #include "memory/metaspaceGCThresholdUpdater.hpp"
36 #include "memory/metaspaceShared.hpp"
37 #include "memory/metaspaceTracer.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "memory/universe.hpp"
40 #include "runtime/atomic.inline.hpp"
41 #include "runtime/globals.hpp"
42 #include "runtime/init.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutex.hpp"
45 #include "runtime/orderAccess.inline.hpp"
46 #include "services/memTracker.hpp"
47 #include "services/memoryService.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/debug.hpp"
51 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
53 typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
54 typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
56 // Set this constant to enable slow integrity checking of the free chunk lists
57 const bool metaspace_slow_verify = false;
59 size_t const allocation_from_dictionary_limit = 4 * K;
61 MetaWord* last_allocated = 0;
63 size_t Metaspace::_compressed_class_space_size;
64 const MetaspaceTracer* Metaspace::_tracer = NULL;
66 // Used in declarations in SpaceManager and ChunkManager
67 enum ChunkIndex {
68 ZeroIndex = 0,
69 SpecializedIndex = ZeroIndex,
70 SmallIndex = SpecializedIndex + 1,
71 MediumIndex = SmallIndex + 1,
72 HumongousIndex = MediumIndex + 1,
73 NumberOfFreeLists = 3,
74 NumberOfInUseLists = 4
75 };
77 enum ChunkSizes { // in words.
78 ClassSpecializedChunk = 128,
79 SpecializedChunk = 128,
80 ClassSmallChunk = 256,
81 SmallChunk = 512,
82 ClassMediumChunk = 4 * K,
83 MediumChunk = 8 * K
84 };
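// Note: the free lists hold chunks of exactly these sizes; any chunk larger
// than MediumChunk (ClassMediumChunk for the class space) is treated as
// humongous and managed in a dictionary rather than in a fixed-size free list.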
86 static ChunkIndex next_chunk_index(ChunkIndex i) {
87 assert(i < NumberOfInUseLists, "Out of bounds");
88 return (ChunkIndex) (i+1);
89 }
91 volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
92 uint MetaspaceGC::_shrink_factor = 0;
93 bool MetaspaceGC::_should_concurrent_collect = false;
95 typedef class FreeList<Metachunk> ChunkList;
97 // Manages the global free lists of chunks.
98 class ChunkManager : public CHeapObj<mtInternal> {
99 friend class TestVirtualSpaceNodeTest;
101 // Free list of chunks of different sizes.
102 // SpecializedChunk
103 // SmallChunk
104 // MediumChunk
105 // HumongousChunk
106 ChunkList _free_chunks[NumberOfFreeLists];
108 // HumongousChunk
109 ChunkTreeDictionary _humongous_dictionary;
111 // Totals for the free chunks in all lists of this ChunkManager
112 size_t _free_chunks_total;
113 size_t _free_chunks_count;
115 void dec_free_chunks_total(size_t v) {
116 assert(_free_chunks_count > 0 &&
117 _free_chunks_total > 0,
118 "About to go negative");
119 Atomic::add_ptr(-1, &_free_chunks_count);
120 jlong minus_v = (jlong) - (jlong) v;
121 Atomic::add_ptr(minus_v, &_free_chunks_total);
122 }
124 // Debug support
126 size_t sum_free_chunks();
127 size_t sum_free_chunks_count();
129 void locked_verify_free_chunks_total();
130 void slow_locked_verify_free_chunks_total() {
131 if (metaspace_slow_verify) {
132 locked_verify_free_chunks_total();
133 }
134 }
135 void locked_verify_free_chunks_count();
136 void slow_locked_verify_free_chunks_count() {
137 if (metaspace_slow_verify) {
138 locked_verify_free_chunks_count();
139 }
140 }
141 void verify_free_chunks_count();
143 public:
145 ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
146 : _free_chunks_total(0), _free_chunks_count(0) {
147 _free_chunks[SpecializedIndex].set_size(specialized_size);
148 _free_chunks[SmallIndex].set_size(small_size);
149 _free_chunks[MediumIndex].set_size(medium_size);
150 }
152 // Allocate (remove) a chunk from, or return a chunk to, the global freelist.
153 Metachunk* chunk_freelist_allocate(size_t word_size);
155 // Map a size to a list index assuming that there are lists
156 // for special, small, medium, and humongous chunks.
157 static ChunkIndex list_index(size_t size);
159 // Remove the chunk from its freelist. It is
160 // expected to be on one of the _free_chunks[] lists.
161 void remove_chunk(Metachunk* chunk);
163 // Add the simple linked list of chunks to the freelist of chunks
164 // of type index.
165 void return_chunks(ChunkIndex index, Metachunk* chunks);
167 // Total of the space in the free chunks list
168 size_t free_chunks_total_words();
169 size_t free_chunks_total_bytes();
171 // Number of chunks in the free chunks list
172 size_t free_chunks_count();
174 void inc_free_chunks_total(size_t v, size_t count = 1) {
175 Atomic::add_ptr(count, &_free_chunks_count);
176 Atomic::add_ptr(v, &_free_chunks_total);
177 }
178 ChunkTreeDictionary* humongous_dictionary() {
179 return &_humongous_dictionary;
180 }
182 ChunkList* free_chunks(ChunkIndex index);
184 // Returns the list for the given chunk word size.
185 ChunkList* find_free_chunks_list(size_t word_size);
187 // Remove from a list by size. Selects list based on size of chunk.
188 Metachunk* free_chunks_get(size_t chunk_word_size);
190 #define index_bounds_check(index) \
191 assert(index == SpecializedIndex || \
192 index == SmallIndex || \
193 index == MediumIndex || \
194 index == HumongousIndex, err_msg("Bad index: %d", (int) index))
196 size_t num_free_chunks(ChunkIndex index) const {
197 index_bounds_check(index);
199 if (index == HumongousIndex) {
200 return _humongous_dictionary.total_free_blocks();
201 }
203 ssize_t count = _free_chunks[index].count();
204 return count == -1 ? 0 : (size_t) count;
205 }
207 size_t size_free_chunks_in_bytes(ChunkIndex index) const {
208 index_bounds_check(index);
210 size_t word_size = 0;
211 if (index == HumongousIndex) {
212 word_size = _humongous_dictionary.total_size();
213 } else {
214 const size_t size_per_chunk_in_words = _free_chunks[index].size();
215 word_size = size_per_chunk_in_words * num_free_chunks(index);
216 }
218 return word_size * BytesPerWord;
219 }
221 MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
222 return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
223 num_free_chunks(SmallIndex),
224 num_free_chunks(MediumIndex),
225 num_free_chunks(HumongousIndex),
226 size_free_chunks_in_bytes(SpecializedIndex),
227 size_free_chunks_in_bytes(SmallIndex),
228 size_free_chunks_in_bytes(MediumIndex),
229 size_free_chunks_in_bytes(HumongousIndex));
230 }
232 // Debug support
233 void verify();
234 void slow_verify() {
235 if (metaspace_slow_verify) {
236 verify();
237 }
238 }
239 void locked_verify();
240 void slow_locked_verify() {
241 if (metaspace_slow_verify) {
242 locked_verify();
243 }
244 }
245 void verify_free_chunks_total();
247 void locked_print_free_chunks(outputStream* st);
248 void locked_print_sum_free_chunks(outputStream* st);
250 void print_on(outputStream* st) const;
251 };
253 // Used to manage the free list of Metablocks (a block corresponds
254 // to the allocation of a quantum of metadata).
255 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
256 BlockTreeDictionary* _dictionary;
258 // Only allocate and split from freelist if the size of the allocation
259 // is at least 1/4th the size of the available block.
260 const static int WasteMultiplier = 4;
262 // Accessors
263 BlockTreeDictionary* dictionary() const { return _dictionary; }
265 public:
266 BlockFreelist();
267 ~BlockFreelist();
269 // Get and return a block to the free list
270 MetaWord* get_block(size_t word_size);
271 void return_block(MetaWord* p, size_t word_size);
273 size_t total_size() {
274 if (dictionary() == NULL) {
275 return 0;
276 } else {
277 return dictionary()->total_size();
278 }
279 }
281 void print_on(outputStream* st) const;
282 };
284 // A VirtualSpaceList node.
285 class VirtualSpaceNode : public CHeapObj<mtClass> {
286 friend class VirtualSpaceList;
288 // Link to next VirtualSpaceNode
289 VirtualSpaceNode* _next;
291 // total reserved region of this VirtualSpace
292 MemRegion _reserved;
293 ReservedSpace _rs;
294 VirtualSpace _virtual_space;
295 MetaWord* _top;
296 // count of chunks contained in this VirtualSpace
297 uintx _container_count;
299 // Convenience functions to access the _virtual_space
300 char* low() const { return virtual_space()->low(); }
301 char* high() const { return virtual_space()->high(); }
303 // The first Metachunk will be allocated at the bottom of the
304 // VirtualSpace
305 Metachunk* first_chunk() { return (Metachunk*) bottom(); }
307 // Committed but unused space in the virtual space
308 size_t free_words_in_vs() const;
309 public:
311 VirtualSpaceNode(size_t byte_size);
312 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
313 ~VirtualSpaceNode();
315 // Convenience functions for logical bottom and end
316 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
317 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
319 bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
321 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
322 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
324 bool is_pre_committed() const { return _virtual_space.special(); }
326 // address of next available space in _virtual_space;
327 // Accessors
328 VirtualSpaceNode* next() { return _next; }
329 void set_next(VirtualSpaceNode* v) { _next = v; }
331 void set_reserved(MemRegion const v) { _reserved = v; }
332 void set_top(MetaWord* v) { _top = v; }
334 // Accessors
335 MemRegion* reserved() { return &_reserved; }
336 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
338 // Returns true if "word_size" is available in the VirtualSpace
339 bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
341 MetaWord* top() const { return _top; }
342 void inc_top(size_t word_size) { _top += word_size; }
344 uintx container_count() { return _container_count; }
345 void inc_container_count();
346 void dec_container_count();
347 #ifdef ASSERT
348 uint container_count_slow();
349 void verify_container_count();
350 #endif
352 // used and capacity in this single entry in the list
353 size_t used_words_in_vs() const;
354 size_t capacity_words_in_vs() const;
356 bool initialize();
358 // get space from the virtual space
359 Metachunk* take_from_committed(size_t chunk_word_size);
361 // Allocate a chunk from the virtual space and return it.
362 Metachunk* get_chunk_vs(size_t chunk_word_size);
364 // Expands/shrinks the committed space in a virtual space. Delegates
365 // to Virtualspace
366 bool expand_by(size_t min_words, size_t preferred_words);
368 // In preparation for deleting this node, remove all the chunks
369 // in the node from any freelist.
370 void purge(ChunkManager* chunk_manager);
372 // If an allocation doesn't fit in the current node a new node is created.
373 // Allocate chunks out of the remaining committed space in this node
374 // to avoid wasting that memory.
375 // This always adds up because all the chunk sizes are multiples of
376 // the smallest chunk size.
377 void retire(ChunkManager* chunk_manager);
379 #ifdef ASSERT
380 // Debug support
381 void mangle();
382 #endif
384 void print_on(outputStream* st) const;
385 };
387 #define assert_is_ptr_aligned(ptr, alignment) \
388 assert(is_ptr_aligned(ptr, alignment), \
389 err_msg(PTR_FORMAT " is not aligned to " \
390 SIZE_FORMAT, ptr, alignment))
392 #define assert_is_size_aligned(size, alignment) \
393 assert(is_size_aligned(size, alignment), \
394 err_msg(SIZE_FORMAT " is not aligned to " \
395 SIZE_FORMAT, size, alignment))
398 // Decide if large pages should be committed when the memory is reserved.
399 static bool should_commit_large_pages_when_reserving(size_t bytes) {
400 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
401 size_t words = bytes / BytesPerWord;
402 bool is_class = false; // We never reserve large pages for the class space.
403 if (MetaspaceGC::can_expand(words, is_class) &&
404 MetaspaceGC::allowed_expansion() >= words) {
405 return true;
406 }
407 }
409 return false;
410 }
412 // bytes is the size of the associated virtualspace.
413 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
414 assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
416 #if INCLUDE_CDS
417 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
418 // a configurable address, generally at the top of the Java heap so other
419 // memory addresses don't conflict.
420 if (DumpSharedSpaces) {
421 bool large_pages = false; // No large pages when dumping the CDS archive.
422 char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
424 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
425 if (_rs.is_reserved()) {
426 assert(shared_base == 0 || _rs.base() == shared_base, "should match");
427 } else {
428 // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
429 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
430 }
431 MetaspaceShared::set_shared_rs(&_rs);
432 } else
433 #endif
434 {
435 bool large_pages = should_commit_large_pages_when_reserving(bytes);
437 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
438 }
440 if (_rs.is_reserved()) {
441 assert(_rs.base() != NULL, "Catch if we get a NULL address");
442 assert(_rs.size() != 0, "Catch if we get a 0 size");
443 assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
444 assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
446 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
447 }
448 }
450 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
451 Metachunk* chunk = first_chunk();
452 Metachunk* invalid_chunk = (Metachunk*) top();
453 while (chunk < invalid_chunk ) {
454 assert(chunk->is_tagged_free(), "Should be tagged free");
455 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
456 chunk_manager->remove_chunk(chunk);
457 assert(chunk->next() == NULL &&
458 chunk->prev() == NULL,
459 "Was not removed from its list");
460 chunk = (Metachunk*) next;
461 }
462 }
464 #ifdef ASSERT
465 uint VirtualSpaceNode::container_count_slow() {
466 uint count = 0;
467 Metachunk* chunk = first_chunk();
468 Metachunk* invalid_chunk = (Metachunk*) top();
469 while (chunk < invalid_chunk ) {
470 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
471 // Don't count the chunks on the free lists. Those are
472 // still part of the VirtualSpaceNode but not currently
473 // counted.
474 if (!chunk->is_tagged_free()) {
475 count++;
476 }
477 chunk = (Metachunk*) next;
478 }
479 return count;
480 }
481 #endif
483 // List of VirtualSpaces for metadata allocation.
484 class VirtualSpaceList : public CHeapObj<mtClass> {
485 friend class VirtualSpaceNode;
487 enum VirtualSpaceSizes {
488 VirtualSpaceSize = 256 * K
489 };
491 // Head of the list
492 VirtualSpaceNode* _virtual_space_list;
493 // virtual space currently being used for allocations
494 VirtualSpaceNode* _current_virtual_space;
496 // Is this VirtualSpaceList used for the compressed class space
497 bool _is_class;
499 // Sum of reserved and committed memory in the virtual spaces
500 size_t _reserved_words;
501 size_t _committed_words;
503 // Number of virtual spaces
504 size_t _virtual_space_count;
506 ~VirtualSpaceList();
508 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
510 void set_virtual_space_list(VirtualSpaceNode* v) {
511 _virtual_space_list = v;
512 }
513 void set_current_virtual_space(VirtualSpaceNode* v) {
514 _current_virtual_space = v;
515 }
517 void link_vs(VirtualSpaceNode* new_entry);
519 // Get another virtual space and add it to the list. This
520 // is typically prompted by a failed attempt to allocate a chunk
521 // and is typically followed by the allocation of a chunk.
522 bool create_new_virtual_space(size_t vs_word_size);
524 // Chunk up the unused committed space in the current
525 // virtual space and add the chunks to the free list.
526 void retire_current_virtual_space();
528 public:
529 VirtualSpaceList(size_t word_size);
530 VirtualSpaceList(ReservedSpace rs);
532 size_t free_bytes();
534 Metachunk* get_new_chunk(size_t word_size,
535 size_t grow_chunks_by_words,
536 size_t medium_chunk_bunch);
538 bool expand_node_by(VirtualSpaceNode* node,
539 size_t min_words,
540 size_t preferred_words);
542 bool expand_by(size_t min_words,
543 size_t preferred_words);
545 VirtualSpaceNode* current_virtual_space() {
546 return _current_virtual_space;
547 }
549 bool is_class() const { return _is_class; }
551 bool initialization_succeeded() { return _virtual_space_list != NULL; }
553 size_t reserved_words() { return _reserved_words; }
554 size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
555 size_t committed_words() { return _committed_words; }
556 size_t committed_bytes() { return committed_words() * BytesPerWord; }
558 void inc_reserved_words(size_t v);
559 void dec_reserved_words(size_t v);
560 void inc_committed_words(size_t v);
561 void dec_committed_words(size_t v);
562 void inc_virtual_space_count();
563 void dec_virtual_space_count();
565 bool contains(const void* ptr);
567 // Unlink empty VirtualSpaceNodes and free them.
568 void purge(ChunkManager* chunk_manager);
570 void print_on(outputStream* st) const;
572 class VirtualSpaceListIterator : public StackObj {
573 VirtualSpaceNode* _virtual_spaces;
574 public:
575 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
576 _virtual_spaces(virtual_spaces) {}
578 bool repeat() {
579 return _virtual_spaces != NULL;
580 }
582 VirtualSpaceNode* get_next() {
583 VirtualSpaceNode* result = _virtual_spaces;
584 if (_virtual_spaces != NULL) {
585 _virtual_spaces = _virtual_spaces->next();
586 }
587 return result;
588 }
589 };
590 };
592 class Metadebug : AllStatic {
593 // Debugging support for Metaspaces
594 static int _allocation_fail_alot_count;
596 public:
598 static void init_allocation_fail_alot_count();
599 #ifdef ASSERT
600 static bool test_metadata_failure();
601 #endif
602 };
604 int Metadebug::_allocation_fail_alot_count = 0;
606 // SpaceManager - used by Metaspace to handle allocations
607 class SpaceManager : public CHeapObj<mtClass> {
608 friend class Metaspace;
609 friend class Metadebug;
611 private:
613 // protects allocations
614 Mutex* const _lock;
616 // Type of metadata allocated.
617 Metaspace::MetadataType _mdtype;
619 // List of chunks in use by this SpaceManager. Allocations
620 // are done from the current chunk. The list is used for deallocating
621 // chunks when the SpaceManager is freed.
622 Metachunk* _chunks_in_use[NumberOfInUseLists];
623 Metachunk* _current_chunk;
625 // Number of small chunks to allocate to a manager
626 // If class space manager, small chunks are unlimited
627 static uint const _small_chunk_limit;
629 // Sum of all space in allocated chunks
630 size_t _allocated_blocks_words;
632 // Sum of all allocated chunks
633 size_t _allocated_chunks_words;
634 size_t _allocated_chunks_count;
636 // Free lists of blocks are per SpaceManager since they
637 // are assumed to be in chunks in use by the SpaceManager
638 // and all chunks in use by a SpaceManager are freed when
639 // the class loader using the SpaceManager is collected.
640 BlockFreelist _block_freelists;
642 // protects virtualspace and chunk expansions
643 static const char* _expand_lock_name;
644 static const int _expand_lock_rank;
645 static Mutex* const _expand_lock;
647 private:
648 // Accessors
649 Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
650 void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
651 _chunks_in_use[index] = v;
652 }
654 BlockFreelist* block_freelists() const {
655 return (BlockFreelist*) &_block_freelists;
656 }
658 Metaspace::MetadataType mdtype() { return _mdtype; }
660 VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
661 ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
663 Metachunk* current_chunk() const { return _current_chunk; }
664 void set_current_chunk(Metachunk* v) {
665 _current_chunk = v;
666 }
668 Metachunk* find_current_chunk(size_t word_size);
670 // Add chunk to the list of chunks in use
671 void add_chunk(Metachunk* v, bool make_current);
672 void retire_current_chunk();
674 Mutex* lock() const { return _lock; }
676 const char* chunk_size_name(ChunkIndex index) const;
678 protected:
679 void initialize();
681 public:
682 SpaceManager(Metaspace::MetadataType mdtype,
683 Mutex* lock);
684 ~SpaceManager();
686 enum ChunkMultiples {
687 MediumChunkMultiple = 4
688 };
690 bool is_class() { return _mdtype == Metaspace::ClassType; }
692 // Accessors
693 size_t specialized_chunk_size() { return is_class() ? (size_t) ClassSpecializedChunk : (size_t) SpecializedChunk; }
694 size_t small_chunk_size() { return is_class() ? (size_t) ClassSmallChunk : (size_t) SmallChunk; }
695 size_t medium_chunk_size() { return is_class() ? (size_t) ClassMediumChunk : (size_t) MediumChunk; }
696 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
698 size_t smallest_chunk_size() { return specialized_chunk_size(); }
700 size_t allocated_blocks_words() const { return _allocated_blocks_words; }
701 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
702 size_t allocated_chunks_words() const { return _allocated_chunks_words; }
703 size_t allocated_chunks_count() const { return _allocated_chunks_count; }
705 bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
707 static Mutex* expand_lock() { return _expand_lock; }
709 // Increment the per Metaspace and global running sums for Metachunks
710 // by the given size. This is used when a Metachunk is added to
711 // the in-use list.
712 void inc_size_metrics(size_t words);
713 // Increment the per Metaspace and global running sums for Metablocks by the given
714 // size. This is used when a Metablock is allocated.
715 void inc_used_metrics(size_t words);
716 // Remove this SpaceManager's portion of the running sums. That is,
717 // the global running sums for the Metachunks and Metablocks are
718 // decremented for all the Metachunks in-use by this SpaceManager.
719 void dec_total_from_size_metrics();
721 // Set the sizes for the initial chunks.
722 void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
723 size_t* chunk_word_size,
724 size_t* class_chunk_word_size);
726 size_t sum_capacity_in_chunks_in_use() const;
727 size_t sum_used_in_chunks_in_use() const;
728 size_t sum_free_in_chunks_in_use() const;
729 size_t sum_waste_in_chunks_in_use() const;
730 size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
732 size_t sum_count_in_chunks_in_use();
733 size_t sum_count_in_chunks_in_use(ChunkIndex i);
735 Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
737 // Block allocation and deallocation.
738 // Allocates a block from the current chunk
739 MetaWord* allocate(size_t word_size);
741 // Helper for allocations
742 MetaWord* allocate_work(size_t word_size);
744 // Returns a block to the per manager freelist
745 void deallocate(MetaWord* p, size_t word_size);
747 // Based on the allocation size and a minimum chunk size,
748 // compute the chunk size to request (for expanding space for chunk allocation).
749 size_t calc_chunk_size(size_t allocation_word_size);
751 // Called when an allocation from the current chunk fails.
752 // Gets a new chunk (may require getting a new virtual space),
753 // and allocates from that chunk.
754 MetaWord* grow_and_allocate(size_t word_size);
756 // Report metaspace memory usage to MemoryService.
757 void track_metaspace_memory_usage();
759 // debugging support.
761 void dump(outputStream* const out) const;
762 void print_on(outputStream* st) const;
763 void locked_print_chunks_in_use_on(outputStream* st) const;
765 void verify();
766 void verify_chunk_size(Metachunk* chunk);
767 NOT_PRODUCT(void mangle_freed_chunks();)
768 #ifdef ASSERT
769 void verify_allocated_blocks_words();
770 #endif
772 size_t get_raw_word_size(size_t word_size) {
773 size_t byte_size = word_size * BytesPerWord;
775 size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
776 raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
778 size_t raw_word_size = raw_bytes_size / BytesPerWord;
779 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
781 return raw_word_size;
782 }
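// Worked example (illustrative, assuming a 64-bit VM): a 1-word request is
// 8 bytes, raised to sizeof(Metablock) so a freed block can later be linked
// into the block freelist, rounded up to Metachunk::object_alignment(), and
// converted back to words.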
783 };
785 uint const SpaceManager::_small_chunk_limit = 4;
787 const char* SpaceManager::_expand_lock_name =
788 "SpaceManager chunk allocation lock";
789 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
790 Mutex* const SpaceManager::_expand_lock =
791 new Mutex(SpaceManager::_expand_lock_rank,
792 SpaceManager::_expand_lock_name,
793 Mutex::_allow_vm_block_flag);
795 void VirtualSpaceNode::inc_container_count() {
796 assert_lock_strong(SpaceManager::expand_lock());
797 _container_count++;
798 assert(_container_count == container_count_slow(),
799 err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
800 " container_count_slow() " SIZE_FORMAT,
801 _container_count, container_count_slow()));
802 }
804 void VirtualSpaceNode::dec_container_count() {
805 assert_lock_strong(SpaceManager::expand_lock());
806 _container_count--;
807 }
809 #ifdef ASSERT
810 void VirtualSpaceNode::verify_container_count() {
811 assert(_container_count == container_count_slow(),
812 err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
813 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
814 }
815 #endif
817 // BlockFreelist methods
819 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
821 BlockFreelist::~BlockFreelist() {
822 if (_dictionary != NULL) {
823 if (Verbose && TraceMetadataChunkAllocation) {
824 _dictionary->print_free_lists(gclog_or_tty);
825 }
826 delete _dictionary;
827 }
828 }
830 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
831 Metablock* free_chunk = ::new (p) Metablock(word_size);
832 if (dictionary() == NULL) {
833 _dictionary = new BlockTreeDictionary();
834 }
835 dictionary()->return_chunk(free_chunk);
836 }
838 MetaWord* BlockFreelist::get_block(size_t word_size) {
839 if (dictionary() == NULL) {
840 return NULL;
841 }
843 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
844 // Dark matter. Too small for dictionary.
845 return NULL;
846 }
848 Metablock* free_block =
849 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
850 if (free_block == NULL) {
851 return NULL;
852 }
854 const size_t block_size = free_block->size();
855 if (block_size > WasteMultiplier * word_size) {
856 return_block((MetaWord*)free_block, block_size);
857 return NULL;
858 }
860 MetaWord* new_block = (MetaWord*)free_block;
861 assert(block_size >= word_size, "Incorrect size of block from freelist");
862 const size_t unused = block_size - word_size;
863 if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
864 return_block(new_block + word_size, unused);
865 }
867 return new_block;
868 }
870 void BlockFreelist::print_on(outputStream* st) const {
871 if (dictionary() == NULL) {
872 return;
873 }
874 dictionary()->print_free_lists(st);
875 }
877 // VirtualSpaceNode methods
879 VirtualSpaceNode::~VirtualSpaceNode() {
880 _rs.release();
881 #ifdef ASSERT
882 size_t word_size = sizeof(*this) / BytesPerWord;
883 Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
884 #endif
885 }
887 size_t VirtualSpaceNode::used_words_in_vs() const {
888 return pointer_delta(top(), bottom(), sizeof(MetaWord));
889 }
891 // Space committed in the VirtualSpace
892 size_t VirtualSpaceNode::capacity_words_in_vs() const {
893 return pointer_delta(end(), bottom(), sizeof(MetaWord));
894 }
896 size_t VirtualSpaceNode::free_words_in_vs() const {
897 return pointer_delta(end(), top(), sizeof(MetaWord));
898 }
900 // Allocates the chunk from the virtual space only.
901 // This interface is also used internally for debugging. Not all
902 // chunks removed here are necessarily used for allocation.
903 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
904 // Bottom of the new chunk
905 MetaWord* chunk_limit = top();
906 assert(chunk_limit != NULL, "Not safe to call this method");
908 // The virtual spaces are always expanded by the
909 // commit granularity to enforce the following condition.
910 // Without this the is_available check will not work correctly.
911 assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
912 "The committed memory doesn't match the expanded memory.");
914 if (!is_available(chunk_word_size)) {
915 if (TraceMetadataChunkAllocation) {
916 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
917 // Dump some information about the virtual space that is nearly full
918 print_on(gclog_or_tty);
919 }
920 return NULL;
921 }
923 // Take the space (bump top on the current virtual space).
924 inc_top(chunk_word_size);
926 // Initialize the chunk
927 Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
928 return result;
929 }
932 // Expand the virtual space (commit more of the reserved space)
933 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
934 size_t min_bytes = min_words * BytesPerWord;
935 size_t preferred_bytes = preferred_words * BytesPerWord;
937 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
939 if (uncommitted < min_bytes) {
940 return false;
941 }
943 size_t commit = MIN2(preferred_bytes, uncommitted);
944 bool result = virtual_space()->expand_by(commit, false);
946 assert(result, "Failed to commit memory");
948 return result;
949 }
951 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
952 assert_lock_strong(SpaceManager::expand_lock());
953 Metachunk* result = take_from_committed(chunk_word_size);
954 if (result != NULL) {
955 inc_container_count();
956 }
957 return result;
958 }
960 bool VirtualSpaceNode::initialize() {
962 if (!_rs.is_reserved()) {
963 return false;
964 }
966 // These are necessary restrictions to make sure that the virtual space always
967 // grows in steps of Metaspace::commit_alignment(). If both base and size are
968 // aligned only the middle alignment of the VirtualSpace is used.
969 assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
970 assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
972 // ReservedSpaces marked as special will have the entire memory
973 // pre-committed. Setting a committed size will make sure that
974 // committed_size and actual_committed_size agree.
975 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
977 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
978 Metaspace::commit_alignment());
979 if (result) {
980 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
981 "Checking that the pre-committed memory was registered by the VirtualSpace");
983 set_top((MetaWord*)virtual_space()->low());
984 set_reserved(MemRegion((HeapWord*)_rs.base(),
985 (HeapWord*)(_rs.base() + _rs.size())));
987 assert(reserved()->start() == (HeapWord*) _rs.base(),
988 err_msg("Reserved start was not set properly " PTR_FORMAT
989 " != " PTR_FORMAT, reserved()->start(), _rs.base()));
990 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
991 err_msg("Reserved size was not set properly " SIZE_FORMAT
992 " != " SIZE_FORMAT, reserved()->word_size(),
993 _rs.size() / BytesPerWord));
994 }
996 return result;
997 }
999 void VirtualSpaceNode::print_on(outputStream* st) const {
1000 size_t used = used_words_in_vs();
1001 size_t capacity = capacity_words_in_vs();
1002 VirtualSpace* vs = virtual_space();
1003 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
1004 "[" PTR_FORMAT ", " PTR_FORMAT ", "
1005 PTR_FORMAT ", " PTR_FORMAT ")",
1006 vs, capacity / K,
1007 capacity == 0 ? 0 : used * 100 / capacity,
1008 bottom(), top(), end(),
1009 vs->high_boundary());
1010 }
1012 #ifdef ASSERT
1013 void VirtualSpaceNode::mangle() {
1014 size_t word_size = capacity_words_in_vs();
1015 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
1016 }
1017 #endif // ASSERT
1019 // VirtualSpaceList methods
1020 // Space allocated from the VirtualSpace
1022 VirtualSpaceList::~VirtualSpaceList() {
1023 VirtualSpaceListIterator iter(virtual_space_list());
1024 while (iter.repeat()) {
1025 VirtualSpaceNode* vsl = iter.get_next();
1026 delete vsl;
1027 }
1028 }
1030 void VirtualSpaceList::inc_reserved_words(size_t v) {
1031 assert_lock_strong(SpaceManager::expand_lock());
1032 _reserved_words = _reserved_words + v;
1033 }
1034 void VirtualSpaceList::dec_reserved_words(size_t v) {
1035 assert_lock_strong(SpaceManager::expand_lock());
1036 _reserved_words = _reserved_words - v;
1037 }
1039 #define assert_committed_below_limit() \
1040 assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
1041 err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
1042 " limit (MaxMetaspaceSize): " SIZE_FORMAT, \
1043 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
1045 void VirtualSpaceList::inc_committed_words(size_t v) {
1046 assert_lock_strong(SpaceManager::expand_lock());
1047 _committed_words = _committed_words + v;
1049 assert_committed_below_limit();
1050 }
1051 void VirtualSpaceList::dec_committed_words(size_t v) {
1052 assert_lock_strong(SpaceManager::expand_lock());
1053 _committed_words = _committed_words - v;
1055 assert_committed_below_limit();
1056 }
1058 void VirtualSpaceList::inc_virtual_space_count() {
1059 assert_lock_strong(SpaceManager::expand_lock());
1060 _virtual_space_count++;
1061 }
1062 void VirtualSpaceList::dec_virtual_space_count() {
1063 assert_lock_strong(SpaceManager::expand_lock());
1064 _virtual_space_count--;
1065 }
1067 void ChunkManager::remove_chunk(Metachunk* chunk) {
1068 size_t word_size = chunk->word_size();
1069 ChunkIndex index = list_index(word_size);
1070 if (index != HumongousIndex) {
1071 free_chunks(index)->remove_chunk(chunk);
1072 } else {
1073 humongous_dictionary()->remove_chunk(chunk);
1074 }
1076 // Chunk is being removed from the chunks free list.
1077 dec_free_chunks_total(chunk->word_size());
1078 }
1080 // Walk the list of VirtualSpaceNodes and delete
1081 // nodes with a 0 container_count. Remove Metachunks in
1082 // the node from their respective freelists.
1083 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
1084 assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
1085 assert_lock_strong(SpaceManager::expand_lock());
1086 // Don't use a VirtualSpaceListIterator because this
1087 // list is being changed and a straightforward use of an iterator is not safe.
1088 VirtualSpaceNode* purged_vsl = NULL;
1089 VirtualSpaceNode* prev_vsl = virtual_space_list();
1090 VirtualSpaceNode* next_vsl = prev_vsl;
1091 while (next_vsl != NULL) {
1092 VirtualSpaceNode* vsl = next_vsl;
1093 next_vsl = vsl->next();
1094 // Don't free the current virtual space since it will likely
1095 // be needed soon.
1096 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1097 // Unlink it from the list
1098 if (prev_vsl == vsl) {
1099 // This is the case of the current node being the first node.
1100 assert(vsl == virtual_space_list(), "Expected to be the first node");
1101 set_virtual_space_list(vsl->next());
1102 } else {
1103 prev_vsl->set_next(vsl->next());
1104 }
1106 vsl->purge(chunk_manager);
1107 dec_reserved_words(vsl->reserved_words());
1108 dec_committed_words(vsl->committed_words());
1109 dec_virtual_space_count();
1110 purged_vsl = vsl;
1111 delete vsl;
1112 } else {
1113 prev_vsl = vsl;
1114 }
1115 }
1116 #ifdef ASSERT
1117 if (purged_vsl != NULL) {
1118 // List should be stable enough to use an iterator here.
1119 VirtualSpaceListIterator iter(virtual_space_list());
1120 while (iter.repeat()) {
1121 VirtualSpaceNode* vsl = iter.get_next();
1122 assert(vsl != purged_vsl, "Purge of vsl failed");
1123 }
1124 }
1125 #endif
1126 }
1129 // This function looks at the mmap regions in the metaspace without locking.
1130 // The chunks are added with store ordering and not deleted except at
1131 // unloading time during a safepoint.
1132 bool VirtualSpaceList::contains(const void* ptr) {
1133 // List should be stable enough to use an iterator here because removing virtual
1134 // space nodes is only allowed at a safepoint.
1135 VirtualSpaceListIterator iter(virtual_space_list());
1136 while (iter.repeat()) {
1137 VirtualSpaceNode* vsn = iter.get_next();
1138 if (vsn->contains(ptr)) {
1139 return true;
1140 }
1141 }
1142 return false;
1143 }
1145 void VirtualSpaceList::retire_current_virtual_space() {
1146 assert_lock_strong(SpaceManager::expand_lock());
1148 VirtualSpaceNode* vsn = current_virtual_space();
1150 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
1151 Metaspace::chunk_manager_metadata();
1153 vsn->retire(cm);
1154 }
1156 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
1157 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
1158 ChunkIndex index = (ChunkIndex)i;
1159 size_t chunk_size = chunk_manager->free_chunks(index)->size();
1161 while (free_words_in_vs() >= chunk_size) {
1162 DEBUG_ONLY(verify_container_count();)
1163 Metachunk* chunk = get_chunk_vs(chunk_size);
1164 assert(chunk != NULL, "allocation should have been successful");
1166 chunk_manager->return_chunks(index, chunk);
1167 chunk_manager->inc_free_chunks_total(chunk_size);
1168 DEBUG_ONLY(verify_container_count();)
1169 }
1170 }
1171 assert(free_words_in_vs() == 0, "should be empty now");
1172 }
1174 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1175 _is_class(false),
1176 _virtual_space_list(NULL),
1177 _current_virtual_space(NULL),
1178 _reserved_words(0),
1179 _committed_words(0),
1180 _virtual_space_count(0) {
1181 MutexLockerEx cl(SpaceManager::expand_lock(),
1182 Mutex::_no_safepoint_check_flag);
1183 create_new_virtual_space(word_size);
1184 }
1186 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1187 _is_class(true),
1188 _virtual_space_list(NULL),
1189 _current_virtual_space(NULL),
1190 _reserved_words(0),
1191 _committed_words(0),
1192 _virtual_space_count(0) {
1193 MutexLockerEx cl(SpaceManager::expand_lock(),
1194 Mutex::_no_safepoint_check_flag);
1195 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1196 bool succeeded = class_entry->initialize();
1197 if (succeeded) {
1198 link_vs(class_entry);
1199 }
1200 }
1202 size_t VirtualSpaceList::free_bytes() {
1203 return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1204 }
1206 // Allocate another meta virtual space and add it to the list.
1207 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
1208 assert_lock_strong(SpaceManager::expand_lock());
1210 if (is_class()) {
1211 assert(false, "We currently don't support more than one VirtualSpace for"
1212 " the compressed class space. The initialization of the"
1213 " CCS uses another code path and should not hit this path.");
1214 return false;
1215 }
1217 if (vs_word_size == 0) {
1218 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
1219 return false;
1220 }
1222 // Reserve the space
1223 size_t vs_byte_size = vs_word_size * BytesPerWord;
1224 assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
1226 // Allocate the meta virtual space and initialize it.
1227 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1228 if (!new_entry->initialize()) {
1229 delete new_entry;
1230 return false;
1231 } else {
1232 assert(new_entry->reserved_words() == vs_word_size,
1233 "Reserved memory size differs from requested memory size");
1234 // ensure lock-free iteration sees fully initialized node
1235 OrderAccess::storestore();
1236 link_vs(new_entry);
1237 return true;
1238 }
1239 }
1241 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1242 if (virtual_space_list() == NULL) {
1243 set_virtual_space_list(new_entry);
1244 } else {
1245 current_virtual_space()->set_next(new_entry);
1246 }
1247 set_current_virtual_space(new_entry);
1248 inc_reserved_words(new_entry->reserved_words());
1249 inc_committed_words(new_entry->committed_words());
1250 inc_virtual_space_count();
1251 #ifdef ASSERT
1252 new_entry->mangle();
1253 #endif
1254 if (TraceMetavirtualspaceAllocation && Verbose) {
1255 VirtualSpaceNode* vsl = current_virtual_space();
1256 vsl->print_on(gclog_or_tty);
1257 }
1258 }
1260 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1261 size_t min_words,
1262 size_t preferred_words) {
1263 size_t before = node->committed_words();
1265 bool result = node->expand_by(min_words, preferred_words);
1267 size_t after = node->committed_words();
1269 // after and before can be the same if the memory was pre-committed.
1270 assert(after >= before, "Inconsistency");
1271 inc_committed_words(after - before);
1273 return result;
1274 }
1276 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
1277 assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
1278 assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
1279 assert(min_words <= preferred_words, "Invalid arguments");
1281 if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
1282 return false;
1283 }
1285 size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
1286 if (allowed_expansion_words < min_words) {
1287 return false;
1288 }
1290 size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
1292 // Commit more memory from the current virtual space.
1293 bool vs_expanded = expand_node_by(current_virtual_space(),
1294 min_words,
1295 max_expansion_words);
1296 if (vs_expanded) {
1297 return true;
1298 }
1299 retire_current_virtual_space();
1301 // Get another virtual space.
1302 size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
1303 grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
1305 if (create_new_virtual_space(grow_vs_words)) {
1306 if (current_virtual_space()->is_pre_committed()) {
1307 // The memory was pre-committed, so we are done here.
1308 assert(min_words <= current_virtual_space()->committed_words(),
1309 "The new VirtualSpace was pre-committed, so it"
1310 "should be large enough to fit the alloc request.");
1311 return true;
1312 }
1314 return expand_node_by(current_virtual_space(),
1315 min_words,
1316 max_expansion_words);
1317 }
1319 return false;
1320 }
1322 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1323 size_t grow_chunks_by_words,
1324 size_t medium_chunk_bunch) {
1326 // Allocate a chunk out of the current virtual space.
1327 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1329 if (next != NULL) {
1330 return next;
1331 }
1333 // The expand amount is currently only determined by the requested sizes
1334 // and not how much committed memory is left in the current virtual space.
1336 size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
1337 size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
1338 if (min_word_size >= preferred_word_size) {
1339 // Can happen when humongous chunks are allocated.
1340 preferred_word_size = min_word_size;
1341 }
1343 bool expanded = expand_by(min_word_size, preferred_word_size);
1344 if (expanded) {
1345 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1346 assert(next != NULL, "The allocation was expected to succeed after the expansion");
1347 }
1349 return next;
1350 }
1352 void VirtualSpaceList::print_on(outputStream* st) const {
1353 if (TraceMetadataChunkAllocation && Verbose) {
1354 VirtualSpaceListIterator iter(virtual_space_list());
1355 while (iter.repeat()) {
1356 VirtualSpaceNode* node = iter.get_next();
1357 node->print_on(st);
1358 }
1359 }
1360 }
1362 // MetaspaceGC methods
1364 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1365 // Within the VM operation after the GC the attempt to allocate the metadata
1366 // should succeed. If the GC did not free enough space for the metaspace
1367 // allocation, the HWM is increased so that another virtualspace will be
1368 // allocated for the metadata. With perm gen the increase in the perm
1369 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
1370 // metaspace policy uses those as the small and large steps for the HWM.
1371 //
1372 // After the GC the compute_new_size() for MetaspaceGC is called to
1373 // resize the capacity of the metaspaces. The current implementation
1374 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1375 // to resize the Java heap by some GCs. New flags can be implemented
1376 // if really needed. MinMetaspaceFreeRatio is used to calculate how much
1377 // free space is desirable in the metaspace capacity to decide how much
1378 // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
1379 // free space is desirable in the metaspace capacity before decreasing
1380 // the HWM.
1382 // Calculate the amount to increase the high water mark (HWM).
1383 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1384 // another expansion is not requested too soon. If that is not
1385 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
1386 // If that is still not enough, expand by the size of the allocation
1387 // plus some.
1388 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
1389 size_t min_delta = MinMetaspaceExpansion;
1390 size_t max_delta = MaxMetaspaceExpansion;
1391 size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
1393 if (delta <= min_delta) {
1394 delta = min_delta;
1395 } else if (delta <= max_delta) {
1396 // Don't want to hit the high water mark on the next
1397 // allocation so make the delta greater than just enough
1398 // for this allocation.
1399 delta = max_delta;
1400 } else {
1401 // This allocation is large but the next ones are probably not
1402 // so increase by the minimum.
1403 delta = delta + min_delta;
1404 }
1406 assert_is_size_aligned(delta, Metaspace::commit_alignment());
1408 return delta;
1409 }
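// Worked example (using round values MinMetaspaceExpansion=256K,
// MaxMetaspaceExpansion=4M and a 64K commit alignment; the actual defaults
// are platform-scaled): a 100K request is aligned up to 128K and then bumped
// to the 256K minimum; a 1M request becomes the 4M maximum; a 6M request
// becomes 6M + 256K.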
1411 size_t MetaspaceGC::capacity_until_GC() {
1412 size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
1413 assert(value >= MetaspaceSize, "Not initialized properly?");
1414 return value;
1415 }
1417 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
1418 assert_is_size_aligned(v, Metaspace::commit_alignment());
1420 size_t capacity_until_GC = (size_t) _capacity_until_GC;
1421 size_t new_value = capacity_until_GC + v;
1423 if (new_value < capacity_until_GC) {
1424 // The addition wrapped around, set new_value to aligned max value.
1425 new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
1426 }
1428 intptr_t expected = (intptr_t) capacity_until_GC;
1429 intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
1431 if (expected != actual) {
1432 return false;
1433 }
1435 if (new_cap_until_GC != NULL) {
1436 *new_cap_until_GC = new_value;
1437 }
1438 if (old_cap_until_GC != NULL) {
1439 *old_cap_until_GC = capacity_until_GC;
1440 }
1441 return true;
1442 }
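// A false return only means the cmpxchg lost a race with another thread;
// callers re-read capacity_until_GC() and retry. Sketch (not an actual call
// site in this file):
//   size_t new_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap)) {
//     ; // another thread raised the HWM; recompute delta and try again
//   }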
1444 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
1445 assert_is_size_aligned(v, Metaspace::commit_alignment());
1447 return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
1448 }
1450 void MetaspaceGC::initialize() {
1451 // Set the high-water mark to MaxMetaspaceSize during VM initialization since
1452 // we can't do a GC during initialization.
1453 _capacity_until_GC = MaxMetaspaceSize;
1454 }
1456 void MetaspaceGC::post_initialize() {
1457 // Reset the high-water mark once the VM initialization is done.
1458 _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
1459 }
1461 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
1462 // Check if the compressed class space is full.
1463 if (is_class && Metaspace::using_class_space()) {
1464 size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
1465 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
1466 return false;
1467 }
1468 }
1470 // Check if the user has imposed a limit on the metaspace memory.
1471 size_t committed_bytes = MetaspaceAux::committed_bytes();
1472 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
1473 return false;
1474 }
1476 return true;
1477 }
1479 size_t MetaspaceGC::allowed_expansion() {
1480 size_t committed_bytes = MetaspaceAux::committed_bytes();
1481 size_t capacity_until_gc = capacity_until_GC();
1483 assert(capacity_until_gc >= committed_bytes,
1484 err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
1485 capacity_until_gc, committed_bytes));
1487 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
1488 size_t left_until_GC = capacity_until_gc - committed_bytes;
1489 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
1491 return left_to_commit / BytesPerWord;
1492 }
1494 void MetaspaceGC::compute_new_size() {
1495 assert(_shrink_factor <= 100, "invalid shrink factor");
1496 uint current_shrink_factor = _shrink_factor;
1497 _shrink_factor = 0;
1499 // Using committed_bytes() for used_after_gc is an overestimation, since the
1500 // chunk free lists are included in committed_bytes() and the memory in an
1501 // un-fragmented chunk free list is available for future allocations.
1502 // However, if the chunk free lists becomes fragmented, then the memory may
1503 // not be available for future allocations and the memory is therefore "in use".
1504 // Including the chunk free lists in the definition of "in use" is therefore
1505 // necessary. Not including the chunk free lists can cause capacity_until_GC to
1506 // shrink below committed_bytes() and this has caused serious bugs in the past.
1507 const size_t used_after_gc = MetaspaceAux::committed_bytes();
1508 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1510 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1511 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1513 const double min_tmp = used_after_gc / maximum_used_percentage;
1514 size_t minimum_desired_capacity =
1515 (size_t)MIN2(min_tmp, double(max_uintx));
1516 // Don't shrink less than the initial generation size
1517 minimum_desired_capacity = MAX2(minimum_desired_capacity,
1518 MetaspaceSize);
1520 if (PrintGCDetails && Verbose) {
1521 gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1522 gclog_or_tty->print_cr(" "
1523 " minimum_free_percentage: %6.2f"
1524 " maximum_used_percentage: %6.2f",
1525 minimum_free_percentage,
1526 maximum_used_percentage);
1527 gclog_or_tty->print_cr(" "
1528 " used_after_gc : %6.1fKB",
1529 used_after_gc / (double) K);
1530 }
1533 size_t shrink_bytes = 0;
1534 if (capacity_until_GC < minimum_desired_capacity) {
1535 // If we have less capacity below the metaspace HWM, then
1536 // increment the HWM.
1537 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1538 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1539 // Don't expand unless it's significant
1540 if (expand_bytes >= MinMetaspaceExpansion) {
1541 size_t new_capacity_until_GC = 0;
1542 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1543 assert(succeeded, "Should always successfully increment HWM when at safepoint");
1545 Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1546 new_capacity_until_GC,
1547 MetaspaceGCThresholdUpdater::ComputeNewSize);
1548 if (PrintGCDetails && Verbose) {
1549 gclog_or_tty->print_cr(" expanding:"
1550 " minimum_desired_capacity: %6.1fKB"
1551 " expand_bytes: %6.1fKB"
1552 " MinMetaspaceExpansion: %6.1fKB"
1553 " new metaspace HWM: %6.1fKB",
1554 minimum_desired_capacity / (double) K,
1555 expand_bytes / (double) K,
1556 MinMetaspaceExpansion / (double) K,
1557 new_capacity_until_GC / (double) K);
1558 }
1559 }
1560 return;
1561 }
1563 // No expansion, now see if we want to shrink
1564 // We would never want to shrink more than this
1565 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1566 assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1567 max_shrink_bytes));
1569 // Should shrinking be considered?
1570 if (MaxMetaspaceFreeRatio < 100) {
1571 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1572 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1573 const double max_tmp = used_after_gc / minimum_used_percentage;
1574 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1575 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1576 MetaspaceSize);
1577 if (PrintGCDetails && Verbose) {
1578 gclog_or_tty->print_cr(" "
1579 " maximum_free_percentage: %6.2f"
1580 " minimum_used_percentage: %6.2f",
1581 maximum_free_percentage,
1582 minimum_used_percentage);
1583 gclog_or_tty->print_cr(" "
1584 " minimum_desired_capacity: %6.1fKB"
1585 " maximum_desired_capacity: %6.1fKB",
1586 minimum_desired_capacity / (double) K,
1587 maximum_desired_capacity / (double) K);
1588 }
1590 assert(minimum_desired_capacity <= maximum_desired_capacity,
1591 "sanity check");
1593 if (capacity_until_GC > maximum_desired_capacity) {
1594 // Capacity too large, compute shrinking size
1595 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1596 // We don't want to shrink all the way back to initSize if people call
1597 // System.gc(), because some programs do that between "phases" and then
1598 // we'd just have to grow the heap up again for the next phase. So we
1599 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1600 // on the third call, and 100% by the fourth call. But if we recompute
1601 // size without shrinking, it goes back to 0%.
1602 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1604 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1606 assert(shrink_bytes <= max_shrink_bytes,
1607 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1608 shrink_bytes, max_shrink_bytes));
1609 if (current_shrink_factor == 0) {
1610 _shrink_factor = 10;
1611 } else {
1612 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1613 }
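// Worked example (illustrative, not part of the original source): if four
// consecutive calls each find capacity_until_GC exceeding
// maximum_desired_capacity by 100 * M, the damped shrink amounts are:
//
//   call 1: 100*M / 100 * 0   = 0      (_shrink_factor 0   -> 10)
//   call 2: 100*M / 100 * 10  = 10*M   (_shrink_factor 10  -> 40)
//   call 3: 100*M / 100 * 40  = 40*M   (_shrink_factor 40  -> 100)
//   call 4: 100*M / 100 * 100 = 100*M  (_shrink_factor stays at 100)
//
// As the comment above notes, any recomputation that does not shrink
// resets the factor, restarting the sequence at 0%.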
1614 if (PrintGCDetails && Verbose) {
1615 gclog_or_tty->print_cr(" "
1616 " shrinking:"
1617 " initSize: %.1fK"
1618 " maximum_desired_capacity: %.1fK",
1619 MetaspaceSize / (double) K,
1620 maximum_desired_capacity / (double) K);
1621 gclog_or_tty->print_cr(" "
1622 " shrink_bytes: %.1fK"
1623 " current_shrink_factor: %d"
1624 " new shrink factor: %d"
1625 " MinMetaspaceExpansion: %.1fK",
1626 shrink_bytes / (double) K,
1627 current_shrink_factor,
1628 _shrink_factor,
1629 MinMetaspaceExpansion / (double) K);
1630 }
1631 }
1632 }
1634 // Don't shrink unless it's significant
1635 if (shrink_bytes >= MinMetaspaceExpansion &&
1636 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1637 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1638 Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1639 new_capacity_until_GC,
1640 MetaspaceGCThresholdUpdater::ComputeNewSize);
1641 }
1642 }
1644 // Metadebug methods
1646 void Metadebug::init_allocation_fail_alot_count() {
1647 if (MetadataAllocationFailALot) {
1648 _allocation_fail_alot_count =
1649 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1650 }
1651 }
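// Example (illustrative): with -XX:MetadataAllocationFailALotInterval=1000,
// os::random() (uniform over [0, max_jint]) is scaled into [0, 1000), so
// _allocation_fail_alot_count becomes a pseudo-random value in [1, 1000].
// test_metadata_failure() below counts that many allocations down before
// injecting one failure and re-arming the counter.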
1653 #ifdef ASSERT
1654 bool Metadebug::test_metadata_failure() {
1655 if (MetadataAllocationFailALot &&
1656 Threads::is_vm_complete()) {
1657 if (_allocation_fail_alot_count > 0) {
1658 _allocation_fail_alot_count--;
1659 } else {
1660 if (TraceMetadataChunkAllocation && Verbose) {
1661 gclog_or_tty->print_cr("Metadata allocation failing for "
1662 "MetadataAllocationFailALot");
1663 }
1664 init_allocation_fail_alot_count();
1665 return true;
1666 }
1667 }
1668 return false;
1669 }
1670 #endif
1672 // ChunkManager methods
1674 size_t ChunkManager::free_chunks_total_words() {
1675 return _free_chunks_total;
1676 }
1678 size_t ChunkManager::free_chunks_total_bytes() {
1679 return free_chunks_total_words() * BytesPerWord;
1680 }
1682 size_t ChunkManager::free_chunks_count() {
1683 #ifdef ASSERT
1684 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1685 MutexLockerEx cl(SpaceManager::expand_lock(),
1686 Mutex::_no_safepoint_check_flag);
1687 // This lock is only needed in debug because the verification
1688 // of the _free_chunks_totals walks the list of free chunks
1689 slow_locked_verify_free_chunks_count();
1690 }
1691 #endif
1692 return _free_chunks_count;
1693 }
1695 void ChunkManager::locked_verify_free_chunks_total() {
1696 assert_lock_strong(SpaceManager::expand_lock());
1697 assert(sum_free_chunks() == _free_chunks_total,
1698 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1699 " same as sum " SIZE_FORMAT, _free_chunks_total,
1700 sum_free_chunks()));
1701 }
1703 void ChunkManager::verify_free_chunks_total() {
1704 MutexLockerEx cl(SpaceManager::expand_lock(),
1705 Mutex::_no_safepoint_check_flag);
1706 locked_verify_free_chunks_total();
1707 }
1709 void ChunkManager::locked_verify_free_chunks_count() {
1710 assert_lock_strong(SpaceManager::expand_lock());
1711 assert(sum_free_chunks_count() == _free_chunks_count,
1712 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1713 " same as sum " SIZE_FORMAT, _free_chunks_count,
1714 sum_free_chunks_count()));
1715 }
1717 void ChunkManager::verify_free_chunks_count() {
1718 #ifdef ASSERT
1719 MutexLockerEx cl(SpaceManager::expand_lock(),
1720 Mutex::_no_safepoint_check_flag);
1721 locked_verify_free_chunks_count();
1722 #endif
1723 }
1725 void ChunkManager::verify() {
1726 MutexLockerEx cl(SpaceManager::expand_lock(),
1727 Mutex::_no_safepoint_check_flag);
1728 locked_verify();
1729 }
1731 void ChunkManager::locked_verify() {
1732 locked_verify_free_chunks_count();
1733 locked_verify_free_chunks_total();
1734 }
1736 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1737 assert_lock_strong(SpaceManager::expand_lock());
1738 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1739 _free_chunks_total, _free_chunks_count);
1740 }
1742 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1743 assert_lock_strong(SpaceManager::expand_lock());
1744 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1745 sum_free_chunks(), sum_free_chunks_count());
1746 }
1747 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1748 return &_free_chunks[index];
1749 }
1751 // These methods, which sum over the free chunk lists, are called from
1752 // printing code that also runs in product builds.
1753 size_t ChunkManager::sum_free_chunks() {
1754 assert_lock_strong(SpaceManager::expand_lock());
1755 size_t result = 0;
1756 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1757 ChunkList* list = free_chunks(i);
1759 if (list == NULL) {
1760 continue;
1761 }
1763 result = result + list->count() * list->size();
1764 }
1765 result = result + humongous_dictionary()->total_size();
1766 return result;
1767 }
1769 size_t ChunkManager::sum_free_chunks_count() {
1770 assert_lock_strong(SpaceManager::expand_lock());
1771 size_t count = 0;
1772 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1773 ChunkList* list = free_chunks(i);
1774 if (list == NULL) {
1775 continue;
1776 }
1777 count = count + list->count();
1778 }
1779 count = count + humongous_dictionary()->total_free_blocks();
1780 return count;
1781 }
1783 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1784 ChunkIndex index = list_index(word_size);
1785 assert(index < HumongousIndex, "No humongous list");
1786 return free_chunks(index);
1787 }
1789 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1790 assert_lock_strong(SpaceManager::expand_lock());
1792 slow_locked_verify();
1794 Metachunk* chunk = NULL;
1795 if (list_index(word_size) != HumongousIndex) {
1796 ChunkList* free_list = find_free_chunks_list(word_size);
1797 assert(free_list != NULL, "Sanity check");
1799 chunk = free_list->head();
1801 if (chunk == NULL) {
1802 return NULL;
1803 }
1805 // Remove the chunk as the head of the list.
1806 free_list->remove_chunk(chunk);
1808 if (TraceMetadataChunkAllocation && Verbose) {
1809 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1810 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1811 free_list, chunk, chunk->word_size());
1812 }
1813 } else {
1814 chunk = humongous_dictionary()->get_chunk(
1815 word_size,
1816 FreeBlockDictionary<Metachunk>::atLeast);
1818 if (chunk == NULL) {
1819 return NULL;
1820 }
1822 if (TraceMetadataHumongousAllocation) {
1823 size_t waste = chunk->word_size() - word_size;
1824 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1825 SIZE_FORMAT " for requested size " SIZE_FORMAT
1826 " waste " SIZE_FORMAT,
1827 chunk->word_size(), word_size, waste);
1828 }
1829 }
1831 // Chunk is being removed from the chunks free list.
1832 dec_free_chunks_total(chunk->word_size());
1834 // Remove it from the links to this freelist
1835 chunk->set_next(NULL);
1836 chunk->set_prev(NULL);
1837 #ifdef ASSERT
1838 // Chunk is no longer on any freelist. Setting the tag to false makes
1839 // container_count_slow() work.
1840 chunk->set_is_tagged_free(false);
1841 #endif
1842 chunk->container()->inc_container_count();
1844 slow_locked_verify();
1845 return chunk;
1846 }
1848 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1849 assert_lock_strong(SpaceManager::expand_lock());
1850 slow_locked_verify();
1852 // Take from the beginning of the list
1853 Metachunk* chunk = free_chunks_get(word_size);
1854 if (chunk == NULL) {
1855 return NULL;
1856 }
1858 assert((word_size <= chunk->word_size()) ||
1859 (list_index(chunk->word_size()) == HumongousIndex),
1860 "Non-humongous variable sized chunk");
1861 if (TraceMetadataChunkAllocation) {
1862 size_t list_count;
1863 if (list_index(word_size) < HumongousIndex) {
1864 ChunkList* list = find_free_chunks_list(word_size);
1865 list_count = list->count();
1866 } else {
1867 list_count = humongous_dictionary()->total_count();
1868 }
1869 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1870 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1871 this, chunk, chunk->word_size(), list_count);
1872 locked_print_free_chunks(gclog_or_tty);
1873 }
1875 return chunk;
1876 }
1878 void ChunkManager::print_on(outputStream* out) const {
1879 if (PrintFLSStatistics != 0) {
1880 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1881 }
1882 }
1884 // SpaceManager methods
1886 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1887 size_t* chunk_word_size,
1888 size_t* class_chunk_word_size) {
1889 switch (type) {
1890 case Metaspace::BootMetaspaceType:
1891 *chunk_word_size = Metaspace::first_chunk_word_size();
1892 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1893 break;
1894 case Metaspace::ROMetaspaceType:
1895 *chunk_word_size = SharedReadOnlySize / wordSize;
1896 *class_chunk_word_size = ClassSpecializedChunk;
1897 break;
1898 case Metaspace::ReadWriteMetaspaceType:
1899 *chunk_word_size = SharedReadWriteSize / wordSize;
1900 *class_chunk_word_size = ClassSpecializedChunk;
1901 break;
1902 case Metaspace::AnonymousMetaspaceType:
1903 case Metaspace::ReflectionMetaspaceType:
1904 *chunk_word_size = SpecializedChunk;
1905 *class_chunk_word_size = ClassSpecializedChunk;
1906 break;
1907 default:
1908 *chunk_word_size = SmallChunk;
1909 *class_chunk_word_size = ClassSmallChunk;
1910 break;
1911 }
1912 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1913 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1914 " class " SIZE_FORMAT,
1915 *chunk_word_size, *class_chunk_word_size));
1916 }
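// Illustrative usage (not part of the original source; a hypothetical
// sketch of how a new SpaceManager picks its first chunk sizes):
//
//   size_t chunk_words, class_chunk_words;
//   get_initial_chunk_sizes(Metaspace::ReflectionMetaspaceType,
//                           &chunk_words, &class_chunk_words);
//   // chunk_words == SpecializedChunk,
//   // class_chunk_words == ClassSpecializedChunk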
1918 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1919 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1920 size_t free = 0;
1921 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1922 Metachunk* chunk = chunks_in_use(i);
1923 while (chunk != NULL) {
1924 free += chunk->free_word_size();
1925 chunk = chunk->next();
1926 }
1927 }
1928 return free;
1929 }
1931 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1932 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1933 size_t result = 0;
1934 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1935 result += sum_waste_in_chunks_in_use(i);
1936 }
1938 return result;
1939 }
1941 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1942 size_t result = 0;
1943 Metachunk* chunk = chunks_in_use(index);
1944 // Count the free space in all the chunks but not in the
1945 // current chunk, from which allocations are still being done.
1946 while (chunk != NULL) {
1947 if (chunk != current_chunk()) {
1948 result += chunk->free_word_size();
1949 }
1950 chunk = chunk->next();
1951 }
1952 return result;
1953 }
1955 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1956 // For CMS use "allocated_chunks_words()", which does not need the
1957 // Metaspace lock. For the other collectors sum over the
1958 // lists, which doubles as a check that "allocated_chunks_words()"
1959 // is correct. That is, summing the chunk lists is too expensive
1960 // to use in the product, so allocated_chunks_words() should be used,
1961 // while still allowing a check that allocated_chunks_words() returns
1962 // the same value as sum_capacity_in_chunks_in_use(), which is the
1963 // definitive answer.
1964 if (UseConcMarkSweepGC) {
1965 return allocated_chunks_words();
1966 } else {
1967 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1968 size_t sum = 0;
1969 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1970 Metachunk* chunk = chunks_in_use(i);
1971 while (chunk != NULL) {
1972 sum += chunk->word_size();
1973 chunk = chunk->next();
1974 }
1975 }
1976 return sum;
1977 }
1978 }
1980 size_t SpaceManager::sum_count_in_chunks_in_use() {
1981 size_t count = 0;
1982 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1983 count = count + sum_count_in_chunks_in_use(i);
1984 }
1986 return count;
1987 }
1989 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1990 size_t count = 0;
1991 Metachunk* chunk = chunks_in_use(i);
1992 while (chunk != NULL) {
1993 count++;
1994 chunk = chunk->next();
1995 }
1996 return count;
1997 }
2000 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2001 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2002 size_t used = 0;
2003 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2004 Metachunk* chunk = chunks_in_use(i);
2005 while (chunk != NULL) {
2006 used += chunk->used_word_size();
2007 chunk = chunk->next();
2008 }
2009 }
2010 return used;
2011 }
2013 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2015 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2016 Metachunk* chunk = chunks_in_use(i);
2017 st->print("SpaceManager: %s " PTR_FORMAT,
2018 chunk_size_name(i), chunk);
2019 if (chunk != NULL) {
2020 st->print_cr(" free " SIZE_FORMAT,
2021 chunk->free_word_size());
2022 } else {
2023 st->cr();
2024 }
2025 }
2027 chunk_manager()->locked_print_free_chunks(st);
2028 chunk_manager()->locked_print_sum_free_chunks(st);
2029 }
2031 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2033 // Decide between a small chunk and a medium chunk. Up to
2034 // _small_chunk_limit small chunks can be allocated but
2035 // once a medium chunk has been allocated, no more small
2036 // chunks will be allocated.
2037 size_t chunk_word_size;
2038 if (chunks_in_use(MediumIndex) == NULL &&
2039 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2040 chunk_word_size = (size_t) small_chunk_size();
2041 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2042 chunk_word_size = medium_chunk_size();
2043 }
2044 } else {
2045 chunk_word_size = medium_chunk_size();
2046 }
2048 // Might still need a humongous chunk. Enforce
2049 // humongous allocations sizes to be aligned up to
2050 // the smallest chunk size.
2051 size_t if_humongous_sized_chunk =
2052 align_size_up(word_size + Metachunk::overhead(),
2053 smallest_chunk_size());
2054 chunk_word_size =
2055 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2057 assert(!SpaceManager::is_humongous(word_size) ||
2058 chunk_word_size == if_humongous_sized_chunk,
2059 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2060 " chunk_word_size " SIZE_FORMAT,
2061 word_size, chunk_word_size));
2062 if (TraceMetadataHumongousAllocation &&
2063 SpaceManager::is_humongous(word_size)) {
2064 gclog_or_tty->print_cr("Metadata humongous allocation:");
2065 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
2066 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
2067 chunk_word_size);
2068 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
2069 Metachunk::overhead());
2070 }
2071 return chunk_word_size;
2072 }
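// Illustrative examples (not part of the original source; all sizes in
// words, using the accessors referenced above):
//
//   // No medium chunk yet and fewer than _small_chunk_limit small chunks:
//   calc_chunk_size(100);                // -> small_chunk_size()
//   // Request (plus overhead) does not fit a small chunk:
//   calc_chunk_size(small_chunk_size()); // -> medium_chunk_size()
//   // Humongous request, rounded up to smallest_chunk_size() granularity:
//   calc_chunk_size(4 * medium_chunk_size());
//   //   -> align_size_up(4 * medium_chunk_size() + Metachunk::overhead(),
//   //                    smallest_chunk_size())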
2074 void SpaceManager::track_metaspace_memory_usage() {
2075 if (is_init_completed()) {
2076 if (is_class()) {
2077 MemoryService::track_compressed_class_memory_usage();
2078 }
2079 MemoryService::track_metaspace_memory_usage();
2080 }
2081 }
2083 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2084 assert(vs_list()->current_virtual_space() != NULL,
2085 "Should have been set");
2086 assert(current_chunk() == NULL ||
2087 current_chunk()->allocate(word_size) == NULL,
2088 "Don't need to expand");
2089 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2091 if (TraceMetadataChunkAllocation && Verbose) {
2092 size_t words_left = 0;
2093 size_t words_used = 0;
2094 if (current_chunk() != NULL) {
2095 words_left = current_chunk()->free_word_size();
2096 words_used = current_chunk()->used_word_size();
2097 }
2098 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2099 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2100 " words left",
2101 word_size, words_used, words_left);
2102 }
2104 // Get another chunk out of the virtual space
2105 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2106 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2108 MetaWord* mem = NULL;
2110 // If a chunk was available, add it to the in-use chunk list
2111 // and do an allocation from it.
2112 if (next != NULL) {
2113 // Add to this manager's list of chunks in use.
2114 add_chunk(next, false);
2115 mem = next->allocate(word_size);
2116 }
2118 // Track metaspace memory usage statistic.
2119 track_metaspace_memory_usage();
2121 return mem;
2122 }
2124 void SpaceManager::print_on(outputStream* st) const {
2126 for (ChunkIndex i = ZeroIndex;
2127 i < NumberOfInUseLists ;
2128 i = next_chunk_index(i) ) {
2129 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2130 chunks_in_use(i),
2131 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2132 }
2133 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2134 " Humongous " SIZE_FORMAT,
2135 sum_waste_in_chunks_in_use(SmallIndex),
2136 sum_waste_in_chunks_in_use(MediumIndex),
2137 sum_waste_in_chunks_in_use(HumongousIndex));
2138 // block free lists
2139 if (block_freelists() != NULL) {
2140 st->print_cr("total in block free lists " SIZE_FORMAT,
2141 block_freelists()->total_size());
2142 }
2143 }
2145 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2146 Mutex* lock) :
2147 _mdtype(mdtype),
2148 _allocated_blocks_words(0),
2149 _allocated_chunks_words(0),
2150 _allocated_chunks_count(0),
2151 _lock(lock)
2152 {
2153 initialize();
2154 }
2156 void SpaceManager::inc_size_metrics(size_t words) {
2157 assert_lock_strong(SpaceManager::expand_lock());
2158 // Running total of the words in allocated Metachunks and the count
2159 // of allocated Metachunks for this SpaceManager
2160 _allocated_chunks_words = _allocated_chunks_words + words;
2161 _allocated_chunks_count++;
2162 // Global total of capacity in allocated Metachunks
2163 MetaspaceAux::inc_capacity(mdtype(), words);
2164 // Global total of allocated Metablocks.
2165 // used_words_slow() includes the overhead in each
2166 // Metachunk so include it in the used when the
2167 // Metachunk is first added (so only added once per
2168 // Metachunk).
2169 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2170 }
2172 void SpaceManager::inc_used_metrics(size_t words) {
2173 // Add to the per SpaceManager total
2174 Atomic::add_ptr(words, &_allocated_blocks_words);
2175 // Add to the global total
2176 MetaspaceAux::inc_used(mdtype(), words);
2177 }
2179 void SpaceManager::dec_total_from_size_metrics() {
2180 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2181 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2182 // Also deduct the overhead per Metachunk
2183 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2184 }
2186 void SpaceManager::initialize() {
2187 Metadebug::init_allocation_fail_alot_count();
2188 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2189 _chunks_in_use[i] = NULL;
2190 }
2191 _current_chunk = NULL;
2192 if (TraceMetadataChunkAllocation && Verbose) {
2193 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2194 }
2195 }
2197 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2198 if (chunks == NULL) {
2199 return;
2200 }
2201 ChunkList* list = free_chunks(index);
2202 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2203 assert_lock_strong(SpaceManager::expand_lock());
2204 Metachunk* cur = chunks;
2206 // This returns chunks one at a time. If a new
2207 // class List could be created as a base class
2208 // of FreeList, then something like FreeList::prepend()
2209 // could be used in place of this loop.
2210 while (cur != NULL) {
2211 assert(cur->container() != NULL, "Container should have been set");
2212 cur->container()->dec_container_count();
2213 // Capture the next link before it is changed
2214 // by the call to return_chunk_at_head();
2215 Metachunk* next = cur->next();
2216 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2217 list->return_chunk_at_head(cur);
2218 cur = next;
2219 }
2220 }
2222 SpaceManager::~SpaceManager() {
2223 // This call takes this->_lock, which can't be done while holding the expand_lock()
2224 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2225 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2226 " allocated_chunks_words() " SIZE_FORMAT,
2227 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2229 MutexLockerEx fcl(SpaceManager::expand_lock(),
2230 Mutex::_no_safepoint_check_flag);
2232 chunk_manager()->slow_locked_verify();
2234 dec_total_from_size_metrics();
2236 if (TraceMetadataChunkAllocation && Verbose) {
2237 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2238 locked_print_chunks_in_use_on(gclog_or_tty);
2239 }
2241 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2242 // is still needed during the freeing of VirtualSpaceNodes.
2244 // Have to update before the chunks_in_use lists are emptied
2245 // below.
2246 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2247 sum_count_in_chunks_in_use());
2249 // Add all the chunks in use by this space manager
2250 // to the global list of free chunks.
2252 // Follow each list of chunks-in-use and add them to the
2253 // free lists. Each list is NULL terminated.
2255 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2256 if (TraceMetadataChunkAllocation && Verbose) {
2257 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2258 sum_count_in_chunks_in_use(i),
2259 chunk_size_name(i));
2260 }
2261 Metachunk* chunks = chunks_in_use(i);
2262 chunk_manager()->return_chunks(i, chunks);
2263 set_chunks_in_use(i, NULL);
2264 if (TraceMetadataChunkAllocation && Verbose) {
2265 gclog_or_tty->print_cr("updated freelist count %d %s",
2266 chunk_manager()->free_chunks(i)->count(),
2267 chunk_size_name(i));
2268 }
2269 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2270 }
2272 // The medium chunk case may be optimized by passing the head and
2273 // tail of the medium chunk list to add_at_head(). The tail is often
2274 // the current chunk but there are probably exceptions.
2276 // Humongous chunks
2277 if (TraceMetadataChunkAllocation && Verbose) {
2278 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2279 sum_count_in_chunks_in_use(HumongousIndex),
2280 chunk_size_name(HumongousIndex));
2281 gclog_or_tty->print("Humongous chunk dictionary: ");
2282 }
2283 // Humongous chunks are never the current chunk.
2284 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2286 while (humongous_chunks != NULL) {
2287 #ifdef ASSERT
2288 humongous_chunks->set_is_tagged_free(true);
2289 #endif
2290 if (TraceMetadataChunkAllocation && Verbose) {
2291 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2292 humongous_chunks,
2293 humongous_chunks->word_size());
2294 }
2295 assert(humongous_chunks->word_size() == (size_t)
2296 align_size_up(humongous_chunks->word_size(),
2297 smallest_chunk_size()),
2298 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2299 " granularity %d",
2300 humongous_chunks->word_size(), smallest_chunk_size()));
2301 Metachunk* next_humongous_chunks = humongous_chunks->next();
2302 humongous_chunks->container()->dec_container_count();
2303 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2304 humongous_chunks = next_humongous_chunks;
2305 }
2306 if (TraceMetadataChunkAllocation && Verbose) {
2307 gclog_or_tty->cr();
2308 gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2309 chunk_manager()->humongous_dictionary()->total_count(),
2310 chunk_size_name(HumongousIndex));
2311 }
2312 chunk_manager()->slow_locked_verify();
2313 }
2315 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2316 switch (index) {
2317 case SpecializedIndex:
2318 return "Specialized";
2319 case SmallIndex:
2320 return "Small";
2321 case MediumIndex:
2322 return "Medium";
2323 case HumongousIndex:
2324 return "Humongous";
2325 default:
2326 return NULL;
2327 }
2328 }
2330 ChunkIndex ChunkManager::list_index(size_t size) {
2331 switch (size) {
2332 case SpecializedChunk:
2333 assert(SpecializedChunk == ClassSpecializedChunk,
2334 "Need branch for ClassSpecializedChunk");
2335 return SpecializedIndex;
2336 case SmallChunk:
2337 case ClassSmallChunk:
2338 return SmallIndex;
2339 case MediumChunk:
2340 case ClassMediumChunk:
2341 return MediumIndex;
2342 default:
2343 assert(size > MediumChunk || size > ClassMediumChunk,
2344 "Not a humongous chunk");
2345 return HumongousIndex;
2346 }
2347 }
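// Examples (illustrative; the chunk word sizes come from the ChunkSizes
// enum defined earlier in this file):
//
//   list_index(SpecializedChunk)  // -> SpecializedIndex
//   list_index(ClassSmallChunk)   // -> SmallIndex
//   list_index(ClassMediumChunk)  // -> MediumIndex
//   list_index(MediumChunk + 1)   // -> HumongousIndex (default branch)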
2349 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2350 assert_lock_strong(_lock);
2351 size_t raw_word_size = get_raw_word_size(word_size);
2352 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2353 assert(raw_word_size >= min_size,
2354 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2355 block_freelists()->return_block(p, raw_word_size);
2356 }
2358 // Adds a chunk to the list of chunks in use.
2359 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2361 assert(new_chunk != NULL, "Should not be NULL");
2362 assert(new_chunk->next() == NULL, "Should not be on a list");
2364 new_chunk->reset_empty();
2366 // Find the correct list and set the current
2367 // chunk for that list.
2368 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2370 if (index != HumongousIndex) {
2371 retire_current_chunk();
2372 set_current_chunk(new_chunk);
2373 new_chunk->set_next(chunks_in_use(index));
2374 set_chunks_in_use(index, new_chunk);
2375 } else {
2376 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2377 // small, so the small chunk list will be empty. Link this first chunk
2378 // as the current chunk.
2379 if (make_current) {
2380 // Set as the current chunk but otherwise treat as a humongous chunk.
2381 set_current_chunk(new_chunk);
2382 }
2383 // Link at head. Only the null class loader metaspace (class and data
2384 // virtual space managers) ever has a humongous chunk as its current
2385 // chunk, and since chunks are linked at the head, _current_chunk will
2386 // not point to the tail of the humongous chunks list.
2387 new_chunk->set_next(chunks_in_use(HumongousIndex));
2388 set_chunks_in_use(HumongousIndex, new_chunk);
2390 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2391 }
2393 // Add to the running sum of capacity
2394 inc_size_metrics(new_chunk->word_size());
2396 assert(new_chunk->is_empty(), "Not ready for reuse");
2397 if (TraceMetadataChunkAllocation && Verbose) {
2398 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2399 sum_count_in_chunks_in_use());
2400 new_chunk->print_on(gclog_or_tty);
2401 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2402 }
2403 }
2405 void SpaceManager::retire_current_chunk() {
2406 if (current_chunk() != NULL) {
2407 size_t remaining_words = current_chunk()->free_word_size();
2408 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2409 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2410 inc_used_metrics(remaining_words);
2411 }
2412 }
2413 }
2415 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2416 size_t grow_chunks_by_words) {
2417 // Get a chunk from the chunk freelist
2418 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2420 if (next == NULL) {
2421 next = vs_list()->get_new_chunk(word_size,
2422 grow_chunks_by_words,
2423 medium_chunk_bunch());
2424 }
2426 if (TraceMetadataHumongousAllocation && next != NULL &&
2427 SpaceManager::is_humongous(next->word_size())) {
2428 gclog_or_tty->print_cr(" new humongous chunk word size "
2429 PTR_FORMAT, next->word_size());
2430 }
2432 return next;
2433 }
2435 MetaWord* SpaceManager::allocate(size_t word_size) {
2436 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2438 size_t raw_word_size = get_raw_word_size(word_size);
2439 BlockFreelist* fl = block_freelists();
2440 MetaWord* p = NULL;
2441 // Allocation from the dictionary is expensive in the sense that
2442 // the dictionary has to be searched for a size. Don't allocate
2443 // from the dictionary until it starts to get fat. Is this
2444 // a reasonable policy? Maybe a skinny dictionary is fast enough
2445 // for allocations. Do some profiling. JJJ
2446 if (fl->total_size() > allocation_from_dictionary_limit) {
2447 p = fl->get_block(raw_word_size);
2448 }
2449 if (p == NULL) {
2450 p = allocate_work(raw_word_size);
2451 }
2453 return p;
2454 }
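// Illustrative usage (not part of the original source; "sm" is a
// hypothetical SpaceManager): the BlockFreelist acts as a second-chance
// allocator. Deallocated blocks accumulate there, but get_block() is only
// tried once the dictionary has grown past allocation_from_dictionary_limit;
// until then every request falls through to allocate_work() and is carved
// out of the current chunk.
//
//   MetaWord* p = sm->allocate(word_count);  // from the current chunk
//   sm->deallocate(p, word_count);           // block parked in freelist
//   MetaWord* q = sm->allocate(word_count);  // may reuse p, but only once
//                                            // the freelist is "fat" enough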
2456 // Returns the address of the space allocated for "word_size".
2457 // This method does not know about blocks (Metablocks).
2458 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2459 assert_lock_strong(_lock);
2460 #ifdef ASSERT
2461 if (Metadebug::test_metadata_failure()) {
2462 return NULL;
2463 }
2464 #endif
2465 // Is there space in the current chunk?
2466 MetaWord* result = NULL;
2468 // For DumpSharedSpaces, only allocate out of the current chunk which is
2469 // never null because we gave it the size we wanted. Caller reports out
2470 // of memory if this returns null.
2471 if (DumpSharedSpaces) {
2472 assert(current_chunk() != NULL, "should never happen");
2473 inc_used_metrics(word_size);
2474 return current_chunk()->allocate(word_size); // caller handles null result
2475 }
2477 if (current_chunk() != NULL) {
2478 result = current_chunk()->allocate(word_size);
2479 }
2481 if (result == NULL) {
2482 result = grow_and_allocate(word_size);
2483 }
2485 if (result != NULL) {
2486 inc_used_metrics(word_size);
2487 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2488 "Head of the list is being allocated");
2489 }
2491 return result;
2492 }
2494 void SpaceManager::verify() {
2495 // If there are blocks in the dictionary, then
2496 // verification of chunks does not work since
2497 // being in the dictionary alters a chunk.
2498 if (block_freelists()->total_size() == 0) {
2499 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2500 Metachunk* curr = chunks_in_use(i);
2501 while (curr != NULL) {
2502 curr->verify();
2503 verify_chunk_size(curr);
2504 curr = curr->next();
2505 }
2506 }
2507 }
2508 }
2510 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2511 assert(is_humongous(chunk->word_size()) ||
2512 chunk->word_size() == medium_chunk_size() ||
2513 chunk->word_size() == small_chunk_size() ||
2514 chunk->word_size() == specialized_chunk_size(),
2515 "Chunk size is wrong");
2516 return;
2517 }
2519 #ifdef ASSERT
2520 void SpaceManager::verify_allocated_blocks_words() {
2521 // Verification is only guaranteed at a safepoint.
2522 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2523 "Verification can fail if the applications is running");
2524 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2525 err_msg("allocation total is not consistent " SIZE_FORMAT
2526 " vs " SIZE_FORMAT,
2527 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2528 }
2530 #endif
2532 void SpaceManager::dump(outputStream* const out) const {
2533 size_t curr_total = 0;
2534 size_t waste = 0;
2535 uint i = 0;
2536 size_t used = 0;
2537 size_t capacity = 0;
2539 // Add up statistics for all chunks in this SpaceManager.
2540 for (ChunkIndex index = ZeroIndex;
2541 index < NumberOfInUseLists;
2542 index = next_chunk_index(index)) {
2543 for (Metachunk* curr = chunks_in_use(index);
2544 curr != NULL;
2545 curr = curr->next()) {
2546 out->print("%d) ", i++);
2547 curr->print_on(out);
2548 curr_total += curr->word_size();
2549 used += curr->used_word_size();
2550 capacity += curr->word_size();
2551 waste += curr->free_word_size() + curr->overhead();
2552 }
2553 }
2555 if (TraceMetadataChunkAllocation && Verbose) {
2556 block_freelists()->print_on(out);
2557 }
2559 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2560 // Free space isn't wasted.
2561 waste -= free;
2563 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2564 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2565 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2566 }
2568 #ifndef PRODUCT
2569 void SpaceManager::mangle_freed_chunks() {
2570 for (ChunkIndex index = ZeroIndex;
2571 index < NumberOfInUseLists;
2572 index = next_chunk_index(index)) {
2573 for (Metachunk* curr = chunks_in_use(index);
2574 curr != NULL;
2575 curr = curr->next()) {
2576 curr->mangle();
2577 }
2578 }
2579 }
2580 #endif // PRODUCT
2582 // MetaspaceAux
2585 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2586 size_t MetaspaceAux::_used_words[] = {0, 0};
2588 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2589 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2590 return list == NULL ? 0 : list->free_bytes();
2591 }
2593 size_t MetaspaceAux::free_bytes() {
2594 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2595 }
2597 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2598 assert_lock_strong(SpaceManager::expand_lock());
2599 assert(words <= capacity_words(mdtype),
2600 err_msg("About to decrement below 0: words " SIZE_FORMAT
2601 " is greater than _capacity_words[%u] " SIZE_FORMAT,
2602 words, mdtype, capacity_words(mdtype)));
2603 _capacity_words[mdtype] -= words;
2604 }
2606 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2607 assert_lock_strong(SpaceManager::expand_lock());
2608 // Needs to be atomic
2609 _capacity_words[mdtype] += words;
2610 }
2612 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2613 assert(words <= used_words(mdtype),
2614 err_msg("About to decrement below 0: words " SIZE_FORMAT
2615 " is greater than _used_words[%u] " SIZE_FORMAT,
2616 words, mdtype, used_words(mdtype)));
2617 // For CMS deallocation of the Metaspaces occurs during the
2618 // sweep which is a concurrent phase. Protection by the expand_lock()
2619 // is not enough since allocation is on a per Metaspace basis
2620 // and protected by the Metaspace lock.
2621 jlong minus_words = (jlong) - (jlong) words;
2622 Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2623 }
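// Example (illustrative): there is no atomic subtract here, so decrementing
// by e.g. 16 words is performed as an atomic add of -16; the negation is
// done on a signed jlong to avoid unsigned wrap-around of size_t.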
2625 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2626 // _used_words tracks allocations for
2627 // each piece of metadata. Those allocations are
2628 // generally done concurrently by different application
2629 // threads so must be done atomically.
2630 Atomic::add_ptr(words, &_used_words[mdtype]);
2631 }
2633 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2634 size_t used = 0;
2635 ClassLoaderDataGraphMetaspaceIterator iter;
2636 while (iter.repeat()) {
2637 Metaspace* msp = iter.get_next();
2638 // Sum allocated_blocks_words for each metaspace
2639 if (msp != NULL) {
2640 used += msp->used_words_slow(mdtype);
2641 }
2642 }
2643 return used * BytesPerWord;
2644 }
2646 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2647 size_t free = 0;
2648 ClassLoaderDataGraphMetaspaceIterator iter;
2649 while (iter.repeat()) {
2650 Metaspace* msp = iter.get_next();
2651 if (msp != NULL) {
2652 free += msp->free_words_slow(mdtype);
2653 }
2654 }
2655 return free * BytesPerWord;
2656 }
2658 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2659 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2660 return 0;
2661 }
2662 // Don't count the space in the freelists. That space will be
2663 // added to the capacity calculation as needed.
2664 size_t capacity = 0;
2665 ClassLoaderDataGraphMetaspaceIterator iter;
2666 while (iter.repeat()) {
2667 Metaspace* msp = iter.get_next();
2668 if (msp != NULL) {
2669 capacity += msp->capacity_words_slow(mdtype);
2670 }
2671 }
2672 return capacity * BytesPerWord;
2673 }
2675 size_t MetaspaceAux::capacity_bytes_slow() {
2676 #ifdef PRODUCT
2677 // Use capacity_bytes() in PRODUCT instead of this function.
2678 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2679 #endif
2680 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2681 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2682 assert(capacity_bytes() == class_capacity + non_class_capacity,
2683 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2684 " class_capacity + non_class_capacity " SIZE_FORMAT
2685 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2686 capacity_bytes(), class_capacity + non_class_capacity,
2687 class_capacity, non_class_capacity));
2689 return class_capacity + non_class_capacity;
2690 }
2692 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2693 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2694 return list == NULL ? 0 : list->reserved_bytes();
2695 }
2697 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2698 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2699 return list == NULL ? 0 : list->committed_bytes();
2700 }
2702 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2704 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2705 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2706 if (chunk_manager == NULL) {
2707 return 0;
2708 }
2709 chunk_manager->slow_verify();
2710 return chunk_manager->free_chunks_total_words();
2711 }
2713 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2714 return free_chunks_total_words(mdtype) * BytesPerWord;
2715 }
2717 size_t MetaspaceAux::free_chunks_total_words() {
2718 return free_chunks_total_words(Metaspace::ClassType) +
2719 free_chunks_total_words(Metaspace::NonClassType);
2720 }
2722 size_t MetaspaceAux::free_chunks_total_bytes() {
2723 return free_chunks_total_words() * BytesPerWord;
2724 }
2726 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2727 return Metaspace::get_chunk_manager(mdtype) != NULL;
2728 }
2730 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2731 if (!has_chunk_free_list(mdtype)) {
2732 return MetaspaceChunkFreeListSummary();
2733 }
2735 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2736 return cm->chunk_free_list_summary();
2737 }
2739 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2740 gclog_or_tty->print(", [Metaspace:");
2741 if (PrintGCDetails && Verbose) {
2742 gclog_or_tty->print(" " SIZE_FORMAT
2743 "->" SIZE_FORMAT
2744 "(" SIZE_FORMAT ")",
2745 prev_metadata_used,
2746 used_bytes(),
2747 reserved_bytes());
2748 } else {
2749 gclog_or_tty->print(" " SIZE_FORMAT "K"
2750 "->" SIZE_FORMAT "K"
2751 "(" SIZE_FORMAT "K)",
2752 prev_metadata_used/K,
2753 used_bytes()/K,
2754 reserved_bytes()/K);
2755 }
2757 gclog_or_tty->print("]");
2758 }
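// Example output (values invented for illustration):
//   without Verbose: ", [Metaspace: 2425K->2425K(4480K)]"
//   with Verbose:    ", [Metaspace: 2483200->2483200(4587520)]"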
2760 // This is printed when PrintGCDetails is enabled
2761 void MetaspaceAux::print_on(outputStream* out) {
2762 Metaspace::MetadataType nct = Metaspace::NonClassType;
2764 out->print_cr(" Metaspace "
2765 "used " SIZE_FORMAT "K, "
2766 "capacity " SIZE_FORMAT "K, "
2767 "committed " SIZE_FORMAT "K, "
2768 "reserved " SIZE_FORMAT "K",
2769 used_bytes()/K,
2770 capacity_bytes()/K,
2771 committed_bytes()/K,
2772 reserved_bytes()/K);
2774 if (Metaspace::using_class_space()) {
2775 Metaspace::MetadataType ct = Metaspace::ClassType;
2776 out->print_cr(" class space "
2777 "used " SIZE_FORMAT "K, "
2778 "capacity " SIZE_FORMAT "K, "
2779 "committed " SIZE_FORMAT "K, "
2780 "reserved " SIZE_FORMAT "K",
2781 used_bytes(ct)/K,
2782 capacity_bytes(ct)/K,
2783 committed_bytes(ct)/K,
2784 reserved_bytes(ct)/K);
2785 }
2786 }
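// Example output (values invented for illustration):
//    Metaspace used 2425K, capacity 4498K, committed 4864K, reserved 1056768K
//     class space used 262K, capacity 386K, committed 512K, reserved 1048576K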
2788 // Print information for class space and data space separately.
2789 // This is almost the same as above.
2790 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2791 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2792 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2793 size_t used_bytes = used_bytes_slow(mdtype);
2794 size_t free_bytes = free_bytes_slow(mdtype);
2795 size_t used_and_free = used_bytes + free_bytes +
2796 free_chunks_capacity_bytes;
2797 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2798 "K + unused in chunks " SIZE_FORMAT "K + "
2799 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2800 "K capacity in allocated chunks " SIZE_FORMAT "K",
2801 used_bytes / K,
2802 free_bytes / K,
2803 free_chunks_capacity_bytes / K,
2804 used_and_free / K,
2805 capacity_bytes / K);
2806 // Accounting can only be correct if we got the values during a safepoint
2807 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2808 }
2810 // Print total fragmentation for class metaspaces
2811 void MetaspaceAux::print_class_waste(outputStream* out) {
2812 assert(Metaspace::using_class_space(), "class metaspace not used");
2813 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2814 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2815 ClassLoaderDataGraphMetaspaceIterator iter;
2816 while (iter.repeat()) {
2817 Metaspace* msp = iter.get_next();
2818 if (msp != NULL) {
2819 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2820 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2821 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2822 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2823 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2824 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2825 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2826 }
2827 }
2828 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2829 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2830 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2831 "large count " SIZE_FORMAT,
2832 cls_specialized_count, cls_specialized_waste,
2833 cls_small_count, cls_small_waste,
2834 cls_medium_count, cls_medium_waste, cls_humongous_count);
2835 }
2837 // Print total fragmentation for data and class metaspaces separately
2838 void MetaspaceAux::print_waste(outputStream* out) {
2839 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2840 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2842 ClassLoaderDataGraphMetaspaceIterator iter;
2843 while (iter.repeat()) {
2844 Metaspace* msp = iter.get_next();
2845 if (msp != NULL) {
2846 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2847 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2848 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2849 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2850 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2851 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2852 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2853 }
2854 }
2855 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2856 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2857 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2858 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2859 "large count " SIZE_FORMAT,
2860 specialized_count, specialized_waste, small_count,
2861 small_waste, medium_count, medium_waste, humongous_count);
2862 if (Metaspace::using_class_space()) {
2863 print_class_waste(out);
2864 }
2865 }
2867 // Dump global metaspace things from the end of ClassLoaderDataGraph
2868 void MetaspaceAux::dump(outputStream* out) {
2869 out->print_cr("All Metaspace:");
2870 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2871 out->print("class space: "); print_on(out, Metaspace::ClassType);
2872 print_waste(out);
2873 }
2875 void MetaspaceAux::verify_free_chunks() {
2876 Metaspace::chunk_manager_metadata()->verify();
2877 if (Metaspace::using_class_space()) {
2878 Metaspace::chunk_manager_class()->verify();
2879 }
2880 }
2882 void MetaspaceAux::verify_capacity() {
2883 #ifdef ASSERT
2884 size_t running_sum_capacity_bytes = capacity_bytes();
2885 // For purposes of the running sum of capacity, verify against capacity
2886 size_t capacity_in_use_bytes = capacity_bytes_slow();
2887 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2888 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2889 " capacity_bytes_slow()" SIZE_FORMAT,
2890 running_sum_capacity_bytes, capacity_in_use_bytes));
2891 for (Metaspace::MetadataType i = Metaspace::ClassType;
2892 i < Metaspace::MetadataTypeCount;
2893 i = (Metaspace::MetadataType)(i + 1)) {
2894 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2895 assert(capacity_bytes(i) == capacity_in_use_bytes,
2896 err_msg("capacity_bytes(%u) " SIZE_FORMAT
2897 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2898 i, capacity_bytes(i), i, capacity_in_use_bytes));
2899 }
2900 #endif
2901 }
2903 void MetaspaceAux::verify_used() {
2904 #ifdef ASSERT
2905 size_t running_sum_used_bytes = used_bytes();
2906 // For purposes of the running sum of used, verify against used
2907 size_t used_in_use_bytes = used_bytes_slow();
2908 assert(used_bytes() == used_in_use_bytes,
2909 err_msg("used_bytes() " SIZE_FORMAT
2910 " used_bytes_slow()" SIZE_FORMAT,
2911 used_bytes(), used_in_use_bytes));
2912 for (Metaspace::MetadataType i = Metaspace::ClassType;
2913 i < Metaspace::MetadataTypeCount;
2914 i = (Metaspace::MetadataType)(i + 1)) {
2915 size_t used_in_use_bytes = used_bytes_slow(i);
2916 assert(used_bytes(i) == used_in_use_bytes,
2917 err_msg("used_bytes(%u) " SIZE_FORMAT
2918 " used_bytes_slow(%u)" SIZE_FORMAT,
2919 i, used_bytes(i), i, used_in_use_bytes));
2920 }
2921 #endif
2922 }
2924 void MetaspaceAux::verify_metrics() {
2925 verify_capacity();
2926 verify_used();
2927 }
2930 // Metaspace methods
2932 size_t Metaspace::_first_chunk_word_size = 0;
2933 size_t Metaspace::_first_class_chunk_word_size = 0;
2935 size_t Metaspace::_commit_alignment = 0;
2936 size_t Metaspace::_reserve_alignment = 0;
2938 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2939 initialize(lock, type);
2940 }
2942 Metaspace::~Metaspace() {
2943 delete _vsm;
2944 if (using_class_space()) {
2945 delete _class_vsm;
2946 }
2947 }
2949 VirtualSpaceList* Metaspace::_space_list = NULL;
2950 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2952 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2953 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2955 #define VIRTUALSPACEMULTIPLIER 2
2957 #ifdef _LP64
2958 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2960 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2961 // Figure out the narrow_klass_base and the narrow_klass_shift. The
2962 // narrow_klass_base is the lower of the metaspace base and the cds base
2963 // (if cds is enabled). The narrow_klass_shift depends on the distance
2964 // between the lower base and higher address.
2965 address lower_base;
2966 address higher_address;
2967 #if INCLUDE_CDS
2968 if (UseSharedSpaces) {
2969 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2970 (address)(metaspace_base + compressed_class_space_size()));
2971 lower_base = MIN2(metaspace_base, cds_base);
2972 } else
2973 #endif
2974 {
2975 higher_address = metaspace_base + compressed_class_space_size();
2976 lower_base = metaspace_base;
2978 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2979 // If compressed class space fits in lower 32G, we don't need a base.
2980 if (higher_address <= (address)klass_encoding_max) {
2981 lower_base = 0; // effectively lower base is zero.
2982 }
2983 }
2985 Universe::set_narrow_klass_base(lower_base);
2987 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2988 Universe::set_narrow_klass_shift(0);
2989 } else {
2990 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2991 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2992 }
2993 }
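// Worked examples (illustrative, non-CDS path, LogKlassAlignmentInBytes == 3,
// so klass_encoding_max == 32*G and UnscaledClassSpaceMax == 4*G):
//
//   base  2*G, size 1*G: higher ==  3*G <= 32*G -> lower_base = 0;
//                        3*G <= 4*G             -> shift 0 (unscaled)
//   base 28*G, size 1*G: higher == 29*G <= 32*G -> lower_base = 0;
//                        29*G > 4*G             -> shift 3 (zero-based)
//   base 40*G, size 1*G: higher == 41*G > 32*G  -> lower_base = base;
//                        41*G - 40*G <= 4*G     -> shift 0, non-zero base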
2995 #if INCLUDE_CDS
2996 // Return TRUE if the specified metaspace_base and cds_base are close enough
2997 // to work with compressed klass pointers.
2998 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2999 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3000 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3001 address lower_base = MIN2((address)metaspace_base, cds_base);
3002 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3003 (address)(metaspace_base + compressed_class_space_size()));
3004 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3005 }
3006 #endif
3008 // Try to allocate the metaspace at the requested addr.
3009 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3010 assert(using_class_space(), "called improperly");
3011 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3012 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3013 "Metaspace size is too big");
3014 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3015 assert_is_ptr_aligned(cds_base, _reserve_alignment);
3016 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3018 // Don't use large pages for the class space.
3019 bool large_pages = false;
3021 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3022 _reserve_alignment,
3023 large_pages,
3024 requested_addr, 0);
3025 if (!metaspace_rs.is_reserved()) {
3026 #if INCLUDE_CDS
3027 if (UseSharedSpaces) {
3028 size_t increment = align_size_up(1*G, _reserve_alignment);
3030 // Keep trying to allocate the metaspace, increasing the requested_addr
3031 // by 1GB each time, until we reach an address that will no longer allow
3032 // use of CDS with compressed klass pointers.
3033 char *addr = requested_addr;
3034 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3035 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3036 addr = addr + increment;
3037 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3038 _reserve_alignment, large_pages, addr, 0);
3039 }
3040 }
3041 #endif
3042 // If no successful allocation then try to allocate the space anywhere. If
3043 // that fails we are out of memory. At this point we cannot try allocating the
3044 // metaspace as if UseCompressedClassPointers is off because too much
3045 // initialization has happened that depends on UseCompressedClassPointers.
3046 // So, UseCompressedClassPointers cannot be turned off at this point.
3047 if (!metaspace_rs.is_reserved()) {
3048 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3049 _reserve_alignment, large_pages);
3050 if (!metaspace_rs.is_reserved()) {
3051 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3052 compressed_class_space_size()));
3053 }
3054 }
3055 }
3057 // If we got here then the metaspace got allocated.
3058 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3060 #if INCLUDE_CDS
3061 // Verify that we can use shared spaces. Otherwise, turn off CDS.
3062 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3063 FileMapInfo::stop_sharing_and_unmap(
3064 "Could not allocate metaspace at a compatible address");
3065 }
3066 #endif
3067 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3068 UseSharedSpaces ? (address)cds_base : 0);
3070 initialize_class_space(metaspace_rs);
3072 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3073 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3074 Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3075 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3076 compressed_class_space_size(), metaspace_rs.base(), requested_addr);
3077 }
3078 }
3080 // For UseCompressedClassPointers the class space is reserved above the top of
3081 // the Java heap. The argument passed in is at the base of the compressed space.
3082 void Metaspace::initialize_class_space(ReservedSpace rs) {
3083 // The reserved space size may be bigger because of alignment, esp with UseLargePages
3084 assert(rs.size() >= CompressedClassSpaceSize,
3085 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3086 assert(using_class_space(), "Must be using class space");
3087 _class_space_list = new VirtualSpaceList(rs);
3088 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3090 if (!_class_space_list->initialization_succeeded()) {
3091 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3092 }
3093 }
3095 #endif
3097 void Metaspace::ergo_initialize() {
3098 if (DumpSharedSpaces) {
3099 // Using large pages when dumping the shared archive is currently not implemented.
3100 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3101 }
3103 size_t page_size = os::vm_page_size();
3104 if (UseLargePages && UseLargePagesInMetaspace) {
3105 page_size = os::large_page_size();
3106 }
3108 _commit_alignment = page_size;
3109 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3111 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
3112 // clobber the record of whether MaxMetaspaceSize was set on the command line.
3113 // This information is needed later to conform to the specification of the
3114 // java.lang.management.MemoryUsage API.
3115 //
3116 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3117 // globals.hpp to the aligned value, but this is not possible, since the
3118 // alignment depends on other flags being parsed.
3119 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3121 if (MetaspaceSize > MaxMetaspaceSize) {
3122 MetaspaceSize = MaxMetaspaceSize;
3123 }
3125 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3127 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3129 if (MetaspaceSize < 256*K) {
3130 vm_exit_during_initialization("Too small initial Metaspace size");
3131 }
3133 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3134 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3136 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3137 set_compressed_class_space_size(CompressedClassSpaceSize);
3138 }
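// Worked example (illustrative): on Windows x64 without large pages,
// os::vm_page_size() is 4*K while os::vm_allocation_granularity() is 64*K,
// so _commit_alignment becomes 4*K and _reserve_alignment 64*K -- the
// page-size/granularity distinction noted in this change's summary.
// A command-line -XX:MaxMetaspaceSize=100000K is then aligned down to the
// 64*K multiple 99968K, while MetaspaceSize and the expansion flags are
// aligned to 4*K.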
3140 void Metaspace::global_initialize() {
3141 MetaspaceGC::initialize();
3143 // Initialize the alignment for shared spaces.
3144 int max_alignment = os::vm_allocation_granularity();
3145 size_t cds_total = 0;
3147 MetaspaceShared::set_max_alignment(max_alignment);
3149 if (DumpSharedSpaces) {
3150 #if INCLUDE_CDS
3151 MetaspaceShared::estimate_regions_size();
3153 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
3154 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3155 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
3156 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
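// Worked example (illustrative, values invented): on Windows, max_alignment
// is 64*K. A user limit such as -XX:SharedMiscDataSize=24004K (a 4*K page
// multiple but not a 64*K multiple) is aligned up here to 24064K, so the
// later reservation no longer trips the vm_allocation_granularity() assert
// described in the summary of this change.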
3158 // Initialize with the sum of the shared space sizes. The read-only
3159 // and read write metaspace chunks will be allocated out of this and the
3160 // remainder is the misc code and data chunks.
3161 cds_total = FileMapInfo::shared_spaces_size();
3162 cds_total = align_size_up(cds_total, _reserve_alignment);
3163 _space_list = new VirtualSpaceList(cds_total/wordSize);
3164 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3166 if (!_space_list->initialization_succeeded()) {
3167 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3168 }
3170 #ifdef _LP64
3171 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3172 vm_exit_during_initialization("Unable to dump shared archive.",
3173 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3174 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3175 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3176 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3177 }
3179 // Set the compressed klass pointer base so that decoding of these pointers works
3180 // properly when creating the shared archive.
3181 assert(UseCompressedOops && UseCompressedClassPointers,
3182 "UseCompressedOops and UseCompressedClassPointers must be set");
3183 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3184 if (TraceMetavirtualspaceAllocation && Verbose) {
3185 gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
3186 _space_list->current_virtual_space()->bottom());
3187 }
3189 Universe::set_narrow_klass_shift(0);
3190 #endif // _LP64
3191 #endif // INCLUDE_CDS
3192 } else {
3193 #if INCLUDE_CDS
3194 // If using shared space, open the file that contains the shared space
3195 // and map in the memory before initializing the rest of metaspace (so
3196 // the addresses don't conflict)
3197 address cds_address = NULL;
3198 if (UseSharedSpaces) {
3199 FileMapInfo* mapinfo = new FileMapInfo();
3201 // Open the shared archive file, read and validate the header. If
3202 // initialization fails, shared spaces [UseSharedSpaces] are
3203 // disabled and the file is closed.
3204 // The spaces are also mapped in at this point.
3205 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3206 cds_total = FileMapInfo::shared_spaces_size();
3207 cds_address = (address)mapinfo->region_base(0);
3208 } else {
3209 assert(!mapinfo->is_open() && !UseSharedSpaces,
3210 "archive file not closed or shared spaces not disabled.");
3211 }
3212 }
3213 #endif // INCLUDE_CDS
3214 #ifdef _LP64
3215 // If UseCompressedClassPointers is set then allocate the metaspace area
3216 // above the heap and above the CDS area (if it exists).
3217 if (using_class_space()) {
3218 if (UseSharedSpaces) {
3219 #if INCLUDE_CDS
3220 char* cds_end = (char*)(cds_address + cds_total);
3221 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3222 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3223 #endif
3224 } else {
3225 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3226 allocate_metaspace_compressed_klass_ptrs(base, 0);
3227 }
3228 }
3229 #endif // _LP64
3231 // Initialize these before initializing the VirtualSpaceList
3232 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3233 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3234 // Make the first class chunk bigger than a medium chunk so it's not put
3235 // on the medium chunk list. The next chunk will be small and progress
3236 // from there. This size was determined empirically by running -version.
3237 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3238 (CompressedClassSpaceSize/BytesPerWord)*2);
3239 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3240 // Arbitrarily set the initial virtual space to a multiple
3241 // of the boot class loader size.
3242 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
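// Round up to the reserve alignment, not just the page size, so the
// reservation succeeds on platforms where the allocation granularity
// exceeds the page size.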
3243 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3245 // Initialize the list of virtual spaces.
3246 _space_list = new VirtualSpaceList(word_size);
3247 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3249 if (!_space_list->initialization_succeeded()) {
3250 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3251 }
3252 }
3254 _tracer = new MetaspaceTracer();
3255 }
3257 void Metaspace::post_initialize() {
3258 MetaspaceGC::post_initialize();
3259 }
3261 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3262 size_t chunk_word_size,
3263 size_t chunk_bunch) {
3264 // Get a chunk from the chunk freelist
3265 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3266 if (chunk != NULL) {
3267 return chunk;
3268 }
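// The freelist had no chunk of the requested size; carve a new chunk out
// of the virtual space list instead.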
3270 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3271 }
3273 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3275 assert(space_list() != NULL,
3276 "Metadata VirtualSpaceList has not been initialized");
3277 assert(chunk_manager_metadata() != NULL,
3278 "Metadata ChunkManager has not been initialized");
3280 _vsm = new SpaceManager(NonClassType, lock);
3281 if (_vsm == NULL) {
3282 return;
3283 }
3284 size_t word_size;
3285 size_t class_word_size;
3286 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3288 if (using_class_space()) {
3289 assert(class_space_list() != NULL,
3290 "Class VirtualSpaceList has not been initialized");
3291 assert(chunk_manager_class() != NULL,
3292 "Class ChunkManager has not been initialized");
3294 // Allocate SpaceManager for classes.
3295 _class_vsm = new SpaceManager(ClassType, lock);
3296 if (_class_vsm == NULL) {
3297 return;
3298 }
3299 }
3301 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3303 // Allocate chunk for metadata objects
3304 Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3305 word_size,
3306 vsm()->medium_chunk_bunch());
3307 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3308 if (new_chunk != NULL) {
3309 // Add to this manager's list of chunks in use and current_chunk().
3310 vsm()->add_chunk(new_chunk, true);
3311 }
3313 // Allocate chunk for class metadata objects
3314 if (using_class_space()) {
3315 Metachunk* class_chunk = get_initialization_chunk(ClassType,
3316 class_word_size,
3317 class_vsm()->medium_chunk_bunch());
3318 if (class_chunk != NULL) {
3319 class_vsm()->add_chunk(class_chunk, true);
3320 }
3321 }
3323 _alloc_record_head = NULL;
3324 _alloc_record_tail = NULL;
3325 }
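// Round a word count up so that the corresponding byte size is aligned to
// the reserved-space allocation alignment.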
3327 size_t Metaspace::align_word_size_up(size_t word_size) {
3328 size_t byte_size = word_size * wordSize;
3329 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3330 }
3332 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3333 // DumpSharedSpaces doesn't use class metadata area (yet)
3334 // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3335 if (is_class_space_allocation(mdtype)) {
3336 return class_vsm()->allocate(word_size);
3337 } else {
3338 return vsm()->allocate(word_size);
3339 }
3340 }
3342 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3343 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3344 assert(delta_bytes > 0, "Must be");
3346 size_t before = 0;
3347 size_t after = 0;
3348 MetaWord* res;
3349 bool incremented;
3351 // Each thread increments the HWM at most once. Even if the thread fails to increment
3352 // the HWM, an allocation is still attempted. This is because another thread must then
3353 // have incremented the HWM and therefore the allocation might still succeed.
3354 do {
3355 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3356 res = allocate(word_size, mdtype);
3357 } while (!incremented && res == NULL);
3359 if (incremented) {
3360 tracer()->report_gc_threshold(before, after,
3361 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3362 if (PrintGCDetails && Verbose) {
3363 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3364 " to " SIZE_FORMAT, before, after);
3365 }
3366 }
3368 return res;
3369 }
3371 // Starting address of the space allocated in this Metaspace. Only valid
3372 // while dumping the shared archive, when allocation is contiguous.
3373 char* Metaspace::bottom() const {
3374 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3375 return (char*)vsm()->current_chunk()->bottom();
3376 }
3378 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3379 if (mdtype == ClassType) {
3380 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3381 } else {
3382 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
3383 }
3384 }
3386 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3387 if (mdtype == ClassType) {
3388 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3389 } else {
3390 return vsm()->sum_free_in_chunks_in_use();
3391 }
3392 }
3394 // Space capacity in the Metaspace. Includes space in the
3395 // list of chunks from which allocations have been made.
3396 // Does not include space in the global freelist or the free
3397 // space in the dictionary, since that space is already
3398 // counted in some chunk.
3399 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3400 if (mdtype == ClassType) {
3401 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3402 } else {
3403 return vsm()->sum_capacity_in_chunks_in_use();
3404 }
3405 }
3407 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3408 return used_words_slow(mdtype) * BytesPerWord;
3409 }
3411 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3412 return capacity_words_slow(mdtype) * BytesPerWord;
3413 }
3415 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
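// At a safepoint only the VM thread deallocates (and, when dumping,
// records the deallocation); both paths take the SpaceManager lock and
// treat blocks below the dictionary minimum as untracked dark matter.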
3416 if (SafepointSynchronize::is_at_safepoint()) {
3417 if (DumpSharedSpaces && PrintSharedSpaces) {
3418 record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3419 }
3421 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3422 // Don't take Heap_lock
3423 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3424 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3425 // Dark matter. Too small for dictionary.
3426 #ifdef ASSERT
3427 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3428 #endif
3429 return;
3430 }
3431 if (is_class && using_class_space()) {
3432 class_vsm()->deallocate(ptr, word_size);
3433 } else {
3434 vsm()->deallocate(ptr, word_size);
3435 }
3436 } else {
3437 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3439 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3440 // Dark matter. Too small for dictionary.
3441 #ifdef ASSERT
3442 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3443 #endif
3444 return;
3445 }
3446 if (is_class && using_class_space()) {
3447 class_vsm()->deallocate(ptr, word_size);
3448 } else {
3449 vsm()->deallocate(ptr, word_size);
3450 }
3451 }
3452 }
3455 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3456 bool read_only, MetaspaceObj::Type type, TRAPS) {
3457 if (HAS_PENDING_EXCEPTION) {
3458 assert(false, "Should not allocate with exception pending");
3459 return NULL; // caller does a CHECK_NULL too
3460 }
3462 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3463 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3465 // Allocate in metaspaces without taking out a lock, because it deadlocks
3466 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
3467 // to revisit this for application class data sharing.
3468 if (DumpSharedSpaces) {
3469 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3470 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3471 MetaWord* result = space->allocate(word_size, NonClassType);
3472 if (result == NULL) {
3473 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3474 }
3475 if (PrintSharedSpaces) {
3476 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3477 }
3479 // Zero initialize.
3480 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3482 return result;
3483 }
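// Classes go to the compressed class space when it is in use; all other
// metadata goes to the non-class metaspace.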
3485 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3487 // Try to allocate metadata.
3488 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3490 if (result == NULL) {
3491 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3493 // Allocation failed.
3494 if (is_init_completed()) {
3495 // Only start a GC if the bootstrapping has completed.
3497 // Try to clean out some memory and retry.
3498 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3499 loader_data, word_size, mdtype);
3500 }
3501 }
3503 if (result == NULL) {
3504 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3505 }
3507 // Zero initialize.
3508 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3510 return result;
3511 }
3513 size_t Metaspace::class_chunk_size(size_t word_size) {
3514 assert(using_class_space(), "Has to use class space");
3515 return class_vsm()->calc_chunk_size(word_size);
3516 }
3518 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3519 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3521 // If result is still null, we are out of memory.
3522 if (Verbose && TraceMetadataChunkAllocation) {
3523 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3524 SIZE_FORMAT, word_size);
3525 if (loader_data->metaspace_or_null() != NULL) {
3526 loader_data->dump(gclog_or_tty);
3527 }
3528 MetaspaceAux::dump(gclog_or_tty);
3529 }
3531 bool out_of_compressed_class_space = false;
3532 if (is_class_space_allocation(mdtype)) {
3533 Metaspace* metaspace = loader_data->metaspace_non_null();
3534 out_of_compressed_class_space =
3535 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3536 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3537 CompressedClassSpaceSize;
3538 }
3540 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3541 const char* space_string = out_of_compressed_class_space ?
3542 "Compressed class space" : "Metaspace";
3544 report_java_out_of_memory(space_string);
3546 if (JvmtiExport::should_post_resource_exhausted()) {
3547 JvmtiExport::post_resource_exhausted(
3548 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3549 space_string);
3550 }
3552 if (!is_init_completed()) {
3553 vm_exit_during_initialization("OutOfMemoryError", space_string);
3554 }
3556 if (out_of_compressed_class_space) {
3557 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3558 } else {
3559 THROW_OOP(Universe::out_of_memory_error_metaspace());
3560 }
3561 }
3563 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3564 switch (mdtype) {
3565 case Metaspace::ClassType: return "Class";
3566 case Metaspace::NonClassType: return "Metadata";
3567 default:
3568 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3569 return NULL;
3570 }
3571 }
3573 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3574 assert(DumpSharedSpaces, "sanity");
3576 int byte_size = (int)word_size * HeapWordSize;
3577 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3579 if (_alloc_record_head == NULL) {
3580 _alloc_record_head = _alloc_record_tail = rec;
3581 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3582 _alloc_record_tail->_next = rec;
3583 _alloc_record_tail = rec;
3584 } else {
3585 // slow linear search, but this doesn't happen that often, and only when dumping
3586 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3587 if (old->_ptr == ptr) {
3588 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3589 int remain_bytes = old->_byte_size - byte_size;
3590 assert(remain_bytes >= 0, "sanity");
3591 old->_type = type;
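// Reuse the freed record for this allocation. If the new allocation does
// not fill the record exactly, split off the tail as a new
// DeallocatedType record.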
3593 if (remain_bytes == 0) {
3594 delete(rec);
3595 } else {
3596 address remain_ptr = address(ptr) + byte_size;
3597 rec->_ptr = remain_ptr;
3598 rec->_byte_size = remain_bytes;
3599 rec->_type = MetaspaceObj::DeallocatedType;
3600 rec->_next = old->_next;
3601 old->_byte_size = byte_size;
3602 old->_next = rec;
3603 }
3604 return;
3605 }
3606 }
3607 assert(0, "reallocating a freed pointer that was not recorded");
3608 }
3609 }
3611 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3612 assert(DumpSharedSpaces, "sanity");
3614 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3615 if (rec->_ptr == ptr) {
3616 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3617 rec->_type = MetaspaceObj::DeallocatedType;
3618 return;
3619 }
3620 }
3622 assert(0, "deallocating a pointer that was not recorded");
3623 }
3625 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3626 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3628 address last_addr = (address)bottom();
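// Walk the allocation records in address order, reporting any gap between
// consecutive records as UnknownType.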
3630 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3631 address ptr = rec->_ptr;
3632 if (last_addr < ptr) {
3633 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3634 }
3635 closure->doit(ptr, rec->_type, rec->_byte_size);
3636 last_addr = ptr + rec->_byte_size;
3637 }
3639 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3640 if (last_addr < top) {
3641 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3642 }
3643 }
3645 void Metaspace::purge(MetadataType mdtype) {
3646 get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3647 }
3649 void Metaspace::purge() {
3650 MutexLockerEx cl(SpaceManager::expand_lock(),
3651 Mutex::_no_safepoint_check_flag);
3652 purge(NonClassType);
3653 if (using_class_space()) {
3654 purge(ClassType);
3655 }
3656 }
3658 void Metaspace::print_on(outputStream* out) const {
3659 // Print both class virtual space counts and metaspace.
3660 if (Verbose) {
3661 vsm()->print_on(out);
3662 if (using_class_space()) {
3663 class_vsm()->print_on(out);
3664 }
3665 }
3666 }
3668 bool Metaspace::contains(const void* ptr) {
3669 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3670 return true;
3671 }
3673 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3674 return true;
3675 }
3677 return get_space_list(NonClassType)->contains(ptr);
3678 }
3680 void Metaspace::verify() {
3681 vsm()->verify();
3682 if (using_class_space()) {
3683 class_vsm()->verify();
3684 }
3685 }
3687 void Metaspace::dump(outputStream* const out) const {
3688 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3689 vsm()->dump(out);
3690 if (using_class_space()) {
3691 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3692 class_vsm()->dump(out);
3693 }
3694 }
3696 /////////////// Unit tests ///////////////
3698 #ifndef PRODUCT
3700 class TestMetaspaceAuxTest : AllStatic {
3701 public:
3702 static void test_reserved() {
3703 size_t reserved = MetaspaceAux::reserved_bytes();
3705 assert(reserved > 0, "assert");
3707 size_t committed = MetaspaceAux::committed_bytes();
3708 assert(committed <= reserved, "assert");
3710 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3711 assert(reserved_metadata > 0, "assert");
3712 assert(reserved_metadata <= reserved, "assert");
3714 if (UseCompressedClassPointers) {
3715 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3716 assert(reserved_class > 0, "assert");
3717 assert(reserved_class < reserved, "assert");
3718 }
3719 }
3721 static void test_committed() {
3722 size_t committed = MetaspaceAux::committed_bytes();
3724 assert(committed > 0, "assert");
3726 size_t reserved = MetaspaceAux::reserved_bytes();
3727 assert(committed <= reserved, "assert");
3729 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3730 assert(committed_metadata > 0, "assert");
3731 assert(committed_metadata <= committed, "assert");
3733 if (UseCompressedClassPointers) {
3734 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3735 assert(committed_class > 0, "assert");
3736 assert(committed_class < committed, "assert");
3737 }
3738 }
3740 static void test_virtual_space_list_large_chunk() {
3741 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3742 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3743 // Use a size larger than VirtualSpaceSize (256k), then add one page so the
3744 // result is _not_ vm_allocation_granularity-aligned on Windows.
3745 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3746 large_size += (os::vm_page_size()/BytesPerWord);
3747 vs_list->get_new_chunk(large_size, large_size, 0);
3748 }
3750 static void test() {
3751 test_reserved();
3752 test_committed();
3753 test_virtual_space_list_large_chunk();
3754 }
3755 };
3757 void TestMetaspaceAux_test() {
3758 TestMetaspaceAuxTest::test();
3759 }
3761 class TestVirtualSpaceNodeTest {
3762 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3763 size_t& num_small_chunks,
3764 size_t& num_specialized_chunks) {
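// Greedily decompose words_left into medium, then small, then specialized
// chunks. Assumes each chunk size evenly divides the next larger one (see
// the STATIC_ASSERTs in test()).
// For example, with the non-class chunk sizes used here (SpecializedChunk
// = 128, SmallChunk = 512, MediumChunk = 8*K, all in words),
// chunk_up(9*K, ...) yields 1 medium chunk, 2 small chunks and 0
// specialized chunks: 9*K == 1*8*K + 2*512 + 0*128.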
3765 num_medium_chunks = words_left / MediumChunk;
3766 words_left = words_left % MediumChunk;
3768 num_small_chunks = words_left / SmallChunk;
3769 words_left = words_left % SmallChunk;
3770 // how many specialized chunks can we get?
3771 num_specialized_chunks = words_left / SpecializedChunk;
3772 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3773 }
3775 public:
3776 static void test() {
3777 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3778 const size_t vsn_test_size_words = MediumChunk * 4;
3779 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3781 // The chunk sizes must be multiples of each other, or this will fail
3782 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3783 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3785 { // No committed memory in VSN
3786 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3787 VirtualSpaceNode vsn(vsn_test_size_bytes);
3788 vsn.initialize();
3789 vsn.retire(&cm);
3790 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3791 }
3793 { // All of VSN is committed, half is used by chunks
3794 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3795 VirtualSpaceNode vsn(vsn_test_size_bytes);
3796 vsn.initialize();
3797 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3798 vsn.get_chunk_vs(MediumChunk);
3799 vsn.get_chunk_vs(MediumChunk);
3800 vsn.retire(&cm);
3801 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3802 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3803 }
3805 { // 4 pages of VSN is committed, some is used by chunks
3806 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3807 VirtualSpaceNode vsn(vsn_test_size_bytes);
3808 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3809 assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3810 vsn.initialize();
3811 vsn.expand_by(page_chunks, page_chunks);
3812 vsn.get_chunk_vs(SmallChunk);
3813 vsn.get_chunk_vs(SpecializedChunk);
3814 vsn.retire(&cm);
3816 // committed - used = words left to retire
3817 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3819 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3820 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3822 assert(num_medium_chunks == 0, "should not get any medium chunks");
3823 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed chunk count");
3824 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3825 }
3827 { // Half of VSN is committed, a humongous chunk is used
3828 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3829 VirtualSpaceNode vsn(vsn_test_size_bytes);
3830 vsn.initialize();
3831 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3832 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3833 vsn.retire(&cm);
3835 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3836 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3837 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3839 assert(num_medium_chunks == 0, "should not get any medium chunks");
3840 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed chunk count");
3841 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3842 }
3844 }
3846 #define assert_is_available_positive(word_size) \
3847 assert(vsn.is_available(word_size), \
3848 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3849 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3850 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3852 #define assert_is_available_negative(word_size) \
3853 assert(!vsn.is_available(word_size), \
3854 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3855 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3856 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3858 static void test_is_available_positive() {
3859 // Reserve some memory.
3860 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3861 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3863 // Commit some memory.
3864 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3865 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3866 assert(expanded, "Failed to commit");
3868 // Check that is_available accepts the committed size.
3869 assert_is_available_positive(commit_word_size);
3871 // Check that is_available accepts half the committed size.
3872 size_t expand_word_size = commit_word_size / 2;
3873 assert_is_available_positive(expand_word_size);
3874 }
3876 static void test_is_available_negative() {
3877 // Reserve some memory.
3878 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3879 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3881 // Commit some memory.
3882 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3883 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3884 assert(expanded, "Failed to commit");
3886 // Check that is_available doesn't accept a too large size.
3887 size_t two_times_commit_word_size = commit_word_size * 2;
3888 assert_is_available_negative(two_times_commit_word_size);
3889 }
3891 static void test_is_available_overflow() {
3892 // Reserve some memory.
3893 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3894 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3896 // Commit some memory.
3897 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3898 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3899 assert(expanded, "Failed to commit");
3901 // Calculate a size that will overflow the virtual space size.
3902 void* virtual_space_max = (void*)(uintptr_t)-1;
3903 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3904 size_t overflow_size = bottom_to_max + BytesPerWord;
3905 size_t overflow_word_size = overflow_size / BytesPerWord;
3907 // Check that is_available can handle the overflow.
3908 assert_is_available_negative(overflow_word_size);
3909 }
3911 static void test_is_available() {
3912 TestVirtualSpaceNodeTest::test_is_available_positive();
3913 TestVirtualSpaceNodeTest::test_is_available_negative();
3914 TestVirtualSpaceNodeTest::test_is_available_overflow();
3915 }
3916 };
3918 void TestVirtualSpaceNode_test() {
3919 TestVirtualSpaceNodeTest::test();
3920 TestVirtualSpaceNodeTest::test_is_available();
3921 }
3922 #endif