Thu, 09 Apr 2020 20:58:56 +0000
8241444: Metaspace::_class_vsm not initialized if compressed class pointers are disabled
Summary: Initialize _class_vsm to NULL by default
Reviewed-by: phh
Contributed-by: thomas.stuefe@gmail.com
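
A minimal sketch of the kind of change the summary describes (the member names
follow the bug title and the surrounding code; the actual diff is not shown
here, so treat this as an illustration, not the verbatim patch): _class_vsm was
only assigned when compressed class pointers were enabled, so it now gets a
safe NULL default.

  // Hypothetical sketch (metaspace.hpp/metaspace.cpp, not the verbatim fix):
  // class Metaspace ... {
  //   SpaceManager* _vsm;
  //   SpaceManager* _class_vsm;  // previously left uninitialized when
  //                              // UseCompressedClassPointers is off
  //   ...
  // };
  //
  // Metaspace::Metaspace(Mutex* lock, MetaspaceType type)
  //   : _vsm(NULL), _class_vsm(NULL) {  // initialize _class_vsm to NULL by default
  //   initialize(lock, type);
  // }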
/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to true to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
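
// Worked example (an illustration, assuming a 64-bit VM where BytesPerWord == 8):
// SpecializedChunk = 128 words = 1 KB, SmallChunk = 512 words = 4 KB, and
// MediumChunk = 8 K words = 64 KB; the class-space variants are smaller
// (ClassSmallChunk = 256 words = 2 KB, ClassMediumChunk = 4 K words = 32 KB).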

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Running totals: size (in words) and number of chunks held on the
  // free lists of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Allocate a chunk from, or return a chunk to, the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;
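
  // Illustrative example (not from the original source): with WasteMultiplier == 4,
  // a request for 100 words will not be satisfied from a free block larger than
  // 400 words; get_block() below returns such an oversized block to the
  // dictionary instead, to bound internal fragmentation.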

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, and return a block to, the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         err_msg(PTR_FORMAT " is not aligned to " \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t chunk_word_size,
                           size_t suggested_commit_granularity);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
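
  // Typical use (this pattern appears throughout this file), e.g.:
  //   VirtualSpaceListIterator iter(virtual_space_list());
  //   while (iter.repeat()) {
  //     VirtualSpaceNode* vsn = iter.get_next();
  //     ...
  //   }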
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  static size_t specialized_chunk_size(bool is_class) { return is_class ? ClassSpecializedChunk : SpecializedChunk; }
  static size_t small_chunk_size(bool is_class)       { return is_class ? ClassSmallChunk : SmallChunk; }
  static size_t medium_chunk_size(bool is_class)      { return is_class ? ClassMediumChunk : MediumChunk; }

  static size_t smallest_chunk_size(bool is_class)    { return specialized_chunk_size(is_class); }

  // Accessors
  bool is_class() const { return _mdtype == Metaspace::ClassType; }

  size_t specialized_chunk_size() const { return specialized_chunk_size(is_class()); }
  size_t small_chunk_size()       const { return small_chunk_size(is_class()); }
  size_t medium_chunk_size()      const { return medium_chunk_size(is_class()); }

  size_t smallest_chunk_size()    const { return smallest_chunk_size(is_class()); }

  size_t medium_chunk_bunch()     const { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Remove this SpaceManager's portion from the running sums.  That is,
  // the global running sums for Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Adjust the initial chunk size to match one of the fixed chunk list sizes,
  // or return the unadjusted size if the requested size is humongous.
  static size_t adjust_initial_chunk_size(size_t requested, bool is_class_space);
  size_t adjust_initial_chunk_size(size_t requested) const;

  // Get the initial chunk size for this metaspace type.
  size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const;

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t chunk_word_size);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Computes the chunk size to return, based on the allocation size and
  // a minimum chunk size (used when expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif
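
  // Worked example for get_raw_word_size() below (an illustration, assuming a
  // 64-bit VM where BytesPerWord == 8): a request for 1 word (8 bytes) is first
  // raised to sizeof(Metablock) if that is larger, then aligned up to
  // Metachunk::object_alignment(), so every block handed out is big enough to
  // be recycled as a Metablock on the freelist later.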
  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space  (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed.  Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                                    \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,             \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT    \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,               \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(chunk_word_size,              Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the perm gen, increases in the perm
// gen were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
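
// Illustrative example (not from the original source), assuming the typical
// defaults of MinMetaspaceExpansion ~ 256K and MaxMetaspaceExpansion ~ 4M:
// a 100K request rounds up to commit alignment and, being <= min_delta,
// expands the HWM by 256K; a 1M request expands it by 4M; and a 10M request
// expands it by roughly 10M + 256K.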

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}
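
// Hypothetical caller sketch (not code from this file): since the update is a
// single compare-and-swap, a caller that loses the race would typically retry
// while can_retry stays true, e.g.:
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry) &&
//          can_retry) {
//     ; // lost the CAS to another thread; try again
//   }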

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                 capacity_until_gc, committed_bytes));

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
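
// Illustrative example (not from the original source): if capacity_until_GC()
// is 20M, committed_bytes() is 12M, and MaxMetaspaceSize is unlimited, then
// left_until_GC is 8M, so up to 8M / BytesPerWord words may still be committed
// before a GC is triggered.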

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists becomes fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "   used_after_gc       : %6.1fKB",
                           used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                               "  minimum_desired_capacity: %6.1fKB"
                               "  expand_bytes: %6.1fKB"
                               "  MinMetaspaceExpansion: %6.1fKB"
                               "  new metaspace HWM:  %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
      }
    }
    return;
  }
1590 // No expansion, now see if we want to shrink
1591 // We would never want to shrink more than this
1592 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1593 assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1594 max_shrink_bytes));
1596 // Should shrinking be considered?
1597 if (MaxMetaspaceFreeRatio < 100) {
1598 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1599 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1600 const double max_tmp = used_after_gc / minimum_used_percentage;
1601 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
1602 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1603 MetaspaceSize);
1604 if (PrintGCDetails && Verbose) {
1605 gclog_or_tty->print_cr(" "
1606 " maximum_free_percentage: %6.2f"
1607 " minimum_used_percentage: %6.2f",
1608 maximum_free_percentage,
1609 minimum_used_percentage);
1610 gclog_or_tty->print_cr(" "
1611 " minimum_desired_capacity: %6.1fKB"
1612 " maximum_desired_capacity: %6.1fKB",
1613 minimum_desired_capacity / (double) K,
1614 maximum_desired_capacity / (double) K);
1615 }
1617 assert(minimum_desired_capacity <= maximum_desired_capacity,
1618 "sanity check");
1620 if (capacity_until_GC > maximum_desired_capacity) {
1621 // Capacity too large, compute shrinking size
1622 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1623 // We don't want to shrink all the way back to initSize if people call
1624 // System.gc(), because some programs do that between "phases" and then
1625 // we'd just have to grow the heap up again for the next phase. So we
1626 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1627 // on the third call, and 100% by the fourth call. But if we recompute
1628 // size without shrinking, it goes back to 0%.
1629 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1631 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
1633 assert(shrink_bytes <= max_shrink_bytes,
1634 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1635 shrink_bytes, max_shrink_bytes));
1636 if (current_shrink_factor == 0) {
1637 _shrink_factor = 10;
1638 } else {
1639 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1640 }
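// Illustrative progression of the damping above (assumed values): with a
// constant excess of 1000K over maximum_desired_capacity across repeated
// System.gc() calls, the damped shrink amounts would be
//   call 1: factor   0 -> shrink    0K (factor becomes 10)
//   call 2: factor  10 -> shrink  100K (factor becomes 40)
//   call 3: factor  40 -> shrink  400K (factor becomes 100)
//   call 4: factor 100 -> shrink 1000K
// A recomputation that does not shrink resets the factor to 0%.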
1641 if (PrintGCDetails && Verbose) {
1642 gclog_or_tty->print_cr(" "
1643 " shrinking:"
1644 " initSize: %.1fK"
1645 " maximum_desired_capacity: %.1fK",
1646 MetaspaceSize / (double) K,
1647 maximum_desired_capacity / (double) K);
1648 gclog_or_tty->print_cr(" "
1649 " shrink_bytes: %.1fK"
1650 " current_shrink_factor: %d"
1651 " new shrink factor: %d"
1652 " MinMetaspaceExpansion: %.1fK",
1653 shrink_bytes / (double) K,
1654 current_shrink_factor,
1655 _shrink_factor,
1656 MinMetaspaceExpansion / (double) K);
1657 }
1658 }
1659 }
1661 // Don't shrink unless it's significant
1662 if (shrink_bytes >= MinMetaspaceExpansion &&
1663 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1664 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
1665 Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1666 new_capacity_until_GC,
1667 MetaspaceGCThresholdUpdater::ComputeNewSize);
1668 }
1669 }
1671 // Metadebug methods
1673 void Metadebug::init_allocation_fail_alot_count() {
1674 if (MetadataAllocationFailALot) {
1675 _allocation_fail_alot_count =
1676 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1677 }
1678 }
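// Illustrative reading of the formula above (the interval value is an
// assumption, not taken from this file): os::random()/(max_jint+1.0) is
// roughly uniform in [0,1), so with MetadataAllocationFailALotInterval =
// 1000 the counter becomes a random integer in [1, 1000]; when it reaches
// zero, one metadata allocation is forced to fail and the counter is
// re-armed.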
1680 #ifdef ASSERT
1681 bool Metadebug::test_metadata_failure() {
1682 if (MetadataAllocationFailALot &&
1683 Threads::is_vm_complete()) {
1684 if (_allocation_fail_alot_count > 0) {
1685 _allocation_fail_alot_count--;
1686 } else {
1687 if (TraceMetadataChunkAllocation && Verbose) {
1688 gclog_or_tty->print_cr("Metadata allocation failing for "
1689 "MetadataAllocationFailALot");
1690 }
1691 init_allocation_fail_alot_count();
1692 return true;
1693 }
1694 }
1695 return false;
1696 }
1697 #endif
1699 // ChunkManager methods
1701 size_t ChunkManager::free_chunks_total_words() {
1702 return _free_chunks_total;
1703 }
1705 size_t ChunkManager::free_chunks_total_bytes() {
1706 return free_chunks_total_words() * BytesPerWord;
1707 }
1709 size_t ChunkManager::free_chunks_count() {
1710 #ifdef ASSERT
1711 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1712 MutexLockerEx cl(SpaceManager::expand_lock(),
1713 Mutex::_no_safepoint_check_flag);
1714 // This lock is only needed in debug because the verification
1715 // of the _free_chunks_totals walks the list of free chunks
1716 slow_locked_verify_free_chunks_count();
1717 }
1718 #endif
1719 return _free_chunks_count;
1720 }
1722 void ChunkManager::locked_verify_free_chunks_total() {
1723 assert_lock_strong(SpaceManager::expand_lock());
1724 assert(sum_free_chunks() == _free_chunks_total,
1725 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1726 " same as sum " SIZE_FORMAT, _free_chunks_total,
1727 sum_free_chunks()));
1728 }
1730 void ChunkManager::verify_free_chunks_total() {
1731 MutexLockerEx cl(SpaceManager::expand_lock(),
1732 Mutex::_no_safepoint_check_flag);
1733 locked_verify_free_chunks_total();
1734 }
1736 void ChunkManager::locked_verify_free_chunks_count() {
1737 assert_lock_strong(SpaceManager::expand_lock());
1738 assert(sum_free_chunks_count() == _free_chunks_count,
1739 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1740 " same as sum " SIZE_FORMAT, _free_chunks_count,
1741 sum_free_chunks_count()));
1742 }
1744 void ChunkManager::verify_free_chunks_count() {
1745 #ifdef ASSERT
1746 MutexLockerEx cl(SpaceManager::expand_lock(),
1747 Mutex::_no_safepoint_check_flag);
1748 locked_verify_free_chunks_count();
1749 #endif
1750 }
1752 void ChunkManager::verify() {
1753 MutexLockerEx cl(SpaceManager::expand_lock(),
1754 Mutex::_no_safepoint_check_flag);
1755 locked_verify();
1756 }
1758 void ChunkManager::locked_verify() {
1759 locked_verify_free_chunks_count();
1760 locked_verify_free_chunks_total();
1761 }
1763 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1764 assert_lock_strong(SpaceManager::expand_lock());
1765 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1766 _free_chunks_total, _free_chunks_count);
1767 }
1769 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1770 assert_lock_strong(SpaceManager::expand_lock());
1771 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1772 sum_free_chunks(), sum_free_chunks_count());
1773 }
1775 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1776 assert(index == SpecializedIndex || index == SmallIndex || index == MediumIndex,
1777 err_msg("Bad index: %d", (int)index));
1779 return &_free_chunks[index];
1780 }
1782 // These methods that sum the free chunk lists are used by the printing
1783 // methods that run in product builds.
1784 size_t ChunkManager::sum_free_chunks() {
1785 assert_lock_strong(SpaceManager::expand_lock());
1786 size_t result = 0;
1787 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1788 ChunkList* list = free_chunks(i);
1790 if (list == NULL) {
1791 continue;
1792 }
1794 result = result + list->count() * list->size();
1795 }
1796 result = result + humongous_dictionary()->total_size();
1797 return result;
1798 }
1800 size_t ChunkManager::sum_free_chunks_count() {
1801 assert_lock_strong(SpaceManager::expand_lock());
1802 size_t count = 0;
1803 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1804 ChunkList* list = free_chunks(i);
1805 if (list == NULL) {
1806 continue;
1807 }
1808 count = count + list->count();
1809 }
1810 count = count + humongous_dictionary()->total_free_blocks();
1811 return count;
1812 }
1814 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1815 ChunkIndex index = list_index(word_size);
1816 assert(index < HumongousIndex, "No humongous list");
1817 return free_chunks(index);
1818 }
1820 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1821 assert_lock_strong(SpaceManager::expand_lock());
1823 slow_locked_verify();
1825 Metachunk* chunk = NULL;
1826 if (list_index(word_size) != HumongousIndex) {
1827 ChunkList* free_list = find_free_chunks_list(word_size);
1828 assert(free_list != NULL, "Sanity check");
1830 chunk = free_list->head();
1832 if (chunk == NULL) {
1833 return NULL;
1834 }
1836 // Remove the chunk as the head of the list.
1837 free_list->remove_chunk(chunk);
1839 if (TraceMetadataChunkAllocation && Verbose) {
1840 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1841 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1842 free_list, chunk, chunk->word_size());
1843 }
1844 } else {
1845 chunk = humongous_dictionary()->get_chunk(
1846 word_size,
1847 FreeBlockDictionary<Metachunk>::atLeast);
1849 if (chunk == NULL) {
1850 return NULL;
1851 }
1853 if (TraceMetadataHumongousAllocation) {
1854 size_t waste = chunk->word_size() - word_size;
1855 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1856 SIZE_FORMAT " for requested size " SIZE_FORMAT
1857 " waste " SIZE_FORMAT,
1858 chunk->word_size(), word_size, waste);
1859 }
1860 }
1862 // Chunk is being removed from the chunks free list.
1863 dec_free_chunks_total(chunk->word_size());
1865 // Remove it from the links to this freelist
1866 chunk->set_next(NULL);
1867 chunk->set_prev(NULL);
1868 #ifdef ASSERT
1869 // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1870 // work.
1871 chunk->set_is_tagged_free(false);
1872 #endif
1873 chunk->container()->inc_container_count();
1875 slow_locked_verify();
1876 return chunk;
1877 }
1879 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1880 assert_lock_strong(SpaceManager::expand_lock());
1881 slow_locked_verify();
1883 // Take from the beginning of the list
1884 Metachunk* chunk = free_chunks_get(word_size);
1885 if (chunk == NULL) {
1886 return NULL;
1887 }
1889 assert((word_size <= chunk->word_size()) ||
1890 (list_index(chunk->word_size()) == HumongousIndex),
1891 "Non-humongous variable sized chunk");
1892 if (TraceMetadataChunkAllocation) {
1893 size_t list_count;
1894 if (list_index(word_size) < HumongousIndex) {
1895 ChunkList* list = find_free_chunks_list(word_size);
1896 list_count = list->count();
1897 } else {
1898 list_count = humongous_dictionary()->total_count();
1899 }
1900 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1901 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1902 this, chunk, chunk->word_size(), list_count);
1903 locked_print_free_chunks(gclog_or_tty);
1904 }
1906 return chunk;
1907 }
1909 void ChunkManager::print_on(outputStream* out) const {
1910 if (PrintFLSStatistics != 0) {
1911 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1912 }
1913 }
1915 // SpaceManager methods
1917 size_t SpaceManager::adjust_initial_chunk_size(size_t requested, bool is_class_space) {
1918 size_t chunk_sizes[] = {
1919 specialized_chunk_size(is_class_space),
1920 small_chunk_size(is_class_space),
1921 medium_chunk_size(is_class_space)
1922 };
1924 // Adjust up to one of the fixed chunk sizes ...
1925 for (size_t i = 0; i < ARRAY_SIZE(chunk_sizes); i++) {
1926 if (requested <= chunk_sizes[i]) {
1927 return chunk_sizes[i];
1928 }
1929 }
1931 // ... or return the size as a humongous chunk.
1932 return requested;
1933 }
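// Usage sketch (illustrative, assuming the non-class ChunkSizes values of
// 128/512/8*K words for specialized/small/medium):
//   adjust_initial_chunk_size(100,  false) -> 128   (rounded up to specialized)
//   adjust_initial_chunk_size(300,  false) -> 512   (rounded up to small)
//   adjust_initial_chunk_size(6*K,  false) -> 8*K   (rounded up to medium)
//   adjust_initial_chunk_size(20*K, false) -> 20*K  (kept as a humongous size)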
1935 size_t SpaceManager::adjust_initial_chunk_size(size_t requested) const {
1936 return adjust_initial_chunk_size(requested, is_class());
1937 }
1939 size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const {
1940 size_t requested;
1942 if (is_class()) {
1943 switch (type) {
1944 case Metaspace::BootMetaspaceType: requested = Metaspace::first_class_chunk_word_size(); break;
1945 case Metaspace::ROMetaspaceType: requested = ClassSpecializedChunk; break;
1946 case Metaspace::ReadWriteMetaspaceType: requested = ClassSpecializedChunk; break;
1947 case Metaspace::AnonymousMetaspaceType: requested = ClassSpecializedChunk; break;
1948 case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
1949 default: requested = ClassSmallChunk; break;
1950 }
1951 } else {
1952 switch (type) {
1953 case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break;
1954 case Metaspace::ROMetaspaceType: requested = SharedReadOnlySize / wordSize; break;
1955 case Metaspace::ReadWriteMetaspaceType: requested = SharedReadWriteSize / wordSize; break;
1956 case Metaspace::AnonymousMetaspaceType: requested = SpecializedChunk; break;
1957 case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
1958 default: requested = SmallChunk; break;
1959 }
1960 }
1962 // Adjust to one of the fixed chunk sizes (unless humongous)
1963 const size_t adjusted = adjust_initial_chunk_size(requested);
1965 assert(adjusted != 0, err_msg("Incorrect initial chunk size. Requested: "
1966 SIZE_FORMAT " adjusted: " SIZE_FORMAT, requested, adjusted));
1968 return adjusted;
1969 }
1971 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1972 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1973 size_t free = 0;
1974 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1975 Metachunk* chunk = chunks_in_use(i);
1976 while (chunk != NULL) {
1977 free += chunk->free_word_size();
1978 chunk = chunk->next();
1979 }
1980 }
1981 return free;
1982 }
1984 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1985 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1986 size_t result = 0;
1987 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1988 result += sum_waste_in_chunks_in_use(i);
1989 }
1991 return result;
1992 }
1994 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1995 size_t result = 0;
1996 Metachunk* chunk = chunks_in_use(index);
1997 // Count the free space in all the chunks but not the
1998 // current chunk from which allocations are still being done.
1999 while (chunk != NULL) {
2000 if (chunk != current_chunk()) {
2001 result += chunk->free_word_size();
2002 }
2003 chunk = chunk->next();
2004 }
2005 return result;
2006 }
2008 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
2009 // For CMS use "allocated_chunks_words()" which does not need the
2010 // Metaspace lock. For the other collectors sum over the
2011 // lists. Use both methods as a check that "allocated_chunks_words()"
2012 // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive
2013 // to use in the product, so allocated_chunks_words() should be used there,
2014 // while still allowing a check that allocated_chunks_words() returns the same
2015 // value as sum_capacity_in_chunks_in_use() which is the definitive
2016 // answer.
2017 if (UseConcMarkSweepGC) {
2018 return allocated_chunks_words();
2019 } else {
2020 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2021 size_t sum = 0;
2022 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2023 Metachunk* chunk = chunks_in_use(i);
2024 while (chunk != NULL) {
2025 sum += chunk->word_size();
2026 chunk = chunk->next();
2027 }
2028 }
2029 return sum;
2030 }
2031 }
2033 size_t SpaceManager::sum_count_in_chunks_in_use() {
2034 size_t count = 0;
2035 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2036 count = count + sum_count_in_chunks_in_use(i);
2037 }
2039 return count;
2040 }
2042 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2043 size_t count = 0;
2044 Metachunk* chunk = chunks_in_use(i);
2045 while (chunk != NULL) {
2046 count++;
2047 chunk = chunk->next();
2048 }
2049 return count;
2050 }
2053 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2054 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2055 size_t used = 0;
2056 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2057 Metachunk* chunk = chunks_in_use(i);
2058 while (chunk != NULL) {
2059 used += chunk->used_word_size();
2060 chunk = chunk->next();
2061 }
2062 }
2063 return used;
2064 }
2066 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2068 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2069 Metachunk* chunk = chunks_in_use(i);
2070 st->print("SpaceManager: %s " PTR_FORMAT,
2071 chunk_size_name(i), chunk);
2072 if (chunk != NULL) {
2073 st->print_cr(" free " SIZE_FORMAT,
2074 chunk->free_word_size());
2075 } else {
2076 st->cr();
2077 }
2078 }
2080 chunk_manager()->locked_print_free_chunks(st);
2081 chunk_manager()->locked_print_sum_free_chunks(st);
2082 }
2084 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2086 // Decide between a small chunk and a medium chunk. Up to
2087 // _small_chunk_limit small chunks can be allocated but
2088 // once a medium chunk has been allocated, no more small
2089 // chunks will be allocated.
2090 size_t chunk_word_size;
2091 if (chunks_in_use(MediumIndex) == NULL &&
2092 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2093 chunk_word_size = (size_t) small_chunk_size();
2094 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2095 chunk_word_size = medium_chunk_size();
2096 }
2097 } else {
2098 chunk_word_size = medium_chunk_size();
2099 }
2101 // Might still need a humongous chunk. Enforce
2102 // humongous allocations sizes to be aligned up to
2103 // the smallest chunk size.
2104 size_t if_humongous_sized_chunk =
2105 align_size_up(word_size + Metachunk::overhead(),
2106 smallest_chunk_size());
2107 chunk_word_size =
2108 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2110 assert(!SpaceManager::is_humongous(word_size) ||
2111 chunk_word_size == if_humongous_sized_chunk,
2112 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2113 " chunk_word_size " SIZE_FORMAT,
2114 word_size, chunk_word_size));
2115 if (TraceMetadataHumongousAllocation &&
2116 SpaceManager::is_humongous(word_size)) {
2117 gclog_or_tty->print_cr("Metadata humongous allocation:");
2118 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
2119 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
2120 chunk_word_size);
2121 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
2122 Metachunk::overhead());
2123 }
2124 return chunk_word_size;
2125 }
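// Illustrative traces of the policy above (non-class sizes of 512 and 8*K
// words for small/medium and a _small_chunk_limit of e.g. 4 are assumptions):
//   - the first few small requests         -> 512-word small chunks
//   - a request too big for a small chunk  -> an 8*K-word medium chunk
//   - once a medium chunk exists           -> medium (or humongous) only
//   - word_size of 10*K                    -> humongous, aligned up to
//                                             smallest_chunk_size()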
2127 void SpaceManager::track_metaspace_memory_usage() {
2128 if (is_init_completed()) {
2129 if (is_class()) {
2130 MemoryService::track_compressed_class_memory_usage();
2131 }
2132 MemoryService::track_metaspace_memory_usage();
2133 }
2134 }
2136 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2137 assert(vs_list()->current_virtual_space() != NULL,
2138 "Should have been set");
2139 assert(current_chunk() == NULL ||
2140 current_chunk()->allocate(word_size) == NULL,
2141 "Don't need to expand");
2142 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2144 if (TraceMetadataChunkAllocation && Verbose) {
2145 size_t words_left = 0;
2146 size_t words_used = 0;
2147 if (current_chunk() != NULL) {
2148 words_left = current_chunk()->free_word_size();
2149 words_used = current_chunk()->used_word_size();
2150 }
2151 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2152 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2153 " words left",
2154 word_size, words_used, words_left);
2155 }
2157 // Get another chunk out of the virtual space
2158 size_t chunk_word_size = calc_chunk_size(word_size);
2159 Metachunk* next = get_new_chunk(chunk_word_size);
2161 MetaWord* mem = NULL;
2163 // If a chunk was available, add it to the in-use chunk list
2164 // and do an allocation from it.
2165 if (next != NULL) {
2166 // Add to this manager's list of chunks in use.
2167 add_chunk(next, false);
2168 mem = next->allocate(word_size);
2169 }
2171 // Track metaspace memory usage statistic.
2172 track_metaspace_memory_usage();
2174 return mem;
2175 }
2177 void SpaceManager::print_on(outputStream* st) const {
2179 for (ChunkIndex i = ZeroIndex;
2180 i < NumberOfInUseLists ;
2181 i = next_chunk_index(i) ) {
2182 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2183 chunks_in_use(i),
2184 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2185 }
2186 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2187 " Humongous " SIZE_FORMAT,
2188 sum_waste_in_chunks_in_use(SmallIndex),
2189 sum_waste_in_chunks_in_use(MediumIndex),
2190 sum_waste_in_chunks_in_use(HumongousIndex));
2191 // block free lists
2192 if (block_freelists() != NULL) {
2193 st->print_cr("total in block free lists " SIZE_FORMAT,
2194 block_freelists()->total_size());
2195 }
2196 }
2198 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2199 Mutex* lock) :
2200 _mdtype(mdtype),
2201 _allocated_blocks_words(0),
2202 _allocated_chunks_words(0),
2203 _allocated_chunks_count(0),
2204 _lock(lock)
2205 {
2206 initialize();
2207 }
2209 void SpaceManager::inc_size_metrics(size_t words) {
2210 assert_lock_strong(SpaceManager::expand_lock());
2211 // Total of allocated Metachunks and allocated Metachunks count
2212 // for each SpaceManager
2213 _allocated_chunks_words = _allocated_chunks_words + words;
2214 _allocated_chunks_count++;
2215 // Global total of capacity in allocated Metachunks
2216 MetaspaceAux::inc_capacity(mdtype(), words);
2217 // Global total of allocated Metablocks.
2218 // used_words_slow() includes the overhead in each
2219 // Metachunk so include it in the used when the
2220 // Metachunk is first added (so only added once per
2221 // Metachunk).
2222 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2223 }
2225 void SpaceManager::inc_used_metrics(size_t words) {
2226 // Add to the per SpaceManager total
2227 Atomic::add_ptr(words, &_allocated_blocks_words);
2228 // Add to the global total
2229 MetaspaceAux::inc_used(mdtype(), words);
2230 }
2232 void SpaceManager::dec_total_from_size_metrics() {
2233 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2234 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2235 // Also deduct the overhead per Metachunk
2236 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2237 }
2239 void SpaceManager::initialize() {
2240 Metadebug::init_allocation_fail_alot_count();
2241 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2242 _chunks_in_use[i] = NULL;
2243 }
2244 _current_chunk = NULL;
2245 if (TraceMetadataChunkAllocation && Verbose) {
2246 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2247 }
2248 }
2250 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2251 if (chunks == NULL) {
2252 return;
2253 }
2254 ChunkList* list = free_chunks(index);
2255 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2256 assert_lock_strong(SpaceManager::expand_lock());
2257 Metachunk* cur = chunks;
2259 // This returns chunks one at a time. If a new
2260 // class List can be created that is a base class
2261 // of FreeList then something like FreeList::prepend()
2262 // can be used in place of this loop
2263 while (cur != NULL) {
2264 assert(cur->container() != NULL, "Container should have been set");
2265 cur->container()->dec_container_count();
2266 // Capture the next link before it is changed
2267 // by the call to return_chunk_at_head();
2268 Metachunk* next = cur->next();
2269 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2270 list->return_chunk_at_head(cur);
2271 cur = next;
2272 }
2273 }
2275 SpaceManager::~SpaceManager() {
2276 // This call takes this->_lock, which can't be done while holding the expand_lock()
2277 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2278 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2279 " allocated_chunks_words() " SIZE_FORMAT,
2280 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2282 MutexLockerEx fcl(SpaceManager::expand_lock(),
2283 Mutex::_no_safepoint_check_flag);
2285 chunk_manager()->slow_locked_verify();
2287 dec_total_from_size_metrics();
2289 if (TraceMetadataChunkAllocation && Verbose) {
2290 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2291 locked_print_chunks_in_use_on(gclog_or_tty);
2292 }
2294 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2295 // is needed during the freeing of VirtualSpaceNodes.
2297 // Have to update before the chunks_in_use lists are emptied
2298 // below.
2299 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2300 sum_count_in_chunks_in_use());
2302 // Add all the chunks in use by this space manager
2303 // to the global list of free chunks.
2305 // Follow each list of chunks-in-use and add them to the
2306 // free lists. Each list is NULL terminated.
2308 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2309 if (TraceMetadataChunkAllocation && Verbose) {
2310 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2311 sum_count_in_chunks_in_use(i),
2312 chunk_size_name(i));
2313 }
2314 Metachunk* chunks = chunks_in_use(i);
2315 chunk_manager()->return_chunks(i, chunks);
2316 set_chunks_in_use(i, NULL);
2317 if (TraceMetadataChunkAllocation && Verbose) {
2318 gclog_or_tty->print_cr("updated freelist count %d %s",
2319 chunk_manager()->free_chunks(i)->count(),
2320 chunk_size_name(i));
2321 }
2322 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2323 }
2325 // The medium chunk case may be optimized by passing the head and
2326 // tail of the medium chunk list to add_at_head(). The tail is often
2327 // the current chunk but there are probably exceptions.
2329 // Humongous chunks
2330 if (TraceMetadataChunkAllocation && Verbose) {
2331 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2332 sum_count_in_chunks_in_use(HumongousIndex),
2333 chunk_size_name(HumongousIndex));
2334 gclog_or_tty->print("Humongous chunk dictionary: ");
2335 }
2336 // Humongous chunks are never the current chunk.
2337 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2339 while (humongous_chunks != NULL) {
2340 #ifdef ASSERT
2341 humongous_chunks->set_is_tagged_free(true);
2342 #endif
2343 if (TraceMetadataChunkAllocation && Verbose) {
2344 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2345 humongous_chunks,
2346 humongous_chunks->word_size());
2347 }
2348 assert(humongous_chunks->word_size() == (size_t)
2349 align_size_up(humongous_chunks->word_size(),
2350 smallest_chunk_size()),
2351 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2352 " granularity %d",
2353 humongous_chunks->word_size(), smallest_chunk_size()));
2354 Metachunk* next_humongous_chunks = humongous_chunks->next();
2355 humongous_chunks->container()->dec_container_count();
2356 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2357 humongous_chunks = next_humongous_chunks;
2358 }
2359 if (TraceMetadataChunkAllocation && Verbose) {
2360 gclog_or_tty->cr();
2361 gclog_or_tty->print_cr("updated dictionary count %d %s",
2362 chunk_manager()->humongous_dictionary()->total_count(),
2363 chunk_size_name(HumongousIndex));
2364 }
2365 chunk_manager()->slow_locked_verify();
2366 }
2368 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2369 switch (index) {
2370 case SpecializedIndex:
2371 return "Specialized";
2372 case SmallIndex:
2373 return "Small";
2374 case MediumIndex:
2375 return "Medium";
2376 case HumongousIndex:
2377 return "Humongous";
2378 default:
2379 return NULL;
2380 }
2381 }
2383 ChunkIndex ChunkManager::list_index(size_t size) {
2384 if (free_chunks(SpecializedIndex)->size() == size) {
2385 return SpecializedIndex;
2386 }
2387 if (free_chunks(SmallIndex)->size() == size) {
2388 return SmallIndex;
2389 }
2390 if (free_chunks(MediumIndex)->size() == size) {
2391 return MediumIndex;
2392 }
2394 assert(size > free_chunks(MediumIndex)->size(), "Not a humongous chunk");
2395 return HumongousIndex;
2396 }
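// Minimal usage sketch (illustrative; sizes assume a non-class ChunkManager
// built with (SpecializedChunk, SmallChunk, MediumChunk) = (128, 512, 8*K)
// words):
//   list_index(128)  == SpecializedIndex
//   list_index(512)  == SmallIndex
//   list_index(8*K)  == MediumIndex
//   list_index(16*K) == HumongousIndex  // anything larger than medium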
2398 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2399 assert_lock_strong(_lock);
2400 size_t raw_word_size = get_raw_word_size(word_size);
2401 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2402 assert(raw_word_size >= min_size,
2403 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2404 block_freelists()->return_block(p, raw_word_size);
2405 }
2407 // Adds a chunk to the list of chunks in use.
2408 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2410 assert(new_chunk != NULL, "Should not be NULL");
2411 assert(new_chunk->next() == NULL, "Should not be on a list");
2413 new_chunk->reset_empty();
2415 // Find the correct list and set the current
2416 // chunk for that list.
2417 ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
2419 if (index != HumongousIndex) {
2420 retire_current_chunk();
2421 set_current_chunk(new_chunk);
2422 new_chunk->set_next(chunks_in_use(index));
2423 set_chunks_in_use(index, new_chunk);
2424 } else {
2425 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2426 // small, so small will be null. Link this first chunk as the current
2427 // chunk.
2428 if (make_current) {
2429 // Set as the current chunk but otherwise treat as a humongous chunk.
2430 set_current_chunk(new_chunk);
2431 }
2432 // Link at head. The _current_chunk only points to a humongous chunk for
2433 // the null class loader metaspace (class and data virtual space managers),
2434 // so it will not point to the tail
2435 // of the humongous chunks list.
2436 new_chunk->set_next(chunks_in_use(HumongousIndex));
2437 set_chunks_in_use(HumongousIndex, new_chunk);
2439 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2440 }
2442 // Add to the running sum of capacity
2443 inc_size_metrics(new_chunk->word_size());
2445 assert(new_chunk->is_empty(), "Not ready for reuse");
2446 if (TraceMetadataChunkAllocation && Verbose) {
2447 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2448 sum_count_in_chunks_in_use());
2449 new_chunk->print_on(gclog_or_tty);
2450 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2451 }
2452 }
2454 void SpaceManager::retire_current_chunk() {
2455 if (current_chunk() != NULL) {
2456 size_t remaining_words = current_chunk()->free_word_size();
2457 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2458 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2459 inc_used_metrics(remaining_words);
2460 }
2461 }
2462 }
2464 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2465 // Get a chunk from the chunk freelist
2466 Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2468 if (next == NULL) {
2469 next = vs_list()->get_new_chunk(chunk_word_size,
2470 medium_chunk_bunch());
2471 }
2473 if (TraceMetadataHumongousAllocation && next != NULL &&
2474 SpaceManager::is_humongous(next->word_size())) {
2475 gclog_or_tty->print_cr(" new humongous chunk word size "
2476 PTR_FORMAT, next->word_size());
2477 }
2479 return next;
2480 }
2482 MetaWord* SpaceManager::allocate(size_t word_size) {
2483 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2485 size_t raw_word_size = get_raw_word_size(word_size);
2486 BlockFreelist* fl = block_freelists();
2487 MetaWord* p = NULL;
2488 // Allocation from the dictionary is expensive in the sense that
2489 // the dictionary has to be searched for a size. Don't allocate
2490 // from the dictionary until it starts to get fat. Is this
2491 // a reasonable policy? Maybe a skinny dictionary is fast enough
2492 // for allocations. Do some profiling. JJJ
2493 if (fl->total_size() > allocation_from_dictionary_limit) {
2494 p = fl->get_block(raw_word_size);
2495 }
2496 if (p == NULL) {
2497 p = allocate_work(raw_word_size);
2498 }
2500 return p;
2501 }
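// Illustrative flow for the policy above (the word counts are assumptions):
// a freed 64-word Metablock is handed back to the BlockFreelist by
// deallocate(); later allocate(64) calls still carve fresh space from the
// current chunk until the freelist's total_size() exceeds
// allocation_from_dictionary_limit (4 * K words), after which get_block()
// may recycle the freed block instead.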
2503 // Returns the address of space allocated for "word_size".
2504 // This method does not know about blocks (Metablocks)
2505 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2506 assert_lock_strong(_lock);
2507 #ifdef ASSERT
2508 if (Metadebug::test_metadata_failure()) {
2509 return NULL;
2510 }
2511 #endif
2512 // Is there space in the current chunk?
2513 MetaWord* result = NULL;
2515 // For DumpSharedSpaces, only allocate out of the current chunk which is
2516 // never null because we gave it the size we wanted. Caller reports out
2517 // of memory if this returns null.
2518 if (DumpSharedSpaces) {
2519 assert(current_chunk() != NULL, "should never happen");
2520 inc_used_metrics(word_size);
2521 return current_chunk()->allocate(word_size); // caller handles null result
2522 }
2524 if (current_chunk() != NULL) {
2525 result = current_chunk()->allocate(word_size);
2526 }
2528 if (result == NULL) {
2529 result = grow_and_allocate(word_size);
2530 }
2532 if (result != NULL) {
2533 inc_used_metrics(word_size);
2534 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2535 "Head of the list is being allocated");
2536 }
2538 return result;
2539 }
2541 void SpaceManager::verify() {
2542 // If there are blocks in the dictionary, then
2543 // verification of chunks does not work since
2544 // being in the dictionary alters a chunk.
2545 if (block_freelists()->total_size() == 0) {
2546 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2547 Metachunk* curr = chunks_in_use(i);
2548 while (curr != NULL) {
2549 curr->verify();
2550 verify_chunk_size(curr);
2551 curr = curr->next();
2552 }
2553 }
2554 }
2555 }
2557 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2558 assert(is_humongous(chunk->word_size()) ||
2559 chunk->word_size() == medium_chunk_size() ||
2560 chunk->word_size() == small_chunk_size() ||
2561 chunk->word_size() == specialized_chunk_size(),
2562 "Chunk size is wrong");
2563 return;
2564 }
2566 #ifdef ASSERT
2567 void SpaceManager::verify_allocated_blocks_words() {
2568 // Verification is only guaranteed at a safepoint.
2569 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2570 "Verification can fail if the application is running");
2571 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2572 err_msg("allocation total is not consistent " SIZE_FORMAT
2573 " vs " SIZE_FORMAT,
2574 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2575 }
2577 #endif
2579 void SpaceManager::dump(outputStream* const out) const {
2580 size_t curr_total = 0;
2581 size_t waste = 0;
2582 uint i = 0;
2583 size_t used = 0;
2584 size_t capacity = 0;
2586 // Add up statistics for all chunks in this SpaceManager.
2587 for (ChunkIndex index = ZeroIndex;
2588 index < NumberOfInUseLists;
2589 index = next_chunk_index(index)) {
2590 for (Metachunk* curr = chunks_in_use(index);
2591 curr != NULL;
2592 curr = curr->next()) {
2593 out->print("%d) ", i++);
2594 curr->print_on(out);
2595 curr_total += curr->word_size();
2596 used += curr->used_word_size();
2597 capacity += curr->word_size();
2598 waste += curr->free_word_size() + curr->overhead();
2599 }
2600 }
2602 if (TraceMetadataChunkAllocation && Verbose) {
2603 block_freelists()->print_on(out);
2604 }
2606 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2607 // Free space isn't wasted.
2608 waste -= free;
2610 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2611 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2612 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2613 }
2615 #ifndef PRODUCT
2616 void SpaceManager::mangle_freed_chunks() {
2617 for (ChunkIndex index = ZeroIndex;
2618 index < NumberOfInUseLists;
2619 index = next_chunk_index(index)) {
2620 for (Metachunk* curr = chunks_in_use(index);
2621 curr != NULL;
2622 curr = curr->next()) {
2623 curr->mangle();
2624 }
2625 }
2626 }
2627 #endif // PRODUCT
2629 // MetaspaceAux
2632 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2633 size_t MetaspaceAux::_used_words[] = {0, 0};
2635 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2636 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2637 return list == NULL ? 0 : list->free_bytes();
2638 }
2640 size_t MetaspaceAux::free_bytes() {
2641 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2642 }
2644 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2645 assert_lock_strong(SpaceManager::expand_lock());
2646 assert(words <= capacity_words(mdtype),
2647 err_msg("About to decrement below 0: words " SIZE_FORMAT
2648 " is greater than _capacity_words[%u] " SIZE_FORMAT,
2649 words, mdtype, capacity_words(mdtype)));
2650 _capacity_words[mdtype] -= words;
2651 }
2653 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2654 assert_lock_strong(SpaceManager::expand_lock());
2655 // Needs to be atomic
2656 _capacity_words[mdtype] += words;
2657 }
2659 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2660 assert(words <= used_words(mdtype),
2661 err_msg("About to decrement below 0: words " SIZE_FORMAT
2662 " is greater than _used_words[%u] " SIZE_FORMAT,
2663 words, mdtype, used_words(mdtype)));
2664 // For CMS, deallocation of the Metaspaces occurs during the
2665 // sweep, which is a concurrent phase. Protection by the expand_lock()
2666 // is not enough since allocation is on a per Metaspace basis
2667 // and protected by the Metaspace lock.
2668 jlong minus_words = (jlong) - (jlong) words;
2669 Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2670 }
2672 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2673 // _used_words tracks allocations for
2674 // each piece of metadata. Those allocations are
2675 // generally done concurrently by different application
2676 // threads so must be done atomically.
2677 Atomic::add_ptr(words, &_used_words[mdtype]);
2678 }
2680 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2681 size_t used = 0;
2682 ClassLoaderDataGraphMetaspaceIterator iter;
2683 while (iter.repeat()) {
2684 Metaspace* msp = iter.get_next();
2685 // Sum allocated_blocks_words for each metaspace
2686 if (msp != NULL) {
2687 used += msp->used_words_slow(mdtype);
2688 }
2689 }
2690 return used * BytesPerWord;
2691 }
2693 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2694 size_t free = 0;
2695 ClassLoaderDataGraphMetaspaceIterator iter;
2696 while (iter.repeat()) {
2697 Metaspace* msp = iter.get_next();
2698 if (msp != NULL) {
2699 free += msp->free_words_slow(mdtype);
2700 }
2701 }
2702 return free * BytesPerWord;
2703 }
2705 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2706 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2707 return 0;
2708 }
2709 // Don't count the space in the freelists. That space will be
2710 // added to the capacity calculation as needed.
2711 size_t capacity = 0;
2712 ClassLoaderDataGraphMetaspaceIterator iter;
2713 while (iter.repeat()) {
2714 Metaspace* msp = iter.get_next();
2715 if (msp != NULL) {
2716 capacity += msp->capacity_words_slow(mdtype);
2717 }
2718 }
2719 return capacity * BytesPerWord;
2720 }
2722 size_t MetaspaceAux::capacity_bytes_slow() {
2723 #ifdef PRODUCT
2724 // Use capacity_bytes() in PRODUCT instead of this function.
2725 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2726 #endif
2727 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2728 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2729 assert(capacity_bytes() == class_capacity + non_class_capacity,
2730 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2731 " class_capacity + non_class_capacity " SIZE_FORMAT
2732 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2733 capacity_bytes(), class_capacity + non_class_capacity,
2734 class_capacity, non_class_capacity));
2736 return class_capacity + non_class_capacity;
2737 }
2739 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2740 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2741 return list == NULL ? 0 : list->reserved_bytes();
2742 }
2744 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2745 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2746 return list == NULL ? 0 : list->committed_bytes();
2747 }
2749 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2751 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2752 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2753 if (chunk_manager == NULL) {
2754 return 0;
2755 }
2756 chunk_manager->slow_verify();
2757 return chunk_manager->free_chunks_total_words();
2758 }
2760 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2761 return free_chunks_total_words(mdtype) * BytesPerWord;
2762 }
2764 size_t MetaspaceAux::free_chunks_total_words() {
2765 return free_chunks_total_words(Metaspace::ClassType) +
2766 free_chunks_total_words(Metaspace::NonClassType);
2767 }
2769 size_t MetaspaceAux::free_chunks_total_bytes() {
2770 return free_chunks_total_words() * BytesPerWord;
2771 }
2773 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2774 return Metaspace::get_chunk_manager(mdtype) != NULL;
2775 }
2777 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2778 if (!has_chunk_free_list(mdtype)) {
2779 return MetaspaceChunkFreeListSummary();
2780 }
2782 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2783 return cm->chunk_free_list_summary();
2784 }
2786 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2787 gclog_or_tty->print(", [Metaspace:");
2788 if (PrintGCDetails && Verbose) {
2789 gclog_or_tty->print(" " SIZE_FORMAT
2790 "->" SIZE_FORMAT
2791 "(" SIZE_FORMAT ")",
2792 prev_metadata_used,
2793 used_bytes(),
2794 reserved_bytes());
2795 } else {
2796 gclog_or_tty->print(" " SIZE_FORMAT "K"
2797 "->" SIZE_FORMAT "K"
2798 "(" SIZE_FORMAT "K)",
2799 prev_metadata_used/K,
2800 used_bytes()/K,
2801 reserved_bytes()/K);
2802 }
2804 gclog_or_tty->print("]");
2805 }
2807 // This is printed when PrintGCDetails is enabled
2808 void MetaspaceAux::print_on(outputStream* out) {
2809 Metaspace::MetadataType nct = Metaspace::NonClassType;
2811 out->print_cr(" Metaspace "
2812 "used " SIZE_FORMAT "K, "
2813 "capacity " SIZE_FORMAT "K, "
2814 "committed " SIZE_FORMAT "K, "
2815 "reserved " SIZE_FORMAT "K",
2816 used_bytes()/K,
2817 capacity_bytes()/K,
2818 committed_bytes()/K,
2819 reserved_bytes()/K);
2821 if (Metaspace::using_class_space()) {
2822 Metaspace::MetadataType ct = Metaspace::ClassType;
2823 out->print_cr(" class space "
2824 "used " SIZE_FORMAT "K, "
2825 "capacity " SIZE_FORMAT "K, "
2826 "committed " SIZE_FORMAT "K, "
2827 "reserved " SIZE_FORMAT "K",
2828 used_bytes(ct)/K,
2829 capacity_bytes(ct)/K,
2830 committed_bytes(ct)/K,
2831 reserved_bytes(ct)/K);
2832 }
2833 }
2835 // Print information for class space and data space separately.
2836 // This is almost the same as above.
2837 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2838 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2839 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2840 size_t used_bytes = used_bytes_slow(mdtype);
2841 size_t free_bytes = free_bytes_slow(mdtype);
2842 size_t used_and_free = used_bytes + free_bytes +
2843 free_chunks_capacity_bytes;
2844 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2845 "K + unused in chunks " SIZE_FORMAT "K + "
2846 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2847 "K capacity in allocated chunks " SIZE_FORMAT "K",
2848 used_bytes / K,
2849 free_bytes / K,
2850 free_chunks_capacity_bytes / K,
2851 used_and_free / K,
2852 capacity_bytes / K);
2853 // Accounting can only be correct if we got the values during a safepoint
2854 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2855 }
2857 // Print total fragmentation for class metaspaces
2858 void MetaspaceAux::print_class_waste(outputStream* out) {
2859 assert(Metaspace::using_class_space(), "class metaspace not used");
2860 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2861 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2862 ClassLoaderDataGraphMetaspaceIterator iter;
2863 while (iter.repeat()) {
2864 Metaspace* msp = iter.get_next();
2865 if (msp != NULL) {
2866 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2867 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2868 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2869 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2870 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2871 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2872 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2873 }
2874 }
2875 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2876 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2877 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2878 "large count " SIZE_FORMAT,
2879 cls_specialized_count, cls_specialized_waste,
2880 cls_small_count, cls_small_waste,
2881 cls_medium_count, cls_medium_waste, cls_humongous_count);
2882 }
2884 // Print total fragmentation for data and class metaspaces separately
2885 void MetaspaceAux::print_waste(outputStream* out) {
2886 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2887 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2889 ClassLoaderDataGraphMetaspaceIterator iter;
2890 while (iter.repeat()) {
2891 Metaspace* msp = iter.get_next();
2892 if (msp != NULL) {
2893 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2894 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2895 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2896 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2897 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2898 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2899 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2900 }
2901 }
2902 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2903 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2904 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2905 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2906 "large count " SIZE_FORMAT,
2907 specialized_count, specialized_waste, small_count,
2908 small_waste, medium_count, medium_waste, humongous_count);
2909 if (Metaspace::using_class_space()) {
2910 print_class_waste(out);
2911 }
2912 }
2914 // Dump global metaspace things from the end of ClassLoaderDataGraph
2915 void MetaspaceAux::dump(outputStream* out) {
2916 out->print_cr("All Metaspace:");
2917 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2918 out->print("class space: "); print_on(out, Metaspace::ClassType);
2919 print_waste(out);
2920 }
2922 void MetaspaceAux::verify_free_chunks() {
2923 Metaspace::chunk_manager_metadata()->verify();
2924 if (Metaspace::using_class_space()) {
2925 Metaspace::chunk_manager_class()->verify();
2926 }
2927 }
2929 void MetaspaceAux::verify_capacity() {
2930 #ifdef ASSERT
2931 size_t running_sum_capacity_bytes = capacity_bytes();
2932 // For purposes of the running sum of capacity, verify against capacity
2933 size_t capacity_in_use_bytes = capacity_bytes_slow();
2934 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2935 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2936 " capacity_bytes_slow()" SIZE_FORMAT,
2937 running_sum_capacity_bytes, capacity_in_use_bytes));
2938 for (Metaspace::MetadataType i = Metaspace::ClassType;
2939 i < Metaspace::MetadataTypeCount;
2940 i = (Metaspace::MetadataType)(i + 1)) {
2941 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2942 assert(capacity_bytes(i) == capacity_in_use_bytes,
2943 err_msg("capacity_bytes(%u) " SIZE_FORMAT
2944 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2945 i, capacity_bytes(i), i, capacity_in_use_bytes));
2946 }
2947 #endif
2948 }
2950 void MetaspaceAux::verify_used() {
2951 #ifdef ASSERT
2952 size_t running_sum_used_bytes = used_bytes();
2953 // For purposes of the running sum of used, verify against used
2954 size_t used_in_use_bytes = used_bytes_slow();
2955 assert(used_bytes() == used_in_use_bytes,
2956 err_msg("used_bytes() " SIZE_FORMAT
2957 " used_bytes_slow()" SIZE_FORMAT,
2958 used_bytes(), used_in_use_bytes));
2959 for (Metaspace::MetadataType i = Metaspace::ClassType;
2960 i < Metaspace::MetadataTypeCount;
2961 i = (Metaspace::MetadataType)(i + 1)) {
2962 size_t used_in_use_bytes = used_bytes_slow(i);
2963 assert(used_bytes(i) == used_in_use_bytes,
2964 err_msg("used_bytes(%u) " SIZE_FORMAT
2965 " used_bytes_slow(%u)" SIZE_FORMAT,
2966 i, used_bytes(i), i, used_in_use_bytes));
2967 }
2968 #endif
2969 }
2971 void MetaspaceAux::verify_metrics() {
2972 verify_capacity();
2973 verify_used();
2974 }
2977 // Metaspace methods
2979 size_t Metaspace::_first_chunk_word_size = 0;
2980 size_t Metaspace::_first_class_chunk_word_size = 0;
2982 size_t Metaspace::_commit_alignment = 0;
2983 size_t Metaspace::_reserve_alignment = 0;
2985 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2986 initialize(lock, type);
2987 }
2989 Metaspace::~Metaspace() {
2990 delete _vsm;
2991 if (using_class_space()) {
2992 delete _class_vsm;
2993 }
2994 }
2996 VirtualSpaceList* Metaspace::_space_list = NULL;
2997 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2999 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
3000 ChunkManager* Metaspace::_chunk_manager_class = NULL;
3002 #define VIRTUALSPACEMULTIPLIER 2
3004 #ifdef _LP64
3005 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
3007 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
3008 // Figure out the narrow_klass_base and the narrow_klass_shift. The
3009 // narrow_klass_base is the lower of the metaspace base and the cds base
3010 // (if cds is enabled). The narrow_klass_shift depends on the distance
3011 // between the lower base and higher address.
3012 address lower_base;
3013 address higher_address;
3014 #if INCLUDE_CDS
3015 if (UseSharedSpaces) {
3016 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3017 (address)(metaspace_base + compressed_class_space_size()));
3018 lower_base = MIN2(metaspace_base, cds_base);
3019 } else
3020 #endif
3021 {
3022 higher_address = metaspace_base + compressed_class_space_size();
3023 lower_base = metaspace_base;
3025 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3026 // If compressed class space fits in lower 32G, we don't need a base.
3027 if (higher_address <= (address)klass_encoding_max) {
3028 lower_base = 0; // effectively lower base is zero.
3029 }
3030 }
3032 Universe::set_narrow_klass_base(lower_base);
3034 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3035 Universe::set_narrow_klass_shift(0);
3036 } else {
3037 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3038 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3039 }
3040 }
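// Sketch of the resulting compressed Klass* encoding (illustrative pseudo
// code, not part of this file):
//   narrowKlass nk = (narrowKlass)((uintptr_t)((address)k - base) >> shift);
//   Klass*      k  = (Klass*)(base + ((uintptr_t)nk << shift));
// where base and shift are the values chosen above. If everything lies
// below the unscaled 4G limit, base and shift are both zero and nk is
// simply the low 32 bits of the Klass* address.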
3042 #if INCLUDE_CDS
3043 // Return TRUE if the specified metaspace_base and cds_base are close enough
3044 // to work with compressed klass pointers.
3045 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3046 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3047 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3048 address lower_base = MIN2((address)metaspace_base, cds_base);
3049 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3050 (address)(metaspace_base + compressed_class_space_size()));
3051 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3052 }
3053 #endif
3055 // Try to allocate the metaspace at the requested addr.
3056 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3057 assert(using_class_space(), "called improperly");
3058 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3059 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3060 "Metaspace size is too big");
3061 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3062 assert_is_ptr_aligned(cds_base, _reserve_alignment);
3063 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3065 // Don't use large pages for the class space.
3066 bool large_pages = false;
3068 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3069 _reserve_alignment,
3070 large_pages,
3071 requested_addr, 0);
3072 if (!metaspace_rs.is_reserved()) {
3073 #if INCLUDE_CDS
3074 if (UseSharedSpaces) {
3075 size_t increment = align_size_up(1*G, _reserve_alignment);
3077 // Keep trying to allocate the metaspace, increasing the requested_addr
3078 // by 1GB each time, until we reach an address that will no longer allow
3079 // use of CDS with compressed klass pointers.
3080 char *addr = requested_addr;
3081 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3082 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3083 addr = addr + increment;
3084 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3085 _reserve_alignment, large_pages, addr, 0);
3086 }
3087 }
3088 #endif
3089 // If no successful allocation then try to allocate the space anywhere. If
3090 // that fails then OOM doom. At this point we cannot try allocating the
3091 // metaspace as if UseCompressedClassPointers is off because too much
3092 // initialization has happened that depends on UseCompressedClassPointers.
3093 // So, UseCompressedClassPointers cannot be turned off at this point.
3094 if (!metaspace_rs.is_reserved()) {
3095 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3096 _reserve_alignment, large_pages);
3097 if (!metaspace_rs.is_reserved()) {
3098 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
3099 compressed_class_space_size()));
3100 }
3101 }
3102 }
3104 // If we got here then the metaspace got allocated.
3105 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3107 #if INCLUDE_CDS
3108 // Verify that we can use shared spaces. Otherwise, turn off CDS.
3109 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3110 FileMapInfo::stop_sharing_and_unmap(
3111 "Could not allocate metaspace at a compatible address");
3112 }
3113 #endif
3114 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3115 UseSharedSpaces ? (address)cds_base : 0);
3117 initialize_class_space(metaspace_rs);
3119 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3120 print_compressed_class_space(gclog_or_tty, requested_addr);
3121 }
3122 }
3124 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3125 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3126 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3127 if (_class_space_list != NULL) {
3128 address base = (address)_class_space_list->current_virtual_space()->bottom();
3129 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3130 compressed_class_space_size(), p2i(base));
3131 if (requested_addr != 0) {
3132 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3133 }
3134 st->cr();
3135 }
3136 }
3138 // For UseCompressedClassPointers the class space is reserved above the top of
3139 // the Java heap. The argument passed in is at the base of the compressed space.
3140 void Metaspace::initialize_class_space(ReservedSpace rs) {
3141 // The reserved space size may be bigger because of alignment, esp with UseLargePages
3142 assert(rs.size() >= CompressedClassSpaceSize,
3143 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3144 assert(using_class_space(), "Must be using class space");
3145 _class_space_list = new VirtualSpaceList(rs);
3146 _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3148 if (!_class_space_list->initialization_succeeded()) {
3149 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3150 }
3151 }
3153 #endif
3155 void Metaspace::ergo_initialize() {
3156 if (DumpSharedSpaces) {
3157 // Using large pages when dumping the shared archive is currently not implemented.
3158 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3159 }
3161 size_t page_size = os::vm_page_size();
3162 if (UseLargePages && UseLargePagesInMetaspace) {
3163 page_size = os::large_page_size();
3164 }
3166 _commit_alignment = page_size;
3167 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
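// Illustrative arithmetic (the page and granularity sizes are assumptions):
// with a 4K page size and a 64K vm_allocation_granularity,
// _commit_alignment = 4K and _reserve_alignment = 64K; a MaxMetaspaceSize
// of 100000K would then be aligned down to 99968K (the nearest multiple of
// 64K) by the code below.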
3169 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
3170 // clobber the record of whether MaxMetaspaceSize was set on the command line.
3171 // This information is needed later to conform to the specification of the
3172 // java.lang.management.MemoryUsage API.
3173 //
3174 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3175 // globals.hpp to the aligned value, but this is not possible, since the
3176 // alignment depends on other flags being parsed.
3177 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3179 if (MetaspaceSize > MaxMetaspaceSize) {
3180 MetaspaceSize = MaxMetaspaceSize;
3181 }
3183 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3185 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3187 if (MetaspaceSize < 256*K) {
3188 vm_exit_during_initialization("Too small initial Metaspace size");
3189 }
3191 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3192 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3194 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3195 set_compressed_class_space_size(CompressedClassSpaceSize);
3197 // Initial virtual space size will be calculated at global_initialize()
3198 uintx min_metaspace_sz =
3199 VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
3200 if (UseCompressedClassPointers) {
3201 if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
3202 if (min_metaspace_sz >= MaxMetaspaceSize) {
3203 vm_exit_during_initialization("MaxMetaspaceSize is too small.");
3204 } else {
3205 FLAG_SET_ERGO(uintx, CompressedClassSpaceSize,
3206 MaxMetaspaceSize - min_metaspace_sz);
3207 }
3208 }
3209 } else if (min_metaspace_sz >= MaxMetaspaceSize) {
3210 FLAG_SET_ERGO(uintx, InitialBootClassLoaderMetaspaceSize,
3211 min_metaspace_sz);
3212 }
3214 }
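// Worked example of the ergonomics above (illustrative only), assuming a
// 4K page size and a 64K allocation granularity, as is typical on Windows:
//
//   _commit_alignment  = 4K
//   _reserve_alignment = MAX2(4K, 64K) = 64K
//
// MaxMetaspaceSize and CompressedClassSpaceSize are then aligned down to a
// 64K boundary, while MetaspaceSize and Min/MaxMetaspaceExpansion are
// aligned down to a 4K boundary.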
3216 void Metaspace::global_initialize() {
3217 MetaspaceGC::initialize();
3219 // Initialize the alignment for shared spaces.
3220 int max_alignment = os::vm_allocation_granularity();
3221 size_t cds_total = 0;
3223 MetaspaceShared::set_max_alignment(max_alignment);
3225 if (DumpSharedSpaces) {
3226 #if INCLUDE_CDS
3227 MetaspaceShared::estimate_regions_size();
3229 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
3230 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3231 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
3232 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
3234 // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
3235 uintx min_misc_code_size = align_size_up(
3236 (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
3237 (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
3238 max_alignment);
3240 if (SharedMiscCodeSize < min_misc_code_size) {
3241 report_out_of_shared_space(SharedMiscCode);
3242 }
3244 // Initialize with the sum of the shared space sizes. The read-only
3245 // and read-write metaspace chunks will be allocated out of this, and the
3246 // remainder is the misc code and data chunks.
3247 cds_total = FileMapInfo::shared_spaces_size();
3248 cds_total = align_size_up(cds_total, _reserve_alignment);
3249 _space_list = new VirtualSpaceList(cds_total/wordSize);
3250 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3252 if (!_space_list->initialization_succeeded()) {
3253 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3254 }
3256 #ifdef _LP64
3257 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3258 vm_exit_during_initialization("Unable to dump shared archive.",
3259 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3260 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3261 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3262 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3263 }
3265 // Set the compressed klass pointer base so that decoding of these pointers works
3266 // properly when creating the shared archive.
3267 assert(UseCompressedOops && UseCompressedClassPointers,
3268 "UseCompressedOops and UseCompressedClassPointers must be set");
3269 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3270 if (TraceMetavirtualspaceAllocation && Verbose) {
3271 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3272 _space_list->current_virtual_space()->bottom());
3273 }
3275 Universe::set_narrow_klass_shift(0);
3276 #endif // _LP64
3277 #endif // INCLUDE_CDS
3278 } else {
3279 #if INCLUDE_CDS
3280 // If using shared space, open the file that contains the shared space
3281 // and map in the memory before initializing the rest of metaspace (so
3282 // the addresses don't conflict)
3283 address cds_address = NULL;
3284 if (UseSharedSpaces) {
3285 FileMapInfo* mapinfo = new FileMapInfo();
3287 // Open the shared archive file, read and validate the header. If
3288 // initialization fails, shared spaces [UseSharedSpaces] are
3289 // disabled and the file is closed.
3290 // The shared spaces are also mapped in at this point.
3291 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3292 cds_total = FileMapInfo::shared_spaces_size();
3293 cds_address = (address)mapinfo->region_base(0);
3294 } else {
3295 assert(!mapinfo->is_open() && !UseSharedSpaces,
3296 "archive file not closed or shared spaces not disabled.");
3297 }
3298 }
3299 #endif // INCLUDE_CDS
3300 #ifdef _LP64
3301 // If UseCompressedClassPointers is set then allocate the metaspace area
3302 // above the heap and above the CDS area (if it exists).
3303 if (using_class_space()) {
3304 if (UseSharedSpaces) {
3305 #if INCLUDE_CDS
3306 char* cds_end = (char*)(cds_address + cds_total);
3307 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3308 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3309 #endif
3310 } else {
3311 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3312 allocate_metaspace_compressed_klass_ptrs(base, 0);
3313 }
3314 }
3315 #endif // _LP64
3317 // Initialize these before initializing the VirtualSpaceList
3318 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3319 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3320 // Make the first class chunk bigger than a medium chunk so it's not put
3321 // on the medium chunk list. The next chunk will be small and progress
3322 // from there. This size was arrived at by running -version.
3323 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3324 (CompressedClassSpaceSize/BytesPerWord)*2);
3325 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3326 // Arbitrarily set the initial virtual space to a multiple
3327 // of the boot class loader size.
3328 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3329 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3331 // Initialize the list of virtual spaces.
3332 _space_list = new VirtualSpaceList(word_size);
3333 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3335 if (!_space_list->initialization_succeeded()) {
3336 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3337 }
3338 }
3340 _tracer = new MetaspaceTracer();
3341 }
3343 void Metaspace::post_initialize() {
3344 MetaspaceGC::post_initialize();
3345 }
3347 void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
3348 Metachunk* chunk = get_initialization_chunk(type, mdtype);
3349 if (chunk != NULL) {
3350 // Add to this manager's list of chunks in use and current_chunk().
3351 get_space_manager(mdtype)->add_chunk(chunk, true);
3352 }
3353 }
3355 Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
3356 size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
3358 // Get a chunk from the chunk freelist
3359 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3361 if (chunk == NULL) {
3362 chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
3363 get_space_manager(mdtype)->medium_chunk_bunch());
3364 }
3366 // When dumping the shared archive, report an error if the allocation has failed.
3367 if (DumpSharedSpaces && chunk == NULL) {
3368 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
3369 }
3371 return chunk;
3372 }
3374 void Metaspace::verify_global_initialization() {
3375 assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
3376 assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
3378 if (using_class_space()) {
3379 assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
3380 assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
3381 }
3382 }
3384 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3385 verify_global_initialization();
3387 // Allocate SpaceManager for metadata objects.
3388 _vsm = new SpaceManager(NonClassType, lock);
3390 if (using_class_space()) {
3391 // Allocate SpaceManager for classes.
3392 _class_vsm = new SpaceManager(ClassType, lock);
3393 } else {
3394 _class_vsm = NULL;
3395 }
3397 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3399 // Allocate chunk for metadata objects
3400 initialize_first_chunk(type, NonClassType);
3402 // Allocate chunk for class metadata objects
3403 if (using_class_space()) {
3404 initialize_first_chunk(type, ClassType);
3405 }
3407 _alloc_record_head = NULL;
3408 _alloc_record_tail = NULL;
3409 }
3411 size_t Metaspace::align_word_size_up(size_t word_size) {
3412 size_t byte_size = word_size * wordSize;
3413 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3414 }
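// Illustrative arithmetic, assuming 8-byte words and a 4K allocation
// granularity: align_word_size_up(500) converts 500 words to 4000 bytes,
// rounds up to 4096 bytes, and returns 512 words.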
3416 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3417 // DumpSharedSpaces doesn't use the class metadata area (yet).
3418 // Also, don't use class_vsm() unless UseCompressedClassPointers is true; it is NULL otherwise.
3419 if (is_class_space_allocation(mdtype)) {
3420 return class_vsm()->allocate(word_size);
3421 } else {
3422 return vsm()->allocate(word_size);
3423 }
3424 }
3426 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3427 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3428 assert(delta_bytes > 0, "Must be");
3430 size_t before = 0;
3431 size_t after = 0;
3432 bool can_retry = true;
3433 MetaWord* res;
3434 bool incremented;
3436 // Each thread increments the HWM at most once. Even if the thread fails to increment
3437 // the HWM, an allocation is still attempted. This is because another thread must then
3438 // have incremented the HWM and therefore the allocation might still succeed.
3439 do {
3440 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
3441 res = allocate(word_size, mdtype);
3442 } while (!incremented && res == NULL && can_retry);
3444 if (incremented) {
3445 tracer()->report_gc_threshold(before, after,
3446 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3447 if (PrintGCDetails && Verbose) {
3448 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3449 " to " SIZE_FORMAT, before, after);
3450 }
3451 }
3453 return res;
3454 }
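// Schematic restatement (comments only) of the retry protocol above: the
// high-water mark (_capacity_until_GC) is raised at most once per thread,
// but a failed raise still attempts the allocation, since a competing
// thread may have raised the HWM first and created headroom:
//
//   do {
//     incremented = inc_capacity_until_GC(...);  // succeeds at most once per thread
//     res = allocate(...);                       // may succeed off a rival's raise
//   } while (!incremented && res == NULL && can_retry);
//
// can_retry, reported back by inc_capacity_until_GC, ends the loop once
// raising the capacity further cannot help, so the loop always terminates.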
3456 // Space allocated in the Metaspace. This space may
3457 // span several metadata virtual spaces.
3458 char* Metaspace::bottom() const {
3459 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3460 return (char*)vsm()->current_chunk()->bottom();
3461 }
3463 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3464 if (mdtype == ClassType) {
3465 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3466 } else {
3467 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
3468 }
3469 }
3471 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3472 if (mdtype == ClassType) {
3473 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3474 } else {
3475 return vsm()->sum_free_in_chunks_in_use();
3476 }
3477 }
3479 // Space capacity in the Metaspace. It includes
3480 // space in the list of chunks from which allocations
3481 // have been made. Space in the global freelist is
3482 // excluded, and space available in the dictionary is
3483 // not added, since it is already counted in some chunk.
3484 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3485 if (mdtype == ClassType) {
3486 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3487 } else {
3488 return vsm()->sum_capacity_in_chunks_in_use();
3489 }
3490 }
3492 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3493 return used_words_slow(mdtype) * BytesPerWord;
3494 }
3496 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3497 return capacity_words_slow(mdtype) * BytesPerWord;
3498 }
3500 size_t Metaspace::allocated_blocks_bytes() const {
3501 return vsm()->allocated_blocks_bytes() +
3502 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3503 }
3505 size_t Metaspace::allocated_chunks_bytes() const {
3506 return vsm()->allocated_chunks_bytes() +
3507 (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3508 }
3510 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3511 if (SafepointSynchronize::is_at_safepoint()) {
3512 if (DumpSharedSpaces && PrintSharedSpaces) {
3513 record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3514 }
3516 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3517 // Don't take Heap_lock
3518 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3519 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3520 // Dark matter. Too small for dictionary.
3521 #ifdef ASSERT
3522 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3523 #endif
3524 return;
3525 }
3526 if (is_class && using_class_space()) {
3527 class_vsm()->deallocate(ptr, word_size);
3528 } else {
3529 vsm()->deallocate(ptr, word_size);
3530 }
3531 } else {
3532 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3534 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3535 // Dark matter. Too small for dictionary.
3536 #ifdef ASSERT
3537 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3538 #endif
3539 return;
3540 }
3541 if (is_class && using_class_space()) {
3542 class_vsm()->deallocate(ptr, word_size);
3543 } else {
3544 vsm()->deallocate(ptr, word_size);
3545 }
3546 }
3547 }
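// Note on the "dark matter" branches above: blocks smaller than
// TreeChunk<Metablock, FreeList<Metablock> >::min_size() cannot be tracked
// by the block freelist dictionary, so they are abandoned rather than
// recycled. In debug builds they are filled with the 0xf5 pattern, which
// makes stale reads of freed metadata easy to spot in a debugger.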
3550 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3551 bool read_only, MetaspaceObj::Type type, TRAPS) {
3552 if (HAS_PENDING_EXCEPTION) {
3553 assert(false, "Should not allocate with exception pending");
3554 return NULL; // caller does a CHECK_NULL too
3555 }
3557 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3558 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3560 // Allocate in metaspaces without taking out a lock, because it deadlocks
3561 // with the SymbolTable_lock. Dumping is single-threaded for now. We'll have
3562 // to revisit this for application class data sharing.
3563 if (DumpSharedSpaces) {
3564 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3565 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3566 MetaWord* result = space->allocate(word_size, NonClassType);
3567 if (result == NULL) {
3568 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3569 }
3570 if (PrintSharedSpaces) {
3571 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3572 }
3574 // Zero initialize.
3575 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3577 return result;
3578 }
3580 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3582 // Try to allocate metadata.
3583 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3585 if (result == NULL) {
3586 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3588 // Allocation failed.
3589 if (is_init_completed()) {
3590 // Only start a GC if the bootstrapping has completed.
3592 // Try to clean out some memory and retry.
3593 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3594 loader_data, word_size, mdtype);
3595 }
3596 }
3598 if (result == NULL) {
3599 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3600 }
3602 // Zero initialize.
3603 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3605 return result;
3606 }
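// Summary of the fallback chain implemented above (normal, non-dumping case):
//
//   1. loader_data->metaspace_non_null()->allocate(...)   // fast path
//   2. satisfy_failed_metadata_allocation(...)            // GC retry, only after
//                                                         // bootstrapping completes
//   3. report_metadata_oome(...)                          // throws OutOfMemoryError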
3608 size_t Metaspace::class_chunk_size(size_t word_size) {
3609 assert(using_class_space(), "Has to use class space");
3610 return class_vsm()->calc_chunk_size(word_size);
3611 }
3613 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3614 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3616 // If result is still null, we are out of memory.
3617 if (Verbose && TraceMetadataChunkAllocation) {
3618 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3619 SIZE_FORMAT, word_size);
3620 if (loader_data->metaspace_or_null() != NULL) {
3621 loader_data->dump(gclog_or_tty);
3622 }
3623 MetaspaceAux::dump(gclog_or_tty);
3624 }
3626 bool out_of_compressed_class_space = false;
3627 if (is_class_space_allocation(mdtype)) {
3628 Metaspace* metaspace = loader_data->metaspace_non_null();
3629 out_of_compressed_class_space =
3630 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3631 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3632 CompressedClassSpaceSize;
3633 }
3635 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3636 const char* space_string = out_of_compressed_class_space ?
3637 "Compressed class space" : "Metaspace";
3639 report_java_out_of_memory(space_string);
3641 if (JvmtiExport::should_post_resource_exhausted()) {
3642 JvmtiExport::post_resource_exhausted(
3643 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3644 space_string);
3645 }
3647 if (!is_init_completed()) {
3648 vm_exit_during_initialization("OutOfMemoryError", space_string);
3649 }
3651 if (out_of_compressed_class_space) {
3652 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3653 } else {
3654 THROW_OOP(Universe::out_of_memory_error_metaspace());
3655 }
3656 }
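// Illustrative numbers for the classification above: with
// CompressedClassSpaceSize = 1G, 1023M of committed class space, and a
// request whose chunk would add 2M, 1023M + 2M > 1G, so the failure is
// reported as "Compressed class space" rather than "Metaspace".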
3658 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3659 switch (mdtype) {
3660 case Metaspace::ClassType: return "Class";
3661 case Metaspace::NonClassType: return "Metadata";
3662 default:
3663 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3664 return NULL;
3665 }
3666 }
3668 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3669 assert(DumpSharedSpaces, "sanity");
3671 int byte_size = (int)word_size * HeapWordSize;
3672 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3674 if (_alloc_record_head == NULL) {
3675 _alloc_record_head = _alloc_record_tail = rec;
3676 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3677 _alloc_record_tail->_next = rec;
3678 _alloc_record_tail = rec;
3679 } else {
3680 // slow linear search, but this doesn't happen that often, and only when dumping
3681 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3682 if (old->_ptr == ptr) {
3683 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3684 int remain_bytes = old->_byte_size - byte_size;
3685 assert(remain_bytes >= 0, "sanity");
3686 old->_type = type;
3688 if (remain_bytes == 0) {
3689 delete(rec);
3690 } else {
3691 address remain_ptr = address(ptr) + byte_size;
3692 rec->_ptr = remain_ptr;
3693 rec->_byte_size = remain_bytes;
3694 rec->_type = MetaspaceObj::DeallocatedType;
3695 rec->_next = old->_next;
3696 old->_byte_size = byte_size;
3697 old->_next = rec;
3698 }
3699 return;
3700 }
3701 }
3702 assert(0, "reallocating a freed pointer that was not recorded");
3703 }
3704 }
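// Worked example of the split case above (addresses illustrative only):
// suppose a DeallocatedType record covers [0x1000, 0x1040) (64 bytes) and a
// new 16-byte allocation reuses 0x1000. The old record is re-typed and
// truncated to 16 bytes, while the freshly built record is repurposed to
// describe the remaining [0x1010, 0x1040) as DeallocatedType and linked in
// right after it.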
3706 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3707 assert(DumpSharedSpaces, "sanity");
3709 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3710 if (rec->_ptr == ptr) {
3711 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3712 rec->_type = MetaspaceObj::DeallocatedType;
3713 return;
3714 }
3715 }
3717 assert(0, "deallocating a pointer that was not recorded");
3718 }
3720 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3721 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3723 address last_addr = (address)bottom();
3725 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3726 address ptr = rec->_ptr;
3727 if (last_addr < ptr) {
3728 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3729 }
3730 closure->doit(ptr, rec->_type, rec->_byte_size);
3731 last_addr = ptr + rec->_byte_size;
3732 }
3734 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3735 if (last_addr < top) {
3736 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3737 }
3738 }
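// A hypothetical closure (name and body are illustrative, not VM code)
// that tallies bytes per record type could look like:
//
//   class CountingClosure : public Metaspace::AllocRecordClosure {
//    public:
//     size_t _bytes[MetaspaceObj::_number_of_types];
//     CountingClosure() { memset(_bytes, 0, sizeof(_bytes)); }
//     virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       _bytes[type] += byte_size;
//     }
//   };
//
// iterate() calls doit for every recorded allocation, and reports the gaps
// between records as MetaspaceObj::UnknownType, as seen above.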
3740 void Metaspace::purge(MetadataType mdtype) {
3741 get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3742 }
3744 void Metaspace::purge() {
3745 MutexLockerEx cl(SpaceManager::expand_lock(),
3746 Mutex::_no_safepoint_check_flag);
3747 purge(NonClassType);
3748 if (using_class_space()) {
3749 purge(ClassType);
3750 }
3751 }
3753 void Metaspace::print_on(outputStream* out) const {
3754 // Print both class virtual space counts and metaspace.
3755 if (Verbose) {
3756 vsm()->print_on(out);
3757 if (using_class_space()) {
3758 class_vsm()->print_on(out);
3759 }
3760 }
3761 }
3763 bool Metaspace::contains(const void* ptr) {
3764 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3765 return true;
3766 }
3768 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3769 return true;
3770 }
3772 return get_space_list(NonClassType)->contains(ptr);
3773 }
3775 void Metaspace::verify() {
3776 vsm()->verify();
3777 if (using_class_space()) {
3778 class_vsm()->verify();
3779 }
3780 }
3782 void Metaspace::dump(outputStream* const out) const {
3783 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3784 vsm()->dump(out);
3785 if (using_class_space()) {
3786 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3787 class_vsm()->dump(out);
3788 }
3789 }
3791 /////////////// Unit tests ///////////////
3793 #ifndef PRODUCT
3795 class TestMetaspaceAuxTest : AllStatic {
3796 public:
3797 static void test_reserved() {
3798 size_t reserved = MetaspaceAux::reserved_bytes();
3800 assert(reserved > 0, "assert");
3802 size_t committed = MetaspaceAux::committed_bytes();
3803 assert(committed <= reserved, "assert");
3805 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3806 assert(reserved_metadata > 0, "assert");
3807 assert(reserved_metadata <= reserved, "assert");
3809 if (UseCompressedClassPointers) {
3810 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3811 assert(reserved_class > 0, "assert");
3812 assert(reserved_class < reserved, "assert");
3813 }
3814 }
3816 static void test_committed() {
3817 size_t committed = MetaspaceAux::committed_bytes();
3819 assert(committed > 0, "assert");
3821 size_t reserved = MetaspaceAux::reserved_bytes();
3822 assert(committed <= reserved, "assert");
3824 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3825 assert(committed_metadata > 0, "assert");
3826 assert(committed_metadata <= committed, "assert");
3828 if (UseCompressedClassPointers) {
3829 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3830 assert(committed_class > 0, "assert");
3831 assert(committed_class < committed, "assert");
3832 }
3833 }
3835 static void test_virtual_space_list_large_chunk() {
3836 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3837 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3838 // Request a size larger than VirtualSpaceSize (256k) and add a page so that
3839 // the size is _not_ vm_allocation_granularity aligned on Windows.
3840 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3841 large_size += (os::vm_page_size()/BytesPerWord);
3842 vs_list->get_new_chunk(large_size, 0);
3843 }
3845 static void test() {
3846 test_reserved();
3847 test_committed();
3848 test_virtual_space_list_large_chunk();
3849 }
3850 };
3852 void TestMetaspaceAux_test() {
3853 TestMetaspaceAuxTest::test();
3854 }
3856 class TestVirtualSpaceNodeTest {
3857 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3858 size_t& num_small_chunks,
3859 size_t& num_specialized_chunks) {
3860 num_medium_chunks = words_left / MediumChunk;
3861 words_left = words_left % MediumChunk;
3863 num_small_chunks = words_left / SmallChunk;
3864 words_left = words_left % SmallChunk;
3865 // how many specialized chunks can we get?
3866 num_specialized_chunks = words_left / SpecializedChunk;
3867 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3868 }
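// Worked example for chunk_up, using the non-class sizes above
// (Specialized = 128, Small = 512, Medium = 8K words): for
// words_left = 1408 the split is 0 medium chunks (1408 < 8192),
// 2 small chunks (1024 words) and 3 specialized chunks (384 words),
// leaving exactly 0 words, so the trailing assert holds.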
3870 public:
3871 static void test() {
3872 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3873 const size_t vsn_test_size_words = MediumChunk * 4;
3874 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3876 // The chunk sizes must be multiples of each other, or this will fail
3877 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3878 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3880 { // No committed memory in VSN
3881 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3882 VirtualSpaceNode vsn(vsn_test_size_bytes);
3883 vsn.initialize();
3884 vsn.retire(&cm);
3885 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3886 }
3888 { // All of VSN is committed, half is used by chunks
3889 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3890 VirtualSpaceNode vsn(vsn_test_size_bytes);
3891 vsn.initialize();
3892 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3893 vsn.get_chunk_vs(MediumChunk);
3894 vsn.get_chunk_vs(MediumChunk);
3895 vsn.retire(&cm);
3896 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3897 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3898 }
3900 { // 4 pages of VSN is committed, some is used by chunks
3901 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3902 VirtualSpaceNode vsn(vsn_test_size_bytes);
3903 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3904 assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3905 vsn.initialize();
3906 vsn.expand_by(page_chunks, page_chunks);
3907 vsn.get_chunk_vs(SmallChunk);
3908 vsn.get_chunk_vs(SpecializedChunk);
3909 vsn.retire(&cm);
3911 // committed - used = words left to retire
3912 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3914 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3915 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3917 assert(num_medium_chunks == 0, "should not get any medium chunks");
3918 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match the computed chunk count");
3919 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3920 }
3922 { // Half of VSN is committed, a humongous chunk is used
3923 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3924 VirtualSpaceNode vsn(vsn_test_size_bytes);
3925 vsn.initialize();
3926 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3927 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3928 vsn.retire(&cm);
3930 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3931 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3932 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3934 assert(num_medium_chunks == 0, "should not get any medium chunks");
3935 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "free chunk count should match the computed chunk count");
3936 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3937 }
3939 }
3941 #define assert_is_available_positive(word_size) \
3942 assert(vsn.is_available(word_size), \
3943 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3944 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3945 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3947 #define assert_is_available_negative(word_size) \
3948 assert(!vsn.is_available(word_size), \
3949 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3950 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3951 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3953 static void test_is_available_positive() {
3954 // Reserve some memory.
3955 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3956 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3958 // Commit some memory.
3959 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3960 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3961 assert(expanded, "Failed to commit");
3963 // Check that is_available accepts the committed size.
3964 assert_is_available_positive(commit_word_size);
3966 // Check that is_available accepts half the committed size.
3967 size_t expand_word_size = commit_word_size / 2;
3968 assert_is_available_positive(expand_word_size);
3969 }
3971 static void test_is_available_negative() {
3972 // Reserve some memory.
3973 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3974 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3976 // Commit some memory.
3977 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3978 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3979 assert(expanded, "Failed to commit");
3981 // Check that is_available doesn't accept a too large size.
3982 size_t two_times_commit_word_size = commit_word_size * 2;
3983 assert_is_available_negative(two_times_commit_word_size);
3984 }
3986 static void test_is_available_overflow() {
3987 // Reserve some memory.
3988 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3989 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3991 // Commit some memory.
3992 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3993 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3994 assert(expanded, "Failed to commit");
3996 // Calculate a size that will overflow the virtual space size.
3997 void* virtual_space_max = (void*)(uintptr_t)-1;
3998 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3999 size_t overflow_size = bottom_to_max + BytesPerWord;
4000 size_t overflow_word_size = overflow_size / BytesPerWord;
4002 // Check that is_available can handle the overflow.
4003 assert_is_available_negative(overflow_word_size);
4004 }
4006 static void test_is_available() {
4007 TestVirtualSpaceNodeTest::test_is_available_positive();
4008 TestVirtualSpaceNodeTest::test_is_available_negative();
4009 TestVirtualSpaceNodeTest::test_is_available_overflow();
4010 }
4011 };
4013 void TestVirtualSpaceNode_test() {
4014 TestVirtualSpaceNodeTest::test();
4015 TestVirtualSpaceNodeTest::test_is_available();
4016 }
4018 // The following test is placed here instead of a gtest / unittest file
4019 // because the SpaceManager class is only available in this file.
4020 class SpaceManagerTest : AllStatic {
4021 friend void SpaceManager_test_adjust_initial_chunk_size();
4023 static void test_adjust_initial_chunk_size(bool is_class) {
4024 const size_t smallest = SpaceManager::smallest_chunk_size(is_class);
4025 const size_t normal = SpaceManager::small_chunk_size(is_class);
4026 const size_t medium = SpaceManager::medium_chunk_size(is_class);
4028 #define test_adjust_initial_chunk_size(value, expected, is_class_value) \
4029 do { \
4030 size_t v = value; \
4031 size_t e = expected; \
4032 assert(SpaceManager::adjust_initial_chunk_size(v, (is_class_value)) == e, \
4033 err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT, e, v)); \
4034 } while (0)
4036 // Smallest (specialized)
4037 test_adjust_initial_chunk_size(1, smallest, is_class);
4038 test_adjust_initial_chunk_size(smallest - 1, smallest, is_class);
4039 test_adjust_initial_chunk_size(smallest, smallest, is_class);
4041 // Small
4042 test_adjust_initial_chunk_size(smallest + 1, normal, is_class);
4043 test_adjust_initial_chunk_size(normal - 1, normal, is_class);
4044 test_adjust_initial_chunk_size(normal, normal, is_class);
4046 // Medium
4047 test_adjust_initial_chunk_size(normal + 1, medium, is_class);
4048 test_adjust_initial_chunk_size(medium - 1, medium, is_class);
4049 test_adjust_initial_chunk_size(medium, medium, is_class);
4051 // Humongous
4052 test_adjust_initial_chunk_size(medium + 1, medium + 1, is_class);
4054 #undef test_adjust_initial_chunk_size
4055 }
4057 static void test_adjust_initial_chunk_size() {
4058 test_adjust_initial_chunk_size(false);
4059 test_adjust_initial_chunk_size(true);
4060 }
4061 };
4063 void SpaceManager_test_adjust_initial_chunk_size() {
4064 SpaceManagerTest::test_adjust_initial_chunk_size();
4065 }
4067 // The following test is placed here instead of a gtest / unittest file
4068 // because the ChunkManager class is only available in this file.
4069 void ChunkManager_test_list_index() {
4070 ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
4072 // Test a previous bug where a query for a humongous class metachunk
4073 // incorrectly matched the non-class medium metachunk size.
4074 {
4075 assert(MediumChunk > ClassMediumChunk, "Precondition for test");
4077 ChunkIndex index = manager.list_index(MediumChunk);
4079 assert(index == HumongousIndex,
4080 err_msg("Requested size is larger than ClassMediumChunk,"
4081 " so should return HumongousIndex. Got index: %d", (int)index));
4082 }
4084 // Check the specified sizes as well.
4085 {
4086 ChunkIndex index = manager.list_index(ClassSpecializedChunk);
4087 assert(index == SpecializedIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
4088 }
4089 {
4090 ChunkIndex index = manager.list_index(ClassSmallChunk);
4091 assert(index == SmallIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
4092 }
4093 {
4094 ChunkIndex index = manager.list_index(ClassMediumChunk);
4095 assert(index == MediumIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
4096 }
4097 {
4098 ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
4099 assert(index == HumongousIndex, err_msg("Wrong index returned. Got index: %d", (int)index));
4100 }
4101 }
4103 #endif