Tue, 02 Jun 2015 10:09:08 -0400
8081693: metaspace/shrink_grow/CompressedClassSpaceSize fails with OOM: Compressed class space
Summary: metaspace/shrink_grow/CompressedClassSpaceSize fails with OOM: Compressed class space
Reviewed-by: jmasa, kbarrett
/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
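
// For example, on a 64-bit VM (BytesPerWord == 8) these word counts
// correspond to 1K of metadata for a SpecializedChunk, 4K for a SmallChunk
// and 64K for a MediumChunk; the Class* variants are smaller (e.g. 32K for
// a ClassMediumChunk) because the class space only holds the comparatively
// small Klass portion of the metadata.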
static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}
volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;
// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Dictionary of humongous chunks (chunks larger than MediumChunk).
  ChunkTreeDictionary _humongous_dictionary;

  // Total words and count of the free chunks in all lists of this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
           _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }
  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();
 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);
#define index_bounds_check(index)                                        \
  assert(index == SpecializedIndex ||                                    \
         index == SmallIndex ||                                          \
         index == MediumIndex ||                                         \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }
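
  // For example, on a 64-bit VM (BytesPerWord == 8), seven free chunks on
  // the 512-word SmallChunk list give size_free_chunks_in_bytes(SmallIndex)
  // = 7 * 512 * 8 = 28K; humongous chunks have no single fixed size, so
  // their total is taken from the dictionary instead.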
  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }
  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};
// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from the freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, or return a block to, the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total reserved region of the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // address of next available space in _virtual_space
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};
#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         err_msg(PTR_FORMAT " is not aligned to " \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))

// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}
// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // at a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}
#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif
// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;
// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated blocks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;
 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t)(is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t)(is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t)(is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks by the
  // given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to return (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // Debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};
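
// A worked example of get_raw_word_size(), assuming for illustration a
// 64-bit VM where sizeof(Metablock) is 24 bytes and
// Metachunk::object_alignment() is 8: a 1-word (8-byte) request is first
// raised to 24 bytes by the MAX2, which is already 8-byte aligned, giving a
// raw size of 3 words. This guarantees that every block handed out is large
// enough to be recycled as a Metablock through the BlockFreelist if it is
// later deallocated.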
uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);
void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif
// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}
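
// For example, with WasteMultiplier == 4, a request for 100 words is not
// satisfied from a free block larger than 400 words: handing out such a
// block would strand up to 3/4 of it as an unusable tail, so the block goes
// back to the dictionary and the caller allocates fresh space instead. If a
// 160-word block is found, the 60-word remainder is split off and returned
// to the freelist, provided it is at least the tree chunk minimum size.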
void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}
// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}
// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}

// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}
bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT
// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                             \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}
void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}
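
// For illustration: if 9K words of committed space remain in this node and
// the (non-class) chunk sizes are MediumChunk == 8K, SmallChunk == 512 and
// SpecializedChunk == 128 words, retiring carves one 8K medium chunk and
// then two 512-word small chunks, consuming all 9K words. The loop always
// terminates with no remainder because every chunk size is a multiple of
// the smallest chunk size, as the comment on retire() above notes.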
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // Ensure lock-free iteration sees a fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}
void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}
// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
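
// A worked example of the stepping above, assuming for illustration
// MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and a 64K commit
// alignment: a 10K request aligns up to 64K, which is <= 256K, so the HWM
// grows by the full 256K; a 1M request falls between the two bounds and
// grows the HWM by 4M; a 6M request exceeds MaxMetaspaceExpansion and grows
// the HWM by 6M + 256K.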
size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}
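
// inc_capacity_until_GC() uses a single compare-and-swap and reports failure
// rather than looping, so a caller that must raise the HWM is expected to
// retry. A minimal sketch of such a caller (hypothetical, for illustration):
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL)) {
//     // Another thread changed _capacity_until_GC concurrently; re-read the
//     // new value and decide whether the expansion is still needed.
//   }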
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}
bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}
size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                capacity_until_gc, committed_bytes));

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC  = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
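
// For example (illustrative numbers): with MaxMetaspaceSize = 256M, 60M
// committed and a GC threshold (capacity_until_GC) of 64M, expansion is
// limited by the threshold to 4M, i.e. 512K words on a 64-bit VM. Once the
// threshold has grown close to MaxMetaspaceSize, the hard-limit term takes
// over instead.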
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                      "  minimum_desired_capacity: %6.1fKB"
                      "  expand_bytes: %6.1fKB"
                      "  MinMetaspaceExpansion: %6.1fKB"
                      "  new metaspace HWM:  %6.1fKB",
                      minimum_desired_capacity / (double) K,
                      expand_bytes / (double) K,
                      MinMetaspaceExpansion / (double) K,
                      new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
                                        max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                     shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}
1645 // Metadebug methods
1647 void Metadebug::init_allocation_fail_alot_count() {
1648 if (MetadataAllocationFailALot) {
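// Picks a pseudo-random countdown in [1, MetadataAllocationFailALotInterval];
// os::random() / (max_jint + 1.0) yields a fraction in [0, 1).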
1649 _allocation_fail_alot_count =
1650 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1651 }
1652 }
1654 #ifdef ASSERT
1655 bool Metadebug::test_metadata_failure() {
1656 if (MetadataAllocationFailALot &&
1657 Threads::is_vm_complete()) {
1658 if (_allocation_fail_alot_count > 0) {
1659 _allocation_fail_alot_count--;
1660 } else {
1661 if (TraceMetadataChunkAllocation && Verbose) {
1662 gclog_or_tty->print_cr("Metadata allocation failing for "
1663 "MetadataAllocationFailALot");
1664 }
1665 init_allocation_fail_alot_count();
1666 return true;
1667 }
1668 }
1669 return false;
1670 }
1671 #endif
1673 // ChunkManager methods
1675 size_t ChunkManager::free_chunks_total_words() {
1676 return _free_chunks_total;
1677 }
1679 size_t ChunkManager::free_chunks_total_bytes() {
1680 return free_chunks_total_words() * BytesPerWord;
1681 }
1683 size_t ChunkManager::free_chunks_count() {
1684 #ifdef ASSERT
1685 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1686 MutexLockerEx cl(SpaceManager::expand_lock(),
1687 Mutex::_no_safepoint_check_flag);
1688 // This lock is only needed in debug because the verification
1689 // of the _free_chunks_totals walks the list of free chunks
1690 slow_locked_verify_free_chunks_count();
1691 }
1692 #endif
1693 return _free_chunks_count;
1694 }
1696 void ChunkManager::locked_verify_free_chunks_total() {
1697 assert_lock_strong(SpaceManager::expand_lock());
1698 assert(sum_free_chunks() == _free_chunks_total,
1699 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1700 " same as sum " SIZE_FORMAT, _free_chunks_total,
1701 sum_free_chunks()));
1702 }
1704 void ChunkManager::verify_free_chunks_total() {
1705 MutexLockerEx cl(SpaceManager::expand_lock(),
1706 Mutex::_no_safepoint_check_flag);
1707 locked_verify_free_chunks_total();
1708 }
1710 void ChunkManager::locked_verify_free_chunks_count() {
1711 assert_lock_strong(SpaceManager::expand_lock());
1712 assert(sum_free_chunks_count() == _free_chunks_count,
1713 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1714 " same as sum " SIZE_FORMAT, _free_chunks_count,
1715 sum_free_chunks_count()));
1716 }
1718 void ChunkManager::verify_free_chunks_count() {
1719 #ifdef ASSERT
1720 MutexLockerEx cl(SpaceManager::expand_lock(),
1721 Mutex::_no_safepoint_check_flag);
1722 locked_verify_free_chunks_count();
1723 #endif
1724 }
1726 void ChunkManager::verify() {
1727 MutexLockerEx cl(SpaceManager::expand_lock(),
1728 Mutex::_no_safepoint_check_flag);
1729 locked_verify();
1730 }
1732 void ChunkManager::locked_verify() {
1733 locked_verify_free_chunks_count();
1734 locked_verify_free_chunks_total();
1735 }
1737 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1738 assert_lock_strong(SpaceManager::expand_lock());
1739 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1740 _free_chunks_total, _free_chunks_count);
1741 }
1743 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1744 assert_lock_strong(SpaceManager::expand_lock());
1745 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1746 sum_free_chunks(), sum_free_chunks_count());
1747 }
1748 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1749 return &_free_chunks[index];
1750 }
1752 // These methods, which sum the free chunk lists, are used by printing
1753 // methods that run in product builds.
1754 size_t ChunkManager::sum_free_chunks() {
1755 assert_lock_strong(SpaceManager::expand_lock());
1756 size_t result = 0;
1757 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1758 ChunkList* list = free_chunks(i);
1760 if (list == NULL) {
1761 continue;
1762 }
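// Each list below HumongousIndex holds chunks of a single fixed size,
// so count * size is that list's total in words.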
1764 result = result + list->count() * list->size();
1765 }
1766 result = result + humongous_dictionary()->total_size();
1767 return result;
1768 }
1770 size_t ChunkManager::sum_free_chunks_count() {
1771 assert_lock_strong(SpaceManager::expand_lock());
1772 size_t count = 0;
1773 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1774 ChunkList* list = free_chunks(i);
1775 if (list == NULL) {
1776 continue;
1777 }
1778 count = count + list->count();
1779 }
1780 count = count + humongous_dictionary()->total_free_blocks();
1781 return count;
1782 }
1784 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1785 ChunkIndex index = list_index(word_size);
1786 assert(index < HumongousIndex, "No humongous list");
1787 return free_chunks(index);
1788 }
1790 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1791 assert_lock_strong(SpaceManager::expand_lock());
1793 slow_locked_verify();
1795 Metachunk* chunk = NULL;
1796 if (list_index(word_size) != HumongousIndex) {
1797 ChunkList* free_list = find_free_chunks_list(word_size);
1798 assert(free_list != NULL, "Sanity check");
1800 chunk = free_list->head();
1802 if (chunk == NULL) {
1803 return NULL;
1804 }
1806 // Remove the chunk as the head of the list.
1807 free_list->remove_chunk(chunk);
1809 if (TraceMetadataChunkAllocation && Verbose) {
1810 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1811 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1812 free_list, chunk, chunk->word_size());
1813 }
1814 } else {
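// Humongous chunks live in a dictionary keyed by size; "atLeast" returns a
// chunk of at least word_size words, so the result may be larger than
// requested (the excess is reported as waste below).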
1815 chunk = humongous_dictionary()->get_chunk(
1816 word_size,
1817 FreeBlockDictionary<Metachunk>::atLeast);
1819 if (chunk == NULL) {
1820 return NULL;
1821 }
1823 if (TraceMetadataHumongousAllocation) {
1824 size_t waste = chunk->word_size() - word_size;
1825 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1826 SIZE_FORMAT " for requested size " SIZE_FORMAT
1827 " waste " SIZE_FORMAT,
1828 chunk->word_size(), word_size, waste);
1829 }
1830 }
1832 // Chunk is being removed from the chunks free list.
1833 dec_free_chunks_total(chunk->word_size());
1835 // Remove it from the links to this freelist
1836 chunk->set_next(NULL);
1837 chunk->set_prev(NULL);
1838 #ifdef ASSERT
1839 // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1840 // work.
1841 chunk->set_is_tagged_free(false);
1842 #endif
1843 chunk->container()->inc_container_count();
1845 slow_locked_verify();
1846 return chunk;
1847 }
1849 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1850 assert_lock_strong(SpaceManager::expand_lock());
1851 slow_locked_verify();
1853 // Take from the beginning of the list
1854 Metachunk* chunk = free_chunks_get(word_size);
1855 if (chunk == NULL) {
1856 return NULL;
1857 }
1859 assert((word_size <= chunk->word_size()) ||
1860 (list_index(chunk->word_size()) == HumongousIndex),
1861 "Non-humongous variable sized chunk");
1862 if (TraceMetadataChunkAllocation) {
1863 size_t list_count;
1864 if (list_index(word_size) < HumongousIndex) {
1865 ChunkList* list = find_free_chunks_list(word_size);
1866 list_count = list->count();
1867 } else {
1868 list_count = humongous_dictionary()->total_count();
1869 }
1870 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1871 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1872 this, chunk, chunk->word_size(), list_count);
1873 locked_print_free_chunks(gclog_or_tty);
1874 }
1876 return chunk;
1877 }
1879 void ChunkManager::print_on(outputStream* out) const {
1880 if (PrintFLSStatistics != 0) {
1881 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1882 }
1883 }
1885 // SpaceManager methods
1887 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1888 size_t* chunk_word_size,
1889 size_t* class_chunk_word_size) {
1890 switch (type) {
1891 case Metaspace::BootMetaspaceType:
1892 *chunk_word_size = Metaspace::first_chunk_word_size();
1893 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1894 break;
1895 case Metaspace::ROMetaspaceType:
1896 *chunk_word_size = SharedReadOnlySize / wordSize;
1897 *class_chunk_word_size = ClassSpecializedChunk;
1898 break;
1899 case Metaspace::ReadWriteMetaspaceType:
1900 *chunk_word_size = SharedReadWriteSize / wordSize;
1901 *class_chunk_word_size = ClassSpecializedChunk;
1902 break;
1903 case Metaspace::AnonymousMetaspaceType:
1904 case Metaspace::ReflectionMetaspaceType:
1905 *chunk_word_size = SpecializedChunk;
1906 *class_chunk_word_size = ClassSpecializedChunk;
1907 break;
1908 default:
1909 *chunk_word_size = SmallChunk;
1910 *class_chunk_word_size = ClassSmallChunk;
1911 break;
1912 }
1913 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1914 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1915 " class " SIZE_FORMAT,
1916 *chunk_word_size, *class_chunk_word_size));
1917 }
1919 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1920 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1921 size_t free = 0;
1922 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1923 Metachunk* chunk = chunks_in_use(i);
1924 while (chunk != NULL) {
1925 free += chunk->free_word_size();
1926 chunk = chunk->next();
1927 }
1928 }
1929 return free;
1930 }
1932 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1933 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1934 size_t result = 0;
1935 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1936 result += sum_waste_in_chunks_in_use(i);
1937 }
1939 return result;
1940 }
1942 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1943 size_t result = 0;
1944 Metachunk* chunk = chunks_in_use(index);
1945 // Count the free space in all the chunks but not the
1946 // current chunk from which allocations are still being done.
1947 while (chunk != NULL) {
1948 if (chunk != current_chunk()) {
1949 result += chunk->free_word_size();
1950 }
1951 chunk = chunk->next();
1952 }
1953 return result;
1954 }
1956 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1957 // For CMS use "allocated_chunks_words()", which does not need the
1958 // Metaspace lock. For the other collectors, sum over the in-use
1959 // lists. Summing is too expensive to use in the product, so
1960 // "allocated_chunks_words()" should be used there, but both are
1961 // computed here as a check that "allocated_chunks_words()" returns
1962 // the same value as sum_capacity_in_chunks_in_use(), which is the
1963 // definitive answer.
1965 if (UseConcMarkSweepGC) {
1966 return allocated_chunks_words();
1967 } else {
1968 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1969 size_t sum = 0;
1970 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1971 Metachunk* chunk = chunks_in_use(i);
1972 while (chunk != NULL) {
1973 sum += chunk->word_size();
1974 chunk = chunk->next();
1975 }
1976 }
1977 return sum;
1978 }
1979 }
1981 size_t SpaceManager::sum_count_in_chunks_in_use() {
1982 size_t count = 0;
1983 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1984 count = count + sum_count_in_chunks_in_use(i);
1985 }
1987 return count;
1988 }
1990 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1991 size_t count = 0;
1992 Metachunk* chunk = chunks_in_use(i);
1993 while (chunk != NULL) {
1994 count++;
1995 chunk = chunk->next();
1996 }
1997 return count;
1998 }
2001 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2002 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2003 size_t used = 0;
2004 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2005 Metachunk* chunk = chunks_in_use(i);
2006 while (chunk != NULL) {
2007 used += chunk->used_word_size();
2008 chunk = chunk->next();
2009 }
2010 }
2011 return used;
2012 }
2014 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2016 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2017 Metachunk* chunk = chunks_in_use(i);
2018 st->print("SpaceManager: %s " PTR_FORMAT,
2019 chunk_size_name(i), chunk);
2020 if (chunk != NULL) {
2021 st->print_cr(" free " SIZE_FORMAT,
2022 chunk->free_word_size());
2023 } else {
2024 st->cr();
2025 }
2026 }
2028 chunk_manager()->locked_print_free_chunks(st);
2029 chunk_manager()->locked_print_sum_free_chunks(st);
2030 }
2032 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2034 // Decide between a small chunk and a medium chunk. Up to
2035 // _small_chunk_limit small chunks can be allocated.
2036 // After that a medium chunk is preferred.
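// The chunk must also have room for Metachunk::overhead(), the header
// words at the start of every chunk.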
2037 size_t chunk_word_size;
2038 if (chunks_in_use(MediumIndex) == NULL &&
2039 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2040 chunk_word_size = (size_t) small_chunk_size();
2041 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2042 chunk_word_size = medium_chunk_size();
2043 }
2044 } else {
2045 chunk_word_size = medium_chunk_size();
2046 }
2048 // Might still need a humongous chunk. Enforce
2049 // humongous allocation sizes to be aligned up to
2050 // the smallest chunk size.
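// ~SpaceManager() asserts this granularity when returning humongous chunks to the dictionary.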
2051 size_t if_humongous_sized_chunk =
2052 align_size_up(word_size + Metachunk::overhead(),
2053 smallest_chunk_size());
2054 chunk_word_size =
2055 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2057 assert(!SpaceManager::is_humongous(word_size) ||
2058 chunk_word_size == if_humongous_sized_chunk,
2059 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2060 " chunk_word_size " SIZE_FORMAT,
2061 word_size, chunk_word_size));
2062 if (TraceMetadataHumongousAllocation &&
2063 SpaceManager::is_humongous(word_size)) {
2064 gclog_or_tty->print_cr("Metadata humongous allocation:");
2065 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
2066 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
2067 chunk_word_size);
2068 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
2069 Metachunk::overhead());
2070 }
2071 return chunk_word_size;
2072 }
2074 void SpaceManager::track_metaspace_memory_usage() {
2075 if (is_init_completed()) {
2076 if (is_class()) {
2077 MemoryService::track_compressed_class_memory_usage();
2078 }
2079 MemoryService::track_metaspace_memory_usage();
2080 }
2081 }
2083 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2084 assert(vs_list()->current_virtual_space() != NULL,
2085 "Should have been set");
2086 assert(current_chunk() == NULL ||
2087 current_chunk()->allocate(word_size) == NULL,
2088 "Don't need to expand");
2089 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2091 if (TraceMetadataChunkAllocation && Verbose) {
2092 size_t words_left = 0;
2093 size_t words_used = 0;
2094 if (current_chunk() != NULL) {
2095 words_left = current_chunk()->free_word_size();
2096 words_used = current_chunk()->used_word_size();
2097 }
2098 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2099 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2100 " words left",
2101 word_size, words_used, words_left);
2102 }
2104 // Get another chunk
2105 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2106 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2108 MetaWord* mem = NULL;
2110 // If a chunk was available, add it to the in-use chunk list
2111 // and do an allocation from it.
2112 if (next != NULL) {
2113 // Add to this manager's list of chunks in use.
2114 add_chunk(next, false);
2115 mem = next->allocate(word_size);
2116 }
2118 // Track metaspace memory usage statistic.
2119 track_metaspace_memory_usage();
2121 return mem;
2122 }
2124 void SpaceManager::print_on(outputStream* st) const {
2126 for (ChunkIndex i = ZeroIndex;
2127 i < NumberOfInUseLists ;
2128 i = next_chunk_index(i) ) {
2129 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2130 chunks_in_use(i),
2131 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2132 }
2133 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2134 " Humongous " SIZE_FORMAT,
2135 sum_waste_in_chunks_in_use(SmallIndex),
2136 sum_waste_in_chunks_in_use(MediumIndex),
2137 sum_waste_in_chunks_in_use(HumongousIndex));
2138 // block free lists
2139 if (block_freelists() != NULL) {
2140 st->print_cr("total in block free lists " SIZE_FORMAT,
2141 block_freelists()->total_size());
2142 }
2143 }
2145 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2146 Mutex* lock) :
2147 _mdtype(mdtype),
2148 _allocated_blocks_words(0),
2149 _allocated_chunks_words(0),
2150 _allocated_chunks_count(0),
2151 _lock(lock)
2152 {
2153 initialize();
2154 }
2156 void SpaceManager::inc_size_metrics(size_t words) {
2157 assert_lock_strong(SpaceManager::expand_lock());
2158 // Total of allocated Metachunks and allocated Metachunks count
2159 // for each SpaceManager
2160 _allocated_chunks_words = _allocated_chunks_words + words;
2161 _allocated_chunks_count++;
2162 // Global total of capacity in allocated Metachunks
2163 MetaspaceAux::inc_capacity(mdtype(), words);
2164 // Global total of allocated Metablocks.
2165 // used_words_slow() includes the overhead in each
2166 // Metachunk so include it in the used when the
2167 // Metachunk is first added (so only added once per
2168 // Metachunk).
2169 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2170 }
2172 void SpaceManager::inc_used_metrics(size_t words) {
2173 // Add to the per SpaceManager total
2174 Atomic::add_ptr(words, &_allocated_blocks_words);
2175 // Add to the global total
2176 MetaspaceAux::inc_used(mdtype(), words);
2177 }
2179 void SpaceManager::dec_total_from_size_metrics() {
2180 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2181 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2182 // Also deduct the overhead per Metachunk
2183 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2184 }
2186 void SpaceManager::initialize() {
2187 Metadebug::init_allocation_fail_alot_count();
2188 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2189 _chunks_in_use[i] = NULL;
2190 }
2191 _current_chunk = NULL;
2192 if (TraceMetadataChunkAllocation && Verbose) {
2193 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2194 }
2195 }
2197 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2198 if (chunks == NULL) {
2199 return;
2200 }
2201 ChunkList* list = free_chunks(index);
2202 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2203 assert_lock_strong(SpaceManager::expand_lock());
2204 Metachunk* cur = chunks;
2206 // This returns chunks one at a time. If a new
2207 // List class could be created as a base class
2208 // of FreeList, then something like FreeList::prepend()
2209 // could be used in place of this loop.
2210 while (cur != NULL) {
2211 assert(cur->container() != NULL, "Container should have been set");
2212 cur->container()->dec_container_count();
2213 // Capture the next link before it is changed
2214 // by the call to return_chunk_at_head();
2215 Metachunk* next = cur->next();
2216 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2217 list->return_chunk_at_head(cur);
2218 cur = next;
2219 }
2220 }
2222 SpaceManager::~SpaceManager() {
2223 // This call takes this->_lock, which can't be done while holding expand_lock()
2224 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2225 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2226 " allocated_chunks_words() " SIZE_FORMAT,
2227 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2229 MutexLockerEx fcl(SpaceManager::expand_lock(),
2230 Mutex::_no_safepoint_check_flag);
2232 chunk_manager()->slow_locked_verify();
2234 dec_total_from_size_metrics();
2236 if (TraceMetadataChunkAllocation && Verbose) {
2237 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2238 locked_print_chunks_in_use_on(gclog_or_tty);
2239 }
2241 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2242 // is used during the freeing of VirtualSpaceNodes.
2244 // Have to update before the chunks_in_use lists are emptied
2245 // below.
2246 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2247 sum_count_in_chunks_in_use());
2249 // Add all the chunks in use by this space manager
2250 // to the global list of free chunks.
2252 // Follow each list of chunks-in-use and add them to the
2253 // free lists. Each list is NULL terminated.
2255 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2256 if (TraceMetadataChunkAllocation && Verbose) {
2257 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2258 sum_count_in_chunks_in_use(i),
2259 chunk_size_name(i));
2260 }
2261 Metachunk* chunks = chunks_in_use(i);
2262 chunk_manager()->return_chunks(i, chunks);
2263 set_chunks_in_use(i, NULL);
2264 if (TraceMetadataChunkAllocation && Verbose) {
2265 gclog_or_tty->print_cr("updated freelist count %d %s",
2266 chunk_manager()->free_chunks(i)->count(),
2267 chunk_size_name(i));
2268 }
2269 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2270 }
2272 // The medium chunk case may be optimized by passing the head and
2273 // tail of the medium chunk list to add_at_head(). The tail is often
2274 // the current chunk but there are probably exceptions.
2276 // Humongous chunks
2277 if (TraceMetadataChunkAllocation && Verbose) {
2278 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2279 sum_count_in_chunks_in_use(HumongousIndex),
2280 chunk_size_name(HumongousIndex));
2281 gclog_or_tty->print("Humongous chunk dictionary: ");
2282 }
2283 // Humongous chunks are never the current chunk.
2284 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2286 while (humongous_chunks != NULL) {
2287 #ifdef ASSERT
2288 humongous_chunks->set_is_tagged_free(true);
2289 #endif
2290 if (TraceMetadataChunkAllocation && Verbose) {
2291 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2292 humongous_chunks,
2293 humongous_chunks->word_size());
2294 }
2295 assert(humongous_chunks->word_size() == (size_t)
2296 align_size_up(humongous_chunks->word_size(),
2297 smallest_chunk_size()),
2298 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2299 " granularity %d",
2300 humongous_chunks->word_size(), smallest_chunk_size()));
2301 Metachunk* next_humongous_chunks = humongous_chunks->next();
2302 humongous_chunks->container()->dec_container_count();
2303 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2304 humongous_chunks = next_humongous_chunks;
2305 }
2306 if (TraceMetadataChunkAllocation && Verbose) {
2307 gclog_or_tty->cr();
2308 gclog_or_tty->print_cr("updated dictionary count %d %s",
2309 chunk_manager()->humongous_dictionary()->total_count(),
2310 chunk_size_name(HumongousIndex));
2311 }
2312 chunk_manager()->slow_locked_verify();
2313 }
2315 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2316 switch (index) {
2317 case SpecializedIndex:
2318 return "Specialized";
2319 case SmallIndex:
2320 return "Small";
2321 case MediumIndex:
2322 return "Medium";
2323 case HumongousIndex:
2324 return "Humongous";
2325 default:
2326 return NULL;
2327 }
2328 }
2330 ChunkIndex ChunkManager::list_index(size_t size) {
2331 switch (size) {
2332 case SpecializedChunk:
2333 assert(SpecializedChunk == ClassSpecializedChunk,
2334 "Need branch for ClassSpecializedChunk");
2335 return SpecializedIndex;
2336 case SmallChunk:
2337 case ClassSmallChunk:
2338 return SmallIndex;
2339 case MediumChunk:
2340 case ClassMediumChunk:
2341 return MediumIndex;
2342 default:
2343 assert(size > MediumChunk || size > ClassMediumChunk,
2344 "Not a humongous chunk");
2345 return HumongousIndex;
2346 }
2347 }
2349 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2350 assert_lock_strong(_lock);
2351 size_t raw_word_size = get_raw_word_size(word_size);
2352 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2353 assert(raw_word_size >= min_size,
2354 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2355 block_freelists()->return_block(p, raw_word_size);
2356 }
2358 // Adds a chunk to the list of chunks in use.
2359 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2361 assert(new_chunk != NULL, "Should not be NULL");
2362 assert(new_chunk->next() == NULL, "Should not be on a list");
2364 new_chunk->reset_empty();
2366 // Find the correct list and set the current
2367 // chunk for that list.
2368 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2370 if (index != HumongousIndex) {
2371 retire_current_chunk();
2372 set_current_chunk(new_chunk);
2373 new_chunk->set_next(chunks_in_use(index));
2374 set_chunks_in_use(index, new_chunk);
2375 } else {
2376 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2377 // small, so the small chunk list will be null. Link this first chunk
2378 // as the current chunk.
2379 if (make_current) {
2380 // Set as the current chunk but otherwise treat as a humongous chunk.
2381 set_current_chunk(new_chunk);
2382 }
2383 // Link at head. The _current_chunk only points to a humongous chunk for
2384 // the null class loader metaspace (class and data virtual space managers),
2385 // so it will not point to the tail
2386 // of the humongous chunks list.
2387 new_chunk->set_next(chunks_in_use(HumongousIndex));
2388 set_chunks_in_use(HumongousIndex, new_chunk);
2390 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2391 }
2393 // Add to the running sum of capacity
2394 inc_size_metrics(new_chunk->word_size());
2396 assert(new_chunk->is_empty(), "Not ready for reuse");
2397 if (TraceMetadataChunkAllocation && Verbose) {
2398 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2399 sum_count_in_chunks_in_use());
2400 new_chunk->print_on(gclog_or_tty);
2401 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2402 }
2403 }
2405 void SpaceManager::retire_current_chunk() {
2406 if (current_chunk() != NULL) {
2407 size_t remaining_words = current_chunk()->free_word_size();
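// Donate the unused tail of the retired chunk to the block free list so later
// allocations can reuse it; a tail smaller than the tree's min_size would be
// untrackable "dark matter" and is simply left unused.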
2408 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2409 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2410 inc_used_metrics(remaining_words);
2411 }
2412 }
2413 }
2415 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2416 size_t grow_chunks_by_words) {
2417 // Get a chunk from the chunk freelist
2418 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2420 if (next == NULL) {
2421 next = vs_list()->get_new_chunk(word_size,
2422 grow_chunks_by_words,
2423 medium_chunk_bunch());
2424 }
2426 if (TraceMetadataHumongousAllocation && next != NULL &&
2427 SpaceManager::is_humongous(next->word_size())) {
2428 gclog_or_tty->print_cr(" new humongous chunk word size "
2429 PTR_FORMAT, next->word_size());
2430 }
2432 return next;
2433 }
2435 /*
2436 * The policy is to allocate up to _small_chunk_limit small chunks
2437 * after which only medium chunks are allocated. This is done to
2438 * reduce fragmentation. In some cases, this can result in a lot
2439 * of small chunks being allocated to the point where it's not
2440 * possible to expand. If this happens, there may be no medium chunks
2441 * available and OOME would be thrown. Instead of doing that,
2442 * if the allocation request size fits in a small chunk, an attempt
2443 * will be made to allocate a small chunk.
2444 */
2445 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2446 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2447 return NULL;
2448 }
2450 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2451 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
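// Only the chunk free list is consulted here; unlike grow_and_allocate(),
// this path never commits new virtual space.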
2453 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2455 MetaWord* mem = NULL;
2457 if (chunk != NULL) {
2458 // Add chunk to the in-use chunk list and do an allocation from it.
2459 // Add to this manager's list of chunks in use.
2460 add_chunk(chunk, false);
2461 mem = chunk->allocate(word_size);
2463 inc_used_metrics(word_size);
2465 // Track metaspace memory usage statistic.
2466 track_metaspace_memory_usage();
2467 }
2469 return mem;
2470 }
2472 MetaWord* SpaceManager::allocate(size_t word_size) {
2473 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2475 size_t raw_word_size = get_raw_word_size(word_size);
2476 BlockFreelist* fl = block_freelists();
2477 MetaWord* p = NULL;
2478 // Allocation from the dictionary is expensive in the sense that
2479 // the dictionary has to be searched for a size. Don't allocate
2480 // from the dictionary until it starts to get fat. Is this
2481 // a reasonable policy? Maybe a skinny dictionary is fast enough
2482 // for allocations. Do some profiling. JJJ
2483 if (fl->total_size() > allocation_from_dictionary_limit) {
2484 p = fl->get_block(raw_word_size);
2485 }
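// Otherwise carve the allocation out of the current chunk, growing the metaspace if necessary.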
2486 if (p == NULL) {
2487 p = allocate_work(raw_word_size);
2488 }
2490 return p;
2491 }
2493 // Returns the address of the space allocated for "word_size".
2494 // This method does not know about blocks (Metablocks).
2495 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2496 assert_lock_strong(_lock);
2497 #ifdef ASSERT
2498 if (Metadebug::test_metadata_failure()) {
2499 return NULL;
2500 }
2501 #endif
2502 // Is there space in the current chunk?
2503 MetaWord* result = NULL;
2505 // For DumpSharedSpaces, only allocate out of the current chunk which is
2506 // never null because we gave it the size we wanted. Caller reports out
2507 // of memory if this returns null.
2508 if (DumpSharedSpaces) {
2509 assert(current_chunk() != NULL, "should never happen");
2510 inc_used_metrics(word_size);
2511 return current_chunk()->allocate(word_size); // caller handles null result
2512 }
2514 if (current_chunk() != NULL) {
2515 result = current_chunk()->allocate(word_size);
2516 }
2518 if (result == NULL) {
2519 result = grow_and_allocate(word_size);
2520 }
2522 if (result != NULL) {
2523 inc_used_metrics(word_size);
2524 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2525 "Head of the list is being allocated");
2526 }
2528 return result;
2529 }
2531 void SpaceManager::verify() {
2532 // If there are blocks in the dictionary, then
2533 // verification of chunks does not work since
2534 // being in the dictionary alters a chunk.
2535 if (block_freelists()->total_size() == 0) {
2536 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2537 Metachunk* curr = chunks_in_use(i);
2538 while (curr != NULL) {
2539 curr->verify();
2540 verify_chunk_size(curr);
2541 curr = curr->next();
2542 }
2543 }
2544 }
2545 }
2547 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2548 assert(is_humongous(chunk->word_size()) ||
2549 chunk->word_size() == medium_chunk_size() ||
2550 chunk->word_size() == small_chunk_size() ||
2551 chunk->word_size() == specialized_chunk_size(),
2552 "Chunk size is wrong");
2553 return;
2554 }
2556 #ifdef ASSERT
2557 void SpaceManager::verify_allocated_blocks_words() {
2558 // Verification is only guaranteed at a safepoint.
2559 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2560 "Verification can fail if the applications is running");
2561 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2562 err_msg("allocation total is not consistent " SIZE_FORMAT
2563 " vs " SIZE_FORMAT,
2564 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2565 }
2567 #endif
2569 void SpaceManager::dump(outputStream* const out) const {
2570 size_t curr_total = 0;
2571 size_t waste = 0;
2572 uint i = 0;
2573 size_t used = 0;
2574 size_t capacity = 0;
2576 // Add up statistics for all chunks in this SpaceManager.
2577 for (ChunkIndex index = ZeroIndex;
2578 index < NumberOfInUseLists;
2579 index = next_chunk_index(index)) {
2580 for (Metachunk* curr = chunks_in_use(index);
2581 curr != NULL;
2582 curr = curr->next()) {
2583 out->print("%d) ", i++);
2584 curr->print_on(out);
2585 curr_total += curr->word_size();
2586 used += curr->used_word_size();
2587 capacity += curr->word_size();
2588 waste += curr->free_word_size() + curr->overhead();
2589 }
2590 }
2592 if (TraceMetadataChunkAllocation && Verbose) {
2593 block_freelists()->print_on(out);
2594 }
2596 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2597 // Free space isn't wasted.
2598 waste -= free;
2600 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2601 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2602 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2603 }
2605 #ifndef PRODUCT
2606 void SpaceManager::mangle_freed_chunks() {
2607 for (ChunkIndex index = ZeroIndex;
2608 index < NumberOfInUseLists;
2609 index = next_chunk_index(index)) {
2610 for (Metachunk* curr = chunks_in_use(index);
2611 curr != NULL;
2612 curr = curr->next()) {
2613 curr->mangle();
2614 }
2615 }
2616 }
2617 #endif // PRODUCT
2619 // MetaspaceAux
2622 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2623 size_t MetaspaceAux::_used_words[] = {0, 0};
2625 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2626 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2627 return list == NULL ? 0 : list->free_bytes();
2628 }
2630 size_t MetaspaceAux::free_bytes() {
2631 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2632 }
2634 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2635 assert_lock_strong(SpaceManager::expand_lock());
2636 assert(words <= capacity_words(mdtype),
2637 err_msg("About to decrement below 0: words " SIZE_FORMAT
2638 " is greater than _capacity_words[%u] " SIZE_FORMAT,
2639 words, mdtype, capacity_words(mdtype)));
2640 _capacity_words[mdtype] -= words;
2641 }
2643 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2644 assert_lock_strong(SpaceManager::expand_lock());
2645 // Needs to be atomic
2646 _capacity_words[mdtype] += words;
2647 }
2649 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2650 assert(words <= used_words(mdtype),
2651 err_msg("About to decrement below 0: words " SIZE_FORMAT
2652 " is greater than _used_words[%u] " SIZE_FORMAT,
2653 words, mdtype, used_words(mdtype)));
2654 // For CMS, deallocation of the Metaspaces occurs during the
2655 // sweep, which is a concurrent phase. Protection by the expand_lock()
2656 // is not enough since allocation is on a per Metaspace basis
2657 // and protected by the Metaspace lock.
2658 jlong minus_words = (jlong) - (jlong) words;
2659 Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2660 }
2662 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2663 // _used_words tracks allocations for
2664 // each piece of metadata. Those allocations are
2665 // generally done concurrently by different application
2666 // threads so must be done atomically.
2667 Atomic::add_ptr(words, &_used_words[mdtype]);
2668 }
2670 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2671 size_t used = 0;
2672 ClassLoaderDataGraphMetaspaceIterator iter;
2673 while (iter.repeat()) {
2674 Metaspace* msp = iter.get_next();
2675 // Sum allocated_blocks_words for each metaspace
2676 if (msp != NULL) {
2677 used += msp->used_words_slow(mdtype);
2678 }
2679 }
2680 return used * BytesPerWord;
2681 }
2683 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2684 size_t free = 0;
2685 ClassLoaderDataGraphMetaspaceIterator iter;
2686 while (iter.repeat()) {
2687 Metaspace* msp = iter.get_next();
2688 if (msp != NULL) {
2689 free += msp->free_words_slow(mdtype);
2690 }
2691 }
2692 return free * BytesPerWord;
2693 }
2695 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2696 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2697 return 0;
2698 }
2699 // Don't count the space in the freelists. That space will be
2700 // added to the capacity calculation as needed.
2701 size_t capacity = 0;
2702 ClassLoaderDataGraphMetaspaceIterator iter;
2703 while (iter.repeat()) {
2704 Metaspace* msp = iter.get_next();
2705 if (msp != NULL) {
2706 capacity += msp->capacity_words_slow(mdtype);
2707 }
2708 }
2709 return capacity * BytesPerWord;
2710 }
2712 size_t MetaspaceAux::capacity_bytes_slow() {
2713 #ifdef PRODUCT
2714 // Use capacity_bytes() in PRODUCT instead of this function.
2715 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2716 #endif
2717 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2718 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2719 assert(capacity_bytes() == class_capacity + non_class_capacity,
2720 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2721 " class_capacity + non_class_capacity " SIZE_FORMAT
2722 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2723 capacity_bytes(), class_capacity + non_class_capacity,
2724 class_capacity, non_class_capacity));
2726 return class_capacity + non_class_capacity;
2727 }
2729 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2730 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2731 return list == NULL ? 0 : list->reserved_bytes();
2732 }
2734 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2735 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2736 return list == NULL ? 0 : list->committed_bytes();
2737 }
2739 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2741 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2742 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2743 if (chunk_manager == NULL) {
2744 return 0;
2745 }
2746 chunk_manager->slow_verify();
2747 return chunk_manager->free_chunks_total_words();
2748 }
2750 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2751 return free_chunks_total_words(mdtype) * BytesPerWord;
2752 }
2754 size_t MetaspaceAux::free_chunks_total_words() {
2755 return free_chunks_total_words(Metaspace::ClassType) +
2756 free_chunks_total_words(Metaspace::NonClassType);
2757 }
2759 size_t MetaspaceAux::free_chunks_total_bytes() {
2760 return free_chunks_total_words() * BytesPerWord;
2761 }
2763 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2764 return Metaspace::get_chunk_manager(mdtype) != NULL;
2765 }
2767 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2768 if (!has_chunk_free_list(mdtype)) {
2769 return MetaspaceChunkFreeListSummary();
2770 }
2772 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2773 return cm->chunk_free_list_summary();
2774 }
2776 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2777 gclog_or_tty->print(", [Metaspace:");
2778 if (PrintGCDetails && Verbose) {
2779 gclog_or_tty->print(" " SIZE_FORMAT
2780 "->" SIZE_FORMAT
2781 "(" SIZE_FORMAT ")",
2782 prev_metadata_used,
2783 used_bytes(),
2784 reserved_bytes());
2785 } else {
2786 gclog_or_tty->print(" " SIZE_FORMAT "K"
2787 "->" SIZE_FORMAT "K"
2788 "(" SIZE_FORMAT "K)",
2789 prev_metadata_used/K,
2790 used_bytes()/K,
2791 reserved_bytes()/K);
2792 }
2794 gclog_or_tty->print("]");
2795 }
2797 // This is printed when PrintGCDetails is enabled.
2798 void MetaspaceAux::print_on(outputStream* out) {
2799 Metaspace::MetadataType nct = Metaspace::NonClassType;
2801 out->print_cr(" Metaspace "
2802 "used " SIZE_FORMAT "K, "
2803 "capacity " SIZE_FORMAT "K, "
2804 "committed " SIZE_FORMAT "K, "
2805 "reserved " SIZE_FORMAT "K",
2806 used_bytes()/K,
2807 capacity_bytes()/K,
2808 committed_bytes()/K,
2809 reserved_bytes()/K);
2811 if (Metaspace::using_class_space()) {
2812 Metaspace::MetadataType ct = Metaspace::ClassType;
2813 out->print_cr(" class space "
2814 "used " SIZE_FORMAT "K, "
2815 "capacity " SIZE_FORMAT "K, "
2816 "committed " SIZE_FORMAT "K, "
2817 "reserved " SIZE_FORMAT "K",
2818 used_bytes(ct)/K,
2819 capacity_bytes(ct)/K,
2820 committed_bytes(ct)/K,
2821 reserved_bytes(ct)/K);
2822 }
2823 }
2825 // Print information for class space and data space separately.
2826 // This is almost the same as above.
2827 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2828 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2829 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2830 size_t used_bytes = used_bytes_slow(mdtype);
2831 size_t free_bytes = free_bytes_slow(mdtype);
2832 size_t used_and_free = used_bytes + free_bytes +
2833 free_chunks_capacity_bytes;
2834 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2835 "K + unused in chunks " SIZE_FORMAT "K + "
2836 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2837 "K capacity in allocated chunks " SIZE_FORMAT "K",
2838 used_bytes / K,
2839 free_bytes / K,
2840 free_chunks_capacity_bytes / K,
2841 used_and_free / K,
2842 capacity_bytes / K);
2843 // Accounting can only be correct if we got the values during a safepoint
2844 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2845 }
2847 // Print total fragmentation for class metaspaces
2848 void MetaspaceAux::print_class_waste(outputStream* out) {
2849 assert(Metaspace::using_class_space(), "class metaspace not used");
2850 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2851 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2852 ClassLoaderDataGraphMetaspaceIterator iter;
2853 while (iter.repeat()) {
2854 Metaspace* msp = iter.get_next();
2855 if (msp != NULL) {
2856 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2857 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2858 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2859 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2860 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2861 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2862 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2863 }
2864 }
2865 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2866 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2867 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2868 "large count " SIZE_FORMAT,
2869 cls_specialized_count, cls_specialized_waste,
2870 cls_small_count, cls_small_waste,
2871 cls_medium_count, cls_medium_waste, cls_humongous_count);
2872 }
2874 // Print total fragmentation for data and class metaspaces separately
2875 void MetaspaceAux::print_waste(outputStream* out) {
2876 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2877 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2879 ClassLoaderDataGraphMetaspaceIterator iter;
2880 while (iter.repeat()) {
2881 Metaspace* msp = iter.get_next();
2882 if (msp != NULL) {
2883 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2884 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2885 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2886 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2887 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2888 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2889 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2890 }
2891 }
2892 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2893 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2894 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2895 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2896 "large count " SIZE_FORMAT,
2897 specialized_count, specialized_waste, small_count,
2898 small_waste, medium_count, medium_waste, humongous_count);
2899 if (Metaspace::using_class_space()) {
2900 print_class_waste(out);
2901 }
2902 }
2904 // Dump global metaspace things from the end of ClassLoaderDataGraph
2905 void MetaspaceAux::dump(outputStream* out) {
2906 out->print_cr("All Metaspace:");
2907 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2908 out->print("class space: "); print_on(out, Metaspace::ClassType);
2909 print_waste(out);
2910 }
2912 void MetaspaceAux::verify_free_chunks() {
2913 Metaspace::chunk_manager_metadata()->verify();
2914 if (Metaspace::using_class_space()) {
2915 Metaspace::chunk_manager_class()->verify();
2916 }
2917 }
2919 void MetaspaceAux::verify_capacity() {
2920 #ifdef ASSERT
2921 size_t running_sum_capacity_bytes = capacity_bytes();
2922 // For purposes of the running sum of capacity, verify against capacity
2923 size_t capacity_in_use_bytes = capacity_bytes_slow();
2924 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2925 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2926 " capacity_bytes_slow()" SIZE_FORMAT,
2927 running_sum_capacity_bytes, capacity_in_use_bytes));
2928 for (Metaspace::MetadataType i = Metaspace::ClassType;
2929 i < Metaspace::MetadataTypeCount;
2930 i = (Metaspace::MetadataType)(i + 1)) {
2931 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2932 assert(capacity_bytes(i) == capacity_in_use_bytes,
2933 err_msg("capacity_bytes(%u) " SIZE_FORMAT
2934 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2935 i, capacity_bytes(i), i, capacity_in_use_bytes));
2936 }
2937 #endif
2938 }
2940 void MetaspaceAux::verify_used() {
2941 #ifdef ASSERT
2942 size_t running_sum_used_bytes = used_bytes();
2943 // For purposes of the running sum of used, verify against used
2944 size_t used_in_use_bytes = used_bytes_slow();
2945 assert(used_bytes() == used_in_use_bytes,
2946 err_msg("used_bytes() " SIZE_FORMAT
2947 " used_bytes_slow()" SIZE_FORMAT,
2948 used_bytes(), used_in_use_bytes));
2949 for (Metaspace::MetadataType i = Metaspace::ClassType;
2950 i < Metaspace::MetadataTypeCount;
2951 i = (Metaspace::MetadataType)(i + 1)) {
2952 size_t used_in_use_bytes = used_bytes_slow(i);
2953 assert(used_bytes(i) == used_in_use_bytes,
2954 err_msg("used_bytes(%u) " SIZE_FORMAT
2955 " used_bytes_slow(%u)" SIZE_FORMAT,
2956 i, used_bytes(i), i, used_in_use_bytes));
2957 }
2958 #endif
2959 }
2961 void MetaspaceAux::verify_metrics() {
2962 verify_capacity();
2963 verify_used();
2964 }
2967 // Metaspace methods
2969 size_t Metaspace::_first_chunk_word_size = 0;
2970 size_t Metaspace::_first_class_chunk_word_size = 0;
2972 size_t Metaspace::_commit_alignment = 0;
2973 size_t Metaspace::_reserve_alignment = 0;
2975 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2976 initialize(lock, type);
2977 }
2979 Metaspace::~Metaspace() {
2980 delete _vsm;
2981 if (using_class_space()) {
2982 delete _class_vsm;
2983 }
2984 }
2986 VirtualSpaceList* Metaspace::_space_list = NULL;
2987 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2989 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2990 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2992 #define VIRTUALSPACEMULTIPLIER 2
2994 #ifdef _LP64
2995 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2997 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2998 // Figure out the narrow_klass_base and the narrow_klass_shift. The
2999 // narrow_klass_base is the lower of the metaspace base and the cds base
3000 // (if cds is enabled). The narrow_klass_shift depends on the distance
3001 // between the lower base and higher address.
3002 address lower_base;
3003 address higher_address;
3004 #if INCLUDE_CDS
3005 if (UseSharedSpaces) {
3006 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3007 (address)(metaspace_base + compressed_class_space_size()));
3008 lower_base = MIN2(metaspace_base, cds_base);
3009 } else
3010 #endif
3011 {
3012 higher_address = metaspace_base + compressed_class_space_size();
3013 lower_base = metaspace_base;
3015 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
3016 // If compressed class space fits in lower 32G, we don't need a base.
3017 if (higher_address <= (address)klass_encoding_max) {
3018 lower_base = 0; // effectively lower base is zero.
3019 }
3020 }
3022 Universe::set_narrow_klass_base(lower_base);
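// No shift is needed when the whole encoding range fits within 4G
// (UnscaledClassSpaceMax) of lower_base; otherwise klass pointers are
// scaled by KlassAlignmentInBytes.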
3024 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3025 Universe::set_narrow_klass_shift(0);
3026 } else {
3027 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3028 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3029 }
3030 }
3032 #if INCLUDE_CDS
3033 // Return TRUE if the specified metaspace_base and cds_base are close enough
3034 // to work with compressed klass pointers.
3035 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3036 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3037 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3038 address lower_base = MIN2((address)metaspace_base, cds_base);
3039 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3040 (address)(metaspace_base + compressed_class_space_size()));
3041 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3042 }
3043 #endif
3045 // Try to allocate the metaspace at the requested addr.
3046 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3047 assert(using_class_space(), "called improperly");
3048 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3049 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3050 "Metaspace size is too big");
3051 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3052 assert_is_ptr_aligned(cds_base, _reserve_alignment);
3053 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3055 // Don't use large pages for the class space.
3056 bool large_pages = false;
3058 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3059 _reserve_alignment,
3060 large_pages,
3061 requested_addr, 0);
3062 if (!metaspace_rs.is_reserved()) {
3063 #if INCLUDE_CDS
3064 if (UseSharedSpaces) {
3065 size_t increment = align_size_up(1*G, _reserve_alignment);
3067 // Keep trying to allocate the metaspace, increasing the requested_addr
3068 // by 1GB each time, until we reach an address that will no longer allow
3069 // use of CDS with compressed klass pointers.
3070 char *addr = requested_addr;
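// The (addr + increment > addr) test guards against wrapping past the top of the address space.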
3071 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3072 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3073 addr = addr + increment;
3074 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3075 _reserve_alignment, large_pages, addr, 0);
3076 }
3077 }
3078 #endif
3079 // If no successful allocation then try to allocate the space anywhere. If
3080 // that fails then OOM doom. At this point we cannot try allocating the
3081 // metaspace as if UseCompressedClassPointers is off because too much
3082 // initialization has happened that depends on UseCompressedClassPointers.
3083 // So, UseCompressedClassPointers cannot be turned off at this point.
3084 if (!metaspace_rs.is_reserved()) {
3085 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3086 _reserve_alignment, large_pages);
3087 if (!metaspace_rs.is_reserved()) {
3088 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3089 compressed_class_space_size()));
3090 }
3091 }
3092 }
3094 // If we got here then the metaspace got allocated.
3095 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3097 #if INCLUDE_CDS
3098 // Verify that we can use shared spaces. Otherwise, turn off CDS.
3099 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3100 FileMapInfo::stop_sharing_and_unmap(
3101 "Could not allocate metaspace at a compatible address");
3102 }
3103 #endif
3104 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3105 UseSharedSpaces ? (address)cds_base : 0);
3107 initialize_class_space(metaspace_rs);
3109 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3110 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
3111 Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3112 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3113 compressed_class_space_size(), metaspace_rs.base(), requested_addr);
3114 }
3115 }
3117 // For UseCompressedClassPointers the class space is reserved above the top of
3118 // the Java heap. The argument passed in is at the base of the compressed space.
3119 void Metaspace::initialize_class_space(ReservedSpace rs) {
3120 // The reserved space size may be bigger because of alignment, esp with UseLargePages
3121 assert(rs.size() >= CompressedClassSpaceSize,
3122 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3123 assert(using_class_space(), "Must be using class space");
3124 _class_space_list = new VirtualSpaceList(rs);
3125 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3127 if (!_class_space_list->initialization_succeeded()) {
3128 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3129 }
3130 }
3132 #endif
3134 void Metaspace::ergo_initialize() {
3135 if (DumpSharedSpaces) {
3136 // Using large pages when dumping the shared archive is currently not implemented.
3137 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3138 }
3140 size_t page_size = os::vm_page_size();
3141 if (UseLargePages && UseLargePagesInMetaspace) {
3142 page_size = os::large_page_size();
3143 }
3145 _commit_alignment = page_size;
3146 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
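// Memory is committed in page-sized steps but reserved at the allocation
// granularity, which can exceed the page size on some platforms.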
3148 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
3149 // lose track of whether MaxMetaspaceSize was set on the command line.
3150 // That information is needed later to conform to the specification of the
3151 // java.lang.management.MemoryUsage API.
3152 //
3153 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3154 // globals.hpp to the aligned value, but this is not possible, since the
3155 // alignment depends on other flags being parsed.
3156 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3158 if (MetaspaceSize > MaxMetaspaceSize) {
3159 MetaspaceSize = MaxMetaspaceSize;
3160 }
3162 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3164 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3166 if (MetaspaceSize < 256*K) {
3167 vm_exit_during_initialization("Too small initial Metaspace size");
3168 }
3170 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3171 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3173 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3174 set_compressed_class_space_size(CompressedClassSpaceSize);
3175 }
3177 void Metaspace::global_initialize() {
3178 MetaspaceGC::initialize();
3180 // Initialize the alignment for shared spaces.
3181 int max_alignment = os::vm_allocation_granularity();
3182 size_t cds_total = 0;
3184 MetaspaceShared::set_max_alignment(max_alignment);
3186 if (DumpSharedSpaces) {
3187 #if INCLUDE_CDS
3188 MetaspaceShared::estimate_regions_size();
3190 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
3191 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3192 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
3193 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
3195 // The min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods().
3196 uintx min_misc_code_size = align_size_up(
3197 (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
3198 (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
3199 max_alignment);
3201 if (SharedMiscCodeSize < min_misc_code_size) {
3202 report_out_of_shared_space(SharedMiscCode);
3203 }
3205 // Initialize with the sum of the shared space sizes. The read-only
3206 // and read-write metaspace chunks will be allocated out of this, and
3207 // the remainder is for the misc code and data chunks.
3208 cds_total = FileMapInfo::shared_spaces_size();
3209 cds_total = align_size_up(cds_total, _reserve_alignment);
3210 _space_list = new VirtualSpaceList(cds_total/wordSize);
3211 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3213 if (!_space_list->initialization_succeeded()) {
3214 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3215 }
3217 #ifdef _LP64
3218 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3219 vm_exit_during_initialization("Unable to dump shared archive.",
3220 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3221 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3222 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3223 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3224 }
3226 // Set the compressed klass pointer base so that decoding of these pointers works
3227 // properly when creating the shared archive.
3228 assert(UseCompressedOops && UseCompressedClassPointers,
3229 "UseCompressedOops and UseCompressedClassPointers must be set");
3230 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3231 if (TraceMetavirtualspaceAllocation && Verbose) {
3232 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3233 _space_list->current_virtual_space()->bottom());
3234 }
3236 Universe::set_narrow_klass_shift(0);
3237 #endif // _LP64
3238 #endif // INCLUDE_CDS
3239 } else {
3240 #if INCLUDE_CDS
3241 // If using shared space, open the file that contains the shared space
3242 // and map in the memory before initializing the rest of metaspace (so
3243 // the addresses don't conflict)
3244 address cds_address = NULL;
3245 if (UseSharedSpaces) {
3246 FileMapInfo* mapinfo = new FileMapInfo();
3248 // Open the shared archive file, read and validate the header. If
3249 // initialization fails, shared spaces [UseSharedSpaces] are
3250 // disabled and the file is closed.
3251 // The spaces are also mapped in at this point.
3252 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3253 cds_total = FileMapInfo::shared_spaces_size();
3254 cds_address = (address)mapinfo->region_base(0);
3255 } else {
3256 assert(!mapinfo->is_open() && !UseSharedSpaces,
3257 "archive file not closed or shared spaces not disabled.");
3258 }
3259 }
3260 #endif // INCLUDE_CDS
3261 #ifdef _LP64
3262 // If UseCompressedClassPointers is set then allocate the metaspace area
3263 // above the heap and above the CDS area (if it exists).
3264 if (using_class_space()) {
3265 if (UseSharedSpaces) {
3266 #if INCLUDE_CDS
3267 char* cds_end = (char*)(cds_address + cds_total);
3268 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3269 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3270 #endif
3271 } else {
3272 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3273 allocate_metaspace_compressed_klass_ptrs(base, 0);
3274 }
3275 }
3276 #endif // _LP64
3278 // Initialize these before initializing the VirtualSpaceList
3279 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3280 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3281 // Make the first class chunk bigger than a medium chunk so it's not put
3282 // on the medium chunk list. The next chunk will be small and progress
3283 // from there. This size was determined empirically by running -version.
3284 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3285 (CompressedClassSpaceSize/BytesPerWord)*2);
3286 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3287 // Arbitrarily set the initial virtual space to a multiple
3288 // of the boot class loader size.
3289 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3290 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3292 // Initialize the list of virtual spaces.
3293 _space_list = new VirtualSpaceList(word_size);
3294 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3296 if (!_space_list->initialization_succeeded()) {
3297 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3298 }
3299 }
3301 _tracer = new MetaspaceTracer();
3302 }
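// Illustrative sketch (hypothetical helper, not part of the change): the
// placement logic above reduces to "first reserve-aligned address past the
// CDS archive when one is mapped, otherwise past the Java heap".
static char* example_class_space_base(char* heap_end,
                                      char* cds_base, size_t cds_size,
                                      size_t reserve_alignment,
                                      bool use_shared_spaces) {
  char* candidate = use_shared_spaces ? cds_base + cds_size : heap_end;
  uintptr_t mask = (uintptr_t)reserve_alignment - 1; // power-of-two assumed
  return (char*)(((uintptr_t)candidate + mask) & ~mask);
}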
3304 void Metaspace::post_initialize() {
3305 MetaspaceGC::post_initialize();
3306 }
3308 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3309 size_t chunk_word_size,
3310 size_t chunk_bunch) {
3311 // Get a chunk from the chunk freelist
3312 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3313 if (chunk != NULL) {
3314 return chunk;
3315 }
3317 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3318 }
3320 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3322 assert(space_list() != NULL,
3323 "Metadata VirtualSpaceList has not been initialized");
3324 assert(chunk_manager_metadata() != NULL,
3325 "Metadata ChunkManager has not been initialized");
3327 _vsm = new SpaceManager(NonClassType, lock);
3328 if (_vsm == NULL) {
3329 return;
3330 }
3331 size_t word_size;
3332 size_t class_word_size;
3333 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3335 if (using_class_space()) {
3336 assert(class_space_list() != NULL,
3337 "Class VirtualSpaceList has not been initialized");
3338 assert(chunk_manager_class() != NULL,
3339 "Class ChunkManager has not been initialized");
3341 // Allocate SpaceManager for classes.
3342 _class_vsm = new SpaceManager(ClassType, lock);
3343 if (_class_vsm == NULL) {
3344 return;
3345 }
3346 }
3348 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3350 // Allocate chunk for metadata objects
3351 Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3352 word_size,
3353 vsm()->medium_chunk_bunch());
3354 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3355 if (new_chunk != NULL) {
3356 // Add to this manager's list of chunks in use and make it the current_chunk().
3357 vsm()->add_chunk(new_chunk, true);
3358 }
3360 // Allocate chunk for class metadata objects
3361 if (using_class_space()) {
3362 Metachunk* class_chunk = get_initialization_chunk(ClassType,
3363 class_word_size,
3364 class_vsm()->medium_chunk_bunch());
3365 if (class_chunk != NULL) {
3366 class_vsm()->add_chunk(class_chunk, true);
3367 }
3368 }
3370 _alloc_record_head = NULL;
3371 _alloc_record_tail = NULL;
3372 }
3374 size_t Metaspace::align_word_size_up(size_t word_size) {
3375 size_t byte_size = word_size * wordSize;
3376 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3377 }
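// Worked example (assuming a 64-bit VM with wordSize == 8 and a 64K
// allocation granularity): 1000 words is 8000 bytes, which rounds up to
// 65536 bytes, i.e. align_word_size_up(1000) == 8192 words.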
3379 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3380 // DumpSharedSpaces doesn't use the class metadata area (yet).
3381 // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3382 if (is_class_space_allocation(mdtype)) {
3383 return class_vsm()->allocate(word_size);
3384 } else {
3385 return vsm()->allocate(word_size);
3386 }
3387 }
3389 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3390 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3391 assert(delta_bytes > 0, "Must be");
3393 size_t before = 0;
3394 size_t after = 0;
3395 MetaWord* res;
3396 bool incremented;
3398 // Each thread increments the HWM at most once. Even if the thread fails to increment
3399 // the HWM, an allocation is still attempted. This is because another thread must then
3400 // have incremented the HWM and therefore the allocation might still succeed.
3401 do {
3402 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3403 res = allocate(word_size, mdtype);
3404 } while (!incremented && res == NULL);
3406 if (incremented) {
3407 tracer()->report_gc_threshold(before, after,
3408 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3409 if (PrintGCDetails && Verbose) {
3410 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3411 " to " SIZE_FORMAT, before, after);
3412 }
3413 }
3415 return res;
3416 }
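// Minimal sketch (with hypothetical names) of the protocol documented above:
// each caller attempts the high-water-mark bump with a single
// compare-and-swap; a loser still retries the allocation, since the winner
// may have created room.
static bool example_inc_capacity_once(volatile intptr_t* hwm, size_t delta,
                                      size_t* before, size_t* after) {
  intptr_t old_val = *hwm;
  intptr_t new_val = old_val + (intptr_t)delta;
  if (Atomic::cmpxchg_ptr(new_val, hwm, old_val) == old_val) {
    *before = (size_t)old_val;
    *after  = (size_t)new_val;
    return true;  // this thread raised the threshold
  }
  return false;   // lost the race; the caller retries the allocation anyway
}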
3418 // Space allocated in the Metaspace. It may span
3419 // several metadata virtual spaces.
3420 char* Metaspace::bottom() const {
3421 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3422 return (char*)vsm()->current_chunk()->bottom();
3423 }
3425 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3426 if (mdtype == ClassType) {
3427 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3428 } else {
3429 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
3430 }
3431 }
3433 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3434 if (mdtype == ClassType) {
3435 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3436 } else {
3437 return vsm()->sum_free_in_chunks_in_use();
3438 }
3439 }
3441 // Space capacity in the Metaspace. It includes
3442 // space in the list of chunks from which allocations
3443 // have been made. It does not include space in the global
3444 // freelist or the space available in the dictionary, which
3445 // is already counted in some chunk.
3446 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3447 if (mdtype == ClassType) {
3448 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3449 } else {
3450 return vsm()->sum_capacity_in_chunks_in_use();
3451 }
3452 }
3454 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3455 return used_words_slow(mdtype) * BytesPerWord;
3456 }
3458 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3459 return capacity_words_slow(mdtype) * BytesPerWord;
3460 }
3462 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3463 if (SafepointSynchronize::is_at_safepoint()) {
3464 if (DumpSharedSpaces && PrintSharedSpaces) {
3465 record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3466 }
3468 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3469 // Don't take Heap_lock
3470 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3471 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3472 // Dark matter. Too small for dictionary.
3473 #ifdef ASSERT
3474 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3475 #endif
3476 return;
3477 }
3478 if (is_class && using_class_space()) {
3479 class_vsm()->deallocate(ptr, word_size);
3480 } else {
3481 vsm()->deallocate(ptr, word_size);
3482 }
3483 } else {
3484 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3486 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3487 // Dark matter. Too small for dictionary.
3488 #ifdef ASSERT
3489 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3490 #endif
3491 return;
3492 }
3493 if (is_class && using_class_space()) {
3494 class_vsm()->deallocate(ptr, word_size);
3495 } else {
3496 vsm()->deallocate(ptr, word_size);
3497 }
3498 }
3499 }
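// Illustrative note on the "dark matter" threshold above: the cutoff is
// TreeChunk<Metablock, FreeList<Metablock> >::min_size(), assumed here to be
// just a few words. Freeing, say, a 2-word block leaves it untracked
// (abandoned), and debug builds poison it with 0xf5f5f5f5.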
3502 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3503 bool read_only, MetaspaceObj::Type type, TRAPS) {
3504 if (HAS_PENDING_EXCEPTION) {
3505 assert(false, "Should not allocate with exception pending");
3506 return NULL; // caller does a CHECK_NULL too
3507 }
3509 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3510 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3512 // Allocate in metaspaces without taking out a lock, because doing so can
3513 // deadlock with the SymbolTable_lock. Dumping is single-threaded for now;
3514 // we'll have to revisit this for application class data sharing.
3515 if (DumpSharedSpaces) {
3516 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3517 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3518 MetaWord* result = space->allocate(word_size, NonClassType);
3519 if (result == NULL) {
3520 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3521 }
3522 if (PrintSharedSpaces) {
3523 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3524 }
3526 // Zero initialize.
3527 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3529 return result;
3530 }
3532 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3534 // Try to allocate metadata.
3535 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3537 if (result == NULL) {
3538 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3540 // Allocation failed.
3541 if (is_init_completed()) {
3542 // Only start a GC if the bootstrapping has completed.
3544 // Try to clean out some memory and retry.
3545 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3546 loader_data, word_size, mdtype);
3547 }
3548 }
3550 if (result == NULL) {
3551 SpaceManager* sm;
3552 if (is_class_space_allocation(mdtype)) {
3553 sm = loader_data->metaspace_non_null()->class_vsm();
3554 } else {
3555 sm = loader_data->metaspace_non_null()->vsm();
3556 }
3558 result = sm->get_small_chunk_and_allocate(word_size);
3560 if (result == NULL) {
3561 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3562 }
3563 }
3565 // Zero initialize.
3566 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3568 return result;
3569 }
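// Summary sketch of the escalation ladder implemented above:
//   1. ordinary allocate(word_size, mdtype) on the loader's metaspace
//   2. on failure, once bootstrapping is complete, a GC via
//      satisfy_failed_metadata_allocation()
//   3. a last-ditch get_small_chunk_and_allocate() on the relevant
//      SpaceManager
//   4. report_metadata_oome(), which throws the appropriate OutOfMemoryError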
3571 size_t Metaspace::class_chunk_size(size_t word_size) {
3572 assert(using_class_space(), "Has to use class space");
3573 return class_vsm()->calc_chunk_size(word_size);
3574 }
3576 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3577 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3579 // At this point the allocation has definitively failed: we are out of memory.
3580 if (Verbose && TraceMetadataChunkAllocation) {
3581 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3582 SIZE_FORMAT, word_size);
3583 if (loader_data->metaspace_or_null() != NULL) {
3584 loader_data->dump(gclog_or_tty);
3585 }
3586 MetaspaceAux::dump(gclog_or_tty);
3587 }
3589 bool out_of_compressed_class_space = false;
3590 if (is_class_space_allocation(mdtype)) {
3591 Metaspace* metaspace = loader_data->metaspace_non_null();
3592 out_of_compressed_class_space =
3593 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3594 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3595 CompressedClassSpaceSize;
3596 }
3598 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3599 const char* space_string = out_of_compressed_class_space ?
3600 "Compressed class space" : "Metaspace";
3602 report_java_out_of_memory(space_string);
3604 if (JvmtiExport::should_post_resource_exhausted()) {
3605 JvmtiExport::post_resource_exhausted(
3606 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3607 space_string);
3608 }
3610 if (!is_init_completed()) {
3611 vm_exit_during_initialization("OutOfMemoryError", space_string);
3612 }
3614 if (out_of_compressed_class_space) {
3615 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3616 } else {
3617 THROW_OOP(Universe::out_of_memory_error_metaspace());
3618 }
3619 }
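// Hedged numeric example of the classification above: with
// CompressedClassSpaceSize == 1G, committed class-space bytes == 1G - 32K,
// and a pending chunk of 64K bytes, committed + chunk == 1G + 32K exceeds
// the cap, so the failure is reported as "Compressed class space" rather
// than plain "Metaspace".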
3621 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3622 switch (mdtype) {
3623 case Metaspace::ClassType: return "Class";
3624 case Metaspace::NonClassType: return "Metadata";
3625 default:
3626 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3627 return NULL;
3628 }
3629 }
3631 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3632 assert(DumpSharedSpaces, "sanity");
3634 int byte_size = (int)word_size * HeapWordSize;
3635 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3637 if (_alloc_record_head == NULL) {
3638 _alloc_record_head = _alloc_record_tail = rec;
3639 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3640 _alloc_record_tail->_next = rec;
3641 _alloc_record_tail = rec;
3642 } else {
3643 // Slow linear search, but this doesn't happen often, and only when dumping.
3644 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3645 if (old->_ptr == ptr) {
3646 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3647 int remain_bytes = old->_byte_size - byte_size;
3648 assert(remain_bytes >= 0, "sanity");
3649 old->_type = type;
3651 if (remain_bytes == 0) {
3652 delete(rec);
3653 } else {
3654 address remain_ptr = address(ptr) + byte_size;
3655 rec->_ptr = remain_ptr;
3656 rec->_byte_size = remain_bytes;
3657 rec->_type = MetaspaceObj::DeallocatedType;
3658 rec->_next = old->_next;
3659 old->_byte_size = byte_size;
3660 old->_next = rec;
3661 }
3662 return;
3663 }
3664 }
3665 assert(0, "reallocating a freed pointer that was not recorded");
3666 }
3667 }
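// Sketch of the split performed above when an allocation re-uses part of a
// freed record (byte counts invented for illustration; 64-bit VM assumed):
//   before: [ptr, 96 bytes, DeallocatedType] -> next
//   record_allocation(ptr, type, 8 /* words == 64 bytes */)
//   after:  [ptr, 64 bytes, type] -> [ptr+64, 32 bytes, DeallocatedType] -> next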
3669 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3670 assert(DumpSharedSpaces, "sanity");
3672 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3673 if (rec->_ptr == ptr) {
3674 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3675 rec->_type = MetaspaceObj::DeallocatedType;
3676 return;
3677 }
3678 }
3680 assert(0, "deallocating a pointer that was not recorded");
3681 }
3683 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3684 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3686 address last_addr = (address)bottom();
3688 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3689 address ptr = rec->_ptr;
3690 if (last_addr < ptr) {
3691 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3692 }
3693 closure->doit(ptr, rec->_type, rec->_byte_size);
3694 last_addr = ptr + rec->_byte_size;
3695 }
3697 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3698 if (last_addr < top) {
3699 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3700 }
3701 }
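// A hypothetical closure (illustration only, not part of this change)
// showing how iterate() is consumed: recorded allocations arrive with their
// recorded type, and the gaps between records arrive as UnknownType.
class ExampleAllocRecordClosure : public Metaspace::AllocRecordClosure {
public:
  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
    gclog_or_tty->print_cr(PTR_FORMAT " type=%d size=%d",
                           ptr, (int)type, byte_size);
  }
};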
3703 void Metaspace::purge(MetadataType mdtype) {
3704 get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3705 }
3707 void Metaspace::purge() {
3708 MutexLockerEx cl(SpaceManager::expand_lock(),
3709 Mutex::_no_safepoint_check_flag);
3710 purge(NonClassType);
3711 if (using_class_space()) {
3712 purge(ClassType);
3713 }
3714 }
3716 void Metaspace::print_on(outputStream* out) const {
3717 // Print both class virtual space counts and metaspace.
3718 if (Verbose) {
3719 vsm()->print_on(out);
3720 if (using_class_space()) {
3721 class_vsm()->print_on(out);
3722 }
3723 }
3724 }
3726 bool Metaspace::contains(const void* ptr) {
3727 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3728 return true;
3729 }
3731 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3732 return true;
3733 }
3735 return get_space_list(NonClassType)->contains(ptr);
3736 }
3738 void Metaspace::verify() {
3739 vsm()->verify();
3740 if (using_class_space()) {
3741 class_vsm()->verify();
3742 }
3743 }
3745 void Metaspace::dump(outputStream* const out) const {
3746 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3747 vsm()->dump(out);
3748 if (using_class_space()) {
3749 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3750 class_vsm()->dump(out);
3751 }
3752 }
3754 /////////////// Unit tests ///////////////
3756 #ifndef PRODUCT
3758 class TestMetaspaceAuxTest : AllStatic {
3759 public:
3760 static void test_reserved() {
3761 size_t reserved = MetaspaceAux::reserved_bytes();
3763 assert(reserved > 0, "assert");
3765 size_t committed = MetaspaceAux::committed_bytes();
3766 assert(committed <= reserved, "assert");
3768 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3769 assert(reserved_metadata > 0, "assert");
3770 assert(reserved_metadata <= reserved, "assert");
3772 if (UseCompressedClassPointers) {
3773 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3774 assert(reserved_class > 0, "assert");
3775 assert(reserved_class < reserved, "assert");
3776 }
3777 }
3779 static void test_committed() {
3780 size_t committed = MetaspaceAux::committed_bytes();
3782 assert(committed > 0, "assert");
3784 size_t reserved = MetaspaceAux::reserved_bytes();
3785 assert(committed <= reserved, "assert");
3787 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3788 assert(committed_metadata > 0, "assert");
3789 assert(committed_metadata <= committed, "assert");
3791 if (UseCompressedClassPointers) {
3792 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3793 assert(committed_class > 0, "assert");
3794 assert(committed_class < committed, "assert");
3795 }
3796 }
3798 static void test_virtual_space_list_large_chunk() {
3799 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3800 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3801 // Use a size larger than VirtualSpaceSize (256K), padded by extra pages so
3802 // that the request is _not_ vm_allocation_granularity aligned on Windows.
3803 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3804 large_size += (os::vm_page_size()/BytesPerWord);
3805 vs_list->get_new_chunk(large_size, large_size, 0);
3806 }
3808 static void test() {
3809 test_reserved();
3810 test_committed();
3811 test_virtual_space_list_large_chunk();
3812 }
3813 };
3815 void TestMetaspaceAux_test() {
3816 TestMetaspaceAuxTest::test();
3817 }
3819 class TestVirtualSpaceNodeTest {
3820 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3821 size_t& num_small_chunks,
3822 size_t& num_specialized_chunks) {
3823 num_medium_chunks = words_left / MediumChunk;
3824 words_left = words_left % MediumChunk;
3826 num_small_chunks = words_left / SmallChunk;
3827 words_left = words_left % SmallChunk;
3828 // How many specialized chunks can we get?
3829 num_specialized_chunks = words_left / SpecializedChunk;
3830 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3831 }
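// Worked instance of chunk_up() (using the non-class sizes from the
// ChunkSizes enum: MediumChunk = 8*K, SmallChunk = 512, SpecializedChunk =
// 128 words): chunk_up(9*K, m, s, sp) yields m == 1, s == 2, sp == 0,
// since 9*K == 1*8*K + 2*512 + 0*128.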
3833 public:
3834 static void test() {
3835 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3836 const size_t vsn_test_size_words = MediumChunk * 4;
3837 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3839 // The chunk sizes must be multiples of each other, or this will fail.
3840 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3841 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3843 { // No committed memory in VSN
3844 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3845 VirtualSpaceNode vsn(vsn_test_size_bytes);
3846 vsn.initialize();
3847 vsn.retire(&cm);
3848 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3849 }
3851 { // All of VSN is committed, half is used by chunks
3852 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3853 VirtualSpaceNode vsn(vsn_test_size_bytes);
3854 vsn.initialize();
3855 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3856 vsn.get_chunk_vs(MediumChunk);
3857 vsn.get_chunk_vs(MediumChunk);
3858 vsn.retire(&cm);
3859 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3860 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3861 }
3863 { // 4 pages of VSN is committed, some is used by chunks
3864 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3865 VirtualSpaceNode vsn(vsn_test_size_bytes);
3866 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3867 assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3868 vsn.initialize();
3869 vsn.expand_by(page_chunks, page_chunks);
3870 vsn.get_chunk_vs(SmallChunk);
3871 vsn.get_chunk_vs(SpecializedChunk);
3872 vsn.retire(&cm);
3874 // committed - used = words left to retire
3875 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3877 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3878 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3880 assert(num_medium_chunks == 0, "should not get any medium chunks");
3881 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3882 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3883 }
3885 { // Half of VSN is committed, a humongous chunk is used
3886 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3887 VirtualSpaceNode vsn(vsn_test_size_bytes);
3888 vsn.initialize();
3889 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3890 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3891 vsn.retire(&cm);
3893 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3894 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3895 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3897 assert(num_medium_chunks == 0, "should not get any medium chunks");
3898 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3899 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3900 }
3902 }
3904 #define assert_is_available_positive(word_size) \
3905 assert(vsn.is_available(word_size), \
3906 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3907 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3908 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3910 #define assert_is_available_negative(word_size) \
3911 assert(!vsn.is_available(word_size), \
3912 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3913 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3914 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3916 static void test_is_available_positive() {
3917 // Reserve some memory.
3918 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3919 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3921 // Commit some memory.
3922 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3923 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3924 assert(expanded, "Failed to commit");
3926 // Check that is_available accepts the committed size.
3927 assert_is_available_positive(commit_word_size);
3929 // Check that is_available accepts half the committed size.
3930 size_t expand_word_size = commit_word_size / 2;
3931 assert_is_available_positive(expand_word_size);
3932 }
3934 static void test_is_available_negative() {
3935 // Reserve some memory.
3936 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3937 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3939 // Commit some memory.
3940 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3941 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3942 assert(expanded, "Failed to commit");
3944 // Check that is_available rejects a size that is too large.
3945 size_t two_times_commit_word_size = commit_word_size * 2;
3946 assert_is_available_negative(two_times_commit_word_size);
3947 }
3949 static void test_is_available_overflow() {
3950 // Reserve some memory.
3951 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3952 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3954 // Commit some memory.
3955 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3956 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3957 assert(expanded, "Failed to commit");
3959 // Calculate a size that will overflow the virtual space size.
3960 void* virtual_space_max = (void*)(uintptr_t)-1;
3961 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3962 size_t overflow_size = bottom_to_max + BytesPerWord;
3963 size_t overflow_word_size = overflow_size / BytesPerWord;
3965 // Check that is_available can handle the overflow.
3966 assert_is_available_negative(overflow_word_size);
3967 }
3969 static void test_is_available() {
3970 TestVirtualSpaceNodeTest::test_is_available_positive();
3971 TestVirtualSpaceNodeTest::test_is_available_negative();
3972 TestVirtualSpaceNodeTest::test_is_available_overflow();
3973 }
3974 };
3976 void TestVirtualSpaceNode_test() {
3977 TestVirtualSpaceNodeTest::test();
3978 TestVirtualSpaceNodeTest::test_is_available();
3979 }
3980 #endif