/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
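
// For reference, with 8-byte words (64-bit) these word counts translate to:
// SpecializedChunk/ClassSpecializedChunk = 1 KiB, ClassSmallChunk = 2 KiB,
// SmallChunk = 4 KiB, ClassMediumChunk = 32 KiB and MediumChunk = 64 KiB.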

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Running totals over all the free lists above and the humongous
  // dictionary: total free word size and total free chunk count.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }
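
    // Note: FreeList::count() can return -1 when the count is not
    // maintained; the guard below treats that as an empty list rather
    // than using the value unchecked.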
    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;
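  // For example, a request for 100 words will not be carved out of a free
  // block larger than 400 words; such a block is put back into the
  // dictionary instead (see get_block()).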

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // Total space reserved in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  // Address of the next available space in _virtual_space.
  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always carves up the remainder completely, because every chunk
  // size is a multiple of the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         err_msg(PTR_FORMAT " is not aligned to " \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))
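

// When the OS cannot commit large pages on demand
// (!os::can_commit_large_page_memory()), the large-page decision has to be
// made up front, at reserve time, and it is only worth doing when the
// expansion policy would let the whole reservation be committed.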
// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// 'bytes' is the size of the backing virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // the space at a configurable address, generally at the top of the Java
  // heap so other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated blocks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t)(is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t)(is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t)(is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size() { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Compute the chunk size to request when expanding, based on the
  // allocation size and a minimum chunk size.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  bool contains(const void *ptr);

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif
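
  // A worked example (illustrative; exact numbers depend on the build):
  // on a 64-bit VM a 1-word (8-byte) request is first padded up to
  // sizeof(Metablock), so a freed block can later carry its freelist node,
  // and then aligned up to Metachunk::object_alignment().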
  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed.  Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                                  \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,           \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT  \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}
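
// Grow the committed metaspace: first try to commit more of the current
// virtual space; failing that, retire it, reserve a new virtual space of at
// least VirtualSpaceSize words, and commit there instead.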
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the perm gen, the increase in the perm
// gen was bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GCs.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
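
// For example, with MinMetaspaceExpansion and MaxMetaspaceExpansion at their
// usual defaults (on the order of 256K and 4M bytes), a small allocation
// grows the HWM by MinMetaspaceExpansion, a 1M allocation by
// MaxMetaspaceExpansion, and an 8M allocation by the aligned 8M plus
// MinMetaspaceExpansion.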
size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;

  // Always grant expansion if we are initializing the JVM,
  // or if the GC_locker is preventing GCs.
  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
    return left_until_max / BytesPerWord;
  }

  size_t capacity_until_gc = capacity_until_GC();

  if (capacity_until_gc <= committed_bytes) {
    return 0;
  }

  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
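
// Note that allowed_expansion() returns a word count, capped both by the GC
// threshold (capacity_until_GC) and by MaxMetaspaceSize, whichever leaves
// less room.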
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial size (MetaspaceSize)
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "   used_after_gc       : %6.1fKB",
                           used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                               "  minimum_desired_capacity: %6.1fKB"
                               "  expand_bytes: %6.1fKB"
                               "  MinMetaspaceExpansion: %6.1fKB"
                               "  new metaspace HWM:  %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
                                        max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                     shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                               "  shrinking:"
                               "  initSize: %.1fK"
                               "  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K,
                               maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                               "  shrink_bytes: %.1fK"
                               "  current_shrink_factor: %d"
                               "  new shrink factor: %d"
                               "  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K,
                               current_shrink_factor,
                               _shrink_factor,
                               MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// Metadebug methods

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkManager methods

size_t ChunkManager::free_chunks_total_words() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
         err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_total,
                 sum_free_chunks()));
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
         err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_count,
                 sum_free_chunks_count()));
}

void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}

void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify();
}

void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_print_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
               _free_chunks_total, _free_chunks_count);
}

void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
  assert_lock_strong(SpaceManager::expand_lock());
  st->print_cr("Sum free chunk total " SIZE_FORMAT "  count " SIZE_FORMAT,
               sum_free_chunks(), sum_free_chunks_count());
}

ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
  return &_free_chunks[index];
}
1691 // These methods, which sum the free chunk lists, are used by the
1692 // printing methods that run in product builds.
1693 size_t ChunkManager::sum_free_chunks() {
1694 assert_lock_strong(SpaceManager::expand_lock());
1695 size_t result = 0;
1696 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1697 ChunkList* list = free_chunks(i);
1699 if (list == NULL) {
1700 continue;
1701 }
1703 result = result + list->count() * list->size();
1704 }
1705 result = result + humongous_dictionary()->total_size();
1706 return result;
1707 }
1709 size_t ChunkManager::sum_free_chunks_count() {
1710 assert_lock_strong(SpaceManager::expand_lock());
1711 size_t count = 0;
1712 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1713 ChunkList* list = free_chunks(i);
1714 if (list == NULL) {
1715 continue;
1716 }
1717 count = count + list->count();
1718 }
1719 count = count + humongous_dictionary()->total_free_blocks();
1720 return count;
1721 }
1723 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1724 ChunkIndex index = list_index(word_size);
1725 assert(index < HumongousIndex, "No humongous list");
1726 return free_chunks(index);
1727 }
1729 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1730 assert_lock_strong(SpaceManager::expand_lock());
1732 slow_locked_verify();
1734 Metachunk* chunk = NULL;
1735 if (list_index(word_size) != HumongousIndex) {
1736 ChunkList* free_list = find_free_chunks_list(word_size);
1737 assert(free_list != NULL, "Sanity check");
1739 chunk = free_list->head();
1741 if (chunk == NULL) {
1742 return NULL;
1743 }
1745 // Remove the chunk as the head of the list.
1746 free_list->remove_chunk(chunk);
1748 if (TraceMetadataChunkAllocation && Verbose) {
1749 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1750 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1751 free_list, chunk, chunk->word_size());
1752 }
1753 } else {
1754 chunk = humongous_dictionary()->get_chunk(
1755 word_size,
1756 FreeBlockDictionary<Metachunk>::atLeast);
1758 if (chunk == NULL) {
1759 return NULL;
1760 }
1762 if (TraceMetadataHumongousAllocation) {
1763 size_t waste = chunk->word_size() - word_size;
1764 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1765 SIZE_FORMAT " for requested size " SIZE_FORMAT
1766 " waste " SIZE_FORMAT,
1767 chunk->word_size(), word_size, waste);
1768 }
1769 }
1771 // Chunk is being removed from the chunks free list.
1772 dec_free_chunks_total(chunk->word_size());
1774 // Clear the chunk's links into this freelist
1775 chunk->set_next(NULL);
1776 chunk->set_prev(NULL);
1777 #ifdef ASSERT
1778 // Chunk is no longer on any freelist. Setting this to false makes
1779 // container_count_slow() work.
1780 chunk->set_is_tagged_free(false);
1781 #endif
1782 chunk->container()->inc_container_count();
1784 slow_locked_verify();
1785 return chunk;
1786 }
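// A minimal sketch of the two-tier lookup implemented above: the three
// fixed chunk sizes come from exact-size free lists, while humongous
// requests go through a best-fit ("at least") dictionary search. All
// names below are hypothetical stand-ins for ChunkList and
// FreeBlockDictionary<Metachunk>.
struct ExampleChunk { size_t words; ExampleChunk* next; };
static ExampleChunk* example_get_chunk(ExampleChunk** fixed_lists, // 3 lists
                                       int index,         // 0..2, or 3 for humongous
                                       ExampleChunk* (*dict_at_least)(size_t),
                                       size_t word_size) {
  if (index < 3) {
    // Exact-size list: pop the head if one is available.
    ExampleChunk* head = fixed_lists[index];
    if (head != NULL) {
      fixed_lists[index] = head->next;
      head->next = NULL; // unlink, mirroring set_next(NULL)/set_prev(NULL)
    }
    return head;
  }
  // Humongous: best-fit search for any chunk of at least word_size words.
  return dict_at_least(word_size);
}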
1788 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1789 assert_lock_strong(SpaceManager::expand_lock());
1790 slow_locked_verify();
1792 // Take from the beginning of the list
1793 Metachunk* chunk = free_chunks_get(word_size);
1794 if (chunk == NULL) {
1795 return NULL;
1796 }
1798 assert((word_size <= chunk->word_size()) ||
1799 (list_index(chunk->word_size()) == HumongousIndex),
1800 "Non-humongous variable sized chunk");
1801 if (TraceMetadataChunkAllocation) {
1802 size_t list_count;
1803 if (list_index(word_size) < HumongousIndex) {
1804 ChunkList* list = find_free_chunks_list(word_size);
1805 list_count = list->count();
1806 } else {
1807 list_count = humongous_dictionary()->total_count();
1808 }
1809 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1810 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1811 this, chunk, chunk->word_size(), list_count);
1812 locked_print_free_chunks(gclog_or_tty);
1813 }
1815 return chunk;
1816 }
1818 void ChunkManager::print_on(outputStream* out) const {
1819 if (PrintFLSStatistics != 0) {
1820 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1821 }
1822 }
1824 // SpaceManager methods
1826 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1827 size_t* chunk_word_size,
1828 size_t* class_chunk_word_size) {
1829 switch (type) {
1830 case Metaspace::BootMetaspaceType:
1831 *chunk_word_size = Metaspace::first_chunk_word_size();
1832 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1833 break;
1834 case Metaspace::ROMetaspaceType:
1835 *chunk_word_size = SharedReadOnlySize / wordSize;
1836 *class_chunk_word_size = ClassSpecializedChunk;
1837 break;
1838 case Metaspace::ReadWriteMetaspaceType:
1839 *chunk_word_size = SharedReadWriteSize / wordSize;
1840 *class_chunk_word_size = ClassSpecializedChunk;
1841 break;
1842 case Metaspace::AnonymousMetaspaceType:
1843 case Metaspace::ReflectionMetaspaceType:
1844 *chunk_word_size = SpecializedChunk;
1845 *class_chunk_word_size = ClassSpecializedChunk;
1846 break;
1847 default:
1848 *chunk_word_size = SmallChunk;
1849 *class_chunk_word_size = ClassSmallChunk;
1850 break;
1851 }
1852 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1853 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1854 " class " SIZE_FORMAT,
1855 *chunk_word_size, *class_chunk_word_size));
1856 }
1858 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1859 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1860 size_t free = 0;
1861 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1862 Metachunk* chunk = chunks_in_use(i);
1863 while (chunk != NULL) {
1864 free += chunk->free_word_size();
1865 chunk = chunk->next();
1866 }
1867 }
1868 return free;
1869 }
1871 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1872 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1873 size_t result = 0;
1874 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1875 result += sum_waste_in_chunks_in_use(i);
1876 }
1878 return result;
1879 }
1881 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1882 size_t result = 0;
1883 Metachunk* chunk = chunks_in_use(index);
1884 // Count the free space in all the chunks but not the
1885 // current chunk, from which allocations are still being done.
1886 while (chunk != NULL) {
1887 if (chunk != current_chunk()) {
1888 result += chunk->free_word_size();
1889 }
1890 chunk = chunk->next();
1891 }
1892 return result;
1893 }
1895 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1896 // For CMS use allocated_chunks_words(), which does not need the
1897 // Metaspace lock. For the other collectors sum over the chunk
1898 // lists. Summing the lists is too expensive to use in the product,
1899 // so allocated_chunks_words() should be used there; keeping both
1900 // paths allows checking that allocated_chunks_words() returns the
1901 // same value as sum_capacity_in_chunks_in_use(), the definitive answer.
1904 if (UseConcMarkSweepGC) {
1905 return allocated_chunks_words();
1906 } else {
1907 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1908 size_t sum = 0;
1909 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1910 Metachunk* chunk = chunks_in_use(i);
1911 while (chunk != NULL) {
1912 sum += chunk->word_size();
1913 chunk = chunk->next();
1914 }
1915 }
1916 return sum;
1917 }
1918 }
1920 size_t SpaceManager::sum_count_in_chunks_in_use() {
1921 size_t count = 0;
1922 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1923 count = count + sum_count_in_chunks_in_use(i);
1924 }
1926 return count;
1927 }
1929 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1930 size_t count = 0;
1931 Metachunk* chunk = chunks_in_use(i);
1932 while (chunk != NULL) {
1933 count++;
1934 chunk = chunk->next();
1935 }
1936 return count;
1937 }
1940 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1941 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1942 size_t used = 0;
1943 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1944 Metachunk* chunk = chunks_in_use(i);
1945 while (chunk != NULL) {
1946 used += chunk->used_word_size();
1947 chunk = chunk->next();
1948 }
1949 }
1950 return used;
1951 }
1953 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1955 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1956 Metachunk* chunk = chunks_in_use(i);
1957 st->print("SpaceManager: %s " PTR_FORMAT,
1958 chunk_size_name(i), chunk);
1959 if (chunk != NULL) {
1960 st->print_cr(" free " SIZE_FORMAT,
1961 chunk->free_word_size());
1962 } else {
1963 st->print_cr("");
1964 }
1965 }
1967 chunk_manager()->locked_print_free_chunks(st);
1968 chunk_manager()->locked_print_sum_free_chunks(st);
1969 }
1971 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1973 // Decide between a small chunk and a medium chunk. Up to
1974 // _small_chunk_limit small chunks can be allocated but
1975 // once a medium chunk has been allocated, no more small
1976 // chunks will be allocated.
1977 size_t chunk_word_size;
1978 if (chunks_in_use(MediumIndex) == NULL &&
1979 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
1980 chunk_word_size = (size_t) small_chunk_size();
1981 if (word_size + Metachunk::overhead() > small_chunk_size()) {
1982 chunk_word_size = medium_chunk_size();
1983 }
1984 } else {
1985 chunk_word_size = medium_chunk_size();
1986 }
1988 // Might still need a humongous chunk. Enforce that
1989 // humongous allocation sizes are aligned up to
1990 // the smallest chunk size.
1991 size_t if_humongous_sized_chunk =
1992 align_size_up(word_size + Metachunk::overhead(),
1993 smallest_chunk_size());
1994 chunk_word_size =
1995 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1997 assert(!SpaceManager::is_humongous(word_size) ||
1998 chunk_word_size == if_humongous_sized_chunk,
1999 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2000 " chunk_word_size " SIZE_FORMAT,
2001 word_size, chunk_word_size));
2002 if (TraceMetadataHumongousAllocation &&
2003 SpaceManager::is_humongous(word_size)) {
2004 gclog_or_tty->print_cr("Metadata humongous allocation:");
2005 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
2006 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
2007 chunk_word_size);
2008 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
2009 Metachunk::overhead());
2010 }
2011 return chunk_word_size;
2012 }
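// A minimal sketch of the sizing policy above, with hypothetical constants
// standing in for small_chunk_size(), medium_chunk_size(),
// smallest_chunk_size(), Metachunk::overhead() (taken as 0 here) and
// _small_chunk_limit:
static size_t example_calc_chunk_words(size_t word_size,
                                       bool medium_chunk_in_use,
                                       size_t small_chunks_allocated) {
  const size_t small = 512, medium = 4096, smallest = 128, limit = 4;
  // Hand out small chunks until either a medium chunk exists or the
  // small-chunk budget is exhausted; then switch to medium chunks.
  size_t chunk = (medium_chunk_in_use || small_chunks_allocated >= limit)
                 ? medium : small;
  if (chunk == small && word_size > small) {
    chunk = medium;
  }
  // A humongous request overrides both: round it up to the smallest
  // chunk granularity and take the larger of the two candidates.
  size_t humongous = ((word_size + smallest - 1) / smallest) * smallest;
  return chunk > humongous ? chunk : humongous;
}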
2014 void SpaceManager::track_metaspace_memory_usage() {
2015 if (is_init_completed()) {
2016 if (is_class()) {
2017 MemoryService::track_compressed_class_memory_usage();
2018 }
2019 MemoryService::track_metaspace_memory_usage();
2020 }
2021 }
2023 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2024 assert(vs_list()->current_virtual_space() != NULL,
2025 "Should have been set");
2026 assert(current_chunk() == NULL ||
2027 current_chunk()->allocate(word_size) == NULL,
2028 "Don't need to expand");
2029 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2031 if (TraceMetadataChunkAllocation && Verbose) {
2032 size_t words_left = 0;
2033 size_t words_used = 0;
2034 if (current_chunk() != NULL) {
2035 words_left = current_chunk()->free_word_size();
2036 words_used = current_chunk()->used_word_size();
2037 }
2038 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2039 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2040 " words left",
2041 word_size, words_used, words_left);
2042 }
2044 // Get another chunk out of the virtual space
2045 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2046 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2048 MetaWord* mem = NULL;
2050 // If a chunk was available, add it to the in-use chunk list
2051 // and do an allocation from it.
2052 if (next != NULL) {
2053 // Add to this manager's list of chunks in use.
2054 add_chunk(next, false);
2055 mem = next->allocate(word_size);
2056 }
2058 // Track metaspace memory usage statistic.
2059 track_metaspace_memory_usage();
2061 return mem;
2062 }
2064 void SpaceManager::print_on(outputStream* st) const {
2066 for (ChunkIndex i = ZeroIndex;
2067 i < NumberOfInUseLists ;
2068 i = next_chunk_index(i) ) {
2069 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2070 chunks_in_use(i),
2071 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2072 }
2073 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2074 " Humongous " SIZE_FORMAT,
2075 sum_waste_in_chunks_in_use(SmallIndex),
2076 sum_waste_in_chunks_in_use(MediumIndex),
2077 sum_waste_in_chunks_in_use(HumongousIndex));
2078 // block free lists
2079 if (block_freelists() != NULL) {
2080 st->print_cr("total in block free lists " SIZE_FORMAT,
2081 block_freelists()->total_size());
2082 }
2083 }
2085 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2086 Mutex* lock) :
2087 _mdtype(mdtype),
2088 _allocated_blocks_words(0),
2089 _allocated_chunks_words(0),
2090 _allocated_chunks_count(0),
2091 _lock(lock)
2092 {
2093 initialize();
2094 }
2096 void SpaceManager::inc_size_metrics(size_t words) {
2097 assert_lock_strong(SpaceManager::expand_lock());
2098 // Total of allocated Metachunks and allocated Metachunks count
2099 // for each SpaceManager
2100 _allocated_chunks_words = _allocated_chunks_words + words;
2101 _allocated_chunks_count++;
2102 // Global total of capacity in allocated Metachunks
2103 MetaspaceAux::inc_capacity(mdtype(), words);
2104 // Global total of allocated Metablocks.
2105 // used_words_slow() includes the overhead of each
2106 // Metachunk, so include that overhead in the used total when the
2107 // Metachunk is first added (it is counted only once per
2108 // Metachunk).
2109 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2110 }
2112 void SpaceManager::inc_used_metrics(size_t words) {
2113 // Add to the per SpaceManager total
2114 Atomic::add_ptr(words, &_allocated_blocks_words);
2115 // Add to the global total
2116 MetaspaceAux::inc_used(mdtype(), words);
2117 }
2119 void SpaceManager::dec_total_from_size_metrics() {
2120 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2121 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2122 // Also deduct the overhead per Metachunk
2123 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2124 }
2126 void SpaceManager::initialize() {
2127 Metadebug::init_allocation_fail_alot_count();
2128 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2129 _chunks_in_use[i] = NULL;
2130 }
2131 _current_chunk = NULL;
2132 if (TraceMetadataChunkAllocation && Verbose) {
2133 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2134 }
2135 }
2137 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2138 if (chunks == NULL) {
2139 return;
2140 }
2141 ChunkList* list = free_chunks(index);
2142 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2143 assert_lock_strong(SpaceManager::expand_lock());
2144 Metachunk* cur = chunks;
2146 // This returns chunks one at a time. If a new
2147 // class, List, could be created as a base class
2148 // of FreeList, then something like FreeList::prepend()
2149 // could be used in place of this loop (sketched below).
2150 while (cur != NULL) {
2151 assert(cur->container() != NULL, "Container should have been set");
2152 cur->container()->dec_container_count();
2153 // Capture the next link before it is changed
2154 // by the call to return_chunk_at_head();
2155 Metachunk* next = cur->next();
2156 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2157 list->return_chunk_at_head(cur);
2158 cur = next;
2159 }
2160 }
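// A minimal sketch of the FreeList::prepend() idea mentioned in the
// comment above: splice a whole donated list in front of the free list in
// one pass instead of returning chunks one at a time. The node type is
// hypothetical; the real lists also maintain per-list counts and sizes.
struct ExampleNode { ExampleNode* next; };
static ExampleNode* example_prepend(ExampleNode* free_list, ExampleNode* donated) {
  if (donated == NULL) return free_list;
  ExampleNode* tail = donated;
  while (tail->next != NULL) {
    tail = tail->next;            // walk to the end of the donated list
  }
  tail->next = free_list;         // splice once, rather than per node
  return donated;                 // new head of the free list
}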
2162 SpaceManager::~SpaceManager() {
2163 // This takes this->_lock, which can't be done while holding expand_lock()
2164 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2165 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2166 " allocated_chunks_words() " SIZE_FORMAT,
2167 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2169 MutexLockerEx fcl(SpaceManager::expand_lock(),
2170 Mutex::_no_safepoint_check_flag);
2172 chunk_manager()->slow_locked_verify();
2174 dec_total_from_size_metrics();
2176 if (TraceMetadataChunkAllocation && Verbose) {
2177 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2178 locked_print_chunks_in_use_on(gclog_or_tty);
2179 }
2181 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2182 // is used during the freeing of a VirtualSpaceNode.
2184 // Have to update before the chunks_in_use lists are emptied
2185 // below.
2186 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2187 sum_count_in_chunks_in_use());
2189 // Add all the chunks in use by this space manager
2190 // to the global list of free chunks.
2192 // Follow each list of chunks-in-use and add them to the
2193 // free lists. Each list is NULL terminated.
2195 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2196 if (TraceMetadataChunkAllocation && Verbose) {
2197 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2198 sum_count_in_chunks_in_use(i),
2199 chunk_size_name(i));
2200 }
2201 Metachunk* chunks = chunks_in_use(i);
2202 chunk_manager()->return_chunks(i, chunks);
2203 set_chunks_in_use(i, NULL);
2204 if (TraceMetadataChunkAllocation && Verbose) {
2205 gclog_or_tty->print_cr("updated freelist count %d %s",
2206 chunk_manager()->free_chunks(i)->count(),
2207 chunk_size_name(i));
2208 }
2209 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2210 }
2212 // The medium chunk case may be optimized by passing the head and
2213 // tail of the medium chunk list to add_at_head(). The tail is often
2214 // the current chunk but there are probably exceptions.
2216 // Humongous chunks
2217 if (TraceMetadataChunkAllocation && Verbose) {
2218 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2219 sum_count_in_chunks_in_use(HumongousIndex),
2220 chunk_size_name(HumongousIndex));
2221 gclog_or_tty->print("Humongous chunk dictionary: ");
2222 }
2223 // Humongous chunks are never the current chunk.
2224 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2226 while (humongous_chunks != NULL) {
2227 #ifdef ASSERT
2228 humongous_chunks->set_is_tagged_free(true);
2229 #endif
2230 if (TraceMetadataChunkAllocation && Verbose) {
2231 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2232 humongous_chunks,
2233 humongous_chunks->word_size());
2234 }
2235 assert(humongous_chunks->word_size() == (size_t)
2236 align_size_up(humongous_chunks->word_size(),
2237 smallest_chunk_size()),
2238 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2239 " granularity %d",
2240 humongous_chunks->word_size(), smallest_chunk_size()));
2241 Metachunk* next_humongous_chunks = humongous_chunks->next();
2242 humongous_chunks->container()->dec_container_count();
2243 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2244 humongous_chunks = next_humongous_chunks;
2245 }
2246 if (TraceMetadataChunkAllocation && Verbose) {
2247 gclog_or_tty->print_cr("");
2248 gclog_or_tty->print_cr("updated dictionary count %d %s",
2249 chunk_manager()->humongous_dictionary()->total_count(),
2250 chunk_size_name(HumongousIndex));
2251 }
2252 chunk_manager()->slow_locked_verify();
2253 }
2255 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2256 switch (index) {
2257 case SpecializedIndex:
2258 return "Specialized";
2259 case SmallIndex:
2260 return "Small";
2261 case MediumIndex:
2262 return "Medium";
2263 case HumongousIndex:
2264 return "Humongous";
2265 default:
2266 return NULL;
2267 }
2268 }
2270 ChunkIndex ChunkManager::list_index(size_t size) {
2271 switch (size) {
2272 case SpecializedChunk:
2273 assert(SpecializedChunk == ClassSpecializedChunk,
2274 "Need branch for ClassSpecializedChunk");
2275 return SpecializedIndex;
2276 case SmallChunk:
2277 case ClassSmallChunk:
2278 return SmallIndex;
2279 case MediumChunk:
2280 case ClassMediumChunk:
2281 return MediumIndex;
2282 default:
2283 assert(size > MediumChunk || size > ClassMediumChunk,
2284 "Not a humongous chunk");
2285 return HumongousIndex;
2286 }
2287 }
2289 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2290 assert_lock_strong(_lock);
2291 size_t raw_word_size = get_raw_word_size(word_size);
2292 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2293 assert(raw_word_size >= min_size,
2294 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2295 block_freelists()->return_block(p, raw_word_size);
2296 }
2298 // Adds a chunk to the list of chunks in use.
2299 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2301 assert(new_chunk != NULL, "Should not be NULL");
2302 assert(new_chunk->next() == NULL, "Should not be on a list");
2304 new_chunk->reset_empty();
2306 // Find the correct list and set the current
2307 // chunk for that list.
2308 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2310 if (index != HumongousIndex) {
2311 retire_current_chunk();
2312 set_current_chunk(new_chunk);
2313 new_chunk->set_next(chunks_in_use(index));
2314 set_chunks_in_use(index, new_chunk);
2315 } else {
2316 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2317 // small, so the small chunk list will be null. Link this first chunk as
2318 // the current chunk.
2319 if (make_current) {
2320 // Set as the current chunk but otherwise treat as a humongous chunk.
2321 set_current_chunk(new_chunk);
2322 }
2323 // Link at head. The _current_chunk only points to a humongous chunk for
2324 // the null class loader metaspace (class and data virtual space managers),
2325 // so it will not point to the tail
2326 // of the humongous chunks list.
2327 new_chunk->set_next(chunks_in_use(HumongousIndex));
2328 set_chunks_in_use(HumongousIndex, new_chunk);
2330 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2331 }
2333 // Add to the running sum of capacity
2334 inc_size_metrics(new_chunk->word_size());
2336 assert(new_chunk->is_empty(), "Not ready for reuse");
2337 if (TraceMetadataChunkAllocation && Verbose) {
2338 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2339 sum_count_in_chunks_in_use());
2340 new_chunk->print_on(gclog_or_tty);
2341 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2342 }
2343 }
2345 void SpaceManager::retire_current_chunk() {
2346 if (current_chunk() != NULL) {
2347 size_t remaining_words = current_chunk()->free_word_size();
2348 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2349 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2350 inc_used_metrics(remaining_words);
2351 }
2352 }
2353 }
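// A minimal sketch of the retirement policy above: the unused tail of the
// retired chunk is recycled through the block freelist only when it is at
// least the dictionary's minimum tracked size; smaller tails are abandoned
// as the "dark matter" referred to in deallocate(). min_words stands in
// for TreeChunk<Metablock, FreeList<Metablock> >::min_size().
static size_t example_recyclable_words(size_t remaining_words, size_t min_words) {
  return (remaining_words >= min_words) ? remaining_words : 0;
}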
2355 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2356 size_t grow_chunks_by_words) {
2357 // Get a chunk from the chunk freelist
2358 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2360 if (next == NULL) {
2361 next = vs_list()->get_new_chunk(word_size,
2362 grow_chunks_by_words,
2363 medium_chunk_bunch());
2364 }
2366 if (TraceMetadataHumongousAllocation && next != NULL &&
2367 SpaceManager::is_humongous(next->word_size())) {
2368 gclog_or_tty->print_cr(" new humongous chunk word size "
2369 PTR_FORMAT, next->word_size());
2370 }
2372 return next;
2373 }
2375 MetaWord* SpaceManager::allocate(size_t word_size) {
2376 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2378 size_t raw_word_size = get_raw_word_size(word_size);
2379 BlockFreelist* fl = block_freelists();
2380 MetaWord* p = NULL;
2381 // Allocation from the dictionary is expensive in the sense that
2382 // the dictionary has to be searched for a size. Don't allocate
2383 // from the dictionary until it starts to get fat. Is this
2384 // a reasonable policy? Maybe a skinny dictionary is fast enough
2385 // for allocations. Do some profiling. JJJ
2386 if (fl->total_size() > allocation_from_dictionary_limit) {
2387 p = fl->get_block(raw_word_size);
2388 }
2389 if (p == NULL) {
2390 p = allocate_work(raw_word_size);
2391 }
2393 return p;
2394 }
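// A minimal sketch of the allocation policy above: consult the
// deallocated-block dictionary only once its total size exceeds a
// threshold, otherwise (or on a miss) fall back to chunk allocation.
// All names are hypothetical stand-ins for BlockFreelist and
// allocate_work().
static void* example_allocate(size_t raw_words,
                              size_t dict_total_words,
                              size_t dict_limit_words, // allocation_from_dictionary_limit
                              void* (*from_dictionary)(size_t),
                              void* (*from_chunks)(size_t)) {
  void* p = NULL;
  if (dict_total_words > dict_limit_words) {
    p = from_dictionary(raw_words);  // expensive size search, so gated
  }
  return (p != NULL) ? p : from_chunks(raw_words);
}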
2396 // Returns the address of space allocated for "word_size".
2397 // This method does not know about blocks (Metablocks).
2398 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2399 assert_lock_strong(_lock);
2400 #ifdef ASSERT
2401 if (Metadebug::test_metadata_failure()) {
2402 return NULL;
2403 }
2404 #endif
2405 // Is there space in the current chunk?
2406 MetaWord* result = NULL;
2408 // For DumpSharedSpaces, only allocate out of the current chunk which is
2409 // never null because we gave it the size we wanted. Caller reports out
2410 // of memory if this returns null.
2411 if (DumpSharedSpaces) {
2412 assert(current_chunk() != NULL, "should never happen");
2413 inc_used_metrics(word_size);
2414 return current_chunk()->allocate(word_size); // caller handles null result
2415 }
2417 if (current_chunk() != NULL) {
2418 result = current_chunk()->allocate(word_size);
2419 }
2421 if (result == NULL) {
2422 result = grow_and_allocate(word_size);
2423 }
2425 if (result != NULL) {
2426 inc_used_metrics(word_size);
2427 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2428 "Head of the list is being allocated");
2429 }
2431 return result;
2432 }
2434 // This function looks at the chunks in the metaspace without locking.
2435 // The chunks are added with store ordering and not deleted except
2436 // at unloading time.
2437 bool SpaceManager::contains(const void *ptr) {
2438 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
2439 {
2440 Metachunk* curr = chunks_in_use(i);
2441 while (curr != NULL) {
2442 if (curr->contains(ptr)) return true;
2443 curr = curr->next();
2444 }
2445 }
2446 return false;
2447 }
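// A minimal analogue of the publication pattern the comment above relies
// on, written with C++11 atomics rather than HotSpot's internal ordering
// primitives. Writers (already serialized by a lock) release-store the new
// head; lock-free readers acquire-load it, so they observe either a fully
// initialized node or none at all. Nodes are never freed while readers run.
#include <atomic>
struct ExampleRange { const void* lo; const void* hi; ExampleRange* next; };
static std::atomic<ExampleRange*> example_ranges(NULL);
static void example_publish(ExampleRange* n) {        // writers hold a lock
  n->next = example_ranges.load(std::memory_order_relaxed);
  example_ranges.store(n, std::memory_order_release); // store-ordered add
}
static bool example_contains(const void* p) {         // readers take no lock
  for (ExampleRange* c = example_ranges.load(std::memory_order_acquire);
       c != NULL; c = c->next) {
    if (p >= c->lo && p < c->hi) return true;
  }
  return false;
}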
2449 void SpaceManager::verify() {
2450 // If there are blocks in the dictionary, then
2451 // verification of chunks does not work since
2452 // being in the dictionary alters a chunk.
2453 if (block_freelists()->total_size() == 0) {
2454 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2455 Metachunk* curr = chunks_in_use(i);
2456 while (curr != NULL) {
2457 curr->verify();
2458 verify_chunk_size(curr);
2459 curr = curr->next();
2460 }
2461 }
2462 }
2463 }
2465 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2466 assert(is_humongous(chunk->word_size()) ||
2467 chunk->word_size() == medium_chunk_size() ||
2468 chunk->word_size() == small_chunk_size() ||
2469 chunk->word_size() == specialized_chunk_size(),
2470 "Chunk size is wrong");
2471 return;
2472 }
2474 #ifdef ASSERT
2475 void SpaceManager::verify_allocated_blocks_words() {
2476 // Verification is only guaranteed at a safepoint.
2477 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2478 "Verification can fail if the applications is running");
2479 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2480 err_msg("allocation total is not consistent " SIZE_FORMAT
2481 " vs " SIZE_FORMAT,
2482 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2483 }
2485 #endif
2487 void SpaceManager::dump(outputStream* const out) const {
2488 size_t curr_total = 0;
2489 size_t waste = 0;
2490 uint i = 0;
2491 size_t used = 0;
2492 size_t capacity = 0;
2494 // Add up statistics for all chunks in this SpaceManager.
2495 for (ChunkIndex index = ZeroIndex;
2496 index < NumberOfInUseLists;
2497 index = next_chunk_index(index)) {
2498 for (Metachunk* curr = chunks_in_use(index);
2499 curr != NULL;
2500 curr = curr->next()) {
2501 out->print("%d) ", i++);
2502 curr->print_on(out);
2503 curr_total += curr->word_size();
2504 used += curr->used_word_size();
2505 capacity += curr->word_size();
2506 waste += curr->free_word_size() + curr->overhead();
2507 }
2508 }
2510 if (TraceMetadataChunkAllocation && Verbose) {
2511 block_freelists()->print_on(out);
2512 }
2514 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2515 // Free space isn't wasted.
2516 waste -= free;
2518 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2519 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2520 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2521 }
2523 #ifndef PRODUCT
2524 void SpaceManager::mangle_freed_chunks() {
2525 for (ChunkIndex index = ZeroIndex;
2526 index < NumberOfInUseLists;
2527 index = next_chunk_index(index)) {
2528 for (Metachunk* curr = chunks_in_use(index);
2529 curr != NULL;
2530 curr = curr->next()) {
2531 curr->mangle();
2532 }
2533 }
2534 }
2535 #endif // PRODUCT
2537 // MetaspaceAux
2540 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2541 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2543 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2544 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2545 return list == NULL ? 0 : list->free_bytes();
2546 }
2548 size_t MetaspaceAux::free_bytes() {
2549 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2550 }
2552 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2553 assert_lock_strong(SpaceManager::expand_lock());
2554 assert(words <= allocated_capacity_words(mdtype),
2555 err_msg("About to decrement below 0: words " SIZE_FORMAT
2556 " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2557 words, mdtype, allocated_capacity_words(mdtype)));
2558 _allocated_capacity_words[mdtype] -= words;
2559 }
2561 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2562 assert_lock_strong(SpaceManager::expand_lock());
2563 // Needs to be atomic
2564 _allocated_capacity_words[mdtype] += words;
2565 }
2567 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2568 assert(words <= allocated_used_words(mdtype),
2569 err_msg("About to decrement below 0: words " SIZE_FORMAT
2570 " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2571 words, mdtype, allocated_used_words(mdtype)));
2572 // For CMS, deallocation of the Metaspaces occurs during the
2573 // sweep, which is a concurrent phase. Protection by the expand_lock()
2574 // is not enough since allocation is on a per-Metaspace basis
2575 // and protected by the Metaspace lock.
2576 jlong minus_words = (jlong) - (jlong) words;
2577 Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2578 }
2580 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2581 // _allocated_used_words tracks allocations for
2582 // each piece of metadata. Those allocations are
2583 // generally done concurrently by different application
2584 // threads so must be done atomically.
2585 Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2586 }
2588 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2589 size_t used = 0;
2590 ClassLoaderDataGraphMetaspaceIterator iter;
2591 while (iter.repeat()) {
2592 Metaspace* msp = iter.get_next();
2593 // Sum allocated_blocks_words for each metaspace
2594 if (msp != NULL) {
2595 used += msp->used_words_slow(mdtype);
2596 }
2597 }
2598 return used * BytesPerWord;
2599 }
2601 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2602 size_t free = 0;
2603 ClassLoaderDataGraphMetaspaceIterator iter;
2604 while (iter.repeat()) {
2605 Metaspace* msp = iter.get_next();
2606 if (msp != NULL) {
2607 free += msp->free_words_slow(mdtype);
2608 }
2609 }
2610 return free * BytesPerWord;
2611 }
2613 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2614 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2615 return 0;
2616 }
2617 // Don't count the space in the freelists. That space will be
2618 // added to the capacity calculation as needed.
2619 size_t capacity = 0;
2620 ClassLoaderDataGraphMetaspaceIterator iter;
2621 while (iter.repeat()) {
2622 Metaspace* msp = iter.get_next();
2623 if (msp != NULL) {
2624 capacity += msp->capacity_words_slow(mdtype);
2625 }
2626 }
2627 return capacity * BytesPerWord;
2628 }
2630 size_t MetaspaceAux::capacity_bytes_slow() {
2631 #ifdef PRODUCT
2632 // Use allocated_capacity_bytes() in PRODUCT instead of this function.
2633 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2634 #endif
2635 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2636 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2637 assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
2638 err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
2639 " class_capacity + non_class_capacity " SIZE_FORMAT
2640 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2641 allocated_capacity_bytes(), class_capacity + non_class_capacity,
2642 class_capacity, non_class_capacity));
2644 return class_capacity + non_class_capacity;
2645 }
2647 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2648 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2649 return list == NULL ? 0 : list->reserved_bytes();
2650 }
2652 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2653 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2654 return list == NULL ? 0 : list->committed_bytes();
2655 }
2657 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2659 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2660 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2661 if (chunk_manager == NULL) {
2662 return 0;
2663 }
2664 chunk_manager->slow_verify();
2665 return chunk_manager->free_chunks_total_words();
2666 }
2668 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2669 return free_chunks_total_words(mdtype) * BytesPerWord;
2670 }
2672 size_t MetaspaceAux::free_chunks_total_words() {
2673 return free_chunks_total_words(Metaspace::ClassType) +
2674 free_chunks_total_words(Metaspace::NonClassType);
2675 }
2677 size_t MetaspaceAux::free_chunks_total_bytes() {
2678 return free_chunks_total_words() * BytesPerWord;
2679 }
2681 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2682 return Metaspace::get_chunk_manager(mdtype) != NULL;
2683 }
2685 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2686 if (!has_chunk_free_list(mdtype)) {
2687 return MetaspaceChunkFreeListSummary();
2688 }
2690 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2691 return cm->chunk_free_list_summary();
2692 }
2694 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2695 gclog_or_tty->print(", [Metaspace:");
2696 if (PrintGCDetails && Verbose) {
2697 gclog_or_tty->print(" " SIZE_FORMAT
2698 "->" SIZE_FORMAT
2699 "(" SIZE_FORMAT ")",
2700 prev_metadata_used,
2701 allocated_used_bytes(),
2702 reserved_bytes());
2703 } else {
2704 gclog_or_tty->print(" " SIZE_FORMAT "K"
2705 "->" SIZE_FORMAT "K"
2706 "(" SIZE_FORMAT "K)",
2707 prev_metadata_used/K,
2708 allocated_used_bytes()/K,
2709 reserved_bytes()/K);
2710 }
2712 gclog_or_tty->print("]");
2713 }
2715 // This is printed when PrintGCDetails is enabled.
2716 void MetaspaceAux::print_on(outputStream* out) {
2717 Metaspace::MetadataType nct = Metaspace::NonClassType;
2719 out->print_cr(" Metaspace "
2720 "used " SIZE_FORMAT "K, "
2721 "capacity " SIZE_FORMAT "K, "
2722 "committed " SIZE_FORMAT "K, "
2723 "reserved " SIZE_FORMAT "K",
2724 allocated_used_bytes()/K,
2725 allocated_capacity_bytes()/K,
2726 committed_bytes()/K,
2727 reserved_bytes()/K);
2729 if (Metaspace::using_class_space()) {
2730 Metaspace::MetadataType ct = Metaspace::ClassType;
2731 out->print_cr(" class space "
2732 "used " SIZE_FORMAT "K, "
2733 "capacity " SIZE_FORMAT "K, "
2734 "committed " SIZE_FORMAT "K, "
2735 "reserved " SIZE_FORMAT "K",
2736 allocated_used_bytes(ct)/K,
2737 allocated_capacity_bytes(ct)/K,
2738 committed_bytes(ct)/K,
2739 reserved_bytes(ct)/K);
2740 }
2741 }
2743 // Print information for class space and data space separately.
2744 // This is almost the same as above.
2745 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2746 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2747 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2748 size_t used_bytes = used_bytes_slow(mdtype);
2749 size_t free_bytes = free_bytes_slow(mdtype);
2750 size_t used_and_free = used_bytes + free_bytes +
2751 free_chunks_capacity_bytes;
2752 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2753 "K + unused in chunks " SIZE_FORMAT "K + "
2754 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2755 "K capacity in allocated chunks " SIZE_FORMAT "K",
2756 used_bytes / K,
2757 free_bytes / K,
2758 free_chunks_capacity_bytes / K,
2759 used_and_free / K,
2760 capacity_bytes / K);
2761 // Accounting can only be correct if we got the values during a safepoint
2762 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2763 }
2765 // Print total fragmentation for class metaspaces
2766 void MetaspaceAux::print_class_waste(outputStream* out) {
2767 assert(Metaspace::using_class_space(), "class metaspace not used");
2768 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2769 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2770 ClassLoaderDataGraphMetaspaceIterator iter;
2771 while (iter.repeat()) {
2772 Metaspace* msp = iter.get_next();
2773 if (msp != NULL) {
2774 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2775 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2776 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2777 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2778 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2779 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2780 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2781 }
2782 }
2783 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2784 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2785 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2786 "large count " SIZE_FORMAT,
2787 cls_specialized_count, cls_specialized_waste,
2788 cls_small_count, cls_small_waste,
2789 cls_medium_count, cls_medium_waste, cls_humongous_count);
2790 }
2792 // Print total fragmentation for data and class metaspaces separately
2793 void MetaspaceAux::print_waste(outputStream* out) {
2794 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2795 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2797 ClassLoaderDataGraphMetaspaceIterator iter;
2798 while (iter.repeat()) {
2799 Metaspace* msp = iter.get_next();
2800 if (msp != NULL) {
2801 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2802 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2803 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2804 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2805 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2806 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2807 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2808 }
2809 }
2810 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2811 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2812 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2813 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2814 "large count " SIZE_FORMAT,
2815 specialized_count, specialized_waste, small_count,
2816 small_waste, medium_count, medium_waste, humongous_count);
2817 if (Metaspace::using_class_space()) {
2818 print_class_waste(out);
2819 }
2820 }
2822 // Dump global metaspace things from the end of ClassLoaderDataGraph
2823 void MetaspaceAux::dump(outputStream* out) {
2824 out->print_cr("All Metaspace:");
2825 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2826 out->print("class space: "); print_on(out, Metaspace::ClassType);
2827 print_waste(out);
2828 }
2830 void MetaspaceAux::verify_free_chunks() {
2831 Metaspace::chunk_manager_metadata()->verify();
2832 if (Metaspace::using_class_space()) {
2833 Metaspace::chunk_manager_class()->verify();
2834 }
2835 }
2837 void MetaspaceAux::verify_capacity() {
2838 #ifdef ASSERT
2839 size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2840 // For purposes of the running sum of capacity, verify against capacity
2841 size_t capacity_in_use_bytes = capacity_bytes_slow();
2842 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2843 err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2844 " capacity_bytes_slow()" SIZE_FORMAT,
2845 running_sum_capacity_bytes, capacity_in_use_bytes));
2846 for (Metaspace::MetadataType i = Metaspace::ClassType;
2847 i < Metaspace::MetadataTypeCount;
2848 i = (Metaspace::MetadataType)(i + 1)) {
2849 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2850 assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2851 err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2852 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2853 i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2854 }
2855 #endif
2856 }
2858 void MetaspaceAux::verify_used() {
2859 #ifdef ASSERT
2860 size_t running_sum_used_bytes = allocated_used_bytes();
2861 // For purposes of the running sum of used, verify against used
2862 size_t used_in_use_bytes = used_bytes_slow();
2863 assert(allocated_used_bytes() == used_in_use_bytes,
2864 err_msg("allocated_used_bytes() " SIZE_FORMAT
2865 " used_bytes_slow()" SIZE_FORMAT,
2866 allocated_used_bytes(), used_in_use_bytes));
2867 for (Metaspace::MetadataType i = Metaspace::ClassType;
2868 i < Metaspace::MetadataTypeCount;
2869 i = (Metaspace::MetadataType)(i + 1)) {
2870 size_t used_in_use_bytes = used_bytes_slow(i);
2871 assert(allocated_used_bytes(i) == used_in_use_bytes,
2872 err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2873 " used_bytes_slow(%u)" SIZE_FORMAT,
2874 i, allocated_used_bytes(i), i, used_in_use_bytes));
2875 }
2876 #endif
2877 }
2879 void MetaspaceAux::verify_metrics() {
2880 verify_capacity();
2881 verify_used();
2882 }
2885 // Metaspace methods
2887 size_t Metaspace::_first_chunk_word_size = 0;
2888 size_t Metaspace::_first_class_chunk_word_size = 0;
2890 size_t Metaspace::_commit_alignment = 0;
2891 size_t Metaspace::_reserve_alignment = 0;
2893 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2894 initialize(lock, type);
2895 }
2897 Metaspace::~Metaspace() {
2898 delete _vsm;
2899 if (using_class_space()) {
2900 delete _class_vsm;
2901 }
2902 }
2904 VirtualSpaceList* Metaspace::_space_list = NULL;
2905 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2907 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2908 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2910 #define VIRTUALSPACEMULTIPLIER 2
2912 #ifdef _LP64
2913 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2915 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2916 // Figure out the narrow_klass_base and the narrow_klass_shift. The
2917 // narrow_klass_base is the lower of the metaspace base and the cds base
2918 // (if cds is enabled). The narrow_klass_shift depends on the distance
2919 // between the lower base and higher address.
2920 address lower_base;
2921 address higher_address;
2922 if (UseSharedSpaces) {
2923 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2924 (address)(metaspace_base + compressed_class_space_size()));
2925 lower_base = MIN2(metaspace_base, cds_base);
2926 } else {
2927 higher_address = metaspace_base + compressed_class_space_size();
2928 lower_base = metaspace_base;
2930 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2931 // If compressed class space fits in lower 32G, we don't need a base.
2932 if (higher_address <= (address)klass_encoding_max) {
2933 lower_base = 0; // effectively lower base is zero.
2934 }
2935 }
2937 Universe::set_narrow_klass_base(lower_base);
2939 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
2940 Universe::set_narrow_klass_shift(0);
2941 } else {
2942 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
2943 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
2944 }
2945 }
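// A minimal sketch of the compressed-klass arithmetic these choices feed:
// a narrow klass value is (address - base) >> shift and decodes back as
// base + (narrow << shift). With shift 0 the whole class space plus CDS
// must sit within 4GB of the base; a shift of 3 (LogKlassAlignmentInBytes
// on LP64) extends the reach to 32GB. Helper names are hypothetical.
#include <stdint.h>
static uint32_t example_encode_klass(uint64_t addr, uint64_t base, int shift) {
  return (uint32_t)((addr - base) >> shift);
}
static uint64_t example_decode_klass(uint32_t narrow, uint64_t base, int shift) {
  return base + ((uint64_t)narrow << shift);
}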
2947 // Return TRUE if the specified metaspace_base and cds_base are close enough
2948 // to work with compressed klass pointers.
2949 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
2950 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
2951 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2952 address lower_base = MIN2((address)metaspace_base, cds_base);
2953 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2954 (address)(metaspace_base + compressed_class_space_size()));
2955 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
2956 }
2958 // Try to allocate the metaspace at the requested addr.
2959 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
2960 assert(using_class_space(), "called improperly");
2961 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
2962 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
2963 "Metaspace size is too big");
2964 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
2965 assert_is_ptr_aligned(cds_base, _reserve_alignment);
2966 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
2968 // Don't use large pages for the class space.
2969 bool large_pages = false;
2971 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
2972 _reserve_alignment,
2973 large_pages,
2974 requested_addr, 0);
2975 if (!metaspace_rs.is_reserved()) {
2976 if (UseSharedSpaces) {
2977 size_t increment = align_size_up(1*G, _reserve_alignment);
2979 // Keep trying to allocate the metaspace, increasing the requested_addr
2980 // by 1GB each time, until we reach an address that will no longer allow
2981 // use of CDS with compressed klass pointers.
2982 char *addr = requested_addr;
2983 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
2984 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
2985 addr = addr + increment;
2986 metaspace_rs = ReservedSpace(compressed_class_space_size(),
2987 _reserve_alignment, large_pages, addr, 0);
2988 }
2989 }
2991 // If no successful allocation then try to allocate the space anywhere. If
2992 // that fails then OOM doom. At this point we cannot try allocating the
2993 // metaspace as if UseCompressedClassPointers is off because too much
2994 // initialization has happened that depends on UseCompressedClassPointers.
2995 // So, UseCompressedClassPointers cannot be turned off at this point.
2996 if (!metaspace_rs.is_reserved()) {
2997 metaspace_rs = ReservedSpace(compressed_class_space_size(),
2998 _reserve_alignment, large_pages);
2999 if (!metaspace_rs.is_reserved()) {
3000 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
3001 compressed_class_space_size()));
3002 }
3003 }
3004 }
3006 // If we got here then the metaspace got allocated.
3007 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3009 // Verify that we can use shared spaces. Otherwise, turn off CDS.
3010 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3011 FileMapInfo::stop_sharing_and_unmap(
3012 "Could not allocate metaspace at a compatible address");
3013 }
3015 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3016 UseSharedSpaces ? (address)cds_base : 0);
3018 initialize_class_space(metaspace_rs);
3020 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3021 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
3022 Universe::narrow_klass_base(), Universe::narrow_klass_shift());
3023 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
3024 compressed_class_space_size(), metaspace_rs.base(), requested_addr);
3025 }
3026 }
3028 // For UseCompressedClassPointers the class space is reserved above the top of
3029 // the Java heap. The argument passed in is at the base of the compressed space.
3030 void Metaspace::initialize_class_space(ReservedSpace rs) {
3031 // The reserved space size may be bigger because of alignment, especially with UseLargePages
3032 assert(rs.size() >= CompressedClassSpaceSize,
3033 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
3034 assert(using_class_space(), "Must be using class space");
3035 _class_space_list = new VirtualSpaceList(rs);
3036 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3038 if (!_class_space_list->initialization_succeeded()) {
3039 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3040 }
3041 }
3043 #endif
3045 void Metaspace::ergo_initialize() {
3046 if (DumpSharedSpaces) {
3047 // Using large pages when dumping the shared archive is currently not implemented.
3048 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3049 }
3051 size_t page_size = os::vm_page_size();
3052 if (UseLargePages && UseLargePagesInMetaspace) {
3053 page_size = os::large_page_size();
3054 }
3056 _commit_alignment = page_size;
3057 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3059 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
3060 // clobber the record of whether MaxMetaspaceSize was set on the command line.
3061 // This information is needed later to conform to the specification of the
3062 // java.lang.management.MemoryUsage API.
3063 //
3064 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3065 // globals.hpp to the aligned value, but this is not possible, since the
3066 // alignment depends on other flags being parsed.
3067 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3069 if (MetaspaceSize > MaxMetaspaceSize) {
3070 MetaspaceSize = MaxMetaspaceSize;
3071 }
3073 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3075 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3077 if (MetaspaceSize < 256*K) {
3078 vm_exit_during_initialization("Too small initial Metaspace size");
3079 }
3081 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3082 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3084 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3085 set_compressed_class_space_size(CompressedClassSpaceSize);
3086 }
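// A worked sketch of the flag rounding above, assuming that
// align_size_down_bounded() aligns its argument down to a power-of-two
// alignment but never returns less than the alignment itself (an
// assumption about its contract, stated here for illustration):
static size_t example_align_down_bounded(size_t size, size_t alignment) {
  size_t aligned = size & ~(alignment - 1); // round down; power-of-two only
  return (aligned > alignment) ? aligned : alignment;
}
// For example, with a 64K reserve alignment, a MaxMetaspaceSize of 100000
// bytes rounds down to 65536, and any value below 64K is raised to 64K.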
3088 void Metaspace::global_initialize() {
3089 // Initialize the alignment for shared spaces.
3090 int max_alignment = os::vm_page_size();
3091 size_t cds_total = 0;
3093 MetaspaceShared::set_max_alignment(max_alignment);
3095 if (DumpSharedSpaces) {
3096 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
3097 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3098 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
3099 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
3101 // Initialize with the sum of the shared space sizes. The read-only
3102 // and read write metaspace chunks will be allocated out of this and the
3103 // remainder is the misc code and data chunks.
3104 cds_total = FileMapInfo::shared_spaces_size();
3105 cds_total = align_size_up(cds_total, _reserve_alignment);
3106 _space_list = new VirtualSpaceList(cds_total/wordSize);
3107 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3109 if (!_space_list->initialization_succeeded()) {
3110 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3111 }
3113 #ifdef _LP64
3114 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3115 vm_exit_during_initialization("Unable to dump shared archive.",
3116 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3117 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3118 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
3119 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3120 }
3122 // Set the compressed klass pointer base so that decoding of these pointers works
3123 // properly when creating the shared archive.
3124 assert(UseCompressedOops && UseCompressedClassPointers,
3125 "UseCompressedOops and UseCompressedClassPointers must be set");
3126 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3127 if (TraceMetavirtualspaceAllocation && Verbose) {
3128 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3129 _space_list->current_virtual_space()->bottom());
3130 }
3132 Universe::set_narrow_klass_shift(0);
3133 #endif
3135 } else {
3136 // If using shared space, open the file that contains the shared space
3137 // and map in the memory before initializing the rest of metaspace (so
3138 // the addresses don't conflict)
3139 address cds_address = NULL;
3140 if (UseSharedSpaces) {
3141 FileMapInfo* mapinfo = new FileMapInfo();
3142 memset(mapinfo, 0, sizeof(FileMapInfo));
3144 // Open the shared archive file, read and validate the header. If
3145 // initialization fails, shared spaces [UseSharedSpaces] are
3146 // disabled and the file is closed.
3147 // Map in the spaces now as well.
3148 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3149 FileMapInfo::set_current_info(mapinfo);
3150 cds_total = FileMapInfo::shared_spaces_size();
3151 cds_address = (address)mapinfo->region_base(0);
3152 } else {
3153 assert(!mapinfo->is_open() && !UseSharedSpaces,
3154 "archive file not closed or shared spaces not disabled.");
3155 }
3156 }
3158 #ifdef _LP64
3159 // If UseCompressedClassPointers is set then allocate the metaspace area
3160 // above the heap and above the CDS area (if it exists).
3161 if (using_class_space()) {
3162 if (UseSharedSpaces) {
3163 char* cds_end = (char*)(cds_address + cds_total);
3164 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3165 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3166 } else {
3167 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3168 allocate_metaspace_compressed_klass_ptrs(base, 0);
3169 }
3170 }
3171 #endif
3173 // Initialize these before initializing the VirtualSpaceList
3174 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3175 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3176 // Make the first class chunk bigger than a medium chunk so it's not put
3177 // on the medium chunk list. The next chunk will be small and progress
3178 // from there. This size was calculated by running with -version.
3179 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3180 (CompressedClassSpaceSize/BytesPerWord)*2);
3181 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3182 // Arbitrarily set the initial virtual space size to a multiple
3183 // of the boot class loader's first chunk size.
3184 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3185 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
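// Worked example (assuming common 64-bit defaults, for illustration only):
// with InitialBootClassLoaderMetaspaceSize = 4M and 8-byte words,
// _first_chunk_word_size is 512K words; word_size is then
// VIRTUALSPACEMULTIPLIER times that, rounded up to the reserve alignment,
// and becomes the reserved size of the first VirtualSpaceNode.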
3187 // Initialize the list of virtual spaces.
3188 _space_list = new VirtualSpaceList(word_size);
3189 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3191 if (!_space_list->initialization_succeeded()) {
3192 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3193 }
3194 }
3196 MetaspaceGC::initialize();
3197 _tracer = new MetaspaceTracer();
3198 }
3200 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3201 size_t chunk_word_size,
3202 size_t chunk_bunch) {
3203 // Get a chunk from the chunk freelist
3204 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3205 if (chunk != NULL) {
3206 return chunk;
3207 }
3209 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3210 }
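// Note on the lookup order above: recycling a chunk from the ChunkManager
// freelist is tried first, and only on a miss is a fresh chunk carved out of
// the VirtualSpaceList, so chunks freed by class unloading are reused before
// any new virtual space is consumed.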
3212 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3214 assert(space_list() != NULL,
3215 "Metadata VirtualSpaceList has not been initialized");
3216 assert(chunk_manager_metadata() != NULL,
3217 "Metadata ChunkManager has not been initialized");
3219 _vsm = new SpaceManager(NonClassType, lock);
3220 if (_vsm == NULL) {
3221 return;
3222 }
3223 size_t word_size;
3224 size_t class_word_size;
3225 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3227 if (using_class_space()) {
3228 assert(class_space_list() != NULL,
3229 "Class VirtualSpaceList has not been initialized");
3230 assert(chunk_manager_class() != NULL,
3231 "Class ChunkManager has not been initialized");
3233 // Allocate SpaceManager for classes.
3234 _class_vsm = new SpaceManager(ClassType, lock);
3235 if (_class_vsm == NULL) {
3236 return;
3237 }
3238 }
3240 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3242 // Allocate chunk for metadata objects
3243 Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3244 word_size,
3245 vsm()->medium_chunk_bunch());
3246 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3247 if (new_chunk != NULL) {
3248 // Add to this manager's list of chunks in use and current_chunk().
3249 vsm()->add_chunk(new_chunk, true);
3250 }
3252 // Allocate chunk for class metadata objects
3253 if (using_class_space()) {
3254 Metachunk* class_chunk = get_initialization_chunk(ClassType,
3255 class_word_size,
3256 class_vsm()->medium_chunk_bunch());
3257 if (class_chunk != NULL) {
3258 class_vsm()->add_chunk(class_chunk, true);
3259 }
3260 }
3262 _alloc_record_head = NULL;
3263 _alloc_record_tail = NULL;
3264 }
3266 size_t Metaspace::align_word_size_up(size_t word_size) {
3267 size_t byte_size = word_size * wordSize;
3268 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3269 }
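// Illustrative example (hypothetical alignment): align_word_size_up rounds a
// word count up so the corresponding byte size meets ReservedSpace's
// allocation alignment. With 8-byte words and a 4K allocation alignment,
// align_word_size_up(500) == 512, since 500 words = 4000 bytes, which rounds
// up to 4096 bytes = 512 words.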
3271 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3272 // DumpSharedSpaces doesn't use the class metadata area (yet).
3273 // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3274 if (is_class_space_allocation(mdtype)) {
3275 return class_vsm()->allocate(word_size);
3276 } else {
3277 return vsm()->allocate(word_size);
3278 }
3279 }
3281 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3282 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
3283 assert(delta_bytes > 0, "Must be");
3285 size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
3287 // capacity_until_GC might be updated concurrently; the previous value must be derived from the returned result.
3288 size_t before_inc = after_inc - delta_bytes;
3290 tracer()->report_gc_threshold(before_inc, after_inc,
3291 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3292 if (PrintGCDetails && Verbose) {
3293 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3294 " to " SIZE_FORMAT, before_inc, after_inc);
3295 }
3297 return allocate(word_size, mdtype);
3298 }
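// Note on the subtraction above: inc_capacity_until_GC performs an atomic
// add and returns the new value, so the pre-increment value can only be
// recovered as after_inc - delta_bytes; re-reading the field separately
// could observe another thread's concurrent increment.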
3300 // Space allocated in the Metaspace. This may
3301 // span several metadata virtual spaces.
3302 char* Metaspace::bottom() const {
3303 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3304 return (char*)vsm()->current_chunk()->bottom();
3305 }
3307 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3308 if (mdtype == ClassType) {
3309 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3310 } else {
3311 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
3312 }
3313 }
3315 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3316 if (mdtype == ClassType) {
3317 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3318 } else {
3319 return vsm()->sum_free_in_chunks_in_use();
3320 }
3321 }
3323 // Space capacity in the Metaspace. It includes
3324 // space in the list of chunks from which allocations
3325 // have been made. It does not include space in the global
3326 // freelist or space available in the dictionary, since
3327 // that space is already counted in some chunk.
3328 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3329 if (mdtype == ClassType) {
3330 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3331 } else {
3332 return vsm()->sum_capacity_in_chunks_in_use();
3333 }
3334 }
3336 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3337 return used_words_slow(mdtype) * BytesPerWord;
3338 }
3340 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3341 return capacity_words_slow(mdtype) * BytesPerWord;
3342 }
3344 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3345 if (SafepointSynchronize::is_at_safepoint()) {
3346 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
3347 // Don't take Heap_lock
3348 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3349 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3350 // Dark matter. Too small for dictionary.
3351 #ifdef ASSERT
3352 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3353 #endif
3354 return;
3355 }
3356 if (is_class && using_class_space()) {
3357 class_vsm()->deallocate(ptr, word_size);
3358 } else {
3359 vsm()->deallocate(ptr, word_size);
3360 }
3361 } else {
3362 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3364 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
3365 // Dark matter. Too small for dictionary.
3366 #ifdef ASSERT
3367 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3368 #endif
3369 return;
3370 }
3371 if (is_class && using_class_space()) {
3372 class_vsm()->deallocate(ptr, word_size);
3373 } else {
3374 vsm()->deallocate(ptr, word_size);
3375 }
3376 }
3377 }
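// Note on the "dark matter" branches above: blocks smaller than
// TreeChunk<Metablock, FreeList<Metablock> >::min_size() cannot carry the
// tree-node header needed by the free-block dictionary, so they are simply
// abandoned; in debug builds they are filled with the 0xf5 pattern so that
// stale uses of freed metadata stand out.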
3380 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3381 bool read_only, MetaspaceObj::Type type, TRAPS) {
3382 if (HAS_PENDING_EXCEPTION) {
3383 assert(false, "Should not allocate with exception pending");
3384 return NULL; // caller does a CHECK_NULL too
3385 }
3387 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3388 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3390 // Allocate in metaspaces without taking a lock, because doing so deadlocks
3391 // with the SymbolTable_lock. Dumping is single-threaded for now. We'll have
3392 // to revisit this for application class data sharing.
3393 if (DumpSharedSpaces) {
3394 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3395 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3396 MetaWord* result = space->allocate(word_size, NonClassType);
3397 if (result == NULL) {
3398 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3399 }
3401 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3403 // Zero initialize.
3404 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3406 return result;
3407 }
3409 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3411 // Try to allocate metadata.
3412 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3414 if (result == NULL) {
3415 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3417 // Allocation failed.
3418 if (is_init_completed()) {
3419 // Only start a GC if the bootstrapping has completed.
3421 // Try to clean out some memory and retry.
3422 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3423 loader_data, word_size, mdtype);
3424 }
3425 }
3427 if (result == NULL) {
3428 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3429 }
3431 // Zero initialize.
3432 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3434 return result;
3435 }
3437 size_t Metaspace::class_chunk_size(size_t word_size) {
3438 assert(using_class_space(), "Has to use class space");
3439 return class_vsm()->calc_chunk_size(word_size);
3440 }
3442 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3443 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3445 // If result is still null, we are out of memory.
3446 if (Verbose && TraceMetadataChunkAllocation) {
3447 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3448 SIZE_FORMAT, word_size);
3449 if (loader_data->metaspace_or_null() != NULL) {
3450 loader_data->dump(gclog_or_tty);
3451 }
3452 MetaspaceAux::dump(gclog_or_tty);
3453 }
3455 bool out_of_compressed_class_space = false;
3456 if (is_class_space_allocation(mdtype)) {
3457 Metaspace* metaspace = loader_data->metaspace_non_null();
3458 out_of_compressed_class_space =
3459 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3460 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3461 CompressedClassSpaceSize;
3462 }
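// Worked example (hypothetical numbers): with CompressedClassSpaceSize = 1G,
// 900M already committed in the class space, and a failed request whose
// chunk would add another 200M, 900M + 200M > 1G, so the failure is
// attributed to "Compressed class space" rather than "Metaspace".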
3464 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3465 const char* space_string = out_of_compressed_class_space ?
3466 "Compressed class space" : "Metaspace";
3468 report_java_out_of_memory(space_string);
3470 if (JvmtiExport::should_post_resource_exhausted()) {
3471 JvmtiExport::post_resource_exhausted(
3472 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3473 space_string);
3474 }
3476 if (!is_init_completed()) {
3477 vm_exit_during_initialization("OutOfMemoryError", space_string);
3478 }
3480 if (out_of_compressed_class_space) {
3481 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3482 } else {
3483 THROW_OOP(Universe::out_of_memory_error_metaspace());
3484 }
3485 }
3487 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3488 switch (mdtype) {
3489 case Metaspace::ClassType: return "Class";
3490 case Metaspace::NonClassType: return "Metadata";
3491 default:
3492 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3493 return NULL;
3494 }
3495 }
3497 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3498 assert(DumpSharedSpaces, "sanity");
3500 AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3501 if (_alloc_record_head == NULL) {
3502 _alloc_record_head = _alloc_record_tail = rec;
3503 } else {
3504 _alloc_record_tail->_next = rec;
3505 _alloc_record_tail = rec;
3506 }
3507 }
3509 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3510 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3512 address last_addr = (address)bottom();
3514 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3515 address ptr = rec->_ptr;
3516 if (last_addr < ptr) {
3517 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3518 }
3519 closure->doit(ptr, rec->_type, rec->_byte_size);
3520 last_addr = ptr + rec->_byte_size;
3521 }
3523 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3524 if (last_addr < top) {
3525 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3526 }
3527 }
3529 void Metaspace::purge(MetadataType mdtype) {
3530 get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3531 }
3533 void Metaspace::purge() {
3534 MutexLockerEx cl(SpaceManager::expand_lock(),
3535 Mutex::_no_safepoint_check_flag);
3536 purge(NonClassType);
3537 if (using_class_space()) {
3538 purge(ClassType);
3539 }
3540 }
3542 void Metaspace::print_on(outputStream* out) const {
3543 // Print both class virtual space counts and metaspace.
3544 if (Verbose) {
3545 vsm()->print_on(out);
3546 if (using_class_space()) {
3547 class_vsm()->print_on(out);
3548 }
3549 }
3550 }
3552 bool Metaspace::contains(const void* ptr) {
3553 if (vsm()->contains(ptr)) return true;
3554 if (using_class_space()) {
3555 return class_vsm()->contains(ptr);
3556 }
3557 return false;
3558 }
3560 void Metaspace::verify() {
3561 vsm()->verify();
3562 if (using_class_space()) {
3563 class_vsm()->verify();
3564 }
3565 }
3567 void Metaspace::dump(outputStream* const out) const {
3568 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3569 vsm()->dump(out);
3570 if (using_class_space()) {
3571 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3572 class_vsm()->dump(out);
3573 }
3574 }
3576 /////////////// Unit tests ///////////////
3578 #ifndef PRODUCT
3580 class TestMetaspaceAuxTest : AllStatic {
3581 public:
3582 static void test_reserved() {
3583 size_t reserved = MetaspaceAux::reserved_bytes();
3585 assert(reserved > 0, "assert");
3587 size_t committed = MetaspaceAux::committed_bytes();
3588 assert(committed <= reserved, "assert");
3590 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3591 assert(reserved_metadata > 0, "assert");
3592 assert(reserved_metadata <= reserved, "assert");
3594 if (UseCompressedClassPointers) {
3595 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3596 assert(reserved_class > 0, "assert");
3597 assert(reserved_class < reserved, "assert");
3598 }
3599 }
3601 static void test_committed() {
3602 size_t committed = MetaspaceAux::committed_bytes();
3604 assert(committed > 0, "assert");
3606 size_t reserved = MetaspaceAux::reserved_bytes();
3607 assert(committed <= reserved, "assert");
3609 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3610 assert(committed_metadata > 0, "assert");
3611 assert(committed_metadata <= committed, "assert");
3613 if (UseCompressedClassPointers) {
3614 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3615 assert(committed_class > 0, "assert");
3616 assert(committed_class < committed, "assert");
3617 }
3618 }
3620 static void test_virtual_space_list_large_chunk() {
3621 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3622 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3623 // Use a size larger than VirtualSpaceSize (256k), and add a page so that
3624 // it is _not_ vm_allocation_granularity aligned on Windows.
3625 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3626 large_size += (os::vm_page_size()/BytesPerWord);
3627 vs_list->get_new_chunk(large_size, large_size, 0);
3628 }
3630 static void test() {
3631 test_reserved();
3632 test_committed();
3633 test_virtual_space_list_large_chunk();
3634 }
3635 };
3637 void TestMetaspaceAux_test() {
3638 TestMetaspaceAuxTest::test();
3639 }
3641 class TestVirtualSpaceNodeTest {
3642 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3643 size_t& num_small_chunks,
3644 size_t& num_specialized_chunks) {
3645 num_medium_chunks = words_left / MediumChunk;
3646 words_left = words_left % MediumChunk;
3648 num_small_chunks = words_left / SmallChunk;
3649 words_left = words_left % SmallChunk;
3650 // how many specialized chunks can we get?
3651 num_specialized_chunks = words_left / SpecializedChunk;
3652 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3653 }
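// Worked example: with the non-class chunk sizes used in these tests
// (MediumChunk = 8K words, SmallChunk = 512 words, SpecializedChunk = 128
// words), words_left = 9216 chunks up as 1 medium chunk (8192 words) plus
// 2 small chunks (1024 words) and 0 specialized chunks, and the assert
// holds because 9216 is a multiple of 128.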
3655 public:
3656 static void test() {
3657 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3658 const size_t vsn_test_size_words = MediumChunk * 4;
3659 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3661 // The chunk sizes must be multiples of each other, or this will fail
3662 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3663 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3665 { // No committed memory in VSN
3666 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3667 VirtualSpaceNode vsn(vsn_test_size_bytes);
3668 vsn.initialize();
3669 vsn.retire(&cm);
3670 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3671 }
3673 { // All of VSN is committed, half is used by chunks
3674 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3675 VirtualSpaceNode vsn(vsn_test_size_bytes);
3676 vsn.initialize();
3677 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3678 vsn.get_chunk_vs(MediumChunk);
3679 vsn.get_chunk_vs(MediumChunk);
3680 vsn.retire(&cm);
3681 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3682 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3683 }
3685 { // 4 pages of VSN is committed, some is used by chunks
3686 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3687 VirtualSpaceNode vsn(vsn_test_size_bytes);
3688 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3689 assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3690 vsn.initialize();
3691 vsn.expand_by(page_chunks, page_chunks);
3692 vsn.get_chunk_vs(SmallChunk);
3693 vsn.get_chunk_vs(SpecializedChunk);
3694 vsn.retire(&cm);
3696 // committed - used = words left to retire
3697 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3699 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3700 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3702 assert(num_medium_chunks == 0, "should not get any medium chunks");
3703 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3704 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3705 }
3707 { // Half of VSN is committed, a humongous chunk is used
3708 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3709 VirtualSpaceNode vsn(vsn_test_size_bytes);
3710 vsn.initialize();
3711 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3712 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3713 vsn.retire(&cm);
3715 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3716 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3717 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3719 assert(num_medium_chunks == 0, "should not get any medium chunks");
3720 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3721 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3722 }
3724 }
3726 #define assert_is_available_positive(word_size) \
3727 assert(vsn.is_available(word_size), \
3728 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3729 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3730 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
3732 #define assert_is_available_negative(word_size) \
3733 assert(!vsn.is_available(word_size), \
3734 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3735 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3736 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
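// Note: these are macros rather than helper functions so that #word_size
// stringifies the caller's expression in the failure message and so that
// `vsn` is picked up from the enclosing test scope.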
3738 static void test_is_available_positive() {
3739 // Reserve some memory.
3740 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3741 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3743 // Commit some memory.
3744 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3745 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3746 assert(expanded, "Failed to commit");
3748 // Check that is_available accepts the committed size.
3749 assert_is_available_positive(commit_word_size);
3751 // Check that is_available accepts half the committed size.
3752 size_t expand_word_size = commit_word_size / 2;
3753 assert_is_available_positive(expand_word_size);
3754 }
3756 static void test_is_available_negative() {
3757 // Reserve some memory.
3758 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3759 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3761 // Commit some memory.
3762 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3763 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3764 assert(expanded, "Failed to commit");
3766 // Check that is_available doesn't accept a too large size.
3767 size_t two_times_commit_word_size = commit_word_size * 2;
3768 assert_is_available_negative(two_times_commit_word_size);
3769 }
3771 static void test_is_available_overflow() {
3772 // Reserve some memory.
3773 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3774 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3776 // Commit some memory.
3777 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3778 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3779 assert(expanded, "Failed to commit");
3781 // Calculate a size that will overflow the virtual space size.
3782 void* virtual_space_max = (void*)(uintptr_t)-1;
3783 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
3784 size_t overflow_size = bottom_to_max + BytesPerWord;
3785 size_t overflow_word_size = overflow_size / BytesPerWord;
3787 // Check that is_available can handle the overflow.
3788 assert_is_available_negative(overflow_word_size);
3789 }
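// Note on the overflow arithmetic above: bottom_to_max is the distance from
// the node's bottom to the top of the address space, so asking for
// bottom_to_max + BytesPerWord bytes is one word more than could ever fit;
// is_available must reject it without its internal pointer arithmetic
// wrapping around.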
3791 static void test_is_available() {
3792 TestVirtualSpaceNodeTest::test_is_available_positive();
3793 TestVirtualSpaceNodeTest::test_is_available_negative();
3794 TestVirtualSpaceNodeTest::test_is_available_overflow();
3795 }
3796 };
3798 void TestVirtualSpaceNode_test() {
3799 TestVirtualSpaceNodeTest::test();
3800 TestVirtualSpaceNodeTest::test_is_available();
3801 }
3803 #endif