Wed, 28 Nov 2012 17:50:21 -0500
8003635: NPG: AsynchGetCallTrace broken by Method* virtual call
Summary: Make metaspace::contains lock-free so it can be used to check whether something is in the metaspace; also compare Method* with the vtbl pointer.
Reviewed-by: dholmes, sspitsyn, dcubed, jmasa
1 /*
2 * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "gc_interface/collectedHeap.hpp"
26 #include "memory/binaryTreeDictionary.hpp"
27 #include "memory/freeList.hpp"
28 #include "memory/collectorPolicy.hpp"
29 #include "memory/filemap.hpp"
30 #include "memory/freeList.hpp"
31 #include "memory/metablock.hpp"
32 #include "memory/metachunk.hpp"
33 #include "memory/metaspace.hpp"
34 #include "memory/metaspaceShared.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp"
37 #include "runtime/globals.hpp"
38 #include "runtime/mutex.hpp"
39 #include "runtime/orderAccess.hpp"
40 #include "services/memTracker.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/debug.hpp"
// Dictionary types used for the metaspace free lists: one keyed by
// Metablock (freed allocations) and one by Metachunk (humongous chunks).
typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;

// Define this macro to enable slow integrity checking of
// the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 64 * K;
// Fill patterns written over freshly carved / freed metadata (debug aid).
const size_t metadata_chunk_initialize = 0xf7f7f7f7;
const size_t metadata_deallocate = 0xf5f5f5f5;

// Most recently allocated metadata word (debugging aid).
MetaWord* last_allocated = 0;
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  SmallIndex = 0,
  MediumIndex = 1,
  HumongousIndex = 2,
  NumberOfFreeLists = 2,   // small + medium have list heads; humongous uses a dictionary
  NumberOfInUseLists = 3   // all three size classes can be in use
};

// Advance to the next (larger) chunk size class.
// Caller must not pass the last in-use index.
static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}
// Originally _capacity_until_GC was set to MetaspaceSize here but
// the default MetaspaceSize before argument processing was being
// used which was not the desired value.  See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;
// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunk are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspaces from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk.  The current
// chunk is the chunk from which allocations are done.  Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
//
// Future modification
//
// The Metachunk can conceivable be replaced by the Chunk in
// allocation.hpp.  Note that the latter Chunk is the space for
// allocation (allocations from the chunk are out of the space in
// the Chunk after the header for the Chunk) where as Metachunks
// point to space in a VirtualSpace.  To replace Metachunks with
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.

// Smallest block the dictionary will manage; anything smaller is dark matter.
size_t Metablock::_min_block_byte_size = sizeof(Metablock);
#ifdef ASSERT
// Debug builds account for the aligned Metablock header (in words).
size_t Metablock::_overhead =
  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
#else
size_t Metablock::_overhead = 0;
#endif
// Pointer to list of Metachunks.
class ChunkList VALUE_OBJ_CLASS_SPEC {
  // List of free chunks
  Metachunk* _head;

 public:
  // Constructor
  ChunkList() : _head(NULL) {}

  // Accessors
  Metachunk* head() { return _head; }
  void set_head(Metachunk* v) { _head = v; }

  // Link at head of the list
  void add_at_head(Metachunk* head, Metachunk* tail);
  void add_at_head(Metachunk* head);

  // O(n) walks over the whole list, used for accounting/verification.
  size_t sum_list_size();
  size_t sum_list_count();
  size_t sum_list_capacity();
};
135 // Manages the global free lists of chunks.
136 // Has three lists of free chunks, and a total size and
137 // count that includes all three
139 class ChunkManager VALUE_OBJ_CLASS_SPEC {
141 // Free list of chunks of different sizes.
142 // SmallChunk
143 // MediumChunk
144 // HumongousChunk
145 ChunkList _free_chunks[NumberOfFreeLists];
147 // HumongousChunk
148 ChunkTreeDictionary _humongous_dictionary;
150 // ChunkManager in all lists of this type
151 size_t _free_chunks_total;
152 size_t _free_chunks_count;
154 void dec_free_chunks_total(size_t v) {
155 assert(_free_chunks_count > 0 &&
156 _free_chunks_total > 0,
157 "About to go negative");
158 Atomic::add_ptr(-1, &_free_chunks_count);
159 jlong minus_v = (jlong) - (jlong) v;
160 Atomic::add_ptr(minus_v, &_free_chunks_total);
161 }
163 // Debug support
165 size_t sum_free_chunks();
166 size_t sum_free_chunks_count();
168 void locked_verify_free_chunks_total();
169 void slow_locked_verify_free_chunks_total() {
170 if (metaspace_slow_verify) {
171 locked_verify_free_chunks_total();
172 }
173 }
174 void locked_verify_free_chunks_count();
175 void slow_locked_verify_free_chunks_count() {
176 if (metaspace_slow_verify) {
177 locked_verify_free_chunks_count();
178 }
179 }
180 void verify_free_chunks_count();
182 public:
184 ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
186 // add or delete (return) a chunk to the global freelist.
187 Metachunk* chunk_freelist_allocate(size_t word_size);
188 void chunk_freelist_deallocate(Metachunk* chunk);
190 // Total of the space in the free chunks list
191 size_t free_chunks_total();
192 size_t free_chunks_total_in_bytes();
194 // Number of chunks in the free chunks list
195 size_t free_chunks_count();
197 void inc_free_chunks_total(size_t v, size_t count = 1) {
198 Atomic::add_ptr(count, &_free_chunks_count);
199 Atomic::add_ptr(v, &_free_chunks_total);
200 }
201 ChunkList* free_medium_chunks() { return &_free_chunks[1]; }
202 ChunkList* free_small_chunks() { return &_free_chunks[0]; }
203 ChunkTreeDictionary* humongous_dictionary() {
204 return &_humongous_dictionary;
205 }
207 ChunkList* free_chunks(ChunkIndex index);
209 // Returns the list for the given chunk word size.
210 ChunkList* find_free_chunks_list(size_t word_size);
212 // Add and remove from a list by size. Selects
213 // list based on size of chunk.
214 void free_chunks_put(Metachunk* chuck);
215 Metachunk* free_chunks_get(size_t chunk_word_size);
217 // Debug support
218 void verify();
219 void slow_verify() {
220 if (metaspace_slow_verify) {
221 verify();
222 }
223 }
224 void locked_verify();
225 void slow_locked_verify() {
226 if (metaspace_slow_verify) {
227 locked_verify();
228 }
229 }
230 void verify_free_chunks_total();
232 void locked_print_free_chunks(outputStream* st);
233 void locked_print_sum_free_chunks(outputStream* st);
235 void print_on(outputStream* st);
236 };
// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  // Lazily created in return_block(); NULL until the first deallocation.
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  // Total size held in the dictionary; 0 when nothing has been returned yet.
  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
267 class VirtualSpaceNode : public CHeapObj<mtClass> {
268 friend class VirtualSpaceList;
270 // Link to next VirtualSpaceNode
271 VirtualSpaceNode* _next;
273 // total in the VirtualSpace
274 MemRegion _reserved;
275 ReservedSpace _rs;
276 VirtualSpace _virtual_space;
277 MetaWord* _top;
279 // Convenience functions for logical bottom and end
280 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
281 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
283 // Convenience functions to access the _virtual_space
284 char* low() const { return virtual_space()->low(); }
285 char* high() const { return virtual_space()->high(); }
287 public:
289 VirtualSpaceNode(size_t byte_size);
290 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
291 ~VirtualSpaceNode();
293 // address of next available space in _virtual_space;
294 // Accessors
295 VirtualSpaceNode* next() { return _next; }
296 void set_next(VirtualSpaceNode* v) { _next = v; }
298 void set_reserved(MemRegion const v) { _reserved = v; }
299 void set_top(MetaWord* v) { _top = v; }
301 // Accessors
302 MemRegion* reserved() { return &_reserved; }
303 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
305 // Returns true if "word_size" is available in the virtual space
306 bool is_available(size_t word_size) { return _top + word_size <= end(); }
308 MetaWord* top() const { return _top; }
309 void inc_top(size_t word_size) { _top += word_size; }
311 // used and capacity in this single entry in the list
312 size_t used_words_in_vs() const;
313 size_t capacity_words_in_vs() const;
315 bool initialize();
317 // get space from the virtual space
318 Metachunk* take_from_committed(size_t chunk_word_size);
320 // Allocate a chunk from the virtual space and return it.
321 Metachunk* get_chunk_vs(size_t chunk_word_size);
322 Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
324 // Expands/shrinks the committed space in a virtual space. Delegates
325 // to Virtualspace
326 bool expand_by(size_t words, bool pre_touch = false);
327 bool shrink_by(size_t words);
329 // Debug support
330 static void verify_virtual_space_total();
331 static void verify_virtual_space_count();
332 void mangle();
334 void print_on(outputStream* st) const;
335 };
337 // byte_size is the size of the associated virtualspace.
338 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
339 // This allocates memory with mmap. For DumpSharedspaces, allocate the
340 // space at low memory so that other shared images don't conflict.
341 // This is the same address as memory needed for UseCompressedOops but
342 // compressed oops don't work with CDS (offsets in metadata are wrong), so
343 // borrow the same address.
344 if (DumpSharedSpaces) {
345 char* shared_base = (char*)HeapBaseMinAddress;
346 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
347 if (_rs.is_reserved()) {
348 assert(_rs.base() == shared_base, "should match");
349 } else {
350 // If we are dumping the heap, then allocate a wasted block of address
351 // space in order to push the heap to a lower address. This extra
352 // address range allows for other (or larger) libraries to be loaded
353 // without them occupying the space required for the shared spaces.
354 uintx reserved = 0;
355 uintx block_size = 64*1024*1024;
356 while (reserved < SharedDummyBlockSize) {
357 char* dummy = os::reserve_memory(block_size);
358 reserved += block_size;
359 }
360 _rs = ReservedSpace(byte_size);
361 }
362 MetaspaceShared::set_shared_rs(&_rs);
363 } else {
364 _rs = ReservedSpace(byte_size);
365 }
367 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
368 }
// List of VirtualSpaces for metadata allocation.
// It has a _next link for singly linked list and a MemRegion
// for total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager _chunk_manager;

  // Can this virtual list allocate >1 spaces?  Also, used to determine
  // whether to allocate unlimited small chunks in this virtual space
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }

  // Sum of space in all virtual spaces and number of virtual spaces
  size_t _virtual_space_total;
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  // Append a node and update the totals; see grow_vs() for the caller.
  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first virtualspace.
  void initialize(size_t word_size);

  size_t virtual_space_total() { return _virtual_space_total; }
  void inc_virtual_space_total(size_t v) {
    Atomic::add_ptr(v, &_virtual_space_total);
  }

  size_t virtual_space_count() { return _virtual_space_count; }
  void inc_virtual_space_count() {
    Atomic::inc_ptr(&_virtual_space_count);
  }

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  // Walks the list from a given head; get_next() returns the current node
  // and advances, so a NULL-safe iteration idiom.
  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
// All-static holder for the metaspace stress-testing counters used by the
// deallocate-a-lot / allocation-fail-a-lot debug modes.
class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    // Note: resets to 1, not 0 -- preserved from the original code.
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;
// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:
  // protects allocations and contains.
  Mutex* const _lock;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;
  bool has_small_chunk_limit() { return !vs_list()->is_class(); }

  // Sum of all space in allocated chunks
  size_t _allocation_total;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char* _expand_lock_name;
  static const int _expand_lock_rank;
  static Mutex* const _expand_lock;

  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  // Cast away const: the freelist is mutated even from const contexts.
  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  VirtualSpaceList* vs_list() const { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);

  Mutex* lock() const { return _lock; }

 public:
  SpaceManager(Mutex* lock, VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkSizes {    // in words.
    SmallChunk = 512,
    MediumChunk = 8 * K,
    MediumChunkBunch = 4 * MediumChunk
  };

  // Accessors
  size_t allocation_total() const { return _allocation_total; }
  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
  static bool is_humongous(size_t word_size) { return word_size > MediumChunk; }

  static Mutex* expand_lock() { return _expand_lock; }

  // Accounting walks over the in-use chunk lists.
  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
#ifdef ASSERT
  void mangle_freed_chunks();
  void verify_allocation_total();
#endif
};
uint const SpaceManager::_small_chunk_limit = 4;

// The expand lock serializes virtualspace and chunk expansions; allocated
// once at VM startup and intentionally never freed.
const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

// Words consumed by the Metachunk header itself.
size_t Metachunk::_overhead =
  Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
646 // New blocks returned by the Metaspace are zero initialized.
647 // We should fix the constructors to not assume this instead.
648 Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
649 if (p == NULL) {
650 return NULL;
651 }
653 Metablock* result = (Metablock*) p;
655 // Clear the memory
656 Copy::fill_to_aligned_words((HeapWord*)result, word_size);
657 #ifdef ASSERT
658 result->set_word_size(word_size);
659 #endif
660 return result;
661 }
// Metachunk methods

// Lay a Metachunk header over the first _overhead words at ptr and set up
// bottom/top/end so bump-pointer allocation starts just past the header.
Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
  // Set bottom, top, and end.  Allow space for the Metachunk itself
  Metachunk* chunk = (Metachunk*) ptr;

  MetaWord* chunk_bottom = ptr + _overhead;
  chunk->set_bottom(ptr);
  chunk->set_top(chunk_bottom);
  MetaWord* chunk_end = ptr + word_size;
  // NOTE(review): the assert message reads oddly; the condition actually
  // checks that the chunk is large enough to hold more than its header.
  assert(chunk_end > chunk_bottom, "Chunk must be too small");
  chunk->set_end(chunk_end);
  chunk->set_next(NULL);
  chunk->set_word_size(word_size);
#ifdef ASSERT
  // Mangle the payload (everything past the header) in debug builds.
  size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
  Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
#endif
  return chunk;
}
685 MetaWord* Metachunk::allocate(size_t word_size) {
686 MetaWord* result = NULL;
687 // If available, bump the pointer to allocate.
688 if (free_word_size() >= word_size) {
689 result = _top;
690 _top = _top + word_size;
691 }
692 return result;
693 }
// _bottom points to the start of the chunk including the overhead.
size_t Metachunk::used_word_size() {
  return pointer_delta(_top, _bottom, sizeof(MetaWord));
}

// Words still available for bump-pointer allocation.
size_t Metachunk::free_word_size() {
  return pointer_delta(_end, _top, sizeof(MetaWord));
}

// Total words in the chunk, including the header overhead.
size_t Metachunk::capacity_word_size() {
  return pointer_delta(_end, _bottom, sizeof(MetaWord));
}
// Print the chunk's bounds and size (debugging aid).
void Metachunk::print_on(outputStream* st) const {
  st->print_cr("Metachunk:"
               " bottom " PTR_FORMAT " top " PTR_FORMAT
               " end " PTR_FORMAT " size " SIZE_FORMAT,
               bottom(), top(), end(), word_size());
}
#ifdef ASSERT
// Fill the chunk payload with the debug pattern; the header is skipped so
// the list links that live in it survive.
void Metachunk::mangle() {
  // Mangle the payload of the chunk and not the links that
  // maintain list of chunks.
  HeapWord* start = (HeapWord*)(bottom() + overhead());
  size_t word_size = capacity_word_size() - overhead();
  Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
}
#endif // ASSERT
725 void Metachunk::verify() {
726 #ifdef ASSERT
727 // Cannot walk through the blocks unless the blocks have
728 // headers with sizes.
729 assert(_bottom <= _top &&
730 _top <= _end,
731 "Chunk has been smashed");
732 assert(SpaceManager::is_humongous(_word_size) ||
733 _word_size == SpaceManager::MediumChunk ||
734 _word_size == SpaceManager::SmallChunk,
735 "Chunk size is wrong");
736 #endif
737 return;
738 }
// BlockFreelist methods

// The backing dictionary is created lazily in return_block().
BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}
753 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
754 Metablock* block = (Metablock*) p;
755 block->set_word_size(word_size);
756 block->set_prev(NULL);
757 block->set_next(NULL);
759 return block;
760 }
762 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
763 Metablock* free_chunk = initialize_free_chunk(p, word_size);
764 if (dictionary() == NULL) {
765 _dictionary = new BlockTreeDictionary();
766 }
767 dictionary()->return_chunk(free_chunk);
768 }
770 MetaWord* BlockFreelist::get_block(size_t word_size) {
771 if (dictionary() == NULL) {
772 return NULL;
773 }
775 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
776 // Dark matter. Too small for dictionary.
777 return NULL;
778 }
780 Metablock* free_block =
781 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
782 if (free_block == NULL) {
783 return NULL;
784 }
786 return (MetaWord*) free_block;
787 }
789 void BlockFreelist::print_on(outputStream* st) const {
790 if (dictionary() == NULL) {
791 return;
792 }
793 dictionary()->print_free_lists(st);
794 }
// VirtualSpaceNode methods

// Releasing the ReservedSpace unmaps the whole region.
VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
}

// Words handed out (bump-pointer) so far in this virtual space.
size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}
812 // Allocates the chunk from the virtual space only.
813 // This interface is also used internally for debugging. Not all
814 // chunks removed here are necessarily used for allocation.
815 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
816 // Bottom of the new chunk
817 MetaWord* chunk_limit = top();
818 assert(chunk_limit != NULL, "Not safe to call this method");
820 if (!is_available(chunk_word_size)) {
821 if (TraceMetadataChunkAllocation) {
822 tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
823 // Dump some information about the virtual space that is nearly full
824 print_on(tty);
825 }
826 return NULL;
827 }
829 // Take the space (bump top on the current virtual space).
830 inc_top(chunk_word_size);
832 // Point the chunk at the space
833 Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
834 return result;
835 }
// Expand the virtual space (commit more of the reserved space).
// Returns false if the commit fails (the trace output is only produced
// on failure).
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result = virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print();
  }
  return result;
}
// Shrink the virtual space (uncommit part of the committed space).
// Always returns true; the underlying shrink reports no failure here.
bool VirtualSpaceNode::shrink_by(size_t words) {
  size_t bytes = words * BytesPerWord;
  virtual_space()->shrink_by(bytes);
  return true;
}
857 // Add another chunk to the chunk list.
859 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
860 assert_lock_strong(SpaceManager::expand_lock());
861 Metachunk* result = NULL;
863 return take_from_committed(chunk_word_size);
864 }
// Like get_chunk_vs(), but on failure commits more of the reserved space
// (rounded up to a page) and retries once.
Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);

  if (new_chunk == NULL) {
    // Only a small part of the virtualspace is committed when first
    // allocated so committing more here can be expected.
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
                                                      page_size_words);
    // NOTE(review): expand_by()'s result is deliberately ignored; a commit
    // failure is detected by the retry below returning NULL.
    expand_by(aligned_expand_vs_by_words, false);
    new_chunk = get_chunk_vs(chunk_word_size);
  }
  return new_chunk;
}
// Set up the VirtualSpace over the reservation, committing only the first
// page; the rest is committed on demand via expand_by().
// Returns false if the reservation or the commit failed.
bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // Commit only 1 page instead of the whole reserved space _rs.size()
  size_t committed_byte_size = os::vm_page_size();
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}
// Dump this node's usage (tracing/debugging aid).
// NOTE(review): "used * 100 / capacity" divides by zero for a node with no
// committed space; callers appear to print only initialized nodes -- verify.
void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K, used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}
// Overwrite the entire committed region with a filler pattern (debug aid).
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
926 // VirtualSpaceList methods
927 // Space allocated from the VirtualSpace
929 VirtualSpaceList::~VirtualSpaceList() {
930 VirtualSpaceListIterator iter(virtual_space_list());
931 while (iter.repeat()) {
932 VirtualSpaceNode* vsl = iter.get_next();
933 delete vsl;
934 }
935 }
// Words actually in use for metadata: everything carved out of the virtual
// spaces minus what currently sits on the global free chunk lists.
size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each virtualspace
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
    err_msg("Total in free chunks " SIZE_FORMAT
            " greater than total from virtual_spaces " SIZE_FORMAT,
            allocated_by_vs, chunk_manager()->free_chunks_total()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total();
  return used;
}
954 // Space available in all MetadataVirtualspaces allocated
955 // for metadata. This is the upper limit on the capacity
956 // of chunks allocated out of all the MetadataVirtualspaces.
957 size_t VirtualSpaceList::capacity_words_sum() {
958 size_t capacity = 0;
959 VirtualSpaceListIterator iter(virtual_space_list());
960 while (iter.repeat()) {
961 VirtualSpaceNode* vsl = iter.get_next();
962 capacity += vsl->capacity_words_in_vs();
963 }
964 return capacity;
965 }
// Construct the list for non-class metadata and eagerly grow the first
// virtual space of word_size words under the expand lock.
VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  assert(initialization_succeeded,
    " VirtualSpaceList initialization should not fail");
}
// Construct the class metadata virtual space list from an already
// reserved space.  Unlike the word_size constructor, the single node
// wraps the caller-supplied ReservedSpace rather than reserving anew.
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _virtual_space_total(0),
                                   _virtual_space_count(0) {
  // link_vs() must be called with the expand lock held.
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  assert(succeeded, " VirtualSpaceList initialization should not fail");
  link_vs(class_entry, rs.size()/BytesPerWord);
}
// Allocate another meta virtual space and add it to the list.
// Returns false if vs_word_size is zero or the reservation fails.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    // ensure lock-free iteration sees fully initialized node
    // (contains() walks the list without taking the expand lock, so the
    // node's fields must be visible before link_vs() makes it reachable).
    OrderAccess::storestore();
    link_vs(new_entry, vs_word_size);
    return true;
  }
}
// Append new_entry to the list and make it the current (allocating)
// virtual space.  Callers hold the expand lock (see grow_vs()).  The
// node is linked after the tail, so a lock-free reader walking the list
// (see contains()) either sees the fully published node or stops before it.
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_virtual_space_total(vs_word_size);
  inc_virtual_space_count();
#ifdef ASSERT
  // Debug builds pre-fill the new space to catch uninitialized reads.
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(tty);
  }
}
// Return a chunk of grow_chunks_by_words words, trying in order:
//   1. the chunk manager's free lists,
//   2. the committed part of the current virtual space,
//   3. committing more of the current virtual space,
//   4. (policy permitting) growing a brand-new virtual space.
// Returns NULL when allocation should fail and induce a GC instead.
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  // Allocate a chunk out of the current virtual space.
  if (next == NULL) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in current virtual space.  Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2((size_t)SpaceManager::MediumChunkBunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    // Commit whole pages, so round the request up to a page boundary.
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                      page_size_words);
    bool vs_expanded =
      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation?  If it's the virtual space for classes and is
      // being used for CompressedHeaders, don't allocate a new virtualspace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it.  It's on the list now.  Get a chunk from it.
          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
        }
        if (TraceMetadataHumongousAllocation && SpaceManager::is_humongous(word_size)) {
          gclog_or_tty->print_cr("  aligned_expand_vs_by_words " PTR_FORMAT,
                                 aligned_expand_vs_by_words);
          gclog_or_tty->print_cr("  grow_vs_words " PTR_FORMAT,
                                 grow_vs_words);
        }
      } else {
        // Allocation will fail and induce a GC
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
            " Fail instead of expand the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  return next;
}
1092 void VirtualSpaceList::print_on(outputStream* st) const {
1093 if (TraceMetadataChunkAllocation && Verbose) {
1094 VirtualSpaceListIterator iter(virtual_space_list());
1095 while (iter.repeat()) {
1096 VirtualSpaceNode* node = iter.get_next();
1097 node->print_on(st);
1098 }
1099 }
1100 }
// Lock-free membership test: true if ptr lies in the reserved region of
// any node on the list.  Deliberately takes no lock so it is safe from
// asynchronous contexts (e.g. AsyncGetCallTrace); this relies on
// grow_vs() issuing a storestore barrier before link_vs() publishes a
// new node, so a racing reader sees either a fully initialized node or
// a list that does not yet include it.  Do not add locking here.
bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}
1115 // MetaspaceGC methods
1117 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1118 // Within the VM operation after the GC the attempt to allocate the metadata
1119 // should succeed. If the GC did not free enough space for the metaspace
1120 // allocation, the HWM is increased so that another virtualspace will be
1121 // allocated for the metadata. With perm gen the increase in the perm
1122 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
1123 // metaspace policy uses those as the small and large steps for the HWM.
1124 //
1125 // After the GC the compute_new_size() for MetaspaceGC is called to
1126 // resize the capacity of the metaspaces. The current implementation
1127 // is based on the flags MinHeapFreeRatio and MaxHeapFreeRatio used
1128 // to resize the Java heap by some GC's. New flags can be implemented
1129 // if really needed. MinHeapFreeRatio is used to calculate how much
1130 // free space is desirable in the metaspace capacity to decide how much
1131 // to increase the HWM. MaxHeapFreeRatio is used to decide how much
1132 // free space is desirable in the metaspace capacity before decreasing
1133 // the HWM.
1135 // Calculate the amount to increase the high water mark (HWM).
1136 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1137 // another expansion is not requested too soon. If that is not
1138 // enough to satisfy the allocation (i.e. big enough for a word_size
1139 // allocation), increase by MaxMetaspaceExpansion. If that is still
1140 // not enough, expand by the size of the allocation (word_size) plus
1141 // some.
1142 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
1143 size_t before_inc = MetaspaceGC::capacity_until_GC();
1144 size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
1145 size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
1146 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1147 size_t size_delta_words = align_size_up(word_size, page_size_words);
1148 size_t delta_words = MAX2(size_delta_words, min_delta_words);
1149 if (delta_words > min_delta_words) {
1150 // Don't want to hit the high water mark on the next
1151 // allocation so make the delta greater than just enough
1152 // for this allocation.
1153 delta_words = MAX2(delta_words, max_delta_words);
1154 if (delta_words > max_delta_words) {
1155 // This allocation is large but the next ones are probably not
1156 // so increase by the minimum.
1157 delta_words = delta_words + min_delta_words;
1158 }
1159 }
1160 return delta_words;
1161 }
// Policy decision: may vsl expand to satisfy an allocation of word_size
// words, or should the caller fail the allocation and induce a GC?
bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  // Class virtual space should always be expanded.  Call GC for the other
  // metadata virtual space.
  if (vsl == Metaspace::class_space_list()) return true;

  // If the user wants a limit, impose one.
  size_t max_metaspace_size_words = MaxMetaspaceSize / BytesPerWord;
  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
      vsl->capacity_words_sum() >= max_metaspace_size_words) {
    return false;
  }

  // If this is part of an allocation after a GC, expand
  // unconditionally (the GC already ran; failing again would loop).
  if(MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion.  Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  if (vsl->capacity_words_sum() < metaspace_size_words ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_words);
    return true;
  } else {
    if (vsl->capacity_words_sum() < capacity_until_GC()) {
      // Still below the HWM: expansion allowed without a GC.
      return true;
    } else {
      // HWM reached: refuse so the caller induces a GC.
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                               " capacity_until_GC " SIZE_FORMAT
                               " capacity_words_sum " SIZE_FORMAT
                               " used_words_sum " SIZE_FORMAT
                               " free chunks " SIZE_FORMAT
                               " free chunks count %d",
                               word_size,
                               capacity_until_GC(),
                               vsl->capacity_words_sum(),
                               vsl->used_words_sum(),
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
      return false;
    }
  }
}
// Variables are in bytes

// Recompute the metaspace HWM after a GC: grow it if free space is below
// MinHeapFreeRatio, otherwise shrink it (damped over successive calls)
// toward MaxHeapFreeRatio.  Mirrors the heap-resizing policy.
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  // Reset the damping factor; it is re-armed below only if we shrink.
  _shrink_factor = 0;

  VirtualSpaceList *vsl = Metaspace::space_list();

  size_t capacity_after_gc = vsl->capacity_bytes_sum();
  // Check to see if these two can be calculated without walking the CLDG
  size_t used_after_gc = vsl->used_bytes_sum();
  // NOTE(review): capacity_until_GC here is the current capacity in
  // bytes, not MetaspaceGC::capacity_until_GC() (which is in words) --
  // confirm the intended relationship between the two.
  size_t capacity_until_GC = vsl->capacity_bytes_sum();
  size_t free_after_gc = capacity_until_GC - used_after_gc;

  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Smallest capacity that keeps used below maximum_used_percentage.
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    double d_free_after_gc = free_after_gc / (double) K;
    gclog_or_tty->print_cr("  "
                  "   free_after_gc       : %6.1fK"
                  "   used_after_gc       : %6.1fK"
                  "   capacity_after_gc   : %6.1fK"
                  "   metaspace HWM     : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K,
                  capacity_until_GC / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }

  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      // NOTE(review): inc_capacity_until_GC takes words while the
      // surrounding arithmetic is in bytes -- hence the conversion here.
      size_t expand_words = expand_bytes / BytesPerWord;
      MetaspaceGC::inc_capacity_until_GC(expand_words);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_words: %6.1fK"
                    "  MinMetaspaceExpansion: %6.1fK"
                    "  new metaspace HWM:  %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    MinMetaspaceExpansion / (double) K,
                    new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_words = 0;
  // We would never want to shrink more than this
  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
    max_shrink_words));

  // Should shrinking be considered?
  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    // Largest capacity that keeps used above minimum_used_percentage.
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  capacity_until_GC: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             capacity_until_GC / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_words = capacity_until_GC - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_words = shrink_words / 100 * current_shrink_factor;
      assert(shrink_words <= max_shrink_words,
        err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
          shrink_words, max_shrink_words));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      MetaspaceSize / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_words: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  MinMetaspaceExpansion: %.1fK",
                      shrink_words / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_words >= MinMetaspaceExpansion) {
    VirtualSpaceNode* csp = vsl->current_virtual_space();
    // Only the current virtual space can be shrunk; cap the shrink at
    // its unused capacity.
    size_t available_to_shrink = csp->capacity_words_in_vs() -
      csp->used_words_in_vs();
    shrink_words = MIN2(shrink_words, available_to_shrink);
    csp->shrink_by(shrink_words);
    MetaspaceGC::dec_capacity_until_GC(shrink_words);
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
    }
  }
  assert(vsl->used_bytes_sum() == used_after_gc &&
         used_after_gc <= vsl->capacity_bytes_sum(),
         "sanity check");

}
1378 // Metadebug methods
// Debug-only stress helper: every MetaDataDeallocateALotInterval calls,
// carve several dummy chunks out of the current virtual space and push
// them straight onto the free list, exercising the chunk free-list code.
void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size){
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        // Current virtual space exhausted; stop stressing.
        break;
      }
      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
                            sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total %d count %d",
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}
// Debug-only stress helper: every MetaDataDeallocateALotInterval calls,
// allocate and immediately deallocate several blocks of raw_word_size
// words, exercising the block free-list code in SpaceManager.
void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size){
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        // Allocation failed; nothing more to stress.
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}
// Seed the countdown used by test_metadata_failure() with a random
// value in [1, MetadataAllocationFailALotInterval] so induced failures
// are spread out rather than strictly periodic.
void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}
#ifdef ASSERT
// Returns true when an induced metadata allocation failure should occur
// (MetadataAllocationFailALot stress mode).  Counts down the randomized
// counter and re-seeds it on each induced failure.  Inactive until VM
// startup is complete so bootstrap allocations never fail artificially.
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif
1454 // ChunkList methods
1456 size_t ChunkList::sum_list_size() {
1457 size_t result = 0;
1458 Metachunk* cur = head();
1459 while (cur != NULL) {
1460 result += cur->word_size();
1461 cur = cur->next();
1462 }
1463 return result;
1464 }
1466 size_t ChunkList::sum_list_count() {
1467 size_t result = 0;
1468 Metachunk* cur = head();
1469 while (cur != NULL) {
1470 result++;
1471 cur = cur->next();
1472 }
1473 return result;
1474 }
1476 size_t ChunkList::sum_list_capacity() {
1477 size_t result = 0;
1478 Metachunk* cur = head();
1479 while (cur != NULL) {
1480 result += cur->capacity_word_size();
1481 cur = cur->next();
1482 }
1483 return result;
1484 }
// Prepend the chain [head, tail] to this list.  tail must be the last
// chunk of the chain being added (its next pointer is NULL).
void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
  assert_lock_strong(SpaceManager::expand_lock());
  // NOTE(review): this assert dereferences tail unconditionally, yet the
  // code below guards tail != NULL -- confirm whether a NULL tail is a
  // legal argument; if it is, this assert crashes debug builds first.
  assert(tail->next() == NULL, "Not the tail");

  if (TraceMetadataChunkAllocation && Verbose) {
    tty->print("ChunkList::add_at_head: ");
    Metachunk* cur = head;
    while (cur != NULL) {
      tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
      cur = cur->next();
    }
    tty->print_cr("");
  }

  if (tail != NULL) {
    // Splice the old list onto the end of the new chain.
    tail->set_next(_head);
  }
  set_head(head);
}
1506 void ChunkList::add_at_head(Metachunk* list) {
1507 if (list == NULL) {
1508 // Nothing to add
1509 return;
1510 }
1511 assert_lock_strong(SpaceManager::expand_lock());
1512 Metachunk* head = list;
1513 Metachunk* tail = list;
1514 Metachunk* cur = head->next();
1515 // Search for the tail since it is not passed.
1516 while (cur != NULL) {
1517 tail = cur;
1518 cur = cur->next();
1519 }
1520 add_at_head(head, tail);
1521 }
1523 // ChunkManager methods
// Verification of _free_chunks_total and _free_chunks_count does not
// work with the CMS collector because its use of additional locks
// complicates the mutex deadlock detection, but verification can still
// be useful for detecting errors in the chunk accounting with other collectors.
// Lock-free read of the cached free-chunk total (in words).  In debug
// builds, when the expand lock is not already held, take it and verify
// the cached value against the actual free lists (skipped under CMS --
// see the comment above about its extra locks).
size_t ChunkManager::free_chunks_total() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    slow_locked_verify_free_chunks_total();
  }
#endif
  return _free_chunks_total;
}
1541 size_t ChunkManager::free_chunks_total_in_bytes() {
1542 return free_chunks_total() * BytesPerWord;
1543 }
// Lock-free read of the cached free-chunk count.  Debug builds verify
// the cache against the free lists when the expand lock is available.
size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}
// Assert that the cached _free_chunks_total matches a fresh walk of the
// free lists.  Caller must hold the expand lock.
void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
    err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_total,
           sum_free_chunks()));
}
// Locking wrapper around locked_verify_free_chunks_total().
// NOTE(review): unlike verify_free_chunks_count() this is not wrapped in
// #ifdef ASSERT, so product builds still take the lock (the assert body
// itself compiles away) -- confirm whether that is intentional.
void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}
// Assert that the cached _free_chunks_count matches a fresh walk of the
// free lists.  Caller must hold the expand lock.
void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks_count() == _free_chunks_count,
    err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
           " same as sum " SIZE_FORMAT, _free_chunks_count,
           sum_free_chunks_count()));
}
// Debug-only locking wrapper around locked_verify_free_chunks_count().
void ChunkManager::verify_free_chunks_count() {
#ifdef ASSERT
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_count();
#endif
}
// Take the expand lock and run the full (count + total) verification.
void ChunkManager::verify() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
  locked_verify();
}
// Full verification (count + total); caller must hold the expand lock.
void ChunkManager::locked_verify() {
  locked_verify_free_chunks_count();
  locked_verify_free_chunks_total();
}
1599 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1600 assert_lock_strong(SpaceManager::expand_lock());
1601 st->print_cr("Free chunk total 0x%x count 0x%x",
1602 _free_chunks_total, _free_chunks_count);
1603 }
1605 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1606 assert_lock_strong(SpaceManager::expand_lock());
1607 st->print_cr("Sum free chunk total 0x%x count 0x%x",
1608 sum_free_chunks(), sum_free_chunks_count());
1609 }
1610 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1611 return &_free_chunks[index];
1612 }
1614 // These methods that sum the free chunk lists are used in printing
1615 // methods that are used in product builds.
1616 size_t ChunkManager::sum_free_chunks() {
1617 assert_lock_strong(SpaceManager::expand_lock());
1618 size_t result = 0;
1619 for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1620 ChunkList* list = free_chunks(i);
1622 if (list == NULL) {
1623 continue;
1624 }
1626 result = result + list->sum_list_capacity();
1627 }
1628 result = result + humongous_dictionary()->total_size();
1629 return result;
1630 }
1632 size_t ChunkManager::sum_free_chunks_count() {
1633 assert_lock_strong(SpaceManager::expand_lock());
1634 size_t count = 0;
1635 for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1636 ChunkList* list = free_chunks(i);
1637 if (list == NULL) {
1638 continue;
1639 }
1640 count = count + list->sum_list_count();
1641 }
1642 count = count + humongous_dictionary()->total_free_blocks();
1643 return count;
1644 }
1646 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1647 switch (word_size) {
1648 case SpaceManager::SmallChunk :
1649 return &_free_chunks[0];
1650 case SpaceManager::MediumChunk :
1651 return &_free_chunks[1];
1652 default:
1653 assert(word_size > SpaceManager::MediumChunk, "List inconsistency");
1654 return &_free_chunks[2];
1655 }
1656 }
// Push chunk onto the head of the free list matching its size, and
// credit its capacity to the cached totals.  Caller must hold the
// expand lock.
void ChunkManager::free_chunks_put(Metachunk* chunk) {
  assert_lock_strong(SpaceManager::expand_lock());
  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  chunk->set_next(free_list->head());
  free_list->set_head(chunk);
  // chunk is being returned to the chunk free list
  inc_free_chunks_total(chunk->capacity_word_size());
  slow_locked_verify();
}
// Return a chunk to the free lists.  Caller must already hold the
// expand lock (verified below) even though the request originates in
// Metaspace freelist management code that does not itself take it.
void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  // The deallocation of a chunk originates in the freelist
  // manangement code for a Metaspace and does not hold the
  // lock.
  assert(chunk != NULL, "Deallocating NULL");
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();
  if (TraceMetadataChunkAllocation) {
    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
                  PTR_FORMAT " size " SIZE_FORMAT,
                  chunk, chunk->word_size());
  }
  free_chunks_put(chunk);
}
// Remove and return a free chunk of word_size words, or NULL if none is
// available.  Small/medium requests pop the head of the matching fixed
// free list; humongous requests search the dictionary for a best fit of
// at least word_size.  Caller must hold the expand lock.
Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  slow_locked_verify();

  Metachunk* chunk = NULL;
  if (!SpaceManager::is_humongous(word_size)) {
    ChunkList* free_list = find_free_chunks_list(word_size);
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();
    debug_only(Metachunk* debug_head = chunk;)

    if (chunk == NULL) {
      return NULL;
    }

    // Remove the chunk as the head of the list.
    free_list->set_head(chunk->next());
    chunk->set_next(NULL);
    // Chunk has been removed from the chunks free list.
    dec_free_chunks_total(chunk->capacity_word_size());

    if (TraceMetadataChunkAllocation && Verbose) {
      tty->print_cr("ChunkManager::free_chunks_get: free_list "
                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                    free_list, chunk, chunk->word_size());
    }
  } else {
    chunk = humongous_dictionary()->get_chunk(
      word_size,
      FreeBlockDictionary<Metachunk>::atLeast);

    if (chunk != NULL) {
      if (TraceMetadataHumongousAllocation) {
        size_t waste = chunk->word_size() - word_size;
        // atLeast may return a larger chunk; report the internal waste.
        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
                      " for requested size " SIZE_FORMAT
                      " waste " SIZE_FORMAT,
                      chunk->word_size(), word_size, waste);
      }
      // Chunk is being removed from the chunks free list.
      dec_free_chunks_total(chunk->capacity_word_size());
#ifdef ASSERT
      chunk->set_is_free(false);
#endif
    }
  }
  slow_locked_verify();
  return chunk;
}
// Public free-list allocation entry: take a chunk of word_size words
// from the free lists, or return NULL.  Caller must hold the expand lock.
Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  // Fixed-size lists return exactly word_size; only humongous chunks
  // may be larger than requested.
  assert(word_size <= chunk->word_size() ||
        SpaceManager::is_humongous(chunk->word_size()),
        "Non-humongous variable sized chunk");
  if (TraceMetadataChunkAllocation) {
    tty->print("ChunkManager::chunk_freelist_allocate: chunk "
               PTR_FORMAT " size " SIZE_FORMAT " ",
               chunk, chunk->word_size());
    locked_print_free_chunks(tty);
  }

  return chunk;
}
1758 void ChunkManager::print_on(outputStream* out) {
1759 if (PrintFLSStatistics != 0) {
1760 humongous_dictionary()->report_statistics();
1761 }
1762 }
1764 // SpaceManager methods
1766 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1767 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1768 size_t free = 0;
1769 for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1770 Metachunk* chunk = chunks_in_use(i);
1771 while (chunk != NULL) {
1772 free += chunk->free_word_size();
1773 chunk = chunk->next();
1774 }
1775 }
1776 return free;
1777 }
1779 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1780 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1781 size_t result = 0;
1782 for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1785 result += sum_waste_in_chunks_in_use(i);
1786 }
1788 return result;
1789 }
1791 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1792 size_t result = 0;
1793 size_t count = 0;
1794 Metachunk* chunk = chunks_in_use(index);
1795 // Count the free space in all the chunk but not the
1796 // current chunk from which allocations are still being done.
1797 if (chunk != NULL) {
1798 Metachunk* prev = chunk;
1799 while (chunk != NULL && chunk != current_chunk()) {
1800 result += chunk->free_word_size();
1801 prev = chunk;
1802 chunk = chunk->next();
1803 count++;
1804 }
1805 }
1806 return result;
1807 }
// Total capacity (in words) of all in-use chunks, summed over every
// in-use list.
size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t sum = 0;
  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      // capacity_word_size() excludes the Metachunk header overhead.
      sum += chunk->capacity_word_size();
      chunk = chunk->next();
    }
  }
  return sum;
}
1824 size_t SpaceManager::sum_count_in_chunks_in_use() {
1825 size_t count = 0;
1826 for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1827 count = count + sum_count_in_chunks_in_use(i);
1828 }
1830 return count;
1831 }
1833 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1834 size_t count = 0;
1835 Metachunk* chunk = chunks_in_use(i);
1836 while (chunk != NULL) {
1837 count++;
1838 chunk = chunk->next();
1839 }
1840 return count;
1841 }
1844 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1845 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1846 size_t used = 0;
1847 for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1848 Metachunk* chunk = chunks_in_use(i);
1849 while (chunk != NULL) {
1850 used += chunk->used_word_size();
1851 chunk = chunk->next();
1852 }
1853 }
1854 return used;
1855 }
1857 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1859 Metachunk* small_chunk = chunks_in_use(SmallIndex);
1860 st->print_cr("SpaceManager: small chunk " PTR_FORMAT
1861 " free " SIZE_FORMAT,
1862 small_chunk,
1863 small_chunk->free_word_size());
1865 Metachunk* medium_chunk = chunks_in_use(MediumIndex);
1866 st->print("medium chunk " PTR_FORMAT, medium_chunk);
1867 Metachunk* tail = current_chunk();
1868 st->print_cr(" current chunk " PTR_FORMAT, tail);
1870 Metachunk* head = chunks_in_use(HumongousIndex);
1871 st->print_cr("humongous chunk " PTR_FORMAT, head);
1873 vs_list()->chunk_manager()->locked_print_free_chunks(st);
1874 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1875 }
// Choose the size of the next chunk to request for an allocation of
// word_size words: small while the small-chunk budget lasts, medium
// afterwards, humongous when the request itself is too big for either.
size_t SpaceManager::calc_chunk_size(size_t word_size) {

  // Decide between a small chunk and a medium chunk.  Up to
  // _small_chunk_limit small chunks can be allocated but
  // once a medium chunk has been allocated, no more small
  // chunks will be allocated.
  size_t chunk_word_size;
  if (chunks_in_use(MediumIndex) == NULL &&
      (!has_small_chunk_limit() ||
       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
    chunk_word_size = (size_t) SpaceManager::SmallChunk;
    if (word_size + Metachunk::overhead() > SpaceManager::SmallChunk) {
      // Request doesn't fit in a small chunk even counting the header.
      chunk_word_size = MediumChunk;
    }
  } else {
    chunk_word_size = MediumChunk;
  }

  // Might still need a humongous chunk
  chunk_word_size =
    MAX2((size_t) chunk_word_size, word_size + Metachunk::overhead());

  if (TraceMetadataHumongousAllocation &&
      SpaceManager::is_humongous(word_size)) {
    gclog_or_tty->print_cr("Metadata humongous allocation:");
    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
                           chunk_word_size);
    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
                           Metachunk::overhead());
  }
  return chunk_word_size;
}
// Called when the current chunk cannot satisfy "word_size": acquire a new
// chunk from the virtual space list (under the expand lock) and allocate
// from it.  Returns NULL if no chunk could be obtained.
MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");
  // Only called on the slow path: the fast-path allocation from the
  // current chunk must already have failed.
  assert(current_chunk() == NULL ||
         current_chunk()->allocate(word_size) == NULL,
         "Don't need to expand");
  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
                           " words " SIZE_FORMAT " space left",
                           word_size, current_chunk() != NULL ?
                             current_chunk()->free_word_size() : 0);
  }

  // Get another chunk out of the virtual space
  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  Metachunk* next = vs_list()->get_new_chunk(word_size, grow_chunks_by_words);

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
    // Debug-stress hook: may return some chunks to the free lists.
    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
    // Add to this manager's list of chunks in use.
    add_chunk(next, false);
    return next->allocate(word_size);
  }
  return NULL;
}
1941 void SpaceManager::print_on(outputStream* st) const {
1943 for (ChunkIndex i = SmallIndex;
1944 i < NumberOfInUseLists ;
1945 i = next_chunk_index(i) ) {
1946 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
1947 chunks_in_use(i),
1948 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
1949 }
1950 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
1951 " Humongous " SIZE_FORMAT,
1952 sum_waste_in_chunks_in_use(SmallIndex),
1953 sum_waste_in_chunks_in_use(MediumIndex),
1954 sum_waste_in_chunks_in_use(HumongousIndex));
1955 // block free lists
1956 if (block_freelists() != NULL) {
1957 st->print_cr("total in block free lists " SIZE_FORMAT,
1958 block_freelists()->total_size());
1959 }
1960 }
1962 SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
1963 _vs_list(vs_list),
1964 _allocation_total(0),
1965 _lock(lock) {
1966 Metadebug::init_allocation_fail_alot_count();
1967 for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1968 _chunks_in_use[i] = NULL;
1969 }
1970 _current_chunk = NULL;
1971 if (TraceMetadataChunkAllocation && Verbose) {
1972 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
1973 }
1974 }
// Return every chunk owned by this SpaceManager to the global free lists
// managed by the ChunkManager.  Runs under the expand lock because it
// mutates shared free-list state.
SpaceManager::~SpaceManager() {
  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

  ChunkManager* chunk_manager = vs_list()->chunk_manager();

  chunk_manager->slow_locked_verify();

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
    locked_print_chunks_in_use_on(gclog_or_tty);
  }

  // Have to update before the chunks_in_use lists are emptied
  // below.
  chunk_manager->inc_free_chunks_total(sum_capacity_in_chunks_in_use(),
                                       sum_count_in_chunks_in_use());

#ifdef ASSERT
  // Mangle freed memory.
  mangle_freed_chunks();
#endif // ASSERT

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Small chunks.  There is one _current_chunk for each
  // Metaspace.  It could point to a small or medium chunk.
  // Rather than determine which it is, follow the list of
  // small chunks to add them to the free list
  Metachunk* small_chunk = chunks_in_use(SmallIndex);
  chunk_manager->free_small_chunks()->add_at_head(small_chunk);
  set_chunks_in_use(SmallIndex, NULL);

  // After the small chunk are the medium chunks
  Metachunk* medium_chunk = chunks_in_use(MediumIndex);
  assert(medium_chunk == NULL ||
         medium_chunk->word_size() == MediumChunk,
         "Chunk is on the wrong list");

  if (medium_chunk != NULL) {
    Metachunk* head = medium_chunk;
    // If there is a medium chunk then the _current_chunk can only
    // point to the last medium chunk.
    Metachunk* tail = current_chunk();
    chunk_manager->free_medium_chunks()->add_at_head(head, tail);
    set_chunks_in_use(MediumIndex, NULL);
  }

  // Humongous chunks
  // Humongous chunks are never the current chunk.
  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);

  while (humongous_chunks != NULL) {
#ifdef ASSERT
    humongous_chunks->set_is_free(true);
#endif
    // Save the next pointer before the dictionary takes ownership of the
    // chunk (return_chunk may rewrite the chunk's link fields).
    Metachunk* next_humongous_chunks = humongous_chunks->next();
    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
    humongous_chunks = next_humongous_chunks;
  }
  set_chunks_in_use(HumongousIndex, NULL);
  chunk_manager->slow_locked_verify();
}
2041 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2042 assert_lock_strong(_lock);
2043 size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2044 assert(word_size >= min_size,
2045 err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2046 block_freelists()->return_block(p, word_size);
2047 }
// Adds a chunk to the list of chunks in use.
// Dispatches on the chunk's word size: small and medium chunks go on
// their own lists (and become the current chunk); anything else is
// treated as humongous and linked at the head of the humongous list.
void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {

  assert(new_chunk != NULL, "Should not be NULL");
  assert(new_chunk->next() == NULL, "Should not be on a list");

  // Chunk may be recycled from a free list; clear its bookkeeping.
  new_chunk->reset_empty();

  // Find the correct list and and set the current
  // chunk for that list.
  switch (new_chunk->word_size()) {
  case SpaceManager::SmallChunk :
    if (chunks_in_use(SmallIndex) == NULL) {
      // First chunk to add to the list
      set_chunks_in_use(SmallIndex, new_chunk);
    } else {
      // The small list only grows while no medium chunk exists, so the
      // current chunk must itself be small here.
      assert(current_chunk()->word_size() == SpaceManager::SmallChunk,
        err_msg( "Incorrect mix of sizes in chunk list "
        SIZE_FORMAT " new chunk " SIZE_FORMAT,
        current_chunk()->word_size(), new_chunk->word_size()));
      current_chunk()->set_next(new_chunk);
    }
    // Make current chunk
    set_current_chunk(new_chunk);
    break;
  case SpaceManager::MediumChunk :
    if (chunks_in_use(MediumIndex) == NULL) {
      // About to add the first medium chunk so teminate the
      // small chunk list.  In general once medium chunks are
      // being added, we're past the need for small chunks.
      if (current_chunk() != NULL) {
        // Only a small chunk or the initial chunk could be
        // the current chunk if this is the first medium chunk.
        assert(current_chunk()->word_size() == SpaceManager::SmallChunk ||
          chunks_in_use(SmallIndex) == NULL,
          err_msg("Should be a small chunk or initial chunk, current chunk "
          SIZE_FORMAT " new chunk " SIZE_FORMAT,
          current_chunk()->word_size(), new_chunk->word_size()));
        current_chunk()->set_next(NULL);
      }
      // First chunk to add to the list
      set_chunks_in_use(MediumIndex, new_chunk);

    } else {
      // As a minimum the first medium chunk added would
      // have become the _current_chunk
      // so the _current_chunk has to be non-NULL here
      // (although not necessarily still the first medium chunk).
      assert(current_chunk()->word_size() == SpaceManager::MediumChunk,
             "A medium chunk should the current chunk");
      current_chunk()->set_next(new_chunk);
    }
    // Make current chunk
    set_current_chunk(new_chunk);
    break;
  default: {
    // For null class loader data and DumpSharedSpaces, the first chunk isn't
    // small, so small will be null.  Link this first chunk as the current
    // chunk.
    if (make_current) {
      // Set as the current chunk but otherwise treat as a humongous chunk.
      set_current_chunk(new_chunk);
    }
    // Link at head. The _current_chunk only points to a humongous chunk for
    // the null class loader metaspace (class and data virtual space managers)
    // any humongous chunks so will not point to the tail
    // of the humongous chunks list.
    new_chunk->set_next(chunks_in_use(HumongousIndex));
    set_chunks_in_use(HumongousIndex, new_chunk);

    assert(new_chunk->word_size() > MediumChunk, "List inconsistency");
  }
  }

  assert(new_chunk->is_empty(), "Not ready for reuse");
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                        sum_count_in_chunks_in_use());
    new_chunk->print_on(gclog_or_tty);
    vs_list()->chunk_manager()->locked_print_free_chunks(tty);
  }
}
2132 MetaWord* SpaceManager::allocate(size_t word_size) {
2133 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2135 // If only the dictionary is going to be used (i.e., no
2136 // indexed free list), then there is a minimum size requirement.
2137 // MinChunkSize is a placeholder for the real minimum size JJJ
2138 size_t byte_size = word_size * BytesPerWord;
2140 size_t byte_size_with_overhead = byte_size + Metablock::overhead();
2142 size_t raw_bytes_size = MAX2(byte_size_with_overhead,
2143 Metablock::min_block_byte_size());
2144 raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
2145 size_t raw_word_size = raw_bytes_size / BytesPerWord;
2146 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
2148 BlockFreelist* fl = block_freelists();
2149 MetaWord* p = NULL;
2150 // Allocation from the dictionary is expensive in the sense that
2151 // the dictionary has to be searched for a size. Don't allocate
2152 // from the dictionary until it starts to get fat. Is this
2153 // a reasonable policy? Maybe an skinny dictionary is fast enough
2154 // for allocations. Do some profiling. JJJ
2155 if (fl->total_size() > allocation_from_dictionary_limit) {
2156 p = fl->get_block(raw_word_size);
2157 }
2158 if (p == NULL) {
2159 p = allocate_work(raw_word_size);
2160 }
2161 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2163 return p;
2164 }
2166 // Returns the address of spaced allocated for "word_size".
2167 // This methods does not know about blocks (Metablocks)
2168 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2169 assert_lock_strong(_lock);
2170 #ifdef ASSERT
2171 if (Metadebug::test_metadata_failure()) {
2172 return NULL;
2173 }
2174 #endif
2175 // Is there space in the current chunk?
2176 MetaWord* result = NULL;
2178 // For DumpSharedSpaces, only allocate out of the current chunk which is
2179 // never null because we gave it the size we wanted. Caller reports out
2180 // of memory if this returns null.
2181 if (DumpSharedSpaces) {
2182 assert(current_chunk() != NULL, "should never happen");
2183 inc_allocation_total(word_size);
2184 return current_chunk()->allocate(word_size); // caller handles null result
2185 }
2186 if (current_chunk() != NULL) {
2187 result = current_chunk()->allocate(word_size);
2188 }
2190 if (result == NULL) {
2191 result = grow_and_allocate(word_size);
2192 }
2193 if (result > 0) {
2194 inc_allocation_total(word_size);
2195 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2196 "Head of the list is being allocated");
2197 }
2199 return result;
2200 }
2202 void SpaceManager::verify() {
2203 // If there are blocks in the dictionary, then
2204 // verfication of chunks does not work since
2205 // being in the dictionary alters a chunk.
2206 if (block_freelists()->total_size() == 0) {
2207 // Skip the small chunks because their next link points to
2208 // medium chunks. This is because the small chunk is the
2209 // current chunk (for allocations) until it is full and the
2210 // the addition of the next chunk does not NULL the next
2211 // like of the small chunk.
2212 for (ChunkIndex i = MediumIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2213 Metachunk* curr = chunks_in_use(i);
2214 while (curr != NULL) {
2215 curr->verify();
2216 curr = curr->next();
2217 }
2218 }
2219 }
2220 }
#ifdef ASSERT
// Cross-check _allocation_total against a fresh walk of the in-use
// chunks.  The entire body is compiled out with "#if 0" because the
// invariant only holds at a safepoint; kept for manual debugging.
void SpaceManager::verify_allocation_total() {
#if 0
  // Verification is only guaranteed at a safepoint.
  if (SafepointSynchronize::is_at_safepoint()) {
    gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
                           " sum_used_in_chunks_in_use " SIZE_FORMAT,
                           this,
                           allocation_total(),
                           sum_used_in_chunks_in_use());
  }
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  assert(allocation_total() == sum_used_in_chunks_in_use(),
    err_msg("allocation total is not consistent %d vs %d",
            allocation_total(), sum_used_in_chunks_in_use()));
#endif
}

#endif
2242 void SpaceManager::dump(outputStream* const out) const {
2243 size_t curr_total = 0;
2244 size_t waste = 0;
2245 uint i = 0;
2246 size_t used = 0;
2247 size_t capacity = 0;
2249 // Add up statistics for all chunks in this SpaceManager.
2250 for (ChunkIndex index = SmallIndex;
2251 index < NumberOfInUseLists;
2252 index = next_chunk_index(index)) {
2253 for (Metachunk* curr = chunks_in_use(index);
2254 curr != NULL;
2255 curr = curr->next()) {
2256 out->print("%d) ", i++);
2257 curr->print_on(out);
2258 if (TraceMetadataChunkAllocation && Verbose) {
2259 block_freelists()->print_on(out);
2260 }
2261 curr_total += curr->word_size();
2262 used += curr->used_word_size();
2263 capacity += curr->capacity_word_size();
2264 waste += curr->free_word_size() + curr->overhead();;
2265 }
2266 }
2268 size_t free = current_chunk()->free_word_size();
2269 // Free space isn't wasted.
2270 waste -= free;
2272 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2273 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2274 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2275 }
#ifdef ASSERT
// Overwrite every chunk on the in-use lists with a recognizable pattern
// so use-after-free of metadata is caught quickly in debug builds.
void SpaceManager::mangle_freed_chunks() {
  for (ChunkIndex index = SmallIndex;
       index < NumberOfInUseLists;
       index = next_chunk_index(index)) {
    Metachunk* curr = chunks_in_use(index);
    while (curr != NULL) {
      // Try to detect incorrectly terminated small chunk
      // list.
      assert(index == MediumIndex || curr != chunks_in_use(MediumIndex),
             err_msg("Mangling medium chunks in small chunks? "
                     "curr " PTR_FORMAT " medium list " PTR_FORMAT,
                     curr, chunks_in_use(MediumIndex)));
      curr->mangle();
      curr = curr->next();
    }
  }
}
#endif // ASSERT
2298 // MetaspaceAux
2300 size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
2301 size_t used = 0;
2302 ClassLoaderDataGraphMetaspaceIterator iter;
2303 while (iter.repeat()) {
2304 Metaspace* msp = iter.get_next();
2305 // Sum allocation_total for each metaspace
2306 if (msp != NULL) {
2307 used += msp->used_words(mdtype);
2308 }
2309 }
2310 return used * BytesPerWord;
2311 }
2313 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2314 size_t free = 0;
2315 ClassLoaderDataGraphMetaspaceIterator iter;
2316 while (iter.repeat()) {
2317 Metaspace* msp = iter.get_next();
2318 if (msp != NULL) {
2319 free += msp->free_words(mdtype);
2320 }
2321 }
2322 return free * BytesPerWord;
2323 }
2325 // The total words available for metadata allocation. This
2326 // uses Metaspace capacity_words() which is the total words
2327 // in chunks allocated for a Metaspace.
2328 size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
2329 size_t capacity = free_chunks_total(mdtype);
2330 ClassLoaderDataGraphMetaspaceIterator iter;
2331 while (iter.repeat()) {
2332 Metaspace* msp = iter.get_next();
2333 if (msp != NULL) {
2334 capacity += msp->capacity_words(mdtype);
2335 }
2336 }
2337 return capacity * BytesPerWord;
2338 }
2340 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2341 size_t reserved = (mdtype == Metaspace::ClassType) ?
2342 Metaspace::class_space_list()->virtual_space_total() :
2343 Metaspace::space_list()->virtual_space_total();
2344 return reserved * BytesPerWord;
2345 }
2347 size_t MetaspaceAux::min_chunk_size() { return SpaceManager::MediumChunk; }
2349 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2350 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2351 Metaspace::class_space_list()->chunk_manager() :
2352 Metaspace::space_list()->chunk_manager();
2353 chunk->slow_verify();
2354 return chunk->free_chunks_total();
2355 }
2357 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2358 return free_chunks_total(mdtype) * BytesPerWord;
2359 }
// Emit the "[Metaspace: prev->used(capacity/reserved)]" fragment of a GC
// log line.  Verbose form prints exact bytes; default form prints KB.
void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  gclog_or_tty->print(", [Metaspace:");
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "(" SIZE_FORMAT "/" SIZE_FORMAT ")",
                        prev_metadata_used,
                        used_in_bytes(),
                        capacity_in_bytes(),
                        reserved_in_bytes());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)",
                        prev_metadata_used / K,
                        used_in_bytes()/ K,
                        capacity_in_bytes()/K,
                        reserved_in_bytes()/ K);
  }

  gclog_or_tty->print("]");
}
// This is printed when PrintGCDetails
// Summarizes capacity/used/reserved for all metadata, then for the data
// (non-class) and class spaces separately, in KB.
void MetaspaceAux::print_on(outputStream* out) {
  Metaspace::MetadataType ct = Metaspace::ClassType;
  Metaspace::MetadataType nct = Metaspace::NonClassType;

  out->print_cr(" Metaspace total "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
  out->print_cr("  data space     "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
  out->print_cr("  class space    "
                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                " reserved " SIZE_FORMAT "K",
                capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
}
// Print information for class space and data space separately.
// This is almost the same as above.
// Also asserts that used + unused + free-chunk capacity adds up to the
// total capacity in allocated chunks (the chunk accounting identity).
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
  size_t capacity_bytes = capacity_in_bytes(mdtype);
  size_t used_bytes = used_in_bytes(mdtype);
  size_t free_bytes = free_in_bytes(mdtype);
  size_t used_and_free = used_bytes + free_bytes +
                           free_chunks_capacity_bytes;
  out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
                "K + unused in chunks " SIZE_FORMAT "K  + "
                " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
                "K  capacity in allocated chunks " SIZE_FORMAT "K",
                used_bytes / K,
                free_bytes / K,
                free_chunks_capacity_bytes / K,
                used_and_free / K,
                capacity_bytes / K);
  assert(used_and_free == capacity_bytes, "Accounting is wrong");
}
// Print total fragmentation for class and data metaspaces separately
// NOTE(review): only the class *small* waste is printed; cls_medium_waste
// and cls_large_waste are computed but never output — confirm intended.
void MetaspaceAux::print_waste(outputStream* out) {

  size_t small_waste = 0, medium_waste = 0, large_waste = 0;
  size_t cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;

  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    if (msp != NULL) {
      small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);

      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
      cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
    }
  }
  out->print_cr("Total fragmentation waste (words) doesn't count free space");
  out->print("  data: small " SIZE_FORMAT " medium " SIZE_FORMAT,
             small_waste, medium_waste);
  out->print_cr(" class: small " SIZE_FORMAT, cls_small_waste);
}
2449 // Dump global metaspace things from the end of ClassLoaderDataGraph
2450 void MetaspaceAux::dump(outputStream* out) {
2451 out->print_cr("All Metaspace:");
2452 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2453 out->print("class space: "); print_on(out, Metaspace::ClassType);
2454 print_waste(out);
2455 }
2457 void MetaspaceAux::verify_free_chunks() {
2458 Metaspace::space_list()->chunk_manager()->verify();
2459 Metaspace::class_space_list()->chunk_manager()->verify();
2460 }
// Metaspace methods

// Word size of the first chunk handed to the boot class loader;
// assigned in global_initialize().
size_t Metaspace::_first_chunk_word_size = 0;
// Construct a Metaspace with an explicit initial chunk word size
// (used when the caller needs a specific first-chunk size).
Metaspace::Metaspace(Mutex* lock, size_t word_size) {
  initialize(lock, word_size);
}
// Construct a Metaspace with default initial chunk sizes.
Metaspace::Metaspace(Mutex* lock) {
  initialize(lock);
}
// Delete both space managers; their destructors return all of this
// metaspace's chunks to the global free lists.
Metaspace::~Metaspace() {
  delete _vsm;
  delete _class_vsm;
}
// Global virtual space lists for data and class metadata; created in
// global_initialize() / initialize_class_space().
VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

// Initial virtual space size is this multiple of the boot class loader's
// first chunk size.
#define VIRTUALSPACEMULTIPLIER 2
// One-time VM startup initialization: set shared-space alignment, map or
// create the shared archive as requested, and create the global data
// virtual space list.
void Metaspace::global_initialize() {
  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
                                 SharedMiscDataSize + SharedMiscCodeSize,
                                 os::vm_allocation_granularity());
    size_t word_size = total/wordSize;
    _space_list = new VirtualSpaceList(word_size);
  } else {
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict)
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();
      // NOTE(review): memset after 'new' assumes FileMapInfo is plain data
      // (no vtable/non-trivial members) — confirm against filemap.hpp.
      memset(mapinfo, 0, sizeof(FileMapInfo));

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }

    // Initialize this before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * Metaspace::first_chunk_word_size();
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
  }
}
2533 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2534 // Java heap because the compression algorithm is the same for each. The
2535 // argument passed in is at the top of the compressed space
2536 void Metaspace::initialize_class_space(ReservedSpace rs) {
2537 // The reserved space size may be bigger because of alignment, esp with UseLargePages
2538 assert(rs.size() >= ClassMetaspaceSize, err_msg("%d != %d", rs.size(), ClassMetaspaceSize));
2539 _class_space_list = new VirtualSpaceList(rs);
2540 }
// Create the data and class SpaceManagers for this Metaspace and install
// their first chunks.  Called from both constructors.
void Metaspace::initialize(Mutex* lock, size_t initial_size) {
  // Use SmallChunk size if not specified.   If specified, use this size for
  // the data metaspace.
  size_t word_size;
  size_t class_word_size;
  if (initial_size == 0) {
    word_size = (size_t) SpaceManager::SmallChunk;
    class_word_size = (size_t) SpaceManager::SmallChunk;
  } else {
    word_size = initial_size;
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.   The next chunk will be small and progress
    // from there.  This size calculated by -version.
    class_word_size = MIN2((size_t)SpaceManager::MediumChunk*5,
                           (ClassMetaspaceSize/BytesPerWord)*2);
  }

  assert(space_list() != NULL,
    "Metadata VirtualSpaceList has not been initialized");

  _vsm = new SpaceManager(lock, space_list());
  if (_vsm == NULL) {
    return;
  }

  assert(class_space_list() != NULL,
    "Class VirtualSpaceList has not been initialized");

  // Allocate SpaceManager for classes.
  _class_vsm = new SpaceManager(lock, class_space_list());
  if (_class_vsm == NULL) {
    return;
  }

  // Chunk installation mutates the shared virtual space lists.
  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  Metachunk* new_chunk =
     space_list()->current_virtual_space()->get_chunk_vs_with_expand(word_size);
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects
  Metachunk* class_chunk =
     class_space_list()->current_virtual_space()->get_chunk_vs_with_expand(class_word_size);
  if (class_chunk != NULL) {
    class_vsm()->add_chunk(class_chunk, true);
  }
}
2596 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2597 // DumpSharedSpaces doesn't use class metadata area (yet)
2598 if (mdtype == ClassType && !DumpSharedSpaces) {
2599 return class_vsm()->allocate(word_size);
2600 } else {
2601 return vsm()->allocate(word_size);
2602 }
2603 }
// Retry an allocation after a GC: raise the capacity-until-GC threshold
// by the computed delta and allocate again.
MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  MetaWord* result;
  // Record that the next expansion follows a GC.
  MetaspaceGC::set_expand_after_GC(true);
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
  MetaspaceGC::inc_capacity_until_GC(delta_words);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
  }

  result = allocate(word_size, mdtype);

  return result;
}
// Space allocated in the Metaspace.  This may
// be across several metadata virtual spaces.
// Returns the bottom of the data space manager's current chunk; only
// meaningful on the shared-spaces dump path.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}
2628 size_t Metaspace::used_words(MetadataType mdtype) const {
2629 // return vsm()->allocation_total();
2630 return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2631 vsm()->sum_used_in_chunks_in_use(); // includes overhead!
2632 }
2634 size_t Metaspace::free_words(MetadataType mdtype) const {
2635 return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2636 vsm()->sum_free_in_chunks_in_use();
2637 }
2639 // Space capacity in the Metaspace. It includes
2640 // space in the list of chunks from which allocations
2641 // have been made. Don't include space in the global freelist and
2642 // in the space available in the dictionary which
2643 // is already counted in some chunk.
2644 size_t Metaspace::capacity_words(MetadataType mdtype) const {
2645 return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2646 vsm()->sum_capacity_in_chunks_in_use();
2647 }
2649 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2650 if (SafepointSynchronize::is_at_safepoint()) {
2651 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2652 // Don't take Heap_lock
2653 MutexLocker ml(vsm()->lock());
2654 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2655 // Dark matter. Too small for dictionary.
2656 #ifdef ASSERT
2657 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2658 #endif
2659 return;
2660 }
2661 if (is_class) {
2662 class_vsm()->deallocate(ptr, word_size);
2663 } else {
2664 vsm()->deallocate(ptr, word_size);
2665 }
2666 } else {
2667 MutexLocker ml(vsm()->lock());
2669 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2670 // Dark matter. Too small for dictionary.
2671 #ifdef ASSERT
2672 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2673 #endif
2674 return;
2675 }
2676 if (is_class) {
2677 class_vsm()->deallocate(ptr, word_size);
2678 } else {
2679 vsm()->deallocate(ptr, word_size);
2680 }
2681 }
2682 }
// Allocate metadata for "loader_data".  On failure, triggers a GC via
// the collector policy and, if still unsuccessful, reports a metadata
// OOM (posting JVMTI resource-exhausted and throwing OOME).
Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               bool read_only, MetadataType mdtype, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  // SSS: Should we align the allocations and make sure the sizes are aligned.
  MetaWord* result = NULL;

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
        "ClassLoaderData::the_null_class_loader_data() should have been used.");
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    if (read_only) {
      result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
    } else {
      result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
    }
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }
    return Metablock::initialize(result, word_size);
  }

  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    // Try to clean out some memory and retry.
    result =
      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
        loader_data, word_size, mdtype);

    // If result is still null, we are out of memory.
    if (result == NULL) {
      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
      report_java_out_of_memory("Metadata space");

      if (JvmtiExport::should_post_resource_exhausted()) {
        JvmtiExport::post_resource_exhausted(
            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
            "Metadata space");
      }
      THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
    }
  }
  return Metablock::initialize(result, word_size);
}
2735 void Metaspace::print_on(outputStream* out) const {
2736 // Print both class virtual space counts and metaspace.
2737 if (Verbose) {
2738 vsm()->print_on(out);
2739 class_vsm()->print_on(out);
2740 }
2741 }
// Lock-free test whether ptr lies in metaspace (shared space, the data
// virtual spaces, or the class virtual spaces).  Used on async paths
// (e.g. AsyncGetCallTrace) where taking a lock is not safe.
bool Metaspace::contains(const void * ptr) {
  if (MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }
  // This is checked while unlocked.  As long as the virtualspaces are added
  // at the end, the pointer will be in one of them.  The virtual spaces
  // aren't deleted presently.  When they are, some sort of locking might
  // be needed.  Note, locking this can cause inversion problems with the
  // caller in MetaspaceObj::is_metadata() function.
  return space_list()->contains(ptr) || class_space_list()->contains(ptr);
}
// Verify both the data and the class space managers of this Metaspace.
void Metaspace::verify() {
  vsm()->verify();
  class_vsm()->verify();
}
2760 void Metaspace::dump(outputStream* const out) const {
2761 if (UseMallocOnly) {
2762 // Just print usage for now
2763 out->print_cr("usage %d", used_words(Metaspace::NonClassType));
2764 }
2765 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
2766 vsm()->dump(out);
2767 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
2768 class_vsm()->dump(out);
2769 }