Tue, 28 May 2013 16:36:19 -0700
8014912: Restore PrintSharedSpaces functionality after NPG
Summary: Added dumping of object sizes in CDS archive, sorted by MetaspaceObj::Type
Reviewed-by: coleenp, acorn
/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;

// Set this constant to true to enable slow integrity checking of
// the free chunk lists.
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lot_chunk = 3;
const size_t allocation_from_dictionary_limit = 64 * K;

MetaWord* last_allocated = 0;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 1 * K,
  MediumChunk = 8 * K,
  HumongousChunkGranularity = 8
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}
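
// Illustrative sketch only (not code from this file): given the ChunkSizes
// above, a non-class chunk word size is expected to map to a ChunkIndex
// roughly as follows. The authoritative mapping is ChunkManager::list_index(),
// declared below and defined later in this file, which must also handle the
// class-space sizes (ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk).
//
//   if (size == SpecializedChunk) return SpecializedIndex;
//   if (size == SmallChunk)       return SmallIndex;
//   if (size == MediumChunk)      return MediumIndex;
//   return HumongousIndex;  // anything larger is humongous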
// Originally _capacity_until_GC was set to MetaspaceSize here, but
// the default MetaspaceSize before argument processing was being
// used, which was not the desired value. See should_expand()
// for how the initialization is handled now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunks are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspace from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk. The current
// chunk is the chunk from which allocations are done. Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
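//
// A rough sketch of the pieces involved, as described above (illustrative,
// not a precise call graph): SpaceManager::allocate() carves blocks out of
// the current chunk by bumping a pointer; deallocated blocks are returned
// to the SpaceManager's BlockFreelist and may be handed out again from
// there; and when the current chunk is exhausted, grow_and_allocate() asks
// the VirtualSpaceList for a new chunk, trying the global free chunk list
// first and then the current virtual space.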
typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
// count that covers all of them.
class ChunkManager VALUE_OBJ_CLASS_SPEC {

  // Free lists of chunks of different sizes:
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // HumongousChunks are kept in this dictionary rather than
  // in a linked list.
  ChunkTreeDictionary _humongous_dictionary;

  // Total size and count of the free chunks managed by this
  // ChunkManager, across all of its lists.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}

  // Allocate a chunk from, or return (deallocate) a chunk to, the
  // global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);
  void chunk_freelist_deallocate(Metachunk* chunk);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist. It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists
  size_t free_chunks_total();
  size_t free_chunks_total_in_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Add to and remove from a list by size. Selects the
  // list based on the size of the chunk.
  void free_chunks_put(Metachunk* chunk);
  Metachunk* free_chunks_get(size_t chunk_word_size);

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st);
};
// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;
  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, or return a block to, the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};
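
// Illustrative use of BlockFreelist (a sketch, not code from this file):
// the owning SpaceManager returns a freed block and may later get a block
// of the same word size back; lookups are exact-fit and return NULL when
// nothing matches or the request is below the dictionary's minimum size.
//
//   BlockFreelist bf;
//   bf.return_block(p, word_size);           // p points to freed metadata
//   MetaWord* q = bf.get_block(word_size);   // exact fit or NULL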
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;           // next available space in _virtual_space
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  // Convenience functions to access the _virtual_space
  char* low() const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  void inc_container_count();
#ifdef ASSERT
  uint container_count_slow();
#endif

 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return _top + word_size <= end(); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void dec_container_count();
#ifdef ASSERT
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
  size_t free_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);
  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space. Delegates
  // to VirtualSpace.
  bool expand_by(size_t words, bool pre_touch = false);
  bool shrink_by(size_t words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  static void verify_virtual_space_total();
  static void verify_virtual_space_count();
  void mangle();
#endif

  void print_on(outputStream* st) const;
};
// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
  // at a configurable address (SharedBaseAddress), generally at the top of
  // the Java heap so other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Fall back to a region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists. Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif
// List of VirtualSpaces for metadata allocation.
// It has a _next link for a singly linked list and a MemRegion
// for the total space in each VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Global list of virtual spaces
  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;
  // Free chunk list for all other metadata
  ChunkManager _chunk_manager;

  // Can this virtual space list allocate more than one virtual space?
  // Also used to determine whether to allocate unlimited small chunks
  // in this virtual space.
  bool _is_class;
  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }

  // Sum of space in all virtual spaces and number of virtual spaces
  size_t _virtual_space_total;
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);

  // Get another virtual space and add it to the list. This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool grow_vs(size_t vs_word_size);

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  // Get the first chunk for a Metaspace. Used for
  // special cases such as the boot class loader, reflection
  // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  ChunkManager* chunk_manager() { return &_chunk_manager; }
  bool is_class() const { return _is_class; }

  // Allocate the first virtualspace.
  void initialize(size_t word_size);

  size_t virtual_space_total() { return _virtual_space_total; }

  void inc_virtual_space_total(size_t v);
  void dec_virtual_space_total(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  // Unlink empty VirtualSpaceNodes and free them.
  void purge();

  // Used and capacity in the entire list of virtual spaces.
  // These are global values shared by all Metaspaces.
  size_t capacity_words_sum();
  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  size_t used_words_sum();
  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }

  bool contains(const void *ptr);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};
class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _deallocate_block_a_lot_count;
  static int _deallocate_chunk_a_lot_count;
  static int _allocation_fail_alot_count;

 public:
  static int deallocate_block_a_lot_count() {
    return _deallocate_block_a_lot_count;
  }
  static void set_deallocate_block_a_lot_count(int v) {
    _deallocate_block_a_lot_count = v;
  }
  static void inc_deallocate_block_a_lot_count() {
    _deallocate_block_a_lot_count++;
  }
  static int deallocate_chunk_a_lot_count() {
    return _deallocate_chunk_a_lot_count;
  }
  static void reset_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count = 1;
  }
  static void inc_deallocate_chunk_a_lot_count() {
    _deallocate_chunk_a_lot_count++;
  }

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif

  static void deallocate_chunk_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
  static void deallocate_block_a_lot(SpaceManager* sm,
                                     size_t chunk_word_size);
};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;
// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // Protects allocations and contains() queries.
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // Chunk related size
  size_t _medium_chunk_bunch;

  // List of chunks in use by this SpaceManager. Allocations
  // are done from the current chunk. The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Virtual space where allocation comes from.
  VirtualSpaceList* _vs_list;

  // Number of small chunks to allocate to a manager.
  // If this is a class space manager, small chunks are unlimited.
  static uint const _small_chunk_limit;
  bool has_small_chunk_limit() { return !vs_list()->is_class(); }

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // Protects virtualspace and chunk expansions.
  static const char* _expand_lock_name;
  static const int _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }
  VirtualSpaceList* vs_list() const { return _vs_list; }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock,
               VirtualSpaceList* vs_list);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  // Accessors
  size_t specialized_chunk_size() { return SpecializedChunk; }
  size_t small_chunk_size() { return (size_t)(vs_list()->is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size() { return (size_t)(vs_list()->is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size. This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size. This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager. That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk.
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // calculate the chunk size to return (for expanding space
  // for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    // If only the dictionary is going to be used (i.e., no
    // indexed free list), then there is a minimum size requirement.
    // MinChunkSize is a placeholder for the real minimum size JJJ
    size_t byte_size = word_size * BytesPerWord;

    size_t byte_size_with_overhead = byte_size + Metablock::overhead();

    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
                                 Metablock::min_block_byte_size());
    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
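
  // Worked example for get_raw_word_size() (illustrative; the concrete
  // numbers depend on BytesPerWord, Metablock::overhead(),
  // Metablock::min_block_byte_size() and ARENA_ALIGN on the platform):
  // on a 64-bit VM a 1-word request gives byte_size = 8; adding the
  // Metablock overhead, applying the minimum block size and then arena
  // alignment rounds the request up, so the raw_word_size actually
  // allocated is strictly larger than the single word asked for.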
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count, _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count, _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif
// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  Metablock* block = (Metablock*) p;
  block->set_word_size(word_size);
  block->set_prev(NULL);
  block->set_next(NULL);

  return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
    // Dark matter. Too small for the dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  if (free_block == NULL) {
    return NULL;
  }

  return (MetaWord*) free_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}
// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging. Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      tty->print("VirtualSpaceNode::take_from_committed() not available "
                 SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}
// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  size_t bytes = words * BytesPerWord;
  bool result = virtual_space()->expand_by(bytes, pre_touch);
  if (TraceMetavirtualspaceAllocation && !result) {
    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                           "for byte size " SIZE_FORMAT, bytes);
    virtual_space()->print();
  }
  return result;
}

// Shrink the virtual space (uncommit part of the committed space)
bool VirtualSpaceNode::shrink_by(size_t words) {
  size_t bytes = words * BytesPerWord;
  virtual_space()->shrink_by(bytes);
  return true;
}

// Allocate another chunk from this virtual space.

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);

  if (new_chunk == NULL) {
    // Only a small part of the virtualspace is committed when first
    // allocated, so committing more here is to be expected.
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
                                                      page_size_words);
    expand_by(aligned_expand_vs_by_words, false);
    new_chunk = get_chunk_vs(chunk_word_size);
  }
  return new_chunk;
}
bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // An allocation out of this Virtualspace that is larger
  // than an initial commit size can waste that initial committed
  // space.
  size_t committed_byte_size = 0;
  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  if (result) {
    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT
// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_virtual_space_total(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_total = _virtual_space_total + v;
}
void VirtualSpaceList::dec_virtual_space_total(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_total = _virtual_space_total - v;
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}
void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->capacity_word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge() {
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager());
      dec_virtual_space_total(vsl->reserved()->word_size());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}
size_t VirtualSpaceList::used_words_sum() {
  size_t allocated_by_vs = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    // Sum used region [bottom, top) in each virtualspace
    allocated_by_vs += vsl->used_words_in_vs();
  }
  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
         err_msg("Total in free chunks " SIZE_FORMAT
                 " greater than total from virtual_spaces " SIZE_FORMAT,
                 allocated_by_vs, chunk_manager()->free_chunks_total()));
  size_t used =
    allocated_by_vs - chunk_manager()->free_chunks_total();
  return used;
}

// Space available in all MetadataVirtualspaces allocated
// for metadata. This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
  size_t capacity = 0;
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    capacity += vsl->capacity_words_in_vs();
  }
  return capacity;
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  _is_class(false),
  _virtual_space_list(NULL),
  _current_virtual_space(NULL),
  _virtual_space_total(0),
  _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  bool initialization_succeeded = grow_vs(word_size);

  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  assert(initialization_succeeded,
         "VirtualSpaceList initialization should not fail");
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  _is_class(true),
  _virtual_space_list(NULL),
  _current_virtual_space(NULL),
  _virtual_space_total(0),
  _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  assert(succeeded, "VirtualSpaceList initialization should not fail");
  link_vs(class_entry, rs.size()/BytesPerWord);
}
size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  if (vs_word_size == 0) {
    return false;
  }
  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    // Ensure lock-free iteration sees a fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry, vs_word_size);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_virtual_space_total(vs_word_size);
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(tty);
  }
}
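
// Satisfy a chunk request from progressively more expensive sources:
// first the global free chunk list, then the current virtual space, then
// by committing more of the current virtual space, and finally by growing
// the list with a new virtual space (when can_grow() and the MetaspaceGC
// policy permit). A NULL result typically induces a GC.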
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  if (next != NULL) {
    next->container()->inc_container_count();
  } else {
    // Allocate a chunk out of the current virtual space.
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  }

  if (next == NULL) {
    // Not enough room in the current virtual space. Try to commit
    // more space.
    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
                                     grow_chunks_by_words);
    size_t page_size_words = os::vm_page_size() / BytesPerWord;
    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                      page_size_words);
    bool vs_expanded =
      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
    if (!vs_expanded) {
      // Should the capacity of the metaspaces be expanded for
      // this allocation? If it's the virtual space for classes and is
      // being used for compressed class pointers, don't allocate a new
      // virtualspace.
      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
        // Get another virtual space.
        size_t grow_vs_words =
          MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
        if (grow_vs(grow_vs_words)) {
          // Got it. It's on the list now. Get a chunk from it.
          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
        }
      } else {
        // Allocation will fail and induce a GC.
        if (TraceMetadataChunkAllocation && Verbose) {
          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
                                 " Fail instead of expanding the metaspace");
        }
      }
    } else {
      // The virtual space expanded, get a new chunk
      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
      assert(next != NULL, "Just expanded, should succeed");
    }
  }

  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
         "New chunk is still on some list");
  return next;
}
Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
                                                      size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
                                       chunk_word_size,
                                       chunk_bunch);
  return new_chunk;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

bool VirtualSpaceList::contains(const void *ptr) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (node->reserved()->contains(ptr)) {
      return true;
    }
  }
  return false;
}
// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed. If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata. With the perm gen, increases in the perm
// gen were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion. The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC, compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces. The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio,
// as used by some GCs to resize the Java heap. New flags can be implemented
// if really needed. MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size
// allocation), increase by MaxMetaspaceExpansion. If that is still
// not enough, expand by the size of the allocation (word_size) plus
// some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  size_t before_inc = MetaspaceGC::capacity_until_GC();
  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  size_t size_delta_words = align_size_up(word_size, page_size_words);
  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  if (delta_words > min_delta_words) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta_words = MAX2(delta_words, max_delta_words);
    if (delta_words > max_delta_words) {
      // This allocation is large but the next ones are probably not
      // so increase by the minimum.
      delta_words = delta_words + min_delta_words;
    }
  }
  return delta_words;
}
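
// Worked example (illustrative, in words, using the page-aligned request
// size computed above): a request no bigger than MinMetaspaceExpansion
// expands the HWM by min_delta_words; a request between
// MinMetaspaceExpansion and MaxMetaspaceExpansion expands it by
// max_delta_words; and a request above MaxMetaspaceExpansion expands it
// by the request's own page-aligned size plus min_delta_words.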
bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {

  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  // If the user wants a limit, impose one.
  size_t max_metaspace_size_bytes = MaxMetaspaceSize;
  size_t metaspace_size_bytes = MetaspaceSize;
  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
      MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
    return false;
  }

  // Class virtual space should always be expanded. Call GC for the other
  // metadata virtual space.
  if (vsl == Metaspace::class_space_list()) return true;

  // If this is part of an allocation after a GC, expand
  // unconditionally.
  if (MetaspaceGC::expand_after_GC()) {
    return true;
  }

  // If the capacity is below the minimum capacity, allow the
  // expansion. Also set the high-water-mark (capacity_until_GC)
  // to that minimum capacity so that a GC will not be induced
  // until that minimum capacity is exceeded.
  if (committed_capacity_bytes < metaspace_size_bytes ||
      capacity_until_GC() == 0) {
    set_capacity_until_GC(metaspace_size_bytes);
    return true;
  } else {
    if (committed_capacity_bytes < capacity_until_GC()) {
      return true;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                               " capacity_until_GC " SIZE_FORMAT
                               " allocated_capacity_bytes " SIZE_FORMAT,
                               word_size,
                               capacity_until_GC(),
                               MetaspaceAux::allocated_capacity_bytes());
      }
      return false;
    }
  }
}
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Until a faster way of calculating the "used" quantity is implemented,
  // use "capacity".
  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "  used_after_gc: %6.1fKB",
                           used_after_gc / (double) K);
  }

  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      // Re-read the HWM so the printed value reflects any expansion above.
      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
      gclog_or_tty->print_cr("    expanding:"
                             "  minimum_desired_capacity: %6.1fKB"
                             "  expand_bytes: %6.1fKB"
                             "  MinMetaspaceExpansion: %6.1fKB"
                             "  new metaspace HWM: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K,
                             MinMetaspaceExpansion / (double) K,
                             new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
                                        max_shrink_bytes));

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes,
             err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                     shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                               "  shrinking:"
                               "  initSize: %.1fK"
                               "  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K,
                               maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                               "  shrink_bytes: %.1fK"
                               "  current_shrink_factor: %d"
                               "  new shrink factor: %d"
                               "  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K,
                               current_shrink_factor,
                               _shrink_factor,
                               MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  }
}
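
// Illustrative progression of the damping above: consecutive calls that
// keep finding excess capacity shrink by 0% of the excess on the first
// call (_shrink_factor starts at 0), then 10%, then 40% (10 * 4), then
// 100% (capped). Any pass that does not shrink leaves the factor at 0,
// since it is cleared at the top of this method.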
// Metadebug methods

void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
                                       size_t chunk_word_size) {
#ifdef ASSERT
  VirtualSpaceList* vsl = sm->vs_list();
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0) {
    Metadebug::reset_deallocate_chunk_a_lot_count();
    for (uint i = 0; i < metadata_deallocate_a_lot_chunk; i++) {
      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
      if (dummy_chunk == NULL) {
        break;
      }
      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);

      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: " SIZE_FORMAT ") ",
                            sm->sum_count_in_chunks_in_use());
        dummy_chunk->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("  Free chunks total " SIZE_FORMAT " count " SIZE_FORMAT,
                               vsl->chunk_manager()->free_chunks_total(),
                               vsl->chunk_manager()->free_chunks_count());
      }
    }
  } else {
    Metadebug::inc_deallocate_chunk_a_lot_count();
  }
#endif
}

void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
                                       size_t raw_word_size) {
#ifdef ASSERT
  if (MetaDataDeallocateALot &&
      Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0) {
    Metadebug::set_deallocate_block_a_lot_count(0);
    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
      if (dummy_block == 0) {
        break;
      }
      sm->deallocate(dummy_block, raw_word_size);
    }
  } else {
    Metadebug::inc_deallocate_block_a_lot_count();
  }
#endif
}

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif
1557 // ChunkManager methods
1559 // Verification of _free_chunks_total and _free_chunks_count does not
1560 // work with the CMS collector because its use of additional locks
1561 // complicate the mutex deadlock detection but it can still be useful
1562 // for detecting errors in the chunk accounting with other collectors.
1564 size_t ChunkManager::free_chunks_total() {
1565 #ifdef ASSERT
1566 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1567 MutexLockerEx cl(SpaceManager::expand_lock(),
1568 Mutex::_no_safepoint_check_flag);
1569 slow_locked_verify_free_chunks_total();
1570 }
1571 #endif
1572 return _free_chunks_total;
1573 }
1575 size_t ChunkManager::free_chunks_total_in_bytes() {
1576 return free_chunks_total() * BytesPerWord;
1577 }
1579 size_t ChunkManager::free_chunks_count() {
1580 #ifdef ASSERT
1581 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1582 MutexLockerEx cl(SpaceManager::expand_lock(),
1583 Mutex::_no_safepoint_check_flag);
1584 // This lock is only needed in debug because the verification
1585 // of the _free_chunks_totals walks the list of free chunks
1586 slow_locked_verify_free_chunks_count();
1587 }
1588 #endif
1589 return _free_chunks_count;
1590 }
1592 void ChunkManager::locked_verify_free_chunks_total() {
1593 assert_lock_strong(SpaceManager::expand_lock());
1594 assert(sum_free_chunks() == _free_chunks_total,
1595 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1596 " same as sum " SIZE_FORMAT, _free_chunks_total,
1597 sum_free_chunks()));
1598 }
1600 void ChunkManager::verify_free_chunks_total() {
1601 MutexLockerEx cl(SpaceManager::expand_lock(),
1602 Mutex::_no_safepoint_check_flag);
1603 locked_verify_free_chunks_total();
1604 }
1606 void ChunkManager::locked_verify_free_chunks_count() {
1607 assert_lock_strong(SpaceManager::expand_lock());
1608 assert(sum_free_chunks_count() == _free_chunks_count,
1609 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1610 " same as sum " SIZE_FORMAT, _free_chunks_count,
1611 sum_free_chunks_count()));
1612 }
1614 void ChunkManager::verify_free_chunks_count() {
1615 #ifdef ASSERT
1616 MutexLockerEx cl(SpaceManager::expand_lock(),
1617 Mutex::_no_safepoint_check_flag);
1618 locked_verify_free_chunks_count();
1619 #endif
1620 }
1622 void ChunkManager::verify() {
1623 MutexLockerEx cl(SpaceManager::expand_lock(),
1624 Mutex::_no_safepoint_check_flag);
1625 locked_verify();
1626 }
1628 void ChunkManager::locked_verify() {
1629 locked_verify_free_chunks_count();
1630 locked_verify_free_chunks_total();
1631 }
1633 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1634 assert_lock_strong(SpaceManager::expand_lock());
1635 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1636 _free_chunks_total, _free_chunks_count);
1637 }
1639 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1640 assert_lock_strong(SpaceManager::expand_lock());
1641 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1642 sum_free_chunks(), sum_free_chunks_count());
1643 }
1644 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1645 return &_free_chunks[index];
1646 }
1648 // These methods sum the free chunk lists and are used by the
1649 // printing methods that run in product builds.
1650 size_t ChunkManager::sum_free_chunks() {
1651 assert_lock_strong(SpaceManager::expand_lock());
1652 size_t result = 0;
1653 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1654 ChunkList* list = free_chunks(i);
1656 if (list == NULL) {
1657 continue;
1658 }
1660 result = result + list->count() * list->size();
1661 }
1662 result = result + humongous_dictionary()->total_size();
1663 return result;
1664 }
1666 size_t ChunkManager::sum_free_chunks_count() {
1667 assert_lock_strong(SpaceManager::expand_lock());
1668 size_t count = 0;
1669 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1670 ChunkList* list = free_chunks(i);
1671 if (list == NULL) {
1672 continue;
1673 }
1674 count = count + list->count();
1675 }
1676 count = count + humongous_dictionary()->total_free_blocks();
1677 return count;
1678 }
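// Invariant checked by locked_verify(): the running counters kept by the
// ChunkManager must agree with a walk of the free lists, i.e.
//
//   _free_chunks_count == sum_free_chunks_count()
//   _free_chunks_total == sum_free_chunks()
//
// where each sum covers the three indexed free lists plus the humongous
// dictionary.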
1680 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1681 ChunkIndex index = list_index(word_size);
1682 assert(index < HumongousIndex, "No humongous list");
1683 return free_chunks(index);
1684 }
1686 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1687 assert_lock_strong(SpaceManager::expand_lock());
1688 ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1689 chunk->set_next(free_list->head());
1690 free_list->set_head(chunk);
1691 // chunk is being returned to the chunk free list
1692 inc_free_chunks_total(chunk->capacity_word_size());
1693 slow_locked_verify();
1694 }
1696 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1697   // The deallocation of a chunk originates in the freelist
1698   // management code for a Metaspace and does not hold the
1699   // lock.
1700 assert(chunk != NULL, "Deallocating NULL");
1701 assert_lock_strong(SpaceManager::expand_lock());
1702 slow_locked_verify();
1703 if (TraceMetadataChunkAllocation) {
1704 tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1705 PTR_FORMAT " size " SIZE_FORMAT,
1706 chunk, chunk->word_size());
1707 }
1708 free_chunks_put(chunk);
1709 }
1711 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1712 assert_lock_strong(SpaceManager::expand_lock());
1714 slow_locked_verify();
1716 Metachunk* chunk = NULL;
1717 if (list_index(word_size) != HumongousIndex) {
1718 ChunkList* free_list = find_free_chunks_list(word_size);
1719 assert(free_list != NULL, "Sanity check");
1721 chunk = free_list->head();
1722 debug_only(Metachunk* debug_head = chunk;)
1724 if (chunk == NULL) {
1725 return NULL;
1726 }
1728 // Remove the chunk as the head of the list.
1729 free_list->remove_chunk(chunk);
1731 // Chunk is being removed from the chunks free list.
1732 dec_free_chunks_total(chunk->capacity_word_size());
1734 if (TraceMetadataChunkAllocation && Verbose) {
1735 tty->print_cr("ChunkManager::free_chunks_get: free_list "
1736 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1737 free_list, chunk, chunk->word_size());
1738 }
1739 } else {
1740 chunk = humongous_dictionary()->get_chunk(
1741 word_size,
1742 FreeBlockDictionary<Metachunk>::atLeast);
1744 if (chunk != NULL) {
1745 if (TraceMetadataHumongousAllocation) {
1746 size_t waste = chunk->word_size() - word_size;
1747 tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
1748 " for requested size " SIZE_FORMAT
1749 " waste " SIZE_FORMAT,
1750 chunk->word_size(), word_size, waste);
1751 }
1752 // Chunk is being removed from the chunks free list.
1753 dec_free_chunks_total(chunk->capacity_word_size());
1754 } else {
1755 return NULL;
1756 }
1757 }
1759 // Remove it from the links to this freelist
1760 chunk->set_next(NULL);
1761 chunk->set_prev(NULL);
1762 #ifdef ASSERT
1763   // Chunk is no longer on any freelist. Setting to false makes
1764   // container_count_slow() work.
1765 chunk->set_is_free(false);
1766 #endif
1767 slow_locked_verify();
1768 return chunk;
1769 }
1771 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1772 assert_lock_strong(SpaceManager::expand_lock());
1773 slow_locked_verify();
1775 // Take from the beginning of the list
1776 Metachunk* chunk = free_chunks_get(word_size);
1777 if (chunk == NULL) {
1778 return NULL;
1779 }
1781   assert((word_size <= chunk->word_size()) ||
1782          (list_index(chunk->word_size()) == HumongousIndex),
1783          "Non-humongous variable sized chunk");
1784 if (TraceMetadataChunkAllocation) {
1785 size_t list_count;
1786 if (list_index(word_size) < HumongousIndex) {
1787 ChunkList* list = find_free_chunks_list(word_size);
1788 list_count = list->count();
1789 } else {
1790 list_count = humongous_dictionary()->total_count();
1791 }
1792 tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1793 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1794 this, chunk, chunk->word_size(), list_count);
1795 locked_print_free_chunks(tty);
1796 }
1798 return chunk;
1799 }
1801 void ChunkManager::print_on(outputStream* out) {
1802 if (PrintFLSStatistics != 0) {
1803 humongous_dictionary()->report_statistics();
1804 }
1805 }
1807 // SpaceManager methods
1809 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1810 size_t* chunk_word_size,
1811 size_t* class_chunk_word_size) {
1812 switch (type) {
1813 case Metaspace::BootMetaspaceType:
1814 *chunk_word_size = Metaspace::first_chunk_word_size();
1815 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1816 break;
1817 case Metaspace::ROMetaspaceType:
1818 *chunk_word_size = SharedReadOnlySize / wordSize;
1819 *class_chunk_word_size = ClassSpecializedChunk;
1820 break;
1821 case Metaspace::ReadWriteMetaspaceType:
1822 *chunk_word_size = SharedReadWriteSize / wordSize;
1823 *class_chunk_word_size = ClassSpecializedChunk;
1824 break;
1825 case Metaspace::AnonymousMetaspaceType:
1826 case Metaspace::ReflectionMetaspaceType:
1827 *chunk_word_size = SpecializedChunk;
1828 *class_chunk_word_size = ClassSpecializedChunk;
1829 break;
1830 default:
1831 *chunk_word_size = SmallChunk;
1832 *class_chunk_word_size = ClassSmallChunk;
1833 break;
1834 }
1835 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1836 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1837 " class " SIZE_FORMAT,
1838 *chunk_word_size, *class_chunk_word_size));
1839 }
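// Illustrative call (values in words, taken from the switch above):
//
//   size_t data_words, class_words;
//   get_initial_chunk_sizes(Metaspace::AnonymousMetaspaceType,
//                           &data_words, &class_words);
//   // data_words  == SpecializedChunk      (128)
//   // class_words == ClassSpecializedChunk (128)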
1841 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1842 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1843 size_t free = 0;
1844 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1845 Metachunk* chunk = chunks_in_use(i);
1846 while (chunk != NULL) {
1847 free += chunk->free_word_size();
1848 chunk = chunk->next();
1849 }
1850 }
1851 return free;
1852 }
1854 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1855 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1856 size_t result = 0;
1857 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1858 result += sum_waste_in_chunks_in_use(i);
1859 }
1861 return result;
1862 }
1864 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1865 size_t result = 0;
1866 Metachunk* chunk = chunks_in_use(index);
1867   // Count the free space in all the chunks but not the
1868   // current chunk from which allocations are still being done.
1869 if (chunk != NULL) {
1871     while (chunk != NULL && chunk != current_chunk()) {
1872       result += chunk->free_word_size();
1874       chunk = chunk->next();
1875     }
1876 }
1877 return result;
1878 }
1880 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1881   // For CMS use "allocated_chunks_words()" which does not need the
1882   // Metaspace lock.  For the other collectors sum over the
1883   // lists.  Using both methods checks that "allocated_chunks_words()"
1884   // is correct: sum_capacity_in_chunks_in_use() is too expensive
1885   // to use in the product, so allocated_chunks_words() should be used,
1886   // but the list walk allows checking that allocated_chunks_words()
1887   // returns the same value as sum_capacity_in_chunks_in_use(), which
1888   // is the definitive answer.
1889 if (UseConcMarkSweepGC) {
1890 return allocated_chunks_words();
1891 } else {
1892 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1893 size_t sum = 0;
1894 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1895 Metachunk* chunk = chunks_in_use(i);
1896 while (chunk != NULL) {
1897 sum += chunk->capacity_word_size();
1898 chunk = chunk->next();
1899 }
1900 }
1901 return sum;
1902 }
1903 }
1905 size_t SpaceManager::sum_count_in_chunks_in_use() {
1906 size_t count = 0;
1907 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1908 count = count + sum_count_in_chunks_in_use(i);
1909 }
1911 return count;
1912 }
1914 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1915 size_t count = 0;
1916 Metachunk* chunk = chunks_in_use(i);
1917 while (chunk != NULL) {
1918 count++;
1919 chunk = chunk->next();
1920 }
1921 return count;
1922 }
1925 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1926 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1927 size_t used = 0;
1928 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1929 Metachunk* chunk = chunks_in_use(i);
1930 while (chunk != NULL) {
1931 used += chunk->used_word_size();
1932 chunk = chunk->next();
1933 }
1934 }
1935 return used;
1936 }
1938 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1940 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1941 Metachunk* chunk = chunks_in_use(i);
1942 st->print("SpaceManager: %s " PTR_FORMAT,
1943 chunk_size_name(i), chunk);
1944 if (chunk != NULL) {
1945 st->print_cr(" free " SIZE_FORMAT,
1946 chunk->free_word_size());
1947 } else {
1948 st->print_cr("");
1949 }
1950 }
1952 vs_list()->chunk_manager()->locked_print_free_chunks(st);
1953 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1954 }
1956 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1958 // Decide between a small chunk and a medium chunk. Up to
1959 // _small_chunk_limit small chunks can be allocated but
1960 // once a medium chunk has been allocated, no more small
1961 // chunks will be allocated.
1962 size_t chunk_word_size;
1963 if (chunks_in_use(MediumIndex) == NULL &&
1964 (!has_small_chunk_limit() ||
1965 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
1966 chunk_word_size = (size_t) small_chunk_size();
1967 if (word_size + Metachunk::overhead() > small_chunk_size()) {
1968 chunk_word_size = medium_chunk_size();
1969 }
1970 } else {
1971 chunk_word_size = medium_chunk_size();
1972 }
1974 // Might still need a humongous chunk. Enforce an
1975 // eight word granularity to facilitate reuse (some
1976 // wastage but better chance of reuse).
1977 size_t if_humongous_sized_chunk =
1978 align_size_up(word_size + Metachunk::overhead(),
1979 HumongousChunkGranularity);
1980 chunk_word_size =
1981 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1983 assert(!SpaceManager::is_humongous(word_size) ||
1984 chunk_word_size == if_humongous_sized_chunk,
1985 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1986 " chunk_word_size " SIZE_FORMAT,
1987 word_size, chunk_word_size));
1988 if (TraceMetadataHumongousAllocation &&
1989 SpaceManager::is_humongous(word_size)) {
1990 gclog_or_tty->print_cr("Metadata humongous allocation:");
1991 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
1992 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
1993 chunk_word_size);
1994 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
1995 Metachunk::overhead());
1996 }
1997 return chunk_word_size;
1998 }
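// Worked example for the humongous path above, assuming an overhead of
// two words (the actual value is Metachunk::overhead()): a request for
// 8193 words gives
//
//   if_humongous_sized_chunk = align_size_up(8193 + 2, 8) = 8200
//
// so the chunk is rounded up to the eight-word granularity rather than
// sized at exactly 8195 words.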
2000 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2001 assert(vs_list()->current_virtual_space() != NULL,
2002 "Should have been set");
2003 assert(current_chunk() == NULL ||
2004 current_chunk()->allocate(word_size) == NULL,
2005 "Don't need to expand");
2006 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2008 if (TraceMetadataChunkAllocation && Verbose) {
2009 size_t words_left = 0;
2010 size_t words_used = 0;
2011 if (current_chunk() != NULL) {
2012 words_left = current_chunk()->free_word_size();
2013 words_used = current_chunk()->used_word_size();
2014 }
2015 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2016 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2017 " words left",
2018 word_size, words_used, words_left);
2019 }
2021 // Get another chunk out of the virtual space
2022 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2023 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2025 // If a chunk was available, add it to the in-use chunk list
2026 // and do an allocation from it.
2027 if (next != NULL) {
2028 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2029 // Add to this manager's list of chunks in use.
2030 add_chunk(next, false);
2031 return next->allocate(word_size);
2032 }
2033 return NULL;
2034 }
2036 void SpaceManager::print_on(outputStream* st) const {
2038 for (ChunkIndex i = ZeroIndex;
2039 i < NumberOfInUseLists ;
2040 i = next_chunk_index(i) ) {
2041 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2042 chunks_in_use(i),
2043 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2044 }
2045 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2046 " Humongous " SIZE_FORMAT,
2047 sum_waste_in_chunks_in_use(SmallIndex),
2048 sum_waste_in_chunks_in_use(MediumIndex),
2049 sum_waste_in_chunks_in_use(HumongousIndex));
2050 // block free lists
2051 if (block_freelists() != NULL) {
2052 st->print_cr("total in block free lists " SIZE_FORMAT,
2053 block_freelists()->total_size());
2054 }
2055 }
2057 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2058 Mutex* lock,
2059 VirtualSpaceList* vs_list) :
2060 _vs_list(vs_list),
2061 _mdtype(mdtype),
2062 _allocated_blocks_words(0),
2063 _allocated_chunks_words(0),
2064 _allocated_chunks_count(0),
2065 _lock(lock)
2066 {
2067 initialize();
2068 }
2070 void SpaceManager::inc_size_metrics(size_t words) {
2071 assert_lock_strong(SpaceManager::expand_lock());
2072   // Total words in allocated Metachunks and the count of allocated
2073   // Metachunks for each SpaceManager
2074 _allocated_chunks_words = _allocated_chunks_words + words;
2075 _allocated_chunks_count++;
2076 // Global total of capacity in allocated Metachunks
2077 MetaspaceAux::inc_capacity(mdtype(), words);
2078 // Global total of allocated Metablocks.
2079 // used_words_slow() includes the overhead in each
2080 // Metachunk so include it in the used when the
2081 // Metachunk is first added (so only added once per
2082 // Metachunk).
2083 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2084 }
2086 void SpaceManager::inc_used_metrics(size_t words) {
2087 // Add to the per SpaceManager total
2088 Atomic::add_ptr(words, &_allocated_blocks_words);
2089 // Add to the global total
2090 MetaspaceAux::inc_used(mdtype(), words);
2091 }
2093 void SpaceManager::dec_total_from_size_metrics() {
2094 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2095 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2096 // Also deduct the overhead per Metachunk
2097 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2098 }
2100 void SpaceManager::initialize() {
2101 Metadebug::init_allocation_fail_alot_count();
2102 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2103 _chunks_in_use[i] = NULL;
2104 }
2105 _current_chunk = NULL;
2106 if (TraceMetadataChunkAllocation && Verbose) {
2107 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2108 }
2109 }
2111 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2112 if (chunks == NULL) {
2113 return;
2114 }
2115 ChunkList* list = free_chunks(index);
2116 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2117 assert_lock_strong(SpaceManager::expand_lock());
2118 Metachunk* cur = chunks;
2120   // This returns chunks one at a time.  If a new
2121   // List class is created as a base class of FreeList,
2122   // something like FreeList::prepend() could be used in place
2123   // of this loop (a sketch follows this method).
2124 while (cur != NULL) {
2125 assert(cur->container() != NULL, "Container should have been set");
2126 cur->container()->dec_container_count();
2127 // Capture the next link before it is changed
2128 // by the call to return_chunk_at_head();
2129 Metachunk* next = cur->next();
2130 cur->set_is_free(true);
2131 list->return_chunk_at_head(cur);
2132 cur = next;
2133 }
2134 }
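// A sketch of the FreeList::prepend() suggested above.  Neither the List
// base class nor prepend() exists yet, so the names and the set_count()
// helper are hypothetical; given the head, tail and length of a
// NULL-terminated chunk list it would splice the whole list in one step:
//
//   void FreeList::prepend(Metachunk* head, Metachunk* tail, size_t n) {
//     tail->set_next(this->head());
//     set_head(head);
//     set_count(count() + n);
//   }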
2136 SpaceManager::~SpaceManager() {
2137   // This takes this->_lock, which can't be done while holding the expand_lock()
2138 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2139 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2140 " allocated_chunks_words() " SIZE_FORMAT,
2141 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2143 MutexLockerEx fcl(SpaceManager::expand_lock(),
2144 Mutex::_no_safepoint_check_flag);
2146 ChunkManager* chunk_manager = vs_list()->chunk_manager();
2148 chunk_manager->slow_locked_verify();
2150 dec_total_from_size_metrics();
2152 if (TraceMetadataChunkAllocation && Verbose) {
2153 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2154 locked_print_chunks_in_use_on(gclog_or_tty);
2155 }
2157   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
2158   // is used during the freeing of VirtualSpaceNodes.
2160 // Have to update before the chunks_in_use lists are emptied
2161 // below.
2162 chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
2163 sum_count_in_chunks_in_use());
2165 // Add all the chunks in use by this space manager
2166 // to the global list of free chunks.
2168 // Follow each list of chunks-in-use and add them to the
2169 // free lists. Each list is NULL terminated.
2171 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2172 if (TraceMetadataChunkAllocation && Verbose) {
2173 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2174 sum_count_in_chunks_in_use(i),
2175 chunk_size_name(i));
2176 }
2177 Metachunk* chunks = chunks_in_use(i);
2178 chunk_manager->return_chunks(i, chunks);
2179 set_chunks_in_use(i, NULL);
2180 if (TraceMetadataChunkAllocation && Verbose) {
2181 gclog_or_tty->print_cr("updated freelist count %d %s",
2182 chunk_manager->free_chunks(i)->count(),
2183 chunk_size_name(i));
2184 }
2185 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2186 }
2188 // The medium chunk case may be optimized by passing the head and
2189 // tail of the medium chunk list to add_at_head(). The tail is often
2190 // the current chunk but there are probably exceptions.
2192 // Humongous chunks
2193 if (TraceMetadataChunkAllocation && Verbose) {
2194 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2195 sum_count_in_chunks_in_use(HumongousIndex),
2196 chunk_size_name(HumongousIndex));
2197 gclog_or_tty->print("Humongous chunk dictionary: ");
2198 }
2199 // Humongous chunks are never the current chunk.
2200 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2202 while (humongous_chunks != NULL) {
2203 #ifdef ASSERT
2204 humongous_chunks->set_is_free(true);
2205 #endif
2206 if (TraceMetadataChunkAllocation && Verbose) {
2207 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2208 humongous_chunks,
2209 humongous_chunks->word_size());
2210 }
2211 assert(humongous_chunks->word_size() == (size_t)
2212 align_size_up(humongous_chunks->word_size(),
2213 HumongousChunkGranularity),
2214 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2215 " granularity %d",
2216 humongous_chunks->word_size(), HumongousChunkGranularity));
2217 Metachunk* next_humongous_chunks = humongous_chunks->next();
2218 humongous_chunks->container()->dec_container_count();
2219 chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
2220 humongous_chunks = next_humongous_chunks;
2221 }
2222 if (TraceMetadataChunkAllocation && Verbose) {
2223 gclog_or_tty->print_cr("");
2224 gclog_or_tty->print_cr("updated dictionary count %d %s",
2225 chunk_manager->humongous_dictionary()->total_count(),
2226 chunk_size_name(HumongousIndex));
2227 }
2228 chunk_manager->slow_locked_verify();
2229 }
2231 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2232 switch (index) {
2233 case SpecializedIndex:
2234 return "Specialized";
2235 case SmallIndex:
2236 return "Small";
2237 case MediumIndex:
2238 return "Medium";
2239 case HumongousIndex:
2240 return "Humongous";
2241 default:
2242 return NULL;
2243 }
2244 }
2246 ChunkIndex ChunkManager::list_index(size_t size) {
2247 switch (size) {
2248 case SpecializedChunk:
2249 assert(SpecializedChunk == ClassSpecializedChunk,
2250 "Need branch for ClassSpecializedChunk");
2251 return SpecializedIndex;
2252 case SmallChunk:
2253 case ClassSmallChunk:
2254 return SmallIndex;
2255 case MediumChunk:
2256 case ClassMediumChunk:
2257 return MediumIndex;
2258 default:
2259 assert(size > MediumChunk || size > ClassMediumChunk,
2260 "Not a humongous chunk");
2261 return HumongousIndex;
2262 }
2263 }
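// Examples of the mapping above (word sizes from the ChunkSizes enum):
//
//   list_index(SpecializedChunk) == SpecializedIndex  // 128 words
//   list_index(SmallChunk)       == SmallIndex        // 512 words
//   list_index(MediumChunk)      == MediumIndex       // 8*K words
//   list_index(MediumChunk + 1)  == HumongousIndex    // anything larger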
2265 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2266 assert_lock_strong(_lock);
2267 size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2268 assert(word_size >= min_size,
2269 err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2270 block_freelists()->return_block(p, word_size);
2271 }
2273 // Adds a chunk to the list of chunks in use.
2274 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2276 assert(new_chunk != NULL, "Should not be NULL");
2277 assert(new_chunk->next() == NULL, "Should not be on a list");
2279 new_chunk->reset_empty();
2281   // Find the correct list and set the current
2282   // chunk for that list.
2283 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2285 if (index != HumongousIndex) {
2286 set_current_chunk(new_chunk);
2287 new_chunk->set_next(chunks_in_use(index));
2288 set_chunks_in_use(index, new_chunk);
2289 } else {
2290 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2291 // small, so small will be null. Link this first chunk as the current
2292 // chunk.
2293 if (make_current) {
2294 // Set as the current chunk but otherwise treat as a humongous chunk.
2295 set_current_chunk(new_chunk);
2296 }
2297     // Link at head.  The _current_chunk only points to a humongous chunk
2298     // for the null class loader metaspace (class and data virtual space
2299     // managers), so it will not point to the tail of the humongous
2300     // chunks list.
2301 new_chunk->set_next(chunks_in_use(HumongousIndex));
2302 set_chunks_in_use(HumongousIndex, new_chunk);
2304 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2305 }
2307 // Add to the running sum of capacity
2308 inc_size_metrics(new_chunk->word_size());
2310 assert(new_chunk->is_empty(), "Not ready for reuse");
2311 if (TraceMetadataChunkAllocation && Verbose) {
2312 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2313 sum_count_in_chunks_in_use());
2314 new_chunk->print_on(gclog_or_tty);
2315 if (vs_list() != NULL) {
2316 vs_list()->chunk_manager()->locked_print_free_chunks(tty);
2317 }
2318 }
2319 }
2321 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2322 size_t grow_chunks_by_words) {
2324 Metachunk* next = vs_list()->get_new_chunk(word_size,
2325 grow_chunks_by_words,
2326 medium_chunk_bunch());
2328 if (TraceMetadataHumongousAllocation &&
2329 SpaceManager::is_humongous(next->word_size())) {
2330 gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT,
2331 next->word_size());
2332 }
2334 return next;
2335 }
2337 MetaWord* SpaceManager::allocate(size_t word_size) {
2338 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2340 size_t raw_word_size = get_raw_word_size(word_size);
2341 BlockFreelist* fl = block_freelists();
2342 MetaWord* p = NULL;
2343 // Allocation from the dictionary is expensive in the sense that
2344 // the dictionary has to be searched for a size. Don't allocate
2345 // from the dictionary until it starts to get fat. Is this
2346   // a reasonable policy?  Maybe a skinny dictionary is fast enough
2347 // for allocations. Do some profiling. JJJ
2348 if (fl->total_size() > allocation_from_dictionary_limit) {
2349 p = fl->get_block(raw_word_size);
2350 }
2351 if (p == NULL) {
2352 p = allocate_work(raw_word_size);
2353 }
2354 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2356 return p;
2357 }
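// The policy above is two-level: returned blocks are reused from the
// block freelists only once they hold more than
// allocation_from_dictionary_limit (64*K) words in total, because
// searching the dictionary by size is comparatively expensive; otherwise
// the request goes to allocate_work(), i.e. to the current chunk and,
// failing that, to grow_and_allocate().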
2359 // Returns the address of the space allocated for "word_size".
2360 // This method does not know about blocks (Metablocks).
2361 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2362 assert_lock_strong(_lock);
2363 #ifdef ASSERT
2364 if (Metadebug::test_metadata_failure()) {
2365 return NULL;
2366 }
2367 #endif
2368 // Is there space in the current chunk?
2369 MetaWord* result = NULL;
2371 // For DumpSharedSpaces, only allocate out of the current chunk which is
2372 // never null because we gave it the size we wanted. Caller reports out
2373 // of memory if this returns null.
2374 if (DumpSharedSpaces) {
2375 assert(current_chunk() != NULL, "should never happen");
2376 inc_used_metrics(word_size);
2377 return current_chunk()->allocate(word_size); // caller handles null result
2378 }
2379 if (current_chunk() != NULL) {
2380 result = current_chunk()->allocate(word_size);
2381 }
2383 if (result == NULL) {
2384 result = grow_and_allocate(word_size);
2385 }
2386   if (result != NULL) {
2387 inc_used_metrics(word_size);
2388 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2389 "Head of the list is being allocated");
2390 }
2392 return result;
2393 }
2395 void SpaceManager::verify() {
2396   // If there are blocks in the dictionary, then
2397   // verification of chunks does not work since
2398   // being in the dictionary alters a chunk.
2399 if (block_freelists()->total_size() == 0) {
2400 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2401 Metachunk* curr = chunks_in_use(i);
2402 while (curr != NULL) {
2403 curr->verify();
2404 verify_chunk_size(curr);
2405 curr = curr->next();
2406 }
2407 }
2408 }
2409 }
2411 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2412 assert(is_humongous(chunk->word_size()) ||
2413 chunk->word_size() == medium_chunk_size() ||
2414 chunk->word_size() == small_chunk_size() ||
2415 chunk->word_size() == specialized_chunk_size(),
2416 "Chunk size is wrong");
2417 return;
2418 }
2420 #ifdef ASSERT
2421 void SpaceManager::verify_allocated_blocks_words() {
2422 // Verification is only guaranteed at a safepoint.
2423 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2424 "Verification can fail if the applications is running");
2425 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2426 err_msg("allocation total is not consistent " SIZE_FORMAT
2427 " vs " SIZE_FORMAT,
2428 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2429 }
2431 #endif
2433 void SpaceManager::dump(outputStream* const out) const {
2434 size_t curr_total = 0;
2435 size_t waste = 0;
2436 uint i = 0;
2437 size_t used = 0;
2438 size_t capacity = 0;
2440 // Add up statistics for all chunks in this SpaceManager.
2441 for (ChunkIndex index = ZeroIndex;
2442 index < NumberOfInUseLists;
2443 index = next_chunk_index(index)) {
2444 for (Metachunk* curr = chunks_in_use(index);
2445 curr != NULL;
2446 curr = curr->next()) {
2447 out->print("%d) ", i++);
2448 curr->print_on(out);
2449 if (TraceMetadataChunkAllocation && Verbose) {
2450 block_freelists()->print_on(out);
2451 }
2452 curr_total += curr->word_size();
2453 used += curr->used_word_size();
2454 capacity += curr->capacity_word_size();
2455       waste += curr->free_word_size() + curr->overhead();
2456 }
2457 }
2459 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2460 // Free space isn't wasted.
2461 waste -= free;
2463 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2464 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2465 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2466 }
2468 #ifndef PRODUCT
2469 void SpaceManager::mangle_freed_chunks() {
2470 for (ChunkIndex index = ZeroIndex;
2471 index < NumberOfInUseLists;
2472 index = next_chunk_index(index)) {
2473 for (Metachunk* curr = chunks_in_use(index);
2474 curr != NULL;
2475 curr = curr->next()) {
2476 curr->mangle();
2477 }
2478 }
2479 }
2480 #endif // PRODUCT
2482 // MetaspaceAux
2485 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2486 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2488 size_t MetaspaceAux::free_bytes() {
2489 size_t result = 0;
2490 if (Metaspace::class_space_list() != NULL) {
2491 result = result + Metaspace::class_space_list()->free_bytes();
2492 }
2493 if (Metaspace::space_list() != NULL) {
2494 result = result + Metaspace::space_list()->free_bytes();
2495 }
2496 return result;
2497 }
2499 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2500 assert_lock_strong(SpaceManager::expand_lock());
2501 assert(words <= allocated_capacity_words(mdtype),
2502 err_msg("About to decrement below 0: words " SIZE_FORMAT
2503 " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2504 words, mdtype, allocated_capacity_words(mdtype)));
2505 _allocated_capacity_words[mdtype] -= words;
2506 }
2508 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2509 assert_lock_strong(SpaceManager::expand_lock());
2510   // Protected by the expand_lock(), so a plain add suffices here
2511 _allocated_capacity_words[mdtype] += words;
2512 }
2514 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2515 assert(words <= allocated_used_words(mdtype),
2516 err_msg("About to decrement below 0: words " SIZE_FORMAT
2517 " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2518 words, mdtype, allocated_used_words(mdtype)));
2519 // For CMS deallocation of the Metaspaces occurs during the
2520 // sweep which is a concurrent phase. Protection by the expand_lock()
2521 // is not enough since allocation is on a per Metaspace basis
2522 // and protected by the Metaspace lock.
2523 jlong minus_words = (jlong) - (jlong) words;
2524 Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2525 }
2527 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2528 // _allocated_used_words tracks allocations for
2529 // each piece of metadata. Those allocations are
2530 // generally done concurrently by different application
2531 // threads so must be done atomically.
2532 Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2533 }
2535 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2536 size_t used = 0;
2537 ClassLoaderDataGraphMetaspaceIterator iter;
2538 while (iter.repeat()) {
2539 Metaspace* msp = iter.get_next();
2540 // Sum allocated_blocks_words for each metaspace
2541 if (msp != NULL) {
2542 used += msp->used_words_slow(mdtype);
2543 }
2544 }
2545 return used * BytesPerWord;
2546 }
2548 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2549 size_t free = 0;
2550 ClassLoaderDataGraphMetaspaceIterator iter;
2551 while (iter.repeat()) {
2552 Metaspace* msp = iter.get_next();
2553 if (msp != NULL) {
2554 free += msp->free_words(mdtype);
2555 }
2556 }
2557 return free * BytesPerWord;
2558 }
2560 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2561 // Don't count the space in the freelists. That space will be
2562 // added to the capacity calculation as needed.
2563 size_t capacity = 0;
2564 ClassLoaderDataGraphMetaspaceIterator iter;
2565 while (iter.repeat()) {
2566 Metaspace* msp = iter.get_next();
2567 if (msp != NULL) {
2568 capacity += msp->capacity_words_slow(mdtype);
2569 }
2570 }
2571 return capacity * BytesPerWord;
2572 }
2574 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2575 size_t reserved = (mdtype == Metaspace::ClassType) ?
2576 Metaspace::class_space_list()->virtual_space_total() :
2577 Metaspace::space_list()->virtual_space_total();
2578 return reserved * BytesPerWord;
2579 }
2581 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2583 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2584 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2585 Metaspace::class_space_list()->chunk_manager() :
2586 Metaspace::space_list()->chunk_manager();
2587 chunk->slow_verify();
2588 return chunk->free_chunks_total();
2589 }
2591 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2592 return free_chunks_total(mdtype) * BytesPerWord;
2593 }
2595 size_t MetaspaceAux::free_chunks_total() {
2596 return free_chunks_total(Metaspace::ClassType) +
2597 free_chunks_total(Metaspace::NonClassType);
2598 }
2600 size_t MetaspaceAux::free_chunks_total_in_bytes() {
2601 return free_chunks_total() * BytesPerWord;
2602 }
2604 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2605 gclog_or_tty->print(", [Metaspace:");
2606 if (PrintGCDetails && Verbose) {
2607 gclog_or_tty->print(" " SIZE_FORMAT
2608 "->" SIZE_FORMAT
2609 "(" SIZE_FORMAT ")",
2610 prev_metadata_used,
2611 allocated_capacity_bytes(),
2612 reserved_in_bytes());
2613 } else {
2614 gclog_or_tty->print(" " SIZE_FORMAT "K"
2615 "->" SIZE_FORMAT "K"
2616 "(" SIZE_FORMAT "K)",
2617 prev_metadata_used / K,
2618 allocated_capacity_bytes() / K,
2619 reserved_in_bytes()/ K);
2620 }
2622 gclog_or_tty->print("]");
2623 }
2625 // This is printed when PrintGCDetails is enabled
2626 void MetaspaceAux::print_on(outputStream* out) {
2627 Metaspace::MetadataType ct = Metaspace::ClassType;
2628 Metaspace::MetadataType nct = Metaspace::NonClassType;
2630 out->print_cr(" Metaspace total "
2631 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2632 " reserved " SIZE_FORMAT "K",
2633 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
2635 out->print_cr(" data space "
2636 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2637 " reserved " SIZE_FORMAT "K",
2638 allocated_capacity_bytes(nct)/K,
2639 allocated_used_bytes(nct)/K,
2640 reserved_in_bytes(nct)/K);
2641 out->print_cr(" class space "
2642 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2643 " reserved " SIZE_FORMAT "K",
2644 allocated_capacity_bytes(ct)/K,
2645 allocated_used_bytes(ct)/K,
2646 reserved_in_bytes(ct)/K);
2647 }
2649 // Print information for class space and data space separately.
2650 // This is almost the same as above.
2651 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2652 size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2653 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2654 size_t used_bytes = used_bytes_slow(mdtype);
2655 size_t free_bytes = free_in_bytes(mdtype);
2656 size_t used_and_free = used_bytes + free_bytes +
2657 free_chunks_capacity_bytes;
2658 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2659 "K + unused in chunks " SIZE_FORMAT "K + "
2660 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2661 "K capacity in allocated chunks " SIZE_FORMAT "K",
2662 used_bytes / K,
2663 free_bytes / K,
2664 free_chunks_capacity_bytes / K,
2665 used_and_free / K,
2666 capacity_bytes / K);
2667 // Accounting can only be correct if we got the values during a safepoint
2668 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2669 }
2671 // Print total fragmentation for class and data metaspaces separately
2672 void MetaspaceAux::print_waste(outputStream* out) {
2674 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
2675 size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
2676 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
2677 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
2679 ClassLoaderDataGraphMetaspaceIterator iter;
2680 while (iter.repeat()) {
2681 Metaspace* msp = iter.get_next();
2682 if (msp != NULL) {
2683 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2684 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2685 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2686 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2687 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2688 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2689 large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2690 large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2692 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2693 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2694 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2695 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2696 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2697 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2698 cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2699 cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2700 }
2701 }
2702 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2703 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2704 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2705 SIZE_FORMAT " medium(s) " SIZE_FORMAT,
2706 specialized_count, specialized_waste, small_count,
2707 small_waste, medium_count, medium_waste);
2708 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2709 SIZE_FORMAT " small(s) " SIZE_FORMAT,
2710 cls_specialized_count, cls_specialized_waste,
2711 cls_small_count, cls_small_waste);
2712 }
2714 // Dump global metaspace things from the end of ClassLoaderDataGraph
2715 void MetaspaceAux::dump(outputStream* out) {
2716 out->print_cr("All Metaspace:");
2717 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2718 out->print("class space: "); print_on(out, Metaspace::ClassType);
2719 print_waste(out);
2720 }
2722 void MetaspaceAux::verify_free_chunks() {
2723 Metaspace::space_list()->chunk_manager()->verify();
2724 Metaspace::class_space_list()->chunk_manager()->verify();
2725 }
2727 void MetaspaceAux::verify_capacity() {
2728 #ifdef ASSERT
2729 size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2730 // For purposes of the running sum of capacity, verify against capacity
2731 size_t capacity_in_use_bytes = capacity_bytes_slow();
2732 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2733 err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2734          " capacity_bytes_slow() " SIZE_FORMAT,
2735 running_sum_capacity_bytes, capacity_in_use_bytes));
2736 for (Metaspace::MetadataType i = Metaspace::ClassType;
2737        i < Metaspace::MetadataTypeCount;
2738 i = (Metaspace::MetadataType)(i + 1)) {
2739 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2740 assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2741 err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2742              " capacity_bytes_slow(%u) " SIZE_FORMAT,
2743 i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2744 }
2745 #endif
2746 }
2748 void MetaspaceAux::verify_used() {
2749 #ifdef ASSERT
2750 size_t running_sum_used_bytes = allocated_used_bytes();
2751 // For purposes of the running sum of used, verify against used
2752 size_t used_in_use_bytes = used_bytes_slow();
2753 assert(allocated_used_bytes() == used_in_use_bytes,
2754 err_msg("allocated_used_bytes() " SIZE_FORMAT
2755              " used_bytes_slow() " SIZE_FORMAT,
2756 allocated_used_bytes(), used_in_use_bytes));
2757 for (Metaspace::MetadataType i = Metaspace::ClassType;
2758        i < Metaspace::MetadataTypeCount;
2759 i = (Metaspace::MetadataType)(i + 1)) {
2760 size_t used_in_use_bytes = used_bytes_slow(i);
2761 assert(allocated_used_bytes(i) == used_in_use_bytes,
2762 err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2763              " used_bytes_slow(%u) " SIZE_FORMAT,
2764 i, allocated_used_bytes(i), i, used_in_use_bytes));
2765 }
2766 #endif
2767 }
2769 void MetaspaceAux::verify_metrics() {
2770 verify_capacity();
2771 verify_used();
2772 }
2775 // Metaspace methods
2777 size_t Metaspace::_first_chunk_word_size = 0;
2778 size_t Metaspace::_first_class_chunk_word_size = 0;
2780 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2781 initialize(lock, type);
2782 }
2784 Metaspace::~Metaspace() {
2785 delete _vsm;
2786 delete _class_vsm;
2787 }
2789 VirtualSpaceList* Metaspace::_space_list = NULL;
2790 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2792 #define VIRTUALSPACEMULTIPLIER 2
2794 void Metaspace::global_initialize() {
2795 // Initialize the alignment for shared spaces.
2796 int max_alignment = os::vm_page_size();
2797 MetaspaceShared::set_max_alignment(max_alignment);
2799 if (DumpSharedSpaces) {
2800 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2801 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2802 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2803 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2805     // Initialize with the sum of the shared space sizes.  The read-only
2806     // and read-write metaspace chunks will be allocated out of this, and
2807     // the remainder is the misc code and data chunks.
2808 size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2809 SharedMiscDataSize + SharedMiscCodeSize,
2810 os::vm_allocation_granularity());
2811 size_t word_size = total/wordSize;
2812 _space_list = new VirtualSpaceList(word_size);
2813 } else {
2814 // If using shared space, open the file that contains the shared space
2815 // and map in the memory before initializing the rest of metaspace (so
2816 // the addresses don't conflict)
2817 if (UseSharedSpaces) {
2818 FileMapInfo* mapinfo = new FileMapInfo();
2819 memset(mapinfo, 0, sizeof(FileMapInfo));
2821 // Open the shared archive file, read and validate the header. If
2822 // initialization fails, shared spaces [UseSharedSpaces] are
2823 // disabled and the file is closed.
2824 // Map in spaces now also
2825 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2826 FileMapInfo::set_current_info(mapinfo);
2827 } else {
2828 assert(!mapinfo->is_open() && !UseSharedSpaces,
2829 "archive file not closed or shared spaces not disabled.");
2830 }
2831 }
2833 // Initialize these before initializing the VirtualSpaceList
2834 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2835 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2836     // Make the first class chunk bigger than a medium chunk so it's not put
2837     // on the medium chunk list.  The next chunk will be small and progress
2838     // from there.  This size was calculated by running with -version.
2839 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2840 (ClassMetaspaceSize/BytesPerWord)*2);
2841 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2842 // Arbitrarily set the initial virtual space to a multiple
2843 // of the boot class loader size.
2844 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2845 // Initialize the list of virtual spaces.
2846 _space_list = new VirtualSpaceList(word_size);
2847 }
2848 }
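// For reference (not introduced by this change): the DumpSharedSpaces
// branch above runs under -Xshare:dump, and the region sizes it aligns
// can be tuned with, e.g., -XX:SharedReadOnlySize and
// -XX:SharedReadWriteSize; the initial virtual space is their aligned
// sum plus the misc data and code sizes.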
2850 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2851 // Java heap because the compression algorithm is the same for each. The
2852 // argument passed in is at the top of the compressed space
2853 void Metaspace::initialize_class_space(ReservedSpace rs) {
2854 // The reserved space size may be bigger because of alignment, esp with UseLargePages
2855 assert(rs.size() >= ClassMetaspaceSize,
2856 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2857 _class_space_list = new VirtualSpaceList(rs);
2858 }
2860 void Metaspace::initialize(Mutex* lock,
2861 MetaspaceType type) {
2863 assert(space_list() != NULL,
2864 "Metadata VirtualSpaceList has not been initialized");
2866 _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
2867 if (_vsm == NULL) {
2868 return;
2869 }
2870 size_t word_size;
2871 size_t class_word_size;
2872 vsm()->get_initial_chunk_sizes(type,
2873 &word_size,
2874 &class_word_size);
2876 assert(class_space_list() != NULL,
2877 "Class VirtualSpaceList has not been initialized");
2879 // Allocate SpaceManager for classes.
2880 _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
2881 if (_class_vsm == NULL) {
2882 return;
2883 }
2885 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2887 // Allocate chunk for metadata objects
2888 Metachunk* new_chunk =
2889 space_list()->get_initialization_chunk(word_size,
2890 vsm()->medium_chunk_bunch());
2891 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2892 if (new_chunk != NULL) {
2893 // Add to this manager's list of chunks in use and current_chunk().
2894 vsm()->add_chunk(new_chunk, true);
2895 }
2897 // Allocate chunk for class metadata objects
2898 Metachunk* class_chunk =
2899 class_space_list()->get_initialization_chunk(class_word_size,
2900 class_vsm()->medium_chunk_bunch());
2901 if (class_chunk != NULL) {
2902 class_vsm()->add_chunk(class_chunk, true);
2903 }
2905 _alloc_record_head = NULL;
2906 _alloc_record_tail = NULL;
2907 }
2909 size_t Metaspace::align_word_size_up(size_t word_size) {
2910 size_t byte_size = word_size * wordSize;
2911 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2912 }
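// Example, assuming an 8-byte word and a 4K allocation granularity (both
// are platform-dependent):
//
//   align_word_size_up(100)
//     == ReservedSpace::allocation_align_size_up(800) / 8
//     == 4096 / 8 == 512 words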
2914 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2915 // DumpSharedSpaces doesn't use class metadata area (yet)
2916 if (mdtype == ClassType && !DumpSharedSpaces) {
2917 return class_vsm()->allocate(word_size);
2918 } else {
2919 return vsm()->allocate(word_size);
2920 }
2921 }
2923 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2924 MetaWord* result;
2925 MetaspaceGC::set_expand_after_GC(true);
2926 size_t before_inc = MetaspaceGC::capacity_until_GC();
2927 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
2928 MetaspaceGC::inc_capacity_until_GC(delta_bytes);
2929 if (PrintGCDetails && Verbose) {
2930 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2931 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2932 }
2934 result = allocate(word_size, mdtype);
2936 return result;
2937 }
2939 // Space allocated in the Metaspace.  This may
2940 // span several metadata virtual spaces.
2941 char* Metaspace::bottom() const {
2942 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2943 return (char*)vsm()->current_chunk()->bottom();
2944 }
2946 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
2947 // return vsm()->allocated_used_words();
2948 return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2949 vsm()->sum_used_in_chunks_in_use(); // includes overhead!
2950 }
2952 size_t Metaspace::free_words(MetadataType mdtype) const {
2953 return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2954 vsm()->sum_free_in_chunks_in_use();
2955 }
2957 // Space capacity in the Metaspace.  It includes
2958 // space in the list of chunks from which allocations
2959 // have been made.  It does not include space in the global
2960 // freelist or the space available in the dictionary, which
2961 // is already counted in some chunk.
2962 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
2963 return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2964 vsm()->sum_capacity_in_chunks_in_use();
2965 }
2967 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
2968 return used_words_slow(mdtype) * BytesPerWord;
2969 }
2971 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
2972 return capacity_words_slow(mdtype) * BytesPerWord;
2973 }
2975 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2976 if (SafepointSynchronize::is_at_safepoint()) {
2977 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2978 // Don't take Heap_lock
2979 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2980 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2981 // Dark matter. Too small for dictionary.
2982 #ifdef ASSERT
2983 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2984 #endif
2985 return;
2986 }
2987 if (is_class) {
2988 class_vsm()->deallocate(ptr, word_size);
2989 } else {
2990 vsm()->deallocate(ptr, word_size);
2991 }
2992 } else {
2993 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2995 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2996 // Dark matter. Too small for dictionary.
2997 #ifdef ASSERT
2998 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2999 #endif
3000 return;
3001 }
3002 if (is_class) {
3003 class_vsm()->deallocate(ptr, word_size);
3004 } else {
3005 vsm()->deallocate(ptr, word_size);
3006 }
3007 }
3008 }
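// Note on the "dark matter" path above: blocks smaller than
// TreeChunk<Metablock, FreeList>::min_size() cannot be tracked by the
// block freelists, so they are simply abandoned; in debug builds the
// 0xf5f5f5f5 fill makes any later use of such a freed block visible.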
3010 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3011 bool read_only, MetaspaceObj::Type type, TRAPS) {
3012 if (HAS_PENDING_EXCEPTION) {
3013 assert(false, "Should not allocate with exception pending");
3014 return NULL; // caller does a CHECK_NULL too
3015 }
3017 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3019   // SSS: Should we align the allocations and make sure the sizes are aligned?
3020 MetaWord* result = NULL;
3022 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3023 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3024 // Allocate in metaspaces without taking out a lock, because it deadlocks
3025 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
3026 // to revisit this for application class data sharing.
3027 if (DumpSharedSpaces) {
3028 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3029 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3030 result = space->allocate(word_size, NonClassType);
3031 if (result == NULL) {
3032 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3033 } else {
3034 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3035 }
3036 return Metablock::initialize(result, word_size);
3037 }
3039 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3041 if (result == NULL) {
3042 // Try to clean out some memory and retry.
3043 result =
3044 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3045 loader_data, word_size, mdtype);
3047 // If result is still null, we are out of memory.
3048 if (result == NULL) {
3049 if (Verbose && TraceMetadataChunkAllocation) {
3050 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3051 SIZE_FORMAT, word_size);
3052 if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty);
3053 MetaspaceAux::dump(gclog_or_tty);
3054 }
3055 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3056 report_java_out_of_memory("Metadata space");
3058 if (JvmtiExport::should_post_resource_exhausted()) {
3059 JvmtiExport::post_resource_exhausted(
3060 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3061 "Metadata space");
3062 }
3063 THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
3064 }
3065 }
3066 return Metablock::initialize(result, word_size);
3067 }
3069 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3070 assert(DumpSharedSpaces, "sanity");
3072 AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3073 if (_alloc_record_head == NULL) {
3074 _alloc_record_head = _alloc_record_tail = rec;
3075 } else {
3076 _alloc_record_tail->_next = rec;
3077 _alloc_record_tail = rec;
3078 }
3079 }
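// The records are appended in allocation order; because dump-time
// allocation comes out of a single chunk (see bottom() above), the list
// is also in ascending address order, which iterate() below relies on
// when it reports gaps between records as MetaspaceObj::UnknownType.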
3081 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3082 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3084 address last_addr = (address)bottom();
3086 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3087 address ptr = rec->_ptr;
3088 if (last_addr < ptr) {
3089 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3090 }
3091 closure->doit(ptr, rec->_type, rec->_byte_size);
3092 last_addr = ptr + rec->_byte_size;
3093 }
3095 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3096 if (last_addr < top) {
3097 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3098 }
3099 }
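// A minimal sketch of an iterate() client, in the spirit of the
// PrintSharedSpaces statistics ("TallyClosure" and the usage line are
// hypothetical; the doit() signature mirrors the calls above):
//
//   class TallyClosure : public Metaspace::AllocRecordClosure {
//    public:
//     size_t _bytes[MetaspaceObj::_number_of_types];
//     TallyClosure() { memset(_bytes, 0, sizeof(_bytes)); }
//     void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       _bytes[type] += byte_size;
//     }
//   };
//
//   // usage: TallyClosure tc; ro_space->iterate(&tc);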
3101 void Metaspace::purge() {
3102 MutexLockerEx cl(SpaceManager::expand_lock(),
3103 Mutex::_no_safepoint_check_flag);
3104 space_list()->purge();
3105 class_space_list()->purge();
3106 }
3108 void Metaspace::print_on(outputStream* out) const {
3109 // Print both class virtual space counts and metaspace.
3110 if (Verbose) {
3111 vsm()->print_on(out);
3112 class_vsm()->print_on(out);
3113 }
3114 }
3116 bool Metaspace::contains(const void * ptr) {
3117 if (MetaspaceShared::is_in_shared_space(ptr)) {
3118 return true;
3119 }
3120 // This is checked while unlocked. As long as the virtualspaces are added
3121 // at the end, the pointer will be in one of them. The virtual spaces
3122 // aren't deleted presently. When they are, some sort of locking might
3123 // be needed. Note, locking this can cause inversion problems with the
3124 // caller in MetaspaceObj::is_metadata() function.
3125 return space_list()->contains(ptr) ||
3126 class_space_list()->contains(ptr);
3127 }
3129 void Metaspace::verify() {
3130 vsm()->verify();
3131 class_vsm()->verify();
3132 }
3134 void Metaspace::dump(outputStream* const out) const {
3135 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3136 vsm()->dump(out);
3137 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3138 class_vsm()->dump(out);
3139 }