Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
1 /*
2 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "gc_interface/collectedHeap.hpp"
26 #include "memory/binaryTreeDictionary.hpp"
27 #include "memory/freeList.hpp"
28 #include "memory/collectorPolicy.hpp"
29 #include "memory/filemap.hpp"
30 #include "memory/freeList.hpp"
31 #include "memory/metablock.hpp"
32 #include "memory/metachunk.hpp"
33 #include "memory/metaspace.hpp"
34 #include "memory/metaspaceShared.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp"
37 #include "runtime/globals.hpp"
38 #include "runtime/mutex.hpp"
39 #include "runtime/orderAccess.hpp"
40 #include "services/memTracker.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/debug.hpp"
44 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
45 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
46 // Set metaspace_slow_verify to true to enable slow integrity
47 // checking of the free chunk lists.
48 const bool metaspace_slow_verify = false;
50 // Parameters for stress mode testing
51 const uint metadata_deallocate_a_lot_block = 10;
52 const uint metadata_deallocate_a_lot_chunk = 3;
53 size_t const allocation_from_dictionary_limit = 64 * K;
55 MetaWord* last_allocated = 0;
57 // Used in declarations in SpaceManager and ChunkManager
58 enum ChunkIndex {
59 ZeroIndex = 0,
60 SpecializedIndex = ZeroIndex,
61 SmallIndex = SpecializedIndex + 1,
62 MediumIndex = SmallIndex + 1,
63 HumongousIndex = MediumIndex + 1,
64 NumberOfFreeLists = 3,
65 NumberOfInUseLists = 4
66 };
68 enum ChunkSizes { // in words.
69 ClassSpecializedChunk = 128,
70 SpecializedChunk = 128,
71 ClassSmallChunk = 256,
72 SmallChunk = 512,
73 ClassMediumChunk = 1 * K,
74 MediumChunk = 8 * K,
75 HumongousChunkGranularity = 8
76 };
78 static ChunkIndex next_chunk_index(ChunkIndex i) {
79 assert(i < NumberOfInUseLists, "Out of bound");
80 return (ChunkIndex) (i+1);
81 }
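// Editorial sketch (not VM code): for the non-class space, list_index()
// (declared below) is expected to map the exact chunk sizes above to the
// indices, with anything larger than MediumChunk treated as humongous:
//
//   SpecializedChunk (128 words)   -> SpecializedIndex
//   SmallChunk       (512 words)   -> SmallIndex
//   MediumChunk      (8 K words)   -> MediumIndex
//   larger                         -> HumongousIndex
//
// next_chunk_index() simply steps through the indices in that order,
// e.g. next_chunk_index(SmallIndex) == MediumIndex.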
83 // Originally _capacity_until_GC was set to MetaspaceSize here but
84 // the default MetaspaceSize before argument processing was being
85 // used which was not the desired value. See the code
86 // in should_expand() to see how the initialization is handled
87 // now.
88 size_t MetaspaceGC::_capacity_until_GC = 0;
89 bool MetaspaceGC::_expand_after_GC = false;
90 uint MetaspaceGC::_shrink_factor = 0;
91 bool MetaspaceGC::_should_concurrent_collect = false;
93 // Blocks of space for metadata are allocated out of Metachunks.
94 //
95 // Metachunks are allocated out of MetadataVirtualspaces and once
96 // allocated there is no explicit link between a Metachunk and
97 // the MetadataVirtualspace from which it was allocated.
98 //
99 // Each SpaceManager maintains a
100 // list of the chunks it is using and the current chunk. The current
101 // chunk is the chunk from which allocations are done. Space freed in
102 // a chunk is placed on the free list of blocks (BlockFreelist) and
103 // reused from there.
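// A rough caller-side sketch of that flow (editorial, hypothetical usage;
// the classes involved are declared below in this file):
//
//   SpaceManager* sm = ...;                  // one per loader/metadata type
//   MetaWord* p = sm->allocate(word_size);   // bump-allocates in the current chunk
//   ...
//   sm->deallocate(p, word_size);            // freed space goes to the
//                                            // SpaceManager's BlockFreelist
//
// When the current chunk is exhausted, grow_and_allocate() obtains a new
// Metachunk from the ChunkManager's freelists or carves one out of a
// VirtualSpaceNode.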
105 typedef class FreeList<Metachunk> ChunkList;
107 // Manages the global free lists of chunks.
108 // Has three free lists of chunks (specialized, small, medium) plus a
109 // humongous chunk dictionary, and a total size and count covering all of them.
111 class ChunkManager VALUE_OBJ_CLASS_SPEC {
113 // Free lists of chunks of different sizes:
114 // SpecializedChunk
115 // SmallChunk
116 // MediumChunk
117 // (humongous chunks live in the dictionary below)
118 ChunkList _free_chunks[NumberOfFreeLists];
121 // HumongousChunk
122 ChunkTreeDictionary _humongous_dictionary;
124 // Running totals (words and count) for the free chunks in all lists above
125 size_t _free_chunks_total;
126 size_t _free_chunks_count;
128 void dec_free_chunks_total(size_t v) {
129 assert(_free_chunks_count > 0 &&
130 _free_chunks_total > 0,
131 "About to go negative");
132 Atomic::add_ptr(-1, &_free_chunks_count);
133 jlong minus_v = (jlong) - (jlong) v;
134 Atomic::add_ptr(minus_v, &_free_chunks_total);
135 }
137 // Debug support
139 size_t sum_free_chunks();
140 size_t sum_free_chunks_count();
142 void locked_verify_free_chunks_total();
143 void slow_locked_verify_free_chunks_total() {
144 if (metaspace_slow_verify) {
145 locked_verify_free_chunks_total();
146 }
147 }
148 void locked_verify_free_chunks_count();
149 void slow_locked_verify_free_chunks_count() {
150 if (metaspace_slow_verify) {
151 locked_verify_free_chunks_count();
152 }
153 }
154 void verify_free_chunks_count();
156 public:
158 ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
160 // Allocate (remove) a chunk from, or deallocate (return) a chunk to, the global freelist.
161 Metachunk* chunk_freelist_allocate(size_t word_size);
162 void chunk_freelist_deallocate(Metachunk* chunk);
164 // Map a size to a list index assuming that there are lists
165 // for specialized, small, medium, and humongous chunks.
166 static ChunkIndex list_index(size_t size);
168 // Remove the chunk from its freelist. It is
169 // expected to be on one of the _free_chunks[] lists.
170 void remove_chunk(Metachunk* chunk);
172 // Add the simple linked list of chunks to the freelist of chunks
173 // of type index.
174 void return_chunks(ChunkIndex index, Metachunk* chunks);
176 // Total of the space in the free chunks list
177 size_t free_chunks_total();
178 size_t free_chunks_total_in_bytes();
180 // Number of chunks in the free chunks list
181 size_t free_chunks_count();
183 void inc_free_chunks_total(size_t v, size_t count = 1) {
184 Atomic::add_ptr(count, &_free_chunks_count);
185 Atomic::add_ptr(v, &_free_chunks_total);
186 }
187 ChunkTreeDictionary* humongous_dictionary() {
188 return &_humongous_dictionary;
189 }
191 ChunkList* free_chunks(ChunkIndex index);
193 // Returns the list for the given chunk word size.
194 ChunkList* find_free_chunks_list(size_t word_size);
196 // Add and remove from a list by size. Selects
197 // list based on size of chunk.
198 void free_chunks_put(Metachunk* chunk);
199 Metachunk* free_chunks_get(size_t chunk_word_size);
201 // Debug support
202 void verify();
203 void slow_verify() {
204 if (metaspace_slow_verify) {
205 verify();
206 }
207 }
208 void locked_verify();
209 void slow_locked_verify() {
210 if (metaspace_slow_verify) {
211 locked_verify();
212 }
213 }
214 void verify_free_chunks_total();
216 void locked_print_free_chunks(outputStream* st);
217 void locked_print_sum_free_chunks(outputStream* st);
219 void print_on(outputStream* st);
220 };
222 // Used to manage the free list of Metablocks (a block corresponds
223 // to the allocation of a quantum of metadata).
224 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
225 BlockTreeDictionary* _dictionary;
226 static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
228 // Accessors
229 BlockTreeDictionary* dictionary() const { return _dictionary; }
231 public:
232 BlockFreelist();
233 ~BlockFreelist();
235 // Get a block from, and return a block to, the free list
236 MetaWord* get_block(size_t word_size);
237 void return_block(MetaWord* p, size_t word_size);
239 size_t total_size() {
240 if (dictionary() == NULL) {
241 return 0;
242 } else {
243 return dictionary()->total_size();
244 }
245 }
247 void print_on(outputStream* st) const;
248 };
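// Hedged usage sketch for BlockFreelist (editorial): assumes p points to a
// block of word_size words previously allocated from a chunk owned by the
// same SpaceManager, and that word_size meets the dictionary's minimum:
//
//   BlockFreelist bf;
//   bf.return_block(p, word_size);           // lazily creates the dictionary
//   MetaWord* q = bf.get_block(word_size);   // exact-fit lookup; may be NULL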
250 class VirtualSpaceNode : public CHeapObj<mtClass> {
251 friend class VirtualSpaceList;
253 // Link to next VirtualSpaceNode
254 VirtualSpaceNode* _next;
256 // Total reserved region of the VirtualSpace
257 MemRegion _reserved;
258 ReservedSpace _rs;
259 VirtualSpace _virtual_space;
260 MetaWord* _top; // Address of the next available word in _virtual_space
261 // count of chunks contained in this VirtualSpace
262 uintx _container_count;
264 // Convenience functions for logical bottom and end
265 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
266 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
268 // Convenience functions to access the _virtual_space
269 char* low() const { return virtual_space()->low(); }
270 char* high() const { return virtual_space()->high(); }
272 // The first Metachunk will be allocated at the bottom of the
273 // VirtualSpace
274 Metachunk* first_chunk() { return (Metachunk*) bottom(); }
276 void inc_container_count();
277 #ifdef ASSERT
278 uint container_count_slow();
279 #endif
281 public:
283 VirtualSpaceNode(size_t byte_size);
284 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
285 ~VirtualSpaceNode();
288 // Accessors
289 VirtualSpaceNode* next() { return _next; }
290 void set_next(VirtualSpaceNode* v) { _next = v; }
292 void set_reserved(MemRegion const v) { _reserved = v; }
293 void set_top(MetaWord* v) { _top = v; }
295 // Accessors
296 MemRegion* reserved() { return &_reserved; }
297 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
299 // Returns true if "word_size" is available in the VirtualSpace
300 bool is_available(size_t word_size) { return _top + word_size <= end(); }
302 MetaWord* top() const { return _top; }
303 void inc_top(size_t word_size) { _top += word_size; }
305 uintx container_count() { return _container_count; }
306 void dec_container_count();
307 #ifdef ASSERT
308 void verify_container_count();
309 #endif
311 // used and capacity in this single entry in the list
312 size_t used_words_in_vs() const;
313 size_t capacity_words_in_vs() const;
314 size_t free_words_in_vs() const;
316 bool initialize();
318 // get space from the virtual space
319 Metachunk* take_from_committed(size_t chunk_word_size);
321 // Allocate a chunk from the virtual space and return it.
322 Metachunk* get_chunk_vs(size_t chunk_word_size);
323 Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
325 // Expands/shrinks the committed space in a virtual space. Delegates
326 // to VirtualSpace.
327 bool expand_by(size_t words, bool pre_touch = false);
328 bool shrink_by(size_t words);
330 // In preparation for deleting this node, remove all the chunks
331 // in the node from any freelist.
332 void purge(ChunkManager* chunk_manager);
334 #ifdef ASSERT
335 // Debug support
336 static void verify_virtual_space_total();
337 static void verify_virtual_space_count();
338 void mangle();
339 #endif
341 void print_on(outputStream* st) const;
342 };
344 // byte_size is the size of the associated virtualspace.
345 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
346 // align up to vm allocation granularity
347 byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
349 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
350 // at a configurable address, generally at the top of the Java heap, so
351 // other memory addresses don't conflict.
352 if (DumpSharedSpaces) {
353 char* shared_base = (char*)SharedBaseAddress;
354 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
355 if (_rs.is_reserved()) {
356 assert(shared_base == 0 || _rs.base() == shared_base, "should match");
357 } else {
358 // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
359 _rs = ReservedSpace(byte_size);
360 }
361 MetaspaceShared::set_shared_rs(&_rs);
362 } else {
363 _rs = ReservedSpace(byte_size);
364 }
366 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
367 }
369 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
370 Metachunk* chunk = first_chunk();
371 Metachunk* invalid_chunk = (Metachunk*) top();
372 while (chunk < invalid_chunk ) {
373 assert(chunk->is_free(), "Should be marked free");
374 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
375 chunk_manager->remove_chunk(chunk);
376 assert(chunk->next() == NULL &&
377 chunk->prev() == NULL,
378 "Was not removed from its list");
379 chunk = (Metachunk*) next;
380 }
381 }
383 #ifdef ASSERT
384 uint VirtualSpaceNode::container_count_slow() {
385 uint count = 0;
386 Metachunk* chunk = first_chunk();
387 Metachunk* invalid_chunk = (Metachunk*) top();
388 while (chunk < invalid_chunk ) {
389 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
390 // Don't count the chunks on the free lists. Those are
391 // still part of the VirtualSpaceNode but not currently
392 // counted.
393 if (!chunk->is_free()) {
394 count++;
395 }
396 chunk = (Metachunk*) next;
397 }
398 return count;
399 }
400 #endif
402 // List of VirtualSpaces for metadata allocation.
403 // It has a _next link for singly linked list and a MemRegion
404 // for total space in the VirtualSpace.
405 class VirtualSpaceList : public CHeapObj<mtClass> {
406 friend class VirtualSpaceNode;
408 enum VirtualSpaceSizes {
409 VirtualSpaceSize = 256 * K
410 };
412 // Global list of virtual spaces
413 // Head of the list
414 VirtualSpaceNode* _virtual_space_list;
415 // virtual space currently being used for allocations
416 VirtualSpaceNode* _current_virtual_space;
417 // Free chunk list for all other metadata
418 ChunkManager _chunk_manager;
420 // Can this virtual space list allocate more than one virtual space? Also
421 // used to determine whether to allocate unlimited small chunks in this virtual space
422 bool _is_class;
423 bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
425 // Sum of space in all virtual spaces and number of virtual spaces
426 size_t _virtual_space_total;
427 size_t _virtual_space_count;
429 ~VirtualSpaceList();
431 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
433 void set_virtual_space_list(VirtualSpaceNode* v) {
434 _virtual_space_list = v;
435 }
436 void set_current_virtual_space(VirtualSpaceNode* v) {
437 _current_virtual_space = v;
438 }
440 void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
442 // Get another virtual space and add it to the list. This
443 // is typically prompted by a failed attempt to allocate a chunk
444 // and is typically followed by the allocation of a chunk.
445 bool grow_vs(size_t vs_word_size);
447 public:
448 VirtualSpaceList(size_t word_size);
449 VirtualSpaceList(ReservedSpace rs);
451 size_t free_bytes();
453 Metachunk* get_new_chunk(size_t word_size,
454 size_t grow_chunks_by_words,
455 size_t medium_chunk_bunch);
457 // Get the first chunk for a Metaspace. Used for
458 // special cases such as the boot class loader, reflection
459 // class loader and anonymous class loader.
460 Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
462 VirtualSpaceNode* current_virtual_space() {
463 return _current_virtual_space;
464 }
466 ChunkManager* chunk_manager() { return &_chunk_manager; }
467 bool is_class() const { return _is_class; }
469 // Allocate the first virtualspace.
470 void initialize(size_t word_size);
472 size_t virtual_space_total() { return _virtual_space_total; }
474 void inc_virtual_space_total(size_t v);
475 void dec_virtual_space_total(size_t v);
476 void inc_virtual_space_count();
477 void dec_virtual_space_count();
479 // Unlink empty VirtualSpaceNodes and free them.
480 void purge();
482 // Used and capacity in the entire list of virtual spaces.
483 // These are global values shared by all Metaspaces
484 size_t capacity_words_sum();
485 size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
486 size_t used_words_sum();
487 size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
489 bool contains(const void *ptr);
491 void print_on(outputStream* st) const;
493 class VirtualSpaceListIterator : public StackObj {
494 VirtualSpaceNode* _virtual_spaces;
495 public:
496 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
497 _virtual_spaces(virtual_spaces) {}
499 bool repeat() {
500 return _virtual_spaces != NULL;
501 }
503 VirtualSpaceNode* get_next() {
504 VirtualSpaceNode* result = _virtual_spaces;
505 if (_virtual_spaces != NULL) {
506 _virtual_spaces = _virtual_spaces->next();
507 }
508 return result;
509 }
510 };
511 };
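// The iteration idiom for VirtualSpaceListIterator, as used by print_on()
// and contains() later in this file:
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* node = iter.get_next();
//     // per-node work
//   }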
513 class Metadebug : AllStatic {
514 // Debugging support for Metaspaces
515 static int _deallocate_block_a_lot_count;
516 static int _deallocate_chunk_a_lot_count;
517 static int _allocation_fail_alot_count;
519 public:
520 static int deallocate_block_a_lot_count() {
521 return _deallocate_block_a_lot_count;
522 }
523 static void set_deallocate_block_a_lot_count(int v) {
524 _deallocate_block_a_lot_count = v;
525 }
526 static void inc_deallocate_block_a_lot_count() {
527 _deallocate_block_a_lot_count++;
528 }
529 static int deallocate_chunk_a_lot_count() {
530 return _deallocate_chunk_a_lot_count;
531 }
532 static void reset_deallocate_chunk_a_lot_count() {
533 _deallocate_chunk_a_lot_count = 1;
534 }
535 static void inc_deallocate_chunk_a_lot_count() {
536 _deallocate_chunk_a_lot_count++;
537 }
539 static void init_allocation_fail_alot_count();
540 #ifdef ASSERT
541 static bool test_metadata_failure();
542 #endif
544 static void deallocate_chunk_a_lot(SpaceManager* sm,
545 size_t chunk_word_size);
546 static void deallocate_block_a_lot(SpaceManager* sm,
547 size_t chunk_word_size);
549 };
551 int Metadebug::_deallocate_block_a_lot_count = 0;
552 int Metadebug::_deallocate_chunk_a_lot_count = 0;
553 int Metadebug::_allocation_fail_alot_count = 0;
555 // SpaceManager - used by Metaspace to handle allocations
556 class SpaceManager : public CHeapObj<mtClass> {
557 friend class Metaspace;
558 friend class Metadebug;
560 private:
562 // Protects allocations and contains().
563 Mutex* const _lock;
565 // Type of metadata allocated.
566 Metaspace::MetadataType _mdtype;
568 // Chunk related size
569 size_t _medium_chunk_bunch;
571 // List of chunks in use by this SpaceManager. Allocations
572 // are done from the current chunk. The list is used for deallocating
573 // chunks when the SpaceManager is freed.
574 Metachunk* _chunks_in_use[NumberOfInUseLists];
575 Metachunk* _current_chunk;
577 // Virtual space where allocation comes from.
578 VirtualSpaceList* _vs_list;
580 // Number of small chunks to allocate to a manager
581 // If class space manager, small chunks are unlimited
582 static uint const _small_chunk_limit;
583 bool has_small_chunk_limit() { return !vs_list()->is_class(); }
585 // Sum of all space in allocated chunks
586 size_t _allocated_blocks_words;
588 // Sum of all allocated chunks
589 size_t _allocated_chunks_words;
590 size_t _allocated_chunks_count;
592 // Free lists of blocks are per SpaceManager since they
593 // are assumed to be in chunks in use by the SpaceManager
594 // and all chunks in use by a SpaceManager are freed when
595 // the class loader using the SpaceManager is collected.
596 BlockFreelist _block_freelists;
598 // protects virtualspace and chunk expansions
599 static const char* _expand_lock_name;
600 static const int _expand_lock_rank;
601 static Mutex* const _expand_lock;
603 private:
604 // Accessors
605 Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
606 void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
608 BlockFreelist* block_freelists() const {
609 return (BlockFreelist*) &_block_freelists;
610 }
612 Metaspace::MetadataType mdtype() { return _mdtype; }
613 VirtualSpaceList* vs_list() const { return _vs_list; }
615 Metachunk* current_chunk() const { return _current_chunk; }
616 void set_current_chunk(Metachunk* v) {
617 _current_chunk = v;
618 }
620 Metachunk* find_current_chunk(size_t word_size);
622 // Add chunk to the list of chunks in use
623 void add_chunk(Metachunk* v, bool make_current);
625 Mutex* lock() const { return _lock; }
627 const char* chunk_size_name(ChunkIndex index) const;
629 protected:
630 void initialize();
632 public:
633 SpaceManager(Metaspace::MetadataType mdtype,
634 Mutex* lock,
635 VirtualSpaceList* vs_list);
636 ~SpaceManager();
638 enum ChunkMultiples {
639 MediumChunkMultiple = 4
640 };
642 // Accessors
643 size_t specialized_chunk_size() { return SpecializedChunk; }
644 size_t small_chunk_size() { return (size_t) (vs_list()->is_class() ? ClassSmallChunk : SmallChunk); }
645 size_t medium_chunk_size() { return (size_t) (vs_list()->is_class() ? ClassMediumChunk : MediumChunk); }
646 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
648 size_t allocated_blocks_words() const { return _allocated_blocks_words; }
649 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
650 size_t allocated_chunks_words() const { return _allocated_chunks_words; }
651 size_t allocated_chunks_count() const { return _allocated_chunks_count; }
653 bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
655 static Mutex* expand_lock() { return _expand_lock; }
657 // Increment the per Metaspace and global running sums for Metachunks
658 // by the given size. This is used when a Metachunk is added to
659 // the in-use list.
660 void inc_size_metrics(size_t words);
661 // Increment the per Metaspace and global running sums for Metablocks by
662 // the given size. This is used when a Metablock is allocated.
663 void inc_used_metrics(size_t words);
664 // Delete this SpaceManager's portion of the running sums. That is,
665 // the global running sums for the Metachunks and Metablocks are
666 // decremented for all the Metachunks in use by this SpaceManager.
667 void dec_total_from_size_metrics();
669 // Set the sizes for the initial chunks.
670 void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
671 size_t* chunk_word_size,
672 size_t* class_chunk_word_size);
674 size_t sum_capacity_in_chunks_in_use() const;
675 size_t sum_used_in_chunks_in_use() const;
676 size_t sum_free_in_chunks_in_use() const;
677 size_t sum_waste_in_chunks_in_use() const;
678 size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
680 size_t sum_count_in_chunks_in_use();
681 size_t sum_count_in_chunks_in_use(ChunkIndex i);
683 Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
685 // Block allocation and deallocation.
686 // Allocates a block from the current chunk
687 MetaWord* allocate(size_t word_size);
689 // Helper for allocations
690 MetaWord* allocate_work(size_t word_size);
692 // Returns a block to the per manager freelist
693 void deallocate(MetaWord* p, size_t word_size);
695 // Based on the allocation size and a minimum chunk size, compute
696 // the chunk size to return (for expanding space for chunk allocation).
697 size_t calc_chunk_size(size_t allocation_word_size);
699 // Called when an allocation from the current chunk fails.
700 // Gets a new chunk (may require getting a new virtual space),
701 // and allocates from that chunk.
702 MetaWord* grow_and_allocate(size_t word_size);
704 // debugging support.
706 void dump(outputStream* const out) const;
707 void print_on(outputStream* st) const;
708 void locked_print_chunks_in_use_on(outputStream* st) const;
710 void verify();
711 void verify_chunk_size(Metachunk* chunk);
712 NOT_PRODUCT(void mangle_freed_chunks();)
713 #ifdef ASSERT
714 void verify_allocated_blocks_words();
715 #endif
717 size_t get_raw_word_size(size_t word_size) {
718 // If only the dictionary is going to be used (i.e., no
719 // indexed free list), then there is a minimum size requirement.
720 // MinChunkSize is a placeholder for the real minimum size.
721 size_t byte_size = word_size * BytesPerWord;
723 size_t byte_size_with_overhead = byte_size + Metablock::overhead();
725 size_t raw_bytes_size = MAX2(byte_size_with_overhead,
726 Metablock::min_block_byte_size());
727 raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
728 size_t raw_word_size = raw_bytes_size / BytesPerWord;
729 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
731 return raw_word_size;
732 }
733 };
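// Worked example for get_raw_word_size() (editorial; assumes a 64-bit VM
// with BytesPerWord == 8 and, purely for illustration, an 8-byte
// Metablock::overhead() and 16-byte ARENA_ALIGN -- the real constants are
// defined elsewhere): word_size == 3 gives byte_size == 24, 32 with
// overhead, which survives the MAX2 and alignment steps as 32 bytes, so
// raw_word_size == 4 and the final assert holds.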
735 uint const SpaceManager::_small_chunk_limit = 4;
737 const char* SpaceManager::_expand_lock_name =
738 "SpaceManager chunk allocation lock";
739 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
740 Mutex* const SpaceManager::_expand_lock =
741 new Mutex(SpaceManager::_expand_lock_rank,
742 SpaceManager::_expand_lock_name,
743 Mutex::_allow_vm_block_flag);
745 void VirtualSpaceNode::inc_container_count() {
746 assert_lock_strong(SpaceManager::expand_lock());
747 _container_count++;
748 assert(_container_count == container_count_slow(),
749 err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
750 "container_count_slow() " SIZE_FORMAT,
751 _container_count, container_count_slow()));
752 }
754 void VirtualSpaceNode::dec_container_count() {
755 assert_lock_strong(SpaceManager::expand_lock());
756 _container_count--;
757 }
759 #ifdef ASSERT
760 void VirtualSpaceNode::verify_container_count() {
761 assert(_container_count == container_count_slow(),
762 err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
763 "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
764 }
765 #endif
767 // BlockFreelist methods
769 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
771 BlockFreelist::~BlockFreelist() {
772 if (_dictionary != NULL) {
773 if (Verbose && TraceMetadataChunkAllocation) {
774 _dictionary->print_free_lists(gclog_or_tty);
775 }
776 delete _dictionary;
777 }
778 }
780 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
781 Metablock* block = (Metablock*) p;
782 block->set_word_size(word_size);
783 block->set_prev(NULL);
784 block->set_next(NULL);
786 return block;
787 }
789 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
790 Metablock* free_chunk = initialize_free_chunk(p, word_size);
791 if (dictionary() == NULL) {
792 _dictionary = new BlockTreeDictionary();
793 }
794 dictionary()->return_chunk(free_chunk);
795 }
797 MetaWord* BlockFreelist::get_block(size_t word_size) {
798 if (dictionary() == NULL) {
799 return NULL;
800 }
802 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
803 // Dark matter. Too small for dictionary.
804 return NULL;
805 }
807 Metablock* free_block =
808 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
809 if (free_block == NULL) {
810 return NULL;
811 }
813 return (MetaWord*) free_block;
814 }
816 void BlockFreelist::print_on(outputStream* st) const {
817 if (dictionary() == NULL) {
818 return;
819 }
820 dictionary()->print_free_lists(st);
821 }
823 // VirtualSpaceNode methods
825 VirtualSpaceNode::~VirtualSpaceNode() {
826 _rs.release();
827 #ifdef ASSERT
828 size_t word_size = sizeof(*this) / BytesPerWord;
829 Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
830 #endif
831 }
833 size_t VirtualSpaceNode::used_words_in_vs() const {
834 return pointer_delta(top(), bottom(), sizeof(MetaWord));
835 }
837 // Space committed in the VirtualSpace
838 size_t VirtualSpaceNode::capacity_words_in_vs() const {
839 return pointer_delta(end(), bottom(), sizeof(MetaWord));
840 }
842 size_t VirtualSpaceNode::free_words_in_vs() const {
843 return pointer_delta(end(), top(), sizeof(MetaWord));
844 }
846 // Allocates the chunk from the virtual space only.
847 // This interface is also used internally for debugging. Not all
848 // chunks removed here are necessarily used for allocation.
849 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
850 // Bottom of the new chunk
851 MetaWord* chunk_limit = top();
852 assert(chunk_limit != NULL, "Not safe to call this method");
854 if (!is_available(chunk_word_size)) {
855 if (TraceMetadataChunkAllocation) {
856 tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
857 // Dump some information about the virtual space that is nearly full
858 print_on(tty);
859 }
860 return NULL;
861 }
863 // Take the space (bump top on the current virtual space).
864 inc_top(chunk_word_size);
866 // Initialize the chunk
867 Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
868 return result;
869 }
872 // Expand the virtual space (commit more of the reserved space)
873 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
874 size_t bytes = words * BytesPerWord;
875 bool result = virtual_space()->expand_by(bytes, pre_touch);
876 if (TraceMetavirtualspaceAllocation && !result) {
877 gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
878 "for byte size " SIZE_FORMAT, bytes);
879 virtual_space()->print();
880 }
881 return result;
882 }
884 // Shrink the virtual space (uncommit part of the committed space)
885 bool VirtualSpaceNode::shrink_by(size_t words) {
886 size_t bytes = words * BytesPerWord;
887 virtual_space()->shrink_by(bytes);
888 return true;
889 }
891 // Allocate another chunk from this virtual space.
893 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
894 assert_lock_strong(SpaceManager::expand_lock());
895 Metachunk* result = take_from_committed(chunk_word_size);
896 if (result != NULL) {
897 inc_container_count();
898 }
899 return result;
900 }
902 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
903 assert_lock_strong(SpaceManager::expand_lock());
905 Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
907 if (new_chunk == NULL) {
908 // Only a small part of the virtualspace is committed when first
909 // allocated so committing more here can be expected.
910 size_t page_size_words = os::vm_page_size() / BytesPerWord;
911 size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
912 page_size_words);
913 expand_by(aligned_expand_vs_by_words, false);
914 new_chunk = get_chunk_vs(chunk_word_size);
915 }
916 return new_chunk;
917 }
919 bool VirtualSpaceNode::initialize() {
921 if (!_rs.is_reserved()) {
922 return false;
923 }
925 // An allocation out of this Virtualspace that is larger
926 // than an initial commit size can waste that initial committed
927 // space.
928 size_t committed_byte_size = 0;
929 bool result = virtual_space()->initialize(_rs, committed_byte_size);
930 if (result) {
931 set_top((MetaWord*)virtual_space()->low());
932 set_reserved(MemRegion((HeapWord*)_rs.base(),
933 (HeapWord*)(_rs.base() + _rs.size())));
935 assert(reserved()->start() == (HeapWord*) _rs.base(),
936 err_msg("Reserved start was not set properly " PTR_FORMAT
937 " != " PTR_FORMAT, reserved()->start(), _rs.base()));
938 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
939 err_msg("Reserved size was not set properly " SIZE_FORMAT
940 " != " SIZE_FORMAT, reserved()->word_size(),
941 _rs.size() / BytesPerWord));
942 }
944 return result;
945 }
947 void VirtualSpaceNode::print_on(outputStream* st) const {
948 size_t used = used_words_in_vs();
949 size_t capacity = capacity_words_in_vs();
950 VirtualSpace* vs = virtual_space();
951 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
952 "[" PTR_FORMAT ", " PTR_FORMAT ", "
953 PTR_FORMAT ", " PTR_FORMAT ")",
954 vs, capacity / K,
955 capacity == 0 ? 0 : used * 100 / capacity,
956 bottom(), top(), end(),
957 vs->high_boundary());
958 }
960 #ifdef ASSERT
961 void VirtualSpaceNode::mangle() {
962 size_t word_size = capacity_words_in_vs();
963 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
964 }
965 #endif // ASSERT
967 // VirtualSpaceList methods
968 // Space allocated from the VirtualSpace
970 VirtualSpaceList::~VirtualSpaceList() {
971 VirtualSpaceListIterator iter(virtual_space_list());
972 while (iter.repeat()) {
973 VirtualSpaceNode* vsl = iter.get_next();
974 delete vsl;
975 }
976 }
978 void VirtualSpaceList::inc_virtual_space_total(size_t v) {
979 assert_lock_strong(SpaceManager::expand_lock());
980 _virtual_space_total = _virtual_space_total + v;
981 }
982 void VirtualSpaceList::dec_virtual_space_total(size_t v) {
983 assert_lock_strong(SpaceManager::expand_lock());
984 _virtual_space_total = _virtual_space_total - v;
985 }
987 void VirtualSpaceList::inc_virtual_space_count() {
988 assert_lock_strong(SpaceManager::expand_lock());
989 _virtual_space_count++;
990 }
991 void VirtualSpaceList::dec_virtual_space_count() {
992 assert_lock_strong(SpaceManager::expand_lock());
993 _virtual_space_count--;
994 }
996 void ChunkManager::remove_chunk(Metachunk* chunk) {
997 size_t word_size = chunk->word_size();
998 ChunkIndex index = list_index(word_size);
999 if (index != HumongousIndex) {
1000 free_chunks(index)->remove_chunk(chunk);
1001 } else {
1002 humongous_dictionary()->remove_chunk(chunk);
1003 }
1005 // Chunk is being removed from the chunks free list.
1006 dec_free_chunks_total(chunk->capacity_word_size());
1007 }
1009 // Walk the list of VirtualSpaceNodes and delete
1010 // nodes with a 0 container_count. Remove Metachunks in
1011 // the node from their respective freelists.
1012 void VirtualSpaceList::purge() {
1013 assert_lock_strong(SpaceManager::expand_lock());
1014 // Don't use a VirtualSpaceListIterator because this
1015 // list is being changed and a straightforward use of an iterator is not safe.
1016 VirtualSpaceNode* purged_vsl = NULL;
1017 VirtualSpaceNode* prev_vsl = virtual_space_list();
1018 VirtualSpaceNode* next_vsl = prev_vsl;
1019 while (next_vsl != NULL) {
1020 VirtualSpaceNode* vsl = next_vsl;
1021 next_vsl = vsl->next();
1022 // Don't free the current virtual space since it will likely
1023 // be needed soon.
1024 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1025 // Unlink it from the list
1026 if (prev_vsl == vsl) {
1027 // This is the case of the current node being the first node.
1028 assert(vsl == virtual_space_list(), "Expected to be the first node");
1029 set_virtual_space_list(vsl->next());
1030 } else {
1031 prev_vsl->set_next(vsl->next());
1032 }
1034 vsl->purge(chunk_manager());
1035 dec_virtual_space_total(vsl->reserved()->word_size());
1036 dec_virtual_space_count();
1037 purged_vsl = vsl;
1038 delete vsl;
1039 } else {
1040 prev_vsl = vsl;
1041 }
1042 }
1043 #ifdef ASSERT
1044 if (purged_vsl != NULL) {
1045 // List should be stable enough to use an iterator here.
1046 VirtualSpaceListIterator iter(virtual_space_list());
1047 while (iter.repeat()) {
1048 VirtualSpaceNode* vsl = iter.get_next();
1049 assert(vsl != purged_vsl, "Purge of vsl failed");
1050 }
1051 }
1052 #endif
1053 }
1055 size_t VirtualSpaceList::used_words_sum() {
1056 size_t allocated_by_vs = 0;
1057 VirtualSpaceListIterator iter(virtual_space_list());
1058 while (iter.repeat()) {
1059 VirtualSpaceNode* vsl = iter.get_next();
1060 // Sum used region [bottom, top) in each virtualspace
1061 allocated_by_vs += vsl->used_words_in_vs();
1062 }
1063 assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
1064 err_msg("Total in free chunks " SIZE_FORMAT
1065 " greater than total from virtual_spaces " SIZE_FORMAT,
1066 allocated_by_vs, chunk_manager()->free_chunks_total()));
1067 size_t used =
1068 allocated_by_vs - chunk_manager()->free_chunks_total();
1069 return used;
1070 }
1072 // Space available in all MetadataVirtualspaces allocated
1073 // for metadata. This is the upper limit on the capacity
1074 // of chunks allocated out of all the MetadataVirtualspaces.
1075 size_t VirtualSpaceList::capacity_words_sum() {
1076 size_t capacity = 0;
1077 VirtualSpaceListIterator iter(virtual_space_list());
1078 while (iter.repeat()) {
1079 VirtualSpaceNode* vsl = iter.get_next();
1080 capacity += vsl->capacity_words_in_vs();
1081 }
1082 return capacity;
1083 }
1085 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
1086 _is_class(false),
1087 _virtual_space_list(NULL),
1088 _current_virtual_space(NULL),
1089 _virtual_space_total(0),
1090 _virtual_space_count(0) {
1091 MutexLockerEx cl(SpaceManager::expand_lock(),
1092 Mutex::_no_safepoint_check_flag);
1093 bool initialization_succeeded = grow_vs(word_size);
1095 _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
1096 _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
1097 _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
1098 assert(initialization_succeeded,
1099 " VirtualSpaceList initialization should not fail");
1100 }
1102 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1103 _is_class(true),
1104 _virtual_space_list(NULL),
1105 _current_virtual_space(NULL),
1106 _virtual_space_total(0),
1107 _virtual_space_count(0) {
1108 MutexLockerEx cl(SpaceManager::expand_lock(),
1109 Mutex::_no_safepoint_check_flag);
1110 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1111 bool succeeded = class_entry->initialize();
1112 _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
1113 _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
1114 _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
1115 assert(succeeded, " VirtualSpaceList initialization should not fail");
1116 link_vs(class_entry, rs.size()/BytesPerWord);
1117 }
1119 size_t VirtualSpaceList::free_bytes() {
1120 return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1121 }
1123 // Allocate another meta virtual space and add it to the list.
1124 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
1125 assert_lock_strong(SpaceManager::expand_lock());
1126 if (vs_word_size == 0) {
1127 return false;
1128 }
1129 // Reserve the space
1130 size_t vs_byte_size = vs_word_size * BytesPerWord;
1131 assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
1133 // Allocate the meta virtual space and initialize it.
1134 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1135 if (!new_entry->initialize()) {
1136 delete new_entry;
1137 return false;
1138 } else {
1139 // ensure lock-free iteration sees fully initialized node
1140 OrderAccess::storestore();
1141 link_vs(new_entry, vs_word_size);
1142 return true;
1143 }
1144 }
1146 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
1147 if (virtual_space_list() == NULL) {
1148 set_virtual_space_list(new_entry);
1149 } else {
1150 current_virtual_space()->set_next(new_entry);
1151 }
1152 set_current_virtual_space(new_entry);
1153 inc_virtual_space_total(vs_word_size);
1154 inc_virtual_space_count();
1155 #ifdef ASSERT
1156 new_entry->mangle();
1157 #endif
1158 if (TraceMetavirtualspaceAllocation && Verbose) {
1159 VirtualSpaceNode* vsl = current_virtual_space();
1160 vsl->print_on(tty);
1161 }
1162 }
1164 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1165 size_t grow_chunks_by_words,
1166 size_t medium_chunk_bunch) {
1168 // Get a chunk from the chunk freelist
1169 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
1171 if (next != NULL) {
1172 next->container()->inc_container_count();
1173 } else {
1174 // Allocate a chunk out of the current virtual space.
1175 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1176 }
1178 if (next == NULL) {
1179 // Not enough room in current virtual space. Try to commit
1180 // more space.
1181 size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
1182 grow_chunks_by_words);
1183 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1184 size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
1185 page_size_words);
1186 bool vs_expanded =
1187 current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
1188 if (!vs_expanded) {
1189 // Should the capacity of the metaspaces be expanded for
1190 // this allocation? If it's the virtual space for classes and is
1191 // being used for compressed class pointers, don't allocate a new virtualspace.
1192 if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
1193 // Get another virtual space.
1194 size_t grow_vs_words =
1195 MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
1196 if (grow_vs(grow_vs_words)) {
1197 // Got it. It's on the list now. Get a chunk from it.
1198 next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
1199 }
1200 } else {
1201 // Allocation will fail and induce a GC
1202 if (TraceMetadataChunkAllocation && Verbose) {
1203 gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
1204 " Fail instead of expand the metaspace");
1205 }
1206 }
1207 } else {
1208 // The virtual space expanded, get a new chunk
1209 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1210 assert(next != NULL, "Just expanded, should succeed");
1211 }
1212 }
1214 assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
1215 "New chunk is still on some list");
1216 return next;
1217 }
1219 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1220 size_t chunk_bunch) {
1221 // Get a chunk from the chunk freelist
1222 Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1223 chunk_word_size,
1224 chunk_bunch);
1225 return new_chunk;
1226 }
1228 void VirtualSpaceList::print_on(outputStream* st) const {
1229 if (TraceMetadataChunkAllocation && Verbose) {
1230 VirtualSpaceListIterator iter(virtual_space_list());
1231 while (iter.repeat()) {
1232 VirtualSpaceNode* node = iter.get_next();
1233 node->print_on(st);
1234 }
1235 }
1236 }
1238 bool VirtualSpaceList::contains(const void *ptr) {
1239 VirtualSpaceNode* list = virtual_space_list();
1240 VirtualSpaceListIterator iter(list);
1241 while (iter.repeat()) {
1242 VirtualSpaceNode* node = iter.get_next();
1243 if (node->reserved()->contains(ptr)) {
1244 return true;
1245 }
1246 }
1247 return false;
1248 }
1251 // MetaspaceGC methods
1253 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1254 // Within the VM operation after the GC the attempt to allocate the metadata
1255 // should succeed. If the GC did not free enough space for the metaspace
1256 // allocation, the HWM is increased so that another virtualspace will be
1257 // allocated for the metadata. With the perm gen, increases in the perm
1258 // gen size were bounded by MinMetaspaceExpansion and MaxMetaspaceExpansion. The
1259 // metaspace policy uses those as the small and large steps for the HWM.
1260 //
1261 // After the GC the compute_new_size() for MetaspaceGC is called to
1262 // resize the capacity of the metaspaces. The current implementation
1263 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1264 // to resize the Java heap by some GCs. New flags can be implemented
1265 // if really needed. MinMetaspaceFreeRatio is used to calculate how much
1266 // free space is desirable in the metaspace capacity to decide how much
1267 // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
1268 // free space is desirable in the metaspace capacity before decreasing
1269 // the HWM.
1271 // Calculate the amount to increase the high water mark (HWM).
1272 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1273 // another expansion is not requested too soon. If that is not
1274 // enough to satisfy the allocation (i.e. big enough for a word_size
1275 // allocation), increase by MaxMetaspaceExpansion. If that is still
1276 // not enough, expand by the size of the allocation (word_size) plus
1277 // some.
1278 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
1279 size_t before_inc = MetaspaceGC::capacity_until_GC();
1280 size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
1281 size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
1282 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1283 size_t size_delta_words = align_size_up(word_size, page_size_words);
1284 size_t delta_words = MAX2(size_delta_words, min_delta_words);
1285 if (delta_words > min_delta_words) {
1286 // Don't want to hit the high water mark on the next
1287 // allocation so make the delta greater than just enough
1288 // for this allocation.
1289 delta_words = MAX2(delta_words, max_delta_words);
1290 if (delta_words > max_delta_words) {
1291 // This allocation is large but the next ones are probably not
1292 // so increase by the minimum.
1293 delta_words = delta_words + min_delta_words;
1294 }
1295 }
1296 return delta_words;
1297 }
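// Worked example (editorial; assumes MinMetaspaceExpansion == 256K bytes
// and MaxMetaspaceExpansion == 4M bytes -- close to the defaults -- with
// 8-byte words and 4K pages, so min_delta_words == 32K words and
// max_delta_words == 512K words):
//   word_size == 1000    -> aligns to 1024, below the minimum -> 32K words
//   word_size == 100000  -> above the minimum -> bumped to 512K words
//   word_size == 1000000 -> above the maximum -> 1000448 + 32768 words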
1299 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
1301 size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
1302 // If the user wants a limit, impose one.
1303 size_t max_metaspace_size_bytes = MaxMetaspaceSize;
1304 size_t metaspace_size_bytes = MetaspaceSize;
1305 if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
1306 MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
1307 return false;
1308 }
1310 // Class virtual space should always be expanded. Call GC for the other
1311 // metadata virtual space.
1312 if (vsl == Metaspace::class_space_list()) return true;
1314 // If this is part of an allocation after a GC, expand
1315 // unconditionally.
1316 if (MetaspaceGC::expand_after_GC()) {
1317 return true;
1318 }
1322 // If the capacity is below the minimum capacity, allow the
1323 // expansion. Also set the high-water-mark (capacity_until_GC)
1324 // to that minimum capacity so that a GC will not be induced
1325 // until that minimum capacity is exceeded.
1326 if (committed_capacity_bytes < metaspace_size_bytes ||
1327 capacity_until_GC() == 0) {
1328 set_capacity_until_GC(metaspace_size_bytes);
1329 return true;
1330 } else {
1331 if (committed_capacity_bytes < capacity_until_GC()) {
1332 return true;
1333 } else {
1334 if (TraceMetadataChunkAllocation && Verbose) {
1335 gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT
1336 " capacity_until_GC " SIZE_FORMAT
1337 " allocated_capacity_bytes " SIZE_FORMAT,
1338 word_size,
1339 capacity_until_GC(),
1340 MetaspaceAux::allocated_capacity_bytes());
1341 }
1342 return false;
1343 }
1344 }
1345 }
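// Summary of the decision above (editorial): expansion is refused once a
// user-imposed MaxMetaspaceSize is fully reserved; it is always allowed for
// the class space list and for allocations retried after a GC; otherwise it
// is allowed only while the committed capacity is below MetaspaceSize or
// below the current HWM (capacity_until_GC).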
1349 void MetaspaceGC::compute_new_size() {
1350 assert(_shrink_factor <= 100, "invalid shrink factor");
1351 uint current_shrink_factor = _shrink_factor;
1352 _shrink_factor = 0;
1354 // Until a faster way of calculating the "used" quantity is implemented,
1355 // use "capacity".
1356 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1357 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1359 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1360 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1362 const double min_tmp = used_after_gc / maximum_used_percentage;
1363 size_t minimum_desired_capacity =
1364 (size_t)MIN2(min_tmp, double(max_uintx));
1365 // Don't shrink below the initial size (MetaspaceSize)
1366 minimum_desired_capacity = MAX2(minimum_desired_capacity,
1367 MetaspaceSize);
1369 if (PrintGCDetails && Verbose) {
1370 gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1371 gclog_or_tty->print_cr(" "
1372 " minimum_free_percentage: %6.2f"
1373 " maximum_used_percentage: %6.2f",
1374 minimum_free_percentage,
1375 maximum_used_percentage);
1376 gclog_or_tty->print_cr(" "
1377 " used_after_gc : %6.1fKB",
1378 used_after_gc / (double) K);
1379 }
1382 size_t shrink_bytes = 0;
1383 if (capacity_until_GC < minimum_desired_capacity) {
1384 // The metaspace HWM is below the minimum desired capacity,
1385 // so increase the HWM.
1386 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1387 // Don't expand unless it's significant
1388 if (expand_bytes >= MinMetaspaceExpansion) {
1389 MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
1390 }
1391 if (PrintGCDetails && Verbose) {
1392 size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
1393 gclog_or_tty->print_cr(" expanding:"
1394 " minimum_desired_capacity: %6.1fKB"
1395 " expand_bytes: %6.1fKB"
1396 " MinMetaspaceExpansion: %6.1fKB"
1397 " new metaspace HWM: %6.1fKB",
1398 minimum_desired_capacity / (double) K,
1399 expand_bytes / (double) K,
1400 MinMetaspaceExpansion / (double) K,
1401 new_capacity_until_GC / (double) K);
1402 }
1403 return;
1404 }
1406 // No expansion, now see if we want to shrink
1407 // We would never want to shrink more than this
1408 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1409 assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1410 max_shrink_bytes));
1412 // Should shrinking be considered?
1413 if (MaxMetaspaceFreeRatio < 100) {
1414 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1415 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1416 const double max_tmp = used_after_gc / minimum_used_percentage;
1417 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1418 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1419 MetaspaceSize);
1420 if (PrintGCDetails && Verbose) {
1421 gclog_or_tty->print_cr(" "
1422 " maximum_free_percentage: %6.2f"
1423 " minimum_used_percentage: %6.2f",
1424 maximum_free_percentage,
1425 minimum_used_percentage);
1426 gclog_or_tty->print_cr(" "
1427 " minimum_desired_capacity: %6.1fKB"
1428 " maximum_desired_capacity: %6.1fKB",
1429 minimum_desired_capacity / (double) K,
1430 maximum_desired_capacity / (double) K);
1431 }
1433 assert(minimum_desired_capacity <= maximum_desired_capacity,
1434 "sanity check");
1436 if (capacity_until_GC > maximum_desired_capacity) {
1437 // Capacity too large, compute shrinking size
1438 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1439 // We don't want to shrink all the way back to initSize if people call
1440 // System.gc(), because some programs do that between "phases" and then
1441 // we'd just have to grow the heap up again for the next phase. So we
1442 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1443 // on the third call, and 100% by the fourth call. But if we recompute
1444 // size without shrinking, it goes back to 0%.
1445 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1446 assert(shrink_bytes <= max_shrink_bytes,
1447 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1448 shrink_bytes, max_shrink_bytes));
1449 if (current_shrink_factor == 0) {
1450 _shrink_factor = 10;
1451 } else {
1452 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1453 }
1454 if (PrintGCDetails && Verbose) {
1455 gclog_or_tty->print_cr(" "
1456 " shrinking:"
1457 " initSize: %.1fK"
1458 " maximum_desired_capacity: %.1fK",
1459 MetaspaceSize / (double) K,
1460 maximum_desired_capacity / (double) K);
1461 gclog_or_tty->print_cr(" "
1462 " shrink_bytes: %.1fK"
1463 " current_shrink_factor: %d"
1464 " new shrink factor: %d"
1465 " MinMetaspaceExpansion: %.1fK",
1466 shrink_bytes / (double) K,
1467 current_shrink_factor,
1468 _shrink_factor,
1469 MinMetaspaceExpansion / (double) K);
1470 }
1471 }
1472 }
1474 // Don't shrink unless it's significant
1475 if (shrink_bytes >= MinMetaspaceExpansion &&
1476 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1477 MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
1478 }
1479 }
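// Worked example of the damped shrinking above (editorial): with
// capacity_until_GC == 100M and maximum_desired_capacity == 60M the excess
// is 40M; on the second consecutive shrinking call (current_shrink_factor
// == 10) the HWM drops by 40M / 100 * 10 == 4M, provided that amount
// exceeds MinMetaspaceExpansion.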
1481 // Metadebug methods
1483 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1484 size_t chunk_word_size){
1485 #ifdef ASSERT
1486 VirtualSpaceList* vsl = sm->vs_list();
1487 if (MetaDataDeallocateALot &&
1488 Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1489 Metadebug::reset_deallocate_chunk_a_lot_count();
1490 for (uint i = 0; i < metadata_deallocate_a_lot_chunk; i++) {
1491 Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1492 if (dummy_chunk == NULL) {
1493 break;
1494 }
1495 vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1497 if (TraceMetadataChunkAllocation && Verbose) {
1498 gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
1499 sm->sum_count_in_chunks_in_use());
1500 dummy_chunk->print_on(gclog_or_tty);
1501 gclog_or_tty->print_cr(" Free chunks total %d count %d",
1502 vsl->chunk_manager()->free_chunks_total(),
1503 vsl->chunk_manager()->free_chunks_count());
1504 }
1505 }
1506 } else {
1507 Metadebug::inc_deallocate_chunk_a_lot_count();
1508 }
1509 #endif
1510 }
1512 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1513 size_t raw_word_size){
1514 #ifdef ASSERT
1515 if (MetaDataDeallocateALot &&
1516 Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1517 Metadebug::set_deallocate_block_a_lot_count(0);
1518 for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1519 MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1520 if (dummy_block == 0) {
1521 break;
1522 }
1523 sm->deallocate(dummy_block, raw_word_size);
1524 }
1525 } else {
1526 Metadebug::inc_deallocate_block_a_lot_count();
1527 }
1528 #endif
1529 }
1531 void Metadebug::init_allocation_fail_alot_count() {
1532 if (MetadataAllocationFailALot) {
1533 _allocation_fail_alot_count =
1534 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1535 }
1536 }
1538 #ifdef ASSERT
1539 bool Metadebug::test_metadata_failure() {
1540 if (MetadataAllocationFailALot &&
1541 Threads::is_vm_complete()) {
1542 if (_allocation_fail_alot_count > 0) {
1543 _allocation_fail_alot_count--;
1544 } else {
1545 if (TraceMetadataChunkAllocation && Verbose) {
1546 gclog_or_tty->print_cr("Metadata allocation failing for "
1547 "MetadataAllocationFailALot");
1548 }
1549 init_allocation_fail_alot_count();
1550 return true;
1551 }
1552 }
1553 return false;
1554 }
1555 #endif
1557 // ChunkManager methods
1559 size_t ChunkManager::free_chunks_total() {
1560 return _free_chunks_total;
1561 }
1563 size_t ChunkManager::free_chunks_total_in_bytes() {
1564 return free_chunks_total() * BytesPerWord;
1565 }
1567 size_t ChunkManager::free_chunks_count() {
1568 #ifdef ASSERT
1569 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1570 MutexLockerEx cl(SpaceManager::expand_lock(),
1571 Mutex::_no_safepoint_check_flag);
1572 // This lock is only needed in debug because the verification
1573 // of the _free_chunks_totals walks the list of free chunks
1574 slow_locked_verify_free_chunks_count();
1575 }
1576 #endif
1577 return _free_chunks_count;
1578 }
1580 void ChunkManager::locked_verify_free_chunks_total() {
1581 assert_lock_strong(SpaceManager::expand_lock());
1582 assert(sum_free_chunks() == _free_chunks_total,
1583 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1584 " same as sum " SIZE_FORMAT, _free_chunks_total,
1585 sum_free_chunks()));
1586 }
1588 void ChunkManager::verify_free_chunks_total() {
1589 MutexLockerEx cl(SpaceManager::expand_lock(),
1590 Mutex::_no_safepoint_check_flag);
1591 locked_verify_free_chunks_total();
1592 }
1594 void ChunkManager::locked_verify_free_chunks_count() {
1595 assert_lock_strong(SpaceManager::expand_lock());
1596 assert(sum_free_chunks_count() == _free_chunks_count,
1597 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1598 " same as sum " SIZE_FORMAT, _free_chunks_count,
1599 sum_free_chunks_count()));
1600 }
1602 void ChunkManager::verify_free_chunks_count() {
1603 #ifdef ASSERT
1604 MutexLockerEx cl(SpaceManager::expand_lock(),
1605 Mutex::_no_safepoint_check_flag);
1606 locked_verify_free_chunks_count();
1607 #endif
1608 }
1610 void ChunkManager::verify() {
1611 MutexLockerEx cl(SpaceManager::expand_lock(),
1612 Mutex::_no_safepoint_check_flag);
1613 locked_verify();
1614 }
1616 void ChunkManager::locked_verify() {
1617 locked_verify_free_chunks_count();
1618 locked_verify_free_chunks_total();
1619 }
1621 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1622 assert_lock_strong(SpaceManager::expand_lock());
1623 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1624 _free_chunks_total, _free_chunks_count);
1625 }
1627 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1628 assert_lock_strong(SpaceManager::expand_lock());
1629 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1630 sum_free_chunks(), sum_free_chunks_count());
1631 }
1632 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1633 return &_free_chunks[index];
1634 }
1636 // These methods, which sum the free chunk lists, are used by the
1637 // printing methods that run in product builds.
1638 size_t ChunkManager::sum_free_chunks() {
1639 assert_lock_strong(SpaceManager::expand_lock());
1640 size_t result = 0;
1641 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1642 ChunkList* list = free_chunks(i);
1644 if (list == NULL) {
1645 continue;
1646 }
1648 result = result + list->count() * list->size();
1649 }
1650 result = result + humongous_dictionary()->total_size();
1651 return result;
1652 }
1654 size_t ChunkManager::sum_free_chunks_count() {
1655 assert_lock_strong(SpaceManager::expand_lock());
1656 size_t count = 0;
1657 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1658 ChunkList* list = free_chunks(i);
1659 if (list == NULL) {
1660 continue;
1661 }
1662 count = count + list->count();
1663 }
1664 count = count + humongous_dictionary()->total_free_blocks();
1665 return count;
1666 }
1668 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1669 ChunkIndex index = list_index(word_size);
1670 assert(index < HumongousIndex, "No humongous list");
1671 return free_chunks(index);
1672 }
1674 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1675 assert_lock_strong(SpaceManager::expand_lock());
1676 ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1677 chunk->set_next(free_list->head());
1678 free_list->set_head(chunk);
1679 // chunk is being returned to the chunk free list
1680 inc_free_chunks_total(chunk->capacity_word_size());
1681 slow_locked_verify();
1682 }
1684 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1685 // The deallocation of a chunk originates in the freelist
1686 // management code for a Metaspace and does not hold the
1687 // lock.
1688 assert(chunk != NULL, "Deallocating NULL");
1689 assert_lock_strong(SpaceManager::expand_lock());
1690 slow_locked_verify();
1691 if (TraceMetadataChunkAllocation) {
1692 tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1693 PTR_FORMAT " size " SIZE_FORMAT,
1694 chunk, chunk->word_size());
1695 }
1696 free_chunks_put(chunk);
1697 }
1699 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1700 assert_lock_strong(SpaceManager::expand_lock());
1702 slow_locked_verify();
1704 Metachunk* chunk = NULL;
1705 if (list_index(word_size) != HumongousIndex) {
1706 ChunkList* free_list = find_free_chunks_list(word_size);
1707 assert(free_list != NULL, "Sanity check");
1709 chunk = free_list->head();
1710 debug_only(Metachunk* debug_head = chunk;)
1712 if (chunk == NULL) {
1713 return NULL;
1714 }
1716 // Remove the chunk as the head of the list.
1717 free_list->remove_chunk(chunk);
1719 // Chunk is being removed from the chunks free list.
1720 dec_free_chunks_total(chunk->capacity_word_size());
1722 if (TraceMetadataChunkAllocation && Verbose) {
1723 tty->print_cr("ChunkManager::free_chunks_get: free_list "
1724 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1725 free_list, chunk, chunk->word_size());
1726 }
1727 } else {
1728 chunk = humongous_dictionary()->get_chunk(
1729 word_size,
1730 FreeBlockDictionary<Metachunk>::atLeast);
1732 if (chunk != NULL) {
1733 if (TraceMetadataHumongousAllocation) {
1734 size_t waste = chunk->word_size() - word_size;
1735 tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
1736 " for requested size " SIZE_FORMAT
1737 " waste " SIZE_FORMAT,
1738 chunk->word_size(), word_size, waste);
1739 }
1740 // Chunk is being removed from the chunks free list.
1741 dec_free_chunks_total(chunk->capacity_word_size());
1742 } else {
1743 return NULL;
1744 }
1745 }
1747 // Remove it from the links to this freelist
1748 chunk->set_next(NULL);
1749 chunk->set_prev(NULL);
1750 #ifdef ASSERT
1751 // Chunk is no longer on any freelist. Setting it to false makes
1752 // container_count_slow() work.
1753 chunk->set_is_free(false);
1754 #endif
1755 slow_locked_verify();
1756 return chunk;
1757 }
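// Illustrative sketch (added; not part of the original change): a
// caller must hold expand_lock() when pulling chunks out of the free
// lists. The sizes below come from the ChunkSizes enum; any size
// without a dedicated list takes the humongous-dictionary path, which
// returns the smallest free chunk of at least the requested size.
//
//   MutexLockerEx ml(SpaceManager::expand_lock(),
//                    Mutex::_no_safepoint_check_flag);
//   Metachunk* small = manager->free_chunks_get(SmallChunk); // indexed list
//   Metachunk* huge  = manager->free_chunks_get(16 * K);     // dictionary
//
// Here "manager" stands for some ChunkManager*; either call may return
// NULL if no suitable chunk is free.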
1759 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1760 assert_lock_strong(SpaceManager::expand_lock());
1761 slow_locked_verify();
1763 // Take from the beginning of the list
1764 Metachunk* chunk = free_chunks_get(word_size);
1765 if (chunk == NULL) {
1766 return NULL;
1767 }
1769 assert((word_size <= chunk->word_size()) ||
1770 list_index(chunk->word_size()) == HumongousIndex,
1771 "Non-humongous variable sized chunk");
1772 if (TraceMetadataChunkAllocation) {
1773 size_t list_count;
1774 if (list_index(word_size) < HumongousIndex) {
1775 ChunkList* list = find_free_chunks_list(word_size);
1776 list_count = list->count();
1777 } else {
1778 list_count = humongous_dictionary()->total_count();
1779 }
1780 tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1781 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1782 this, chunk, chunk->word_size(), list_count);
1783 locked_print_free_chunks(tty);
1784 }
1786 return chunk;
1787 }
1789 void ChunkManager::print_on(outputStream* out) {
1790 if (PrintFLSStatistics != 0) {
1791 humongous_dictionary()->report_statistics();
1792 }
1793 }
1795 // SpaceManager methods
1797 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1798 size_t* chunk_word_size,
1799 size_t* class_chunk_word_size) {
1800 switch (type) {
1801 case Metaspace::BootMetaspaceType:
1802 *chunk_word_size = Metaspace::first_chunk_word_size();
1803 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1804 break;
1805 case Metaspace::ROMetaspaceType:
1806 *chunk_word_size = SharedReadOnlySize / wordSize;
1807 *class_chunk_word_size = ClassSpecializedChunk;
1808 break;
1809 case Metaspace::ReadWriteMetaspaceType:
1810 *chunk_word_size = SharedReadWriteSize / wordSize;
1811 *class_chunk_word_size = ClassSpecializedChunk;
1812 break;
1813 case Metaspace::AnonymousMetaspaceType:
1814 case Metaspace::ReflectionMetaspaceType:
1815 *chunk_word_size = SpecializedChunk;
1816 *class_chunk_word_size = ClassSpecializedChunk;
1817 break;
1818 default:
1819 *chunk_word_size = SmallChunk;
1820 *class_chunk_word_size = ClassSmallChunk;
1821 break;
1822 }
1823 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1824 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1825 " class " SIZE_FORMAT,
1826 *chunk_word_size, *class_chunk_word_size));
1827 }
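// For reference (added note): with the ChunkSizes constants defined at
// the top of this file, the cases above resolve to, in words:
//
//   BootMetaspaceType      -> first_chunk_word_size(),
//                             first_class_chunk_word_size()
//   ROMetaspaceType        -> SharedReadOnlySize / wordSize,
//                             ClassSpecializedChunk (128)
//   ReadWriteMetaspaceType -> SharedReadWriteSize / wordSize,
//                             ClassSpecializedChunk (128)
//   Anonymous/Reflection   -> SpecializedChunk (128),
//                             ClassSpecializedChunk (128)
//   default                -> SmallChunk (512), ClassSmallChunk (256)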
1829 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1830 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1831 size_t free = 0;
1832 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1833 Metachunk* chunk = chunks_in_use(i);
1834 while (chunk != NULL) {
1835 free += chunk->free_word_size();
1836 chunk = chunk->next();
1837 }
1838 }
1839 return free;
1840 }
1842 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1843 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1844 size_t result = 0;
1845 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1846 result += sum_waste_in_chunks_in_use(i);
1847 }
1849 return result;
1850 }
1852 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1853 size_t result = 0;
1854 Metachunk* chunk = chunks_in_use(index);
1855 // Count the free space in all the chunks but not the
1856 // current chunk, from which allocations are still being made.
1857 if (chunk != NULL) {
1858 Metachunk* prev = chunk;
1859 while (chunk != NULL && chunk != current_chunk()) {
1860 result += chunk->free_word_size();
1861 prev = chunk;
1862 chunk = chunk->next();
1863 }
1864 }
1865 return result;
1866 }
1868 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1869 // For CMS use "allocated_chunks_words()" which does not need the
1870 // Metaspace lock. For the other collectors sum over the
1871 // chunks-in-use lists. sum_capacity_in_chunks_in_use() is the
1872 // definitive answer but is too expensive to use in the product,
1873 // so allocated_chunks_words() is used there; keeping both paths
1874 // allows checking that allocated_chunks_words() returns the same
1875 // value as sum_capacity_in_chunks_in_use().
1877 if (UseConcMarkSweepGC) {
1878 return allocated_chunks_words();
1879 } else {
1880 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1881 size_t sum = 0;
1882 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1883 Metachunk* chunk = chunks_in_use(i);
1884 while (chunk != NULL) {
1885 sum += chunk->capacity_word_size();
1886 chunk = chunk->next();
1887 }
1888 }
1889 return sum;
1890 }
1891 }
1893 size_t SpaceManager::sum_count_in_chunks_in_use() {
1894 size_t count = 0;
1895 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1896 count = count + sum_count_in_chunks_in_use(i);
1897 }
1899 return count;
1900 }
1902 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1903 size_t count = 0;
1904 Metachunk* chunk = chunks_in_use(i);
1905 while (chunk != NULL) {
1906 count++;
1907 chunk = chunk->next();
1908 }
1909 return count;
1910 }
1913 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1914 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1915 size_t used = 0;
1916 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1917 Metachunk* chunk = chunks_in_use(i);
1918 while (chunk != NULL) {
1919 used += chunk->used_word_size();
1920 chunk = chunk->next();
1921 }
1922 }
1923 return used;
1924 }
1926 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1928 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1929 Metachunk* chunk = chunks_in_use(i);
1930 st->print("SpaceManager: %s " PTR_FORMAT,
1931 chunk_size_name(i), chunk);
1932 if (chunk != NULL) {
1933 st->print_cr(" free " SIZE_FORMAT,
1934 chunk->free_word_size());
1935 } else {
1936 st->print_cr("");
1937 }
1938 }
1940 vs_list()->chunk_manager()->locked_print_free_chunks(st);
1941 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1942 }
1944 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1946 // Decide between a small chunk and a medium chunk. Up to
1947 // _small_chunk_limit small chunks can be allocated but
1948 // once a medium chunk has been allocated, no more small
1949 // chunks will be allocated.
1950 size_t chunk_word_size;
1951 if (chunks_in_use(MediumIndex) == NULL &&
1952 (!has_small_chunk_limit() ||
1953 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
1954 chunk_word_size = (size_t) small_chunk_size();
1955 if (word_size + Metachunk::overhead() > small_chunk_size()) {
1956 chunk_word_size = medium_chunk_size();
1957 }
1958 } else {
1959 chunk_word_size = medium_chunk_size();
1960 }
1962 // Might still need a humongous chunk. Enforce an
1963 // eight word granularity to facilitate reuse (some
1964 // wastage but better chance of reuse).
1965 size_t if_humongous_sized_chunk =
1966 align_size_up(word_size + Metachunk::overhead(),
1967 HumongousChunkGranularity);
1968 chunk_word_size =
1969 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1971 assert(!SpaceManager::is_humongous(word_size) ||
1972 chunk_word_size == if_humongous_sized_chunk,
1973 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1974 " chunk_word_size " SIZE_FORMAT,
1975 word_size, chunk_word_size));
1976 if (TraceMetadataHumongousAllocation &&
1977 SpaceManager::is_humongous(word_size)) {
1978 gclog_or_tty->print_cr("Metadata humongous allocation:");
1979 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
1980 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
1981 chunk_word_size);
1982 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
1983 Metachunk::overhead());
1984 }
1985 return chunk_word_size;
1986 }
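// Worked example (added; Metachunk::overhead() is taken as 8 words
// purely for illustration): for word_size == 10000 in a data
// SpaceManager with no medium chunk in use yet, 10000 + 8 exceeds
// small_chunk_size() (512), so the medium size (8*K == 8192) is
// chosen first; the humongous adjustment then wins:
//
//   if_humongous_sized_chunk = align_size_up(10000 + 8, 8) = 10008
//   chunk_word_size          = MAX2(8192, 10008)           = 10008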
1988 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1989 assert(vs_list()->current_virtual_space() != NULL,
1990 "Should have been set");
1991 assert(current_chunk() == NULL ||
1992 current_chunk()->allocate(word_size) == NULL,
1993 "Don't need to expand");
1994 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1996 if (TraceMetadataChunkAllocation && Verbose) {
1997 size_t words_left = 0;
1998 size_t words_used = 0;
1999 if (current_chunk() != NULL) {
2000 words_left = current_chunk()->free_word_size();
2001 words_used = current_chunk()->used_word_size();
2002 }
2003 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2004 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2005 " words left",
2006 word_size, words_used, words_left);
2007 }
2009 // Get another chunk out of the virtual space
2010 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2011 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2013 // If a chunk was available, add it to the in-use chunk list
2014 // and do an allocation from it.
2015 if (next != NULL) {
2016 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2017 // Add to this manager's list of chunks in use.
2018 add_chunk(next, false);
2019 return next->allocate(word_size);
2020 }
2021 return NULL;
2022 }
2024 void SpaceManager::print_on(outputStream* st) const {
2026 for (ChunkIndex i = ZeroIndex;
2027 i < NumberOfInUseLists ;
2028 i = next_chunk_index(i) ) {
2029 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2030 chunks_in_use(i),
2031 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2032 }
2033 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2034 " Humongous " SIZE_FORMAT,
2035 sum_waste_in_chunks_in_use(SmallIndex),
2036 sum_waste_in_chunks_in_use(MediumIndex),
2037 sum_waste_in_chunks_in_use(HumongousIndex));
2038 // block free lists
2039 if (block_freelists() != NULL) {
2040 st->print_cr("total in block free lists " SIZE_FORMAT,
2041 block_freelists()->total_size());
2042 }
2043 }
2045 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2046 Mutex* lock,
2047 VirtualSpaceList* vs_list) :
2048 _vs_list(vs_list),
2049 _mdtype(mdtype),
2050 _allocated_blocks_words(0),
2051 _allocated_chunks_words(0),
2052 _allocated_chunks_count(0),
2053 _lock(lock)
2054 {
2055 initialize();
2056 }
2058 void SpaceManager::inc_size_metrics(size_t words) {
2059 assert_lock_strong(SpaceManager::expand_lock());
2060 // Total of allocated Metachunks and allocated Metachunks count
2061 // for each SpaceManager
2062 _allocated_chunks_words = _allocated_chunks_words + words;
2063 _allocated_chunks_count++;
2064 // Global total of capacity in allocated Metachunks
2065 MetaspaceAux::inc_capacity(mdtype(), words);
2066 // Global total of allocated Metablocks.
2067 // used_words_slow() includes the overhead in each
2068 // Metachunk so include it in the used when the
2069 // Metachunk is first added (so only added once per
2070 // Metachunk).
2071 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2072 }
2074 void SpaceManager::inc_used_metrics(size_t words) {
2075 // Add to the per SpaceManager total
2076 Atomic::add_ptr(words, &_allocated_blocks_words);
2077 // Add to the global total
2078 MetaspaceAux::inc_used(mdtype(), words);
2079 }
2081 void SpaceManager::dec_total_from_size_metrics() {
2082 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2083 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2084 // Also deduct the overhead per Metachunk
2085 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2086 }
2088 void SpaceManager::initialize() {
2089 Metadebug::init_allocation_fail_alot_count();
2090 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2091 _chunks_in_use[i] = NULL;
2092 }
2093 _current_chunk = NULL;
2094 if (TraceMetadataChunkAllocation && Verbose) {
2095 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2096 }
2097 }
2099 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2100 if (chunks == NULL) {
2101 return;
2102 }
2103 ChunkList* list = free_chunks(index);
2104 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2105 assert_lock_strong(SpaceManager::expand_lock());
2106 Metachunk* cur = chunks;
2108 // This returns chunks one at a time. If a new class List
2109 // were created as a base class of FreeList, something like
2110 // FreeList::prepend() could be used in place of this loop.
2112 while (cur != NULL) {
2113 assert(cur->container() != NULL, "Container should have been set");
2114 cur->container()->dec_container_count();
2115 // Capture the next link before it is changed
2116 // by the call to return_chunk_at_head();
2117 Metachunk* next = cur->next();
2118 cur->set_is_free(true);
2119 list->return_chunk_at_head(cur);
2120 cur = next;
2121 }
2122 }
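// Added note: return_chunk_at_head() pushes each chunk at the front,
// so return_chunks() reverses the order of the chunks-in-use list as
// it splices it onto the free list. Order is irrelevant for a free
// list, so this is harmless.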
2124 SpaceManager::~SpaceManager() {
2125 // This call takes this->_lock, which can't be done while holding expand_lock()
2126 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2127 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2128 " allocated_chunks_words() " SIZE_FORMAT,
2129 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2131 MutexLockerEx fcl(SpaceManager::expand_lock(),
2132 Mutex::_no_safepoint_check_flag);
2134 ChunkManager* chunk_manager = vs_list()->chunk_manager();
2136 chunk_manager->slow_locked_verify();
2138 dec_total_from_size_metrics();
2140 if (TraceMetadataChunkAllocation && Verbose) {
2141 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2142 locked_print_chunks_in_use_on(gclog_or_tty);
2143 }
2145 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2146 // is used during the freeing of VirtualSpaceNodes.
2148 // Have to update before the chunks_in_use lists are emptied
2149 // below.
2150 chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
2151 sum_count_in_chunks_in_use());
2153 // Add all the chunks in use by this space manager
2154 // to the global list of free chunks.
2156 // Follow each list of chunks-in-use and add them to the
2157 // free lists. Each list is NULL terminated.
2159 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2160 if (TraceMetadataChunkAllocation && Verbose) {
2161 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2162 sum_count_in_chunks_in_use(i),
2163 chunk_size_name(i));
2164 }
2165 Metachunk* chunks = chunks_in_use(i);
2166 chunk_manager->return_chunks(i, chunks);
2167 set_chunks_in_use(i, NULL);
2168 if (TraceMetadataChunkAllocation && Verbose) {
2169 gclog_or_tty->print_cr("updated freelist count %d %s",
2170 chunk_manager->free_chunks(i)->count(),
2171 chunk_size_name(i));
2172 }
2173 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2174 }
2176 // The medium chunk case may be optimized by passing the head and
2177 // tail of the medium chunk list to add_at_head(). The tail is often
2178 // the current chunk but there are probably exceptions.
2180 // Humongous chunks
2181 if (TraceMetadataChunkAllocation && Verbose) {
2182 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2183 sum_count_in_chunks_in_use(HumongousIndex),
2184 chunk_size_name(HumongousIndex));
2185 gclog_or_tty->print("Humongous chunk dictionary: ");
2186 }
2187 // Humongous chunks are never the current chunk.
2188 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2190 while (humongous_chunks != NULL) {
2191 #ifdef ASSERT
2192 humongous_chunks->set_is_free(true);
2193 #endif
2194 if (TraceMetadataChunkAllocation && Verbose) {
2195 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2196 humongous_chunks,
2197 humongous_chunks->word_size());
2198 }
2199 assert(humongous_chunks->word_size() == (size_t)
2200 align_size_up(humongous_chunks->word_size(),
2201 HumongousChunkGranularity),
2202 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2203 " granularity %d",
2204 humongous_chunks->word_size(), HumongousChunkGranularity));
2205 Metachunk* next_humongous_chunks = humongous_chunks->next();
2206 humongous_chunks->container()->dec_container_count();
2207 chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
2208 humongous_chunks = next_humongous_chunks;
2209 }
2210 if (TraceMetadataChunkAllocation && Verbose) {
2211 gclog_or_tty->print_cr("");
2212 gclog_or_tty->print_cr("updated dictionary count %d %s",
2213 chunk_manager->humongous_dictionary()->total_count(),
2214 chunk_size_name(HumongousIndex));
2215 }
2216 chunk_manager->slow_locked_verify();
2217 }
2219 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2220 switch (index) {
2221 case SpecializedIndex:
2222 return "Specialized";
2223 case SmallIndex:
2224 return "Small";
2225 case MediumIndex:
2226 return "Medium";
2227 case HumongousIndex:
2228 return "Humongous";
2229 default:
2230 return NULL;
2231 }
2232 }
2234 ChunkIndex ChunkManager::list_index(size_t size) {
2235 switch (size) {
2236 case SpecializedChunk:
2237 assert(SpecializedChunk == ClassSpecializedChunk,
2238 "Need branch for ClassSpecializedChunk");
2239 return SpecializedIndex;
2240 case SmallChunk:
2241 case ClassSmallChunk:
2242 return SmallIndex;
2243 case MediumChunk:
2244 case ClassMediumChunk:
2245 return MediumIndex;
2246 default:
2247 assert(size > MediumChunk || size > ClassMediumChunk,
2248 "Not a humongous chunk");
2249 return HumongousIndex;
2250 }
2251 }
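// Added note: with the ChunkSizes enum this maps
//
//   128 (SpecializedChunk == ClassSpecializedChunk) -> SpecializedIndex
//   256 (ClassSmallChunk) or 512 (SmallChunk)       -> SmallIndex
//   1*K (ClassMediumChunk) or 8*K (MediumChunk)     -> MediumIndex
//   any other size                                  -> HumongousIndex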
2253 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2254 assert_lock_strong(_lock);
2255 size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2256 assert(word_size >= min_size,
2257 err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2258 block_freelists()->return_block(p, word_size);
2259 }
2261 // Adds a chunk to the list of chunks in use.
2262 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2264 assert(new_chunk != NULL, "Should not be NULL");
2265 assert(new_chunk->next() == NULL, "Should not be on a list");
2267 new_chunk->reset_empty();
2269 // Find the correct list and set the current
2270 // chunk for that list.
2271 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2273 if (index != HumongousIndex) {
2274 set_current_chunk(new_chunk);
2275 new_chunk->set_next(chunks_in_use(index));
2276 set_chunks_in_use(index, new_chunk);
2277 } else {
2278 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2279 // small, so small will be null. Link this first chunk as the current
2280 // chunk.
2281 if (make_current) {
2282 // Set as the current chunk but otherwise treat as a humongous chunk.
2283 set_current_chunk(new_chunk);
2284 }
2285 // Link at head. The _current_chunk only points to a humongous chunk
2286 // for the null class loader metaspace (class and data virtual space
2287 // managers), so it will not point to the tail of the humongous
2288 // chunks list.
2289 new_chunk->set_next(chunks_in_use(HumongousIndex));
2290 set_chunks_in_use(HumongousIndex, new_chunk);
2292 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2293 }
2295 // Add to the running sum of capacity
2296 inc_size_metrics(new_chunk->word_size());
2298 assert(new_chunk->is_empty(), "Not ready for reuse");
2299 if (TraceMetadataChunkAllocation && Verbose) {
2300 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2301 sum_count_in_chunks_in_use());
2302 new_chunk->print_on(gclog_or_tty);
2303 if (vs_list() != NULL) {
2304 vs_list()->chunk_manager()->locked_print_free_chunks(tty);
2305 }
2306 }
2307 }
2309 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2310 size_t grow_chunks_by_words) {
2312 Metachunk* next = vs_list()->get_new_chunk(word_size,
2313 grow_chunks_by_words,
2314 medium_chunk_bunch());
2316 if (TraceMetadataHumongousAllocation &&
2317 SpaceManager::is_humongous(next->word_size())) {
2318 gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT,
2319 next->word_size());
2320 }
2322 return next;
2323 }
2325 MetaWord* SpaceManager::allocate(size_t word_size) {
2326 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2328 size_t raw_word_size = get_raw_word_size(word_size);
2329 BlockFreelist* fl = block_freelists();
2330 MetaWord* p = NULL;
2331 // Allocation from the dictionary is expensive in the sense that
2332 // the dictionary has to be searched for a size. Don't allocate
2333 // from the dictionary until it starts to get fat. Is this
2334 // a reasonable policy? Maybe a skinny dictionary is fast enough
2335 // for allocations. Do some profiling. JJJ
2336 if (fl->total_size() > allocation_from_dictionary_limit) {
2337 p = fl->get_block(raw_word_size);
2338 }
2339 if (p == NULL) {
2340 p = allocate_work(raw_word_size);
2341 }
2342 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2344 return p;
2345 }
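// Illustrative sketch (added): the allocation policy above in pseudo
// form, with allocation_from_dictionary_limit == 64*K words:
//
//   if (block_freelists()->total_size() > 64*K)
//     p = dictionary lookup for raw_word_size;   // may miss
//   if (p == NULL)
//     p = allocate_work(raw_word_size);          // bump-pointer in the
//                                                // current chunk, or grow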
2347 // Returns the address of space allocated for "word_size".
2348 // This method does not know about blocks (Metablocks).
2349 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2350 assert_lock_strong(_lock);
2351 #ifdef ASSERT
2352 if (Metadebug::test_metadata_failure()) {
2353 return NULL;
2354 }
2355 #endif
2356 // Is there space in the current chunk?
2357 MetaWord* result = NULL;
2359 // For DumpSharedSpaces, only allocate out of the current chunk which is
2360 // never null because we gave it the size we wanted. Caller reports out
2361 // of memory if this returns null.
2362 if (DumpSharedSpaces) {
2363 assert(current_chunk() != NULL, "should never happen");
2364 inc_used_metrics(word_size);
2365 return current_chunk()->allocate(word_size); // caller handles null result
2366 }
2367 if (current_chunk() != NULL) {
2368 result = current_chunk()->allocate(word_size);
2369 }
2371 if (result == NULL) {
2372 result = grow_and_allocate(word_size);
2373 }
2374 if (result != NULL) {
2375 inc_used_metrics(word_size);
2376 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2377 "Head of the list is being allocated");
2378 }
2380 return result;
2381 }
2383 void SpaceManager::verify() {
2384 // If there are blocks in the dictionary, then
2385 // verification of chunks does not work since
2386 // being in the dictionary alters a chunk.
2387 if (block_freelists()->total_size() == 0) {
2388 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2389 Metachunk* curr = chunks_in_use(i);
2390 while (curr != NULL) {
2391 curr->verify();
2392 verify_chunk_size(curr);
2393 curr = curr->next();
2394 }
2395 }
2396 }
2397 }
2399 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2400 assert(is_humongous(chunk->word_size()) ||
2401 chunk->word_size() == medium_chunk_size() ||
2402 chunk->word_size() == small_chunk_size() ||
2403 chunk->word_size() == specialized_chunk_size(),
2404 "Chunk size is wrong");
2405 return;
2406 }
2408 #ifdef ASSERT
2409 void SpaceManager::verify_allocated_blocks_words() {
2410 // Verification is only guaranteed at a safepoint.
2411 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2412 "Verification can fail if the applications is running");
2413 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2414 err_msg("allocation total is not consistent " SIZE_FORMAT
2415 " vs " SIZE_FORMAT,
2416 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2417 }
2419 #endif
2421 void SpaceManager::dump(outputStream* const out) const {
2422 size_t curr_total = 0;
2423 size_t waste = 0;
2424 uint i = 0;
2425 size_t used = 0;
2426 size_t capacity = 0;
2428 // Add up statistics for all chunks in this SpaceManager.
2429 for (ChunkIndex index = ZeroIndex;
2430 index < NumberOfInUseLists;
2431 index = next_chunk_index(index)) {
2432 for (Metachunk* curr = chunks_in_use(index);
2433 curr != NULL;
2434 curr = curr->next()) {
2435 out->print("%d) ", i++);
2436 curr->print_on(out);
2437 if (TraceMetadataChunkAllocation && Verbose) {
2438 block_freelists()->print_on(out);
2439 }
2440 curr_total += curr->word_size();
2441 used += curr->used_word_size();
2442 capacity += curr->capacity_word_size();
2443 waste += curr->free_word_size() + curr->overhead();
2444 }
2445 }
2447 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2448 // Free space isn't wasted.
2449 waste -= free;
2451 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2452 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2453 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2454 }
2456 #ifndef PRODUCT
2457 void SpaceManager::mangle_freed_chunks() {
2458 for (ChunkIndex index = ZeroIndex;
2459 index < NumberOfInUseLists;
2460 index = next_chunk_index(index)) {
2461 for (Metachunk* curr = chunks_in_use(index);
2462 curr != NULL;
2463 curr = curr->next()) {
2464 curr->mangle();
2465 }
2466 }
2467 }
2468 #endif // PRODUCT
2470 // MetaspaceAux
2473 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2474 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2476 size_t MetaspaceAux::free_bytes() {
2477 size_t result = 0;
2478 if (Metaspace::class_space_list() != NULL) {
2479 result = result + Metaspace::class_space_list()->free_bytes();
2480 }
2481 if (Metaspace::space_list() != NULL) {
2482 result = result + Metaspace::space_list()->free_bytes();
2483 }
2484 return result;
2485 }
2487 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2488 assert_lock_strong(SpaceManager::expand_lock());
2489 assert(words <= allocated_capacity_words(mdtype),
2490 err_msg("About to decrement below 0: words " SIZE_FORMAT
2491 " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2492 words, mdtype, allocated_capacity_words(mdtype)));
2493 _allocated_capacity_words[mdtype] -= words;
2494 }
2496 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2497 assert_lock_strong(SpaceManager::expand_lock());
2498 // Needs to be atomic
2499 _allocated_capacity_words[mdtype] += words;
2500 }
2502 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2503 assert(words <= allocated_used_words(mdtype),
2504 err_msg("About to decrement below 0: words " SIZE_FORMAT
2505 " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2506 words, mdtype, allocated_used_words(mdtype)));
2507 // For CMS deallocation of the Metaspaces occurs during the
2508 // sweep which is a concurrent phase. Protection by the expand_lock()
2509 // is not enough since allocation is on a per Metaspace basis
2510 // and protected by the Metaspace lock.
2511 jlong minus_words = -(jlong)words;
2512 Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2513 }
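// Added note: Atomic::add_ptr() takes a signed addend, so the unsigned
// word count is negated through a signed intermediate; e.g. for
// words == 16 this adds -16 to _allocated_used_words[mdtype].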
2515 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2516 // _allocated_used_words tracks allocations for
2517 // each piece of metadata. Those allocations are
2518 // generally done concurrently by different application
2519 // threads so must be done atomically.
2520 Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2521 }
2523 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2524 size_t used = 0;
2525 ClassLoaderDataGraphMetaspaceIterator iter;
2526 while (iter.repeat()) {
2527 Metaspace* msp = iter.get_next();
2528 // Sum allocated_blocks_words for each metaspace
2529 if (msp != NULL) {
2530 used += msp->used_words_slow(mdtype);
2531 }
2532 }
2533 return used * BytesPerWord;
2534 }
2536 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2537 size_t free = 0;
2538 ClassLoaderDataGraphMetaspaceIterator iter;
2539 while (iter.repeat()) {
2540 Metaspace* msp = iter.get_next();
2541 if (msp != NULL) {
2542 free += msp->free_words(mdtype);
2543 }
2544 }
2545 return free * BytesPerWord;
2546 }
2548 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2549 // Don't count the space in the freelists. That space will be
2550 // added to the capacity calculation as needed.
2551 size_t capacity = 0;
2552 ClassLoaderDataGraphMetaspaceIterator iter;
2553 while (iter.repeat()) {
2554 Metaspace* msp = iter.get_next();
2555 if (msp != NULL) {
2556 capacity += msp->capacity_words_slow(mdtype);
2557 }
2558 }
2559 return capacity * BytesPerWord;
2560 }
2562 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2563 size_t reserved = (mdtype == Metaspace::ClassType) ?
2564 Metaspace::class_space_list()->virtual_space_total() :
2565 Metaspace::space_list()->virtual_space_total();
2566 return reserved * BytesPerWord;
2567 }
2569 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2571 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2572 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2573 Metaspace::class_space_list()->chunk_manager() :
2574 Metaspace::space_list()->chunk_manager();
2575 chunk->slow_verify();
2576 return chunk->free_chunks_total();
2577 }
2579 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2580 return free_chunks_total(mdtype) * BytesPerWord;
2581 }
2583 size_t MetaspaceAux::free_chunks_total() {
2584 return free_chunks_total(Metaspace::ClassType) +
2585 free_chunks_total(Metaspace::NonClassType);
2586 }
2588 size_t MetaspaceAux::free_chunks_total_in_bytes() {
2589 return free_chunks_total() * BytesPerWord;
2590 }
2592 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2593 gclog_or_tty->print(", [Metaspace:");
2594 if (PrintGCDetails && Verbose) {
2595 gclog_or_tty->print(" " SIZE_FORMAT
2596 "->" SIZE_FORMAT
2597 "(" SIZE_FORMAT ")",
2598 prev_metadata_used,
2599 allocated_used_bytes(),
2600 reserved_in_bytes());
2601 } else {
2602 gclog_or_tty->print(" " SIZE_FORMAT "K"
2603 "->" SIZE_FORMAT "K"
2604 "(" SIZE_FORMAT "K)",
2605 prev_metadata_used / K,
2606 allocated_used_bytes() / K,
2607 reserved_in_bytes()/ K);
2608 }
2610 gclog_or_tty->print("]");
2611 }
2613 // This is printed when PrintGCDetails
2614 void MetaspaceAux::print_on(outputStream* out) {
2615 Metaspace::MetadataType ct = Metaspace::ClassType;
2616 Metaspace::MetadataType nct = Metaspace::NonClassType;
2618 out->print_cr(" Metaspace total "
2619 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2620 " reserved " SIZE_FORMAT "K",
2621 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
2623 out->print_cr(" data space "
2624 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2625 " reserved " SIZE_FORMAT "K",
2626 allocated_capacity_bytes(nct)/K,
2627 allocated_used_bytes(nct)/K,
2628 reserved_in_bytes(nct)/K);
2629 out->print_cr(" class space "
2630 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2631 " reserved " SIZE_FORMAT "K",
2632 allocated_capacity_bytes(ct)/K,
2633 allocated_used_bytes(ct)/K,
2634 reserved_in_bytes(ct)/K);
2635 }
2637 // Print information for class space and data space separately.
2638 // This is almost the same as above.
2639 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2640 size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2641 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2642 size_t used_bytes = used_bytes_slow(mdtype);
2643 size_t free_bytes = free_in_bytes(mdtype);
2644 size_t used_and_free = used_bytes + free_bytes +
2645 free_chunks_capacity_bytes;
2646 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2647 "K + unused in chunks " SIZE_FORMAT "K + "
2648 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2649 "K capacity in allocated chunks " SIZE_FORMAT "K",
2650 used_bytes / K,
2651 free_bytes / K,
2652 free_chunks_capacity_bytes / K,
2653 used_and_free / K,
2654 capacity_bytes / K);
2655 // Accounting can only be correct if we got the values during a safepoint
2656 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2657 }
2659 // Print total fragmentation for class and data metaspaces separately
2660 void MetaspaceAux::print_waste(outputStream* out) {
2662 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
2663 size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
2664 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
2665 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
2667 ClassLoaderDataGraphMetaspaceIterator iter;
2668 while (iter.repeat()) {
2669 Metaspace* msp = iter.get_next();
2670 if (msp != NULL) {
2671 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2672 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2673 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2674 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2675 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2676 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2677 large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2678 large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2680 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2681 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2682 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2683 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2684 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2685 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2686 cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
2687 cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2688 }
2689 }
2690 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2691 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2692 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2693 SIZE_FORMAT " medium(s) " SIZE_FORMAT,
2694 specialized_count, specialized_waste, small_count,
2695 small_waste, medium_count, medium_waste);
2696 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2697 SIZE_FORMAT " small(s) " SIZE_FORMAT,
2698 cls_specialized_count, cls_specialized_waste,
2699 cls_small_count, cls_small_waste);
2700 }
2702 // Dump global metaspace things from the end of ClassLoaderDataGraph
2703 void MetaspaceAux::dump(outputStream* out) {
2704 out->print_cr("All Metaspace:");
2705 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2706 out->print("class space: "); print_on(out, Metaspace::ClassType);
2707 print_waste(out);
2708 }
2710 void MetaspaceAux::verify_free_chunks() {
2711 Metaspace::space_list()->chunk_manager()->verify();
2712 Metaspace::class_space_list()->chunk_manager()->verify();
2713 }
2715 void MetaspaceAux::verify_capacity() {
2716 #ifdef ASSERT
2717 size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2718 // For purposes of the running sum of capacity, verify against capacity
2719 size_t capacity_in_use_bytes = capacity_bytes_slow();
2720 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2721 err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2722 " capacity_bytes_slow()" SIZE_FORMAT,
2723 running_sum_capacity_bytes, capacity_in_use_bytes));
2724 for (Metaspace::MetadataType i = Metaspace::ClassType;
2725 i < Metaspace::MetadataTypeCount;
2726 i = (Metaspace::MetadataType)(i + 1)) {
2727 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2728 assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2729 err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2730 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2731 i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2732 }
2733 #endif
2734 }
2736 void MetaspaceAux::verify_used() {
2737 #ifdef ASSERT
2738 size_t running_sum_used_bytes = allocated_used_bytes();
2739 // For purposes of the running sum of used, verify against used
2740 size_t used_in_use_bytes = used_bytes_slow();
2741 assert(allocated_used_bytes() == used_in_use_bytes,
2742 err_msg("allocated_used_bytes() " SIZE_FORMAT
2743 " used_bytes_slow()" SIZE_FORMAT,
2744 allocated_used_bytes(), used_in_use_bytes));
2745 for (Metaspace::MetadataType i = Metaspace::ClassType;
2746 i < Metaspace::MetadataTypeCount;
2747 i = (Metaspace::MetadataType)(i + 1)) {
2748 size_t used_in_use_bytes = used_bytes_slow(i);
2749 assert(allocated_used_bytes(i) == used_in_use_bytes,
2750 err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2751 " used_bytes_slow(%u)" SIZE_FORMAT,
2752 i, allocated_used_bytes(i), i, used_in_use_bytes));
2753 }
2754 #endif
2755 }
2757 void MetaspaceAux::verify_metrics() {
2758 verify_capacity();
2759 verify_used();
2760 }
2763 // Metaspace methods
2765 size_t Metaspace::_first_chunk_word_size = 0;
2766 size_t Metaspace::_first_class_chunk_word_size = 0;
2768 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2769 initialize(lock, type);
2770 }
2772 Metaspace::~Metaspace() {
2773 delete _vsm;
2774 delete _class_vsm;
2775 }
2777 VirtualSpaceList* Metaspace::_space_list = NULL;
2778 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2780 #define VIRTUALSPACEMULTIPLIER 2
2782 void Metaspace::global_initialize() {
2783 // Initialize the alignment for shared spaces.
2784 int max_alignment = os::vm_page_size();
2785 MetaspaceShared::set_max_alignment(max_alignment);
2787 if (DumpSharedSpaces) {
2788 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2789 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2790 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2791 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2793 // Initialize with the sum of the shared space sizes. The read-only
2794 // and read write metaspace chunks will be allocated out of this and the
2795 // remainder is the misc code and data chunks.
2796 size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2797 SharedMiscDataSize + SharedMiscCodeSize,
2798 os::vm_allocation_granularity());
2799 size_t word_size = total/wordSize;
2800 _space_list = new VirtualSpaceList(word_size);
2801 } else {
2802 // If using shared space, open the file that contains the shared space
2803 // and map in the memory before initializing the rest of metaspace (so
2804 // the addresses don't conflict)
2805 if (UseSharedSpaces) {
2806 FileMapInfo* mapinfo = new FileMapInfo();
2807 memset(mapinfo, 0, sizeof(FileMapInfo));
2809 // Open the shared archive file, read and validate the header. If
2810 // initialization fails, shared spaces [UseSharedSpaces] are
2811 // disabled and the file is closed.
2812 // Map in spaces now also
2813 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2814 FileMapInfo::set_current_info(mapinfo);
2815 } else {
2816 assert(!mapinfo->is_open() && !UseSharedSpaces,
2817 "archive file not closed or shared spaces not disabled.");
2818 }
2819 }
2821 // Initialize these before initializing the VirtualSpaceList
2822 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2823 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2824 // Make the first class chunk bigger than a medium chunk so it's not put
2825 // on the medium chunk list. The next chunk will be small and progress
2826 // from there. This size calculated by -version.
2827 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2828 (ClassMetaspaceSize/BytesPerWord)*2);
2829 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2830 // Arbitrarily set the initial virtual space to a multiple
2831 // of the boot class loader size.
2832 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2833 // Initialize the list of virtual spaces.
2834 _space_list = new VirtualSpaceList(word_size);
2835 }
2836 }
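// Worked example (added; the concrete values are illustrative only):
// on a 64-bit VM with InitialBootClassLoaderMetaspaceSize == 4*M,
// first_chunk_word_size() is about 4*M / 8 == 512*K words, so the
// initial VirtualSpaceList above is sized at
// VIRTUALSPACEMULTIPLIER * 512*K == 1*M words, i.e. 8 MB.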
2838 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2839 // Java heap because the compression algorithm is the same for each. The
2840 // argument passed in is at the top of the compressed space
2841 void Metaspace::initialize_class_space(ReservedSpace rs) {
2842 // The reserved space size may be bigger because of alignment, esp with UseLargePages
2843 assert(rs.size() >= ClassMetaspaceSize,
2844 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2845 _class_space_list = new VirtualSpaceList(rs);
2846 }
2848 void Metaspace::initialize(Mutex* lock,
2849 MetaspaceType type) {
2851 assert(space_list() != NULL,
2852 "Metadata VirtualSpaceList has not been initialized");
2854 _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
2855 if (_vsm == NULL) {
2856 return;
2857 }
2858 size_t word_size;
2859 size_t class_word_size;
2860 vsm()->get_initial_chunk_sizes(type,
2861 &word_size,
2862 &class_word_size);
2864 assert(class_space_list() != NULL,
2865 "Class VirtualSpaceList has not been initialized");
2867 // Allocate SpaceManager for classes.
2868 _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
2869 if (_class_vsm == NULL) {
2870 return;
2871 }
2873 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2875 // Allocate chunk for metadata objects
2876 Metachunk* new_chunk =
2877 space_list()->get_initialization_chunk(word_size,
2878 vsm()->medium_chunk_bunch());
2879 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2880 if (new_chunk != NULL) {
2881 // Add to this manager's list of chunks in use and current_chunk().
2882 vsm()->add_chunk(new_chunk, true);
2883 }
2885 // Allocate chunk for class metadata objects
2886 Metachunk* class_chunk =
2887 class_space_list()->get_initialization_chunk(class_word_size,
2888 class_vsm()->medium_chunk_bunch());
2889 if (class_chunk != NULL) {
2890 class_vsm()->add_chunk(class_chunk, true);
2891 }
2893 _alloc_record_head = NULL;
2894 _alloc_record_tail = NULL;
2895 }
2897 size_t Metaspace::align_word_size_up(size_t word_size) {
2898 size_t byte_size = word_size * wordSize;
2899 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2900 }
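// Worked example (added; assumes a 64-bit VM where wordSize == 8 and,
// for illustration, an allocation alignment of 4*K bytes): for
// word_size == 1000, byte_size == 8000, which rounds up to 8192 bytes,
// i.e. 1024 words.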
2902 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2903 // DumpSharedSpaces doesn't use class metadata area (yet)
2904 if (mdtype == ClassType && !DumpSharedSpaces) {
2905 return class_vsm()->allocate(word_size);
2906 } else {
2907 return vsm()->allocate(word_size);
2908 }
2909 }
2911 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2912 MetaWord* result;
2913 MetaspaceGC::set_expand_after_GC(true);
2914 size_t before_inc = MetaspaceGC::capacity_until_GC();
2915 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
2916 MetaspaceGC::inc_capacity_until_GC(delta_bytes);
2917 if (PrintGCDetails && Verbose) {
2918 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2919 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2920 }
2922 result = allocate(word_size, mdtype);
2924 return result;
2925 }
2927 // Space allocated in the Metaspace. This may
2928 // be across several metadata virtual spaces.
2929 char* Metaspace::bottom() const {
2930 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2931 return (char*)vsm()->current_chunk()->bottom();
2932 }
2934 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
2935 // return vsm()->allocated_used_words();
2936 return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2937 vsm()->sum_used_in_chunks_in_use(); // includes overhead!
2938 }
2940 size_t Metaspace::free_words(MetadataType mdtype) const {
2941 return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2942 vsm()->sum_free_in_chunks_in_use();
2943 }
2945 // Space capacity in the Metaspace. It includes
2946 // space in the list of chunks from which allocations
2947 // have been made. Don't include space in the global freelist and
2948 // in the space available in the dictionary which
2949 // is already counted in some chunk.
2950 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
2951 return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2952 vsm()->sum_capacity_in_chunks_in_use();
2953 }
2955 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
2956 return used_words_slow(mdtype) * BytesPerWord;
2957 }
2959 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
2960 return capacity_words_slow(mdtype) * BytesPerWord;
2961 }
2963 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2964 if (SafepointSynchronize::is_at_safepoint()) {
2965 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2966 // Don't take Heap_lock
2967 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2968 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2969 // Dark matter. Too small for dictionary.
2970 #ifdef ASSERT
2971 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2972 #endif
2973 return;
2974 }
2975 if (is_class) {
2976 class_vsm()->deallocate(ptr, word_size);
2977 } else {
2978 vsm()->deallocate(ptr, word_size);
2979 }
2980 } else {
2981 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2983 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2984 // Dark matter. Too small for dictionary.
2985 #ifdef ASSERT
2986 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2987 #endif
2988 return;
2989 }
2990 if (is_class) {
2991 class_vsm()->deallocate(ptr, word_size);
2992 } else {
2993 vsm()->deallocate(ptr, word_size);
2994 }
2995 }
2996 }
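// Added note: blocks smaller than TreeChunk<Metablock, FreeList>::
// min_size() cannot be threaded onto the free-block dictionary, so
// they are dropped as "dark matter"; in debug builds they are filled
// with the 0xf5f5f5f5 pattern so stale uses are easier to spot.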
2998 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
2999 bool read_only, MetaspaceObj::Type type, TRAPS) {
3000 if (HAS_PENDING_EXCEPTION) {
3001 assert(false, "Should not allocate with exception pending");
3002 return NULL; // caller does a CHECK_NULL too
3003 }
3005 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3007 // SSS: Should we align the allocations and make sure the sizes are aligned?
3008 MetaWord* result = NULL;
3010 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3011 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3012 // Allocate in metaspaces without taking out a lock, because it deadlocks
3013 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
3014 // to revisit this for application class data sharing.
3015 if (DumpSharedSpaces) {
3016 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3017 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3018 result = space->allocate(word_size, NonClassType);
3019 if (result == NULL) {
3020 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3021 } else {
3022 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3023 }
3024 return Metablock::initialize(result, word_size);
3025 }
3027 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3029 if (result == NULL) {
3030 // Try to clean out some memory and retry.
3031 result =
3032 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3033 loader_data, word_size, mdtype);
3035 // If result is still null, we are out of memory.
3036 if (result == NULL) {
3037 if (Verbose && TraceMetadataChunkAllocation) {
3038 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3039 SIZE_FORMAT, word_size);
3040 if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty);
3041 MetaspaceAux::dump(gclog_or_tty);
3042 }
3043 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3044 report_java_out_of_memory("Metadata space");
3046 if (JvmtiExport::should_post_resource_exhausted()) {
3047 JvmtiExport::post_resource_exhausted(
3048 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3049 "Metadata space");
3050 }
3051 THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
3052 }
3053 }
3054 return Metablock::initialize(result, word_size);
3055 }
3057 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3058 assert(DumpSharedSpaces, "sanity");
3060 AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3061 if (_alloc_record_head == NULL) {
3062 _alloc_record_head = _alloc_record_tail = rec;
3063 } else {
3064 _alloc_record_tail->_next = rec;
3065 _alloc_record_tail = rec;
3066 }
3067 }
3069 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3070 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3072 address last_addr = (address)bottom();
3074 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3075 address ptr = rec->_ptr;
3076 if (last_addr < ptr) {
3077 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3078 }
3079 closure->doit(ptr, rec->_type, rec->_byte_size);
3080 last_addr = ptr + rec->_byte_size;
3081 }
3083 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3084 if (last_addr < top) {
3085 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3086 }
3087 }
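// Illustrative sketch (added): a minimal AllocRecordClosure that
// tallies dumped bytes per MetaspaceObj::Type. The name ByteTally is
// hypothetical; it assumes the doit(address, MetaspaceObj::Type, int)
// signature used by iterate() above.
//
//   class ByteTally : public Metaspace::AllocRecordClosure {
//    public:
//     size_t _bytes[MetaspaceObj::_number_of_types];
//     ByteTally() { memset(_bytes, 0, sizeof(_bytes)); }
//     void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
//       _bytes[type] += byte_size;
//     }
//   };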
3089 void Metaspace::purge() {
3090 MutexLockerEx cl(SpaceManager::expand_lock(),
3091 Mutex::_no_safepoint_check_flag);
3092 space_list()->purge();
3093 class_space_list()->purge();
3094 }
3096 void Metaspace::print_on(outputStream* out) const {
3097 // Print both class virtual space counts and metaspace.
3098 if (Verbose) {
3099 vsm()->print_on(out);
3100 class_vsm()->print_on(out);
3101 }
3102 }
3104 bool Metaspace::contains(const void * ptr) {
3105 if (MetaspaceShared::is_in_shared_space(ptr)) {
3106 return true;
3107 }
3108 // This is checked while unlocked. As long as the virtualspaces are added
3109 // at the end, the pointer will be in one of them. The virtual spaces
3110 // aren't deleted presently. When they are, some sort of locking might
3111 // be needed. Note, locking this can cause inversion problems with the
3112 // caller in MetaspaceObj::is_metadata() function.
3113 return space_list()->contains(ptr) ||
3114 class_space_list()->contains(ptr);
3115 }
3117 void Metaspace::verify() {
3118 vsm()->verify();
3119 class_vsm()->verify();
3120 }
3122 void Metaspace::dump(outputStream* const out) const {
3123 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3124 vsm()->dump(out);
3125 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3126 class_vsm()->dump(out);
3127 }