Fri, 26 Jul 2013 15:24:32 +0000
8019845: Memory leak during class redefinition
Reviewed-by: acorn, jmasa, coleenp, dcubed, mgerdin
1 /*
2 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25 #include "gc_interface/collectedHeap.hpp"
26 #include "memory/binaryTreeDictionary.hpp"
27 #include "memory/freeList.hpp"
28 #include "memory/collectorPolicy.hpp"
29 #include "memory/filemap.hpp"
30 #include "memory/freeList.hpp"
31 #include "memory/metablock.hpp"
32 #include "memory/metachunk.hpp"
33 #include "memory/metaspace.hpp"
34 #include "memory/metaspaceShared.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp"
37 #include "runtime/globals.hpp"
38 #include "runtime/mutex.hpp"
39 #include "runtime/orderAccess.hpp"
40 #include "services/memTracker.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/debug.hpp"
44 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
45 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
46 // Set this constant to true to enable slow integrity checking of
47 // the free chunk lists
48 const bool metaspace_slow_verify = false;
50 // Parameters for stress mode testing
51 const uint metadata_deallocate_a_lot_block = 10;
52 const uint metadata_deallocate_a_lot_chunk = 3;
53 size_t const allocation_from_dictionary_limit = 64 * K;
55 MetaWord* last_allocated = 0;
57 // Used in declarations in SpaceManager and ChunkManager
58 enum ChunkIndex {
59 ZeroIndex = 0,
60 SpecializedIndex = ZeroIndex,
61 SmallIndex = SpecializedIndex + 1,
62 MediumIndex = SmallIndex + 1,
63 HumongousIndex = MediumIndex + 1,
64 NumberOfFreeLists = 3,
65 NumberOfInUseLists = 4
66 };
68 enum ChunkSizes { // in words.
69 ClassSpecializedChunk = 128,
70 SpecializedChunk = 128,
71 ClassSmallChunk = 256,
72 SmallChunk = 512,
73 ClassMediumChunk = 4 * K,
74 MediumChunk = 8 * K,
75 HumongousChunkGranularity = 8
76 };
78 static ChunkIndex next_chunk_index(ChunkIndex i) {
79 assert(i < NumberOfInUseLists, "Out of bound");
80 return (ChunkIndex) (i+1);
81 }
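// Editor's illustration (not part of this change): next_chunk_index() is the
// idiom used further down to walk the indexed chunk lists in order, e.g. in
// ChunkManager::sum_free_chunks():
//
//   for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
//     ChunkList* list = free_chunks(i);
//     // ... visit each indexed free list ...
//   }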
83 // Originally _capacity_until_GC was set to MetaspaceSize here but
84 // the default MetaspaceSize before argument processing was being
85 // used which was not the desired value. See the code
86 // in should_expand() to see how the initialization is handled
87 // now.
88 size_t MetaspaceGC::_capacity_until_GC = 0;
89 bool MetaspaceGC::_expand_after_GC = false;
90 uint MetaspaceGC::_shrink_factor = 0;
91 bool MetaspaceGC::_should_concurrent_collect = false;
93 // Blocks of space for metadata are allocated out of Metachunks.
94 //
95 // Metachunks are allocated out of MetadataVirtualspaces. Each Metachunk
96 // records the VirtualSpaceNode (its container) from which it was allocated
97 // so that the node's count of in-use chunks can be maintained.
98 //
99 // Each SpaceManager maintains a
100 // list of the chunks it is using and the current chunk. The current
101 // chunk is the chunk from which allocations are done. Space freed in
102 // a chunk is placed on the free list of blocks (BlockFreelist) and
103 // reused from there.
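// Editor's summary of the allocation path implied above (drawn from the
// declarations and comments in SpaceManager below, no new behavior):
// SpaceManager::allocate() first tries the per-manager BlockFreelist, then
// the current chunk; if both fail it calls grow_and_allocate(), which obtains
// a new chunk -- possibly from a new virtual space -- and retries the
// allocation from that chunk.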
105 typedef class FreeList<Metachunk> ChunkList;
107 // Manages the global free lists of chunks.
108 // Has three indexed lists of free chunks plus a dictionary of humongous
109 // chunks; the total size and count cover all of them.
111 class ChunkManager VALUE_OBJ_CLASS_SPEC {
113 // Free lists of chunks of different sizes (humongous chunks are kept in
114 // the dictionary below).
115 // SpecializedChunk
116 // SmallChunk
117 // MediumChunk
118 ChunkList _free_chunks[NumberOfFreeLists];
121 // HumongousChunk
122 ChunkTreeDictionary _humongous_dictionary;
124 // Total size and count of the free chunks held by this ChunkManager
125 size_t _free_chunks_total;
126 size_t _free_chunks_count;
128 void dec_free_chunks_total(size_t v) {
129 assert(_free_chunks_count > 0 &&
130 _free_chunks_total > 0,
131 "About to go negative");
132 Atomic::add_ptr(-1, &_free_chunks_count);
133 jlong minus_v = (jlong) - (jlong) v;
134 Atomic::add_ptr(minus_v, &_free_chunks_total);
135 }
137 // Debug support
139 size_t sum_free_chunks();
140 size_t sum_free_chunks_count();
142 void locked_verify_free_chunks_total();
143 void slow_locked_verify_free_chunks_total() {
144 if (metaspace_slow_verify) {
145 locked_verify_free_chunks_total();
146 }
147 }
148 void locked_verify_free_chunks_count();
149 void slow_locked_verify_free_chunks_count() {
150 if (metaspace_slow_verify) {
151 locked_verify_free_chunks_count();
152 }
153 }
154 void verify_free_chunks_count();
156 public:
158 ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
160 // Allocate a chunk from, or return (deallocate) a chunk to, the global freelist.
161 Metachunk* chunk_freelist_allocate(size_t word_size);
162 void chunk_freelist_deallocate(Metachunk* chunk);
164 // Map a size to a list index assuming that there are lists
165 // for specialized, small, medium, and humongous chunks.
166 static ChunkIndex list_index(size_t size);
168 // Remove the chunk from its freelist. It is
169 // expected to be on one of the _free_chunks[] lists.
170 void remove_chunk(Metachunk* chunk);
172 // Add the simple linked list of chunks to the freelist of chunks
173 // of type index.
174 void return_chunks(ChunkIndex index, Metachunk* chunks);
176 // Total of the space in the free chunks list
177 size_t free_chunks_total();
178 size_t free_chunks_total_in_bytes();
180 // Number of chunks in the free chunks list
181 size_t free_chunks_count();
183 void inc_free_chunks_total(size_t v, size_t count = 1) {
184 Atomic::add_ptr(count, &_free_chunks_count);
185 Atomic::add_ptr(v, &_free_chunks_total);
186 }
187 ChunkTreeDictionary* humongous_dictionary() {
188 return &_humongous_dictionary;
189 }
191 ChunkList* free_chunks(ChunkIndex index);
193 // Returns the list for the given chunk word size.
194 ChunkList* find_free_chunks_list(size_t word_size);
196 // Add and remove from a list by size. Selects
197 // list based on size of chunk.
198 void free_chunks_put(Metachunk* chunk);
199 Metachunk* free_chunks_get(size_t chunk_word_size);
201 // Debug support
202 void verify();
203 void slow_verify() {
204 if (metaspace_slow_verify) {
205 verify();
206 }
207 }
208 void locked_verify();
209 void slow_locked_verify() {
210 if (metaspace_slow_verify) {
211 locked_verify();
212 }
213 }
214 void verify_free_chunks_total();
216 void locked_print_free_chunks(outputStream* st);
217 void locked_print_sum_free_chunks(outputStream* st);
219 void print_on(outputStream* st);
220 };
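// Editor's sketch (not part of this change): typical ChunkManager usage as it
// appears later in this file -- chunks are taken from and returned to the
// global freelist while holding SpaceManager::expand_lock():
//
//   Metachunk* c = chunk_manager()->chunk_freelist_allocate(word_size);
//   ...
//   chunk_manager()->chunk_freelist_deallocate(c);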
222 // Used to manage the free list of Metablocks (a block corresponds
223 // to the allocation of a quantum of metadata).
224 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
225 BlockTreeDictionary* _dictionary;
226 static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
228 // Accessors
229 BlockTreeDictionary* dictionary() const { return _dictionary; }
231 public:
232 BlockFreelist();
233 ~BlockFreelist();
235 // Get a block from, and return a block to, the free list
236 MetaWord* get_block(size_t word_size);
237 void return_block(MetaWord* p, size_t word_size);
239 size_t total_size() {
240 if (dictionary() == NULL) {
241 return 0;
242 } else {
243 return dictionary()->total_size();
244 }
245 }
247 void print_on(outputStream* st) const;
248 };
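// Editor's sketch (not part of this change): the BlockFreelist round trip used
// by SpaceManager -- freed metadata space is handed back with return_block()
// and later reused via get_block():
//
//   block_freelists()->return_block(p, word_size);          // on deallocate
//   MetaWord* q = block_freelists()->get_block(word_size);  // on allocate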
250 class VirtualSpaceNode : public CHeapObj<mtClass> {
251 friend class VirtualSpaceList;
253 // Link to next VirtualSpaceNode
254 VirtualSpaceNode* _next;
256 // Total reserved region of the VirtualSpace
257 MemRegion _reserved;
258 ReservedSpace _rs;
259 VirtualSpace _virtual_space;
260 MetaWord* _top;
261 // count of chunks contained in this VirtualSpace
262 uintx _container_count;
264 // Convenience functions for logical bottom and end
265 MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
266 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
268 // Convenience functions to access the _virtual_space
269 char* low() const { return virtual_space()->low(); }
270 char* high() const { return virtual_space()->high(); }
272 // The first Metachunk will be allocated at the bottom of the
273 // VirtualSpace
274 Metachunk* first_chunk() { return (Metachunk*) bottom(); }
276 void inc_container_count();
277 #ifdef ASSERT
278 uint container_count_slow();
279 #endif
281 public:
283 VirtualSpaceNode(size_t byte_size);
284 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
285 ~VirtualSpaceNode();
287 // address of next available space in _virtual_space;
288 // Accessors
289 VirtualSpaceNode* next() { return _next; }
290 void set_next(VirtualSpaceNode* v) { _next = v; }
292 void set_reserved(MemRegion const v) { _reserved = v; }
293 void set_top(MetaWord* v) { _top = v; }
295 // Accessors
296 MemRegion* reserved() { return &_reserved; }
297 VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
299 // Returns true if "word_size" is available in the VirtualSpace
300 bool is_available(size_t word_size) { return _top + word_size <= end(); }
302 MetaWord* top() const { return _top; }
303 void inc_top(size_t word_size) { _top += word_size; }
305 uintx container_count() { return _container_count; }
306 void dec_container_count();
307 #ifdef ASSERT
308 void verify_container_count();
309 #endif
311 // used and capacity in this single entry in the list
312 size_t used_words_in_vs() const;
313 size_t capacity_words_in_vs() const;
314 size_t free_words_in_vs() const;
316 bool initialize();
318 // get space from the virtual space
319 Metachunk* take_from_committed(size_t chunk_word_size);
321 // Allocate a chunk from the virtual space and return it.
322 Metachunk* get_chunk_vs(size_t chunk_word_size);
323 Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
325 // Expands/shrinks the committed space in a virtual space. Delegates
326 // to Virtualspace
327 bool expand_by(size_t words, bool pre_touch = false);
328 bool shrink_by(size_t words);
330 // In preparation for deleting this node, remove all the chunks
331 // in the node from any freelist.
332 void purge(ChunkManager* chunk_manager);
334 #ifdef ASSERT
335 // Debug support
336 static void verify_virtual_space_total();
337 static void verify_virtual_space_count();
338 void mangle();
339 #endif
341 void print_on(outputStream* st) const;
342 };
344 // byte_size is the size of the associated virtualspace.
345 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
346 // align up to vm allocation granularity
347 byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
349 // This allocates memory with mmap. For DumpSharedSpaces, try to reserve
350 // at a configurable address, generally at the top of the Java heap so other
351 // memory addresses don't conflict.
352 if (DumpSharedSpaces) {
353 char* shared_base = (char*)SharedBaseAddress;
354 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
355 if (_rs.is_reserved()) {
356 assert(shared_base == 0 || _rs.base() == shared_base, "should match");
357 } else {
358 // Get a mmap region anywhere if the SharedBaseAddress fails.
359 _rs = ReservedSpace(byte_size);
360 }
361 MetaspaceShared::set_shared_rs(&_rs);
362 } else {
363 _rs = ReservedSpace(byte_size);
364 }
366 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
367 }
369 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
370 Metachunk* chunk = first_chunk();
371 Metachunk* invalid_chunk = (Metachunk*) top();
372 while (chunk < invalid_chunk ) {
373 assert(chunk->is_free(), "Should be marked free");
374 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
375 chunk_manager->remove_chunk(chunk);
376 assert(chunk->next() == NULL &&
377 chunk->prev() == NULL,
378 "Was not removed from its list");
379 chunk = (Metachunk*) next;
380 }
381 }
383 #ifdef ASSERT
384 uint VirtualSpaceNode::container_count_slow() {
385 uint count = 0;
386 Metachunk* chunk = first_chunk();
387 Metachunk* invalid_chunk = (Metachunk*) top();
388 while (chunk < invalid_chunk ) {
389 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
390 // Don't count the chunks on the free lists. Those are
391 // still part of the VirtualSpaceNode but not currently
392 // counted.
393 if (!chunk->is_free()) {
394 count++;
395 }
396 chunk = (Metachunk*) next;
397 }
398 return count;
399 }
400 #endif
402 // List of VirtualSpaces for metadata allocation.
403 // Each node (VirtualSpaceNode) has a _next link for the singly linked list
404 // and a MemRegion for the total space in its VirtualSpace.
405 class VirtualSpaceList : public CHeapObj<mtClass> {
406 friend class VirtualSpaceNode;
408 enum VirtualSpaceSizes {
409 VirtualSpaceSize = 256 * K
410 };
412 // Global list of virtual spaces
413 // Head of the list
414 VirtualSpaceNode* _virtual_space_list;
415 // virtual space currently being used for allocations
416 VirtualSpaceNode* _current_virtual_space;
417 // Free chunk list for all other metadata
418 ChunkManager _chunk_manager;
420 // Can this virtual space list allocate more than one virtual space? Also
421 // used to determine whether to allocate unlimited small chunks from it.
422 bool _is_class;
423 bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
425 // Sum of space in all virtual spaces and number of virtual spaces
426 size_t _virtual_space_total;
427 size_t _virtual_space_count;
429 ~VirtualSpaceList();
431 VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }
433 void set_virtual_space_list(VirtualSpaceNode* v) {
434 _virtual_space_list = v;
435 }
436 void set_current_virtual_space(VirtualSpaceNode* v) {
437 _current_virtual_space = v;
438 }
440 void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
442 // Get another virtual space and add it to the list. This
443 // is typically prompted by a failed attempt to allocate a chunk
444 // and is typically followed by the allocation of a chunk.
445 bool grow_vs(size_t vs_word_size);
447 public:
448 VirtualSpaceList(size_t word_size);
449 VirtualSpaceList(ReservedSpace rs);
451 size_t free_bytes();
453 Metachunk* get_new_chunk(size_t word_size,
454 size_t grow_chunks_by_words,
455 size_t medium_chunk_bunch);
457 // Get the first chunk for a Metaspace. Used for
458 // special cases such as the boot class loader, reflection
459 // class loader and anonymous class loader.
460 Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
462 VirtualSpaceNode* current_virtual_space() {
463 return _current_virtual_space;
464 }
466 ChunkManager* chunk_manager() { return &_chunk_manager; }
467 bool is_class() const { return _is_class; }
469 // Allocate the first virtualspace.
470 void initialize(size_t word_size);
472 size_t virtual_space_total() { return _virtual_space_total; }
474 void inc_virtual_space_total(size_t v);
475 void dec_virtual_space_total(size_t v);
476 void inc_virtual_space_count();
477 void dec_virtual_space_count();
479 // Unlink empty VirtualSpaceNodes and free them.
480 void purge();
482 // Used and capacity in the entire list of virtual spaces.
483 // These are global values shared by all Metaspaces
484 size_t capacity_words_sum();
485 size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
486 size_t used_words_sum();
487 size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
489 bool contains(const void *ptr);
491 void print_on(outputStream* st) const;
493 class VirtualSpaceListIterator : public StackObj {
494 VirtualSpaceNode* _virtual_spaces;
495 public:
496 VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
497 _virtual_spaces(virtual_spaces) {}
499 bool repeat() {
500 return _virtual_spaces != NULL;
501 }
503 VirtualSpaceNode* get_next() {
504 VirtualSpaceNode* result = _virtual_spaces;
505 if (_virtual_spaces != NULL) {
506 _virtual_spaces = _virtual_spaces->next();
507 }
508 return result;
509 }
510 };
511 };
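// Editor's note (illustration, not part of this change): VirtualSpaceListIterator
// is used throughout this file with the following pattern (see, e.g.,
// used_words_sum() and contains() below):
//
//   VirtualSpaceListIterator iter(virtual_space_list());
//   while (iter.repeat()) {
//     VirtualSpaceNode* node = iter.get_next();
//     // ... per-node work ...
//   }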
513 class Metadebug : AllStatic {
514 // Debugging support for Metaspaces
515 static int _deallocate_block_a_lot_count;
516 static int _deallocate_chunk_a_lot_count;
517 static int _allocation_fail_alot_count;
519 public:
520 static int deallocate_block_a_lot_count() {
521 return _deallocate_block_a_lot_count;
522 }
523 static void set_deallocate_block_a_lot_count(int v) {
524 _deallocate_block_a_lot_count = v;
525 }
526 static void inc_deallocate_block_a_lot_count() {
527 _deallocate_block_a_lot_count++;
528 }
529 static int deallocate_chunk_a_lot_count() {
530 return _deallocate_chunk_a_lot_count;
531 }
532 static void reset_deallocate_chunk_a_lot_count() {
533 _deallocate_chunk_a_lot_count = 1;
534 }
535 static void inc_deallocate_chunk_a_lot_count() {
536 _deallocate_chunk_a_lot_count++;
537 }
539 static void init_allocation_fail_alot_count();
540 #ifdef ASSERT
541 static bool test_metadata_failure();
542 #endif
544 static void deallocate_chunk_a_lot(SpaceManager* sm,
545 size_t chunk_word_size);
546 static void deallocate_block_a_lot(SpaceManager* sm,
547 size_t chunk_word_size);
549 };
551 int Metadebug::_deallocate_block_a_lot_count = 0;
552 int Metadebug::_deallocate_chunk_a_lot_count = 0;
553 int Metadebug::_allocation_fail_alot_count = 0;
555 // SpaceManager - used by Metaspace to handle allocations
556 class SpaceManager : public CHeapObj<mtClass> {
557 friend class Metaspace;
558 friend class Metadebug;
560 private:
562 // Protects allocations and contains() queries.
563 Mutex* const _lock;
565 // Type of metadata allocated.
566 Metaspace::MetadataType _mdtype;
568 // Chunk related size
569 size_t _medium_chunk_bunch;
571 // List of chunks in use by this SpaceManager. Allocations
572 // are done from the current chunk. The list is used for deallocating
573 // chunks when the SpaceManager is freed.
574 Metachunk* _chunks_in_use[NumberOfInUseLists];
575 Metachunk* _current_chunk;
577 // Virtual space where allocation comes from.
578 VirtualSpaceList* _vs_list;
580 // Number of small chunks to allocate to a manager
581 // If class space manager, small chunks are unlimited
582 static uint const _small_chunk_limit;
584 // Sum of all space allocated in blocks (Metablocks)
585 size_t _allocated_blocks_words;
587 // Sum of the sizes of all allocated chunks, and their count
588 size_t _allocated_chunks_words;
589 size_t _allocated_chunks_count;
591 // Free lists of blocks are per SpaceManager since they
592 // are assumed to be in chunks in use by the SpaceManager
593 // and all chunks in use by a SpaceManager are freed when
594 // the class loader using the SpaceManager is collected.
595 BlockFreelist _block_freelists;
597 // protects virtualspace and chunk expansions
598 static const char* _expand_lock_name;
599 static const int _expand_lock_rank;
600 static Mutex* const _expand_lock;
602 private:
603 // Accessors
604 Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
605 void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
607 BlockFreelist* block_freelists() const {
608 return (BlockFreelist*) &_block_freelists;
609 }
611 Metaspace::MetadataType mdtype() { return _mdtype; }
612 VirtualSpaceList* vs_list() const { return _vs_list; }
614 Metachunk* current_chunk() const { return _current_chunk; }
615 void set_current_chunk(Metachunk* v) {
616 _current_chunk = v;
617 }
619 Metachunk* find_current_chunk(size_t word_size);
621 // Add chunk to the list of chunks in use
622 void add_chunk(Metachunk* v, bool make_current);
624 Mutex* lock() const { return _lock; }
626 const char* chunk_size_name(ChunkIndex index) const;
628 protected:
629 void initialize();
631 public:
632 SpaceManager(Metaspace::MetadataType mdtype,
633 Mutex* lock,
634 VirtualSpaceList* vs_list);
635 ~SpaceManager();
637 enum ChunkMultiples {
638 MediumChunkMultiple = 4
639 };
641 // Accessors
642 size_t specialized_chunk_size() { return SpecializedChunk; }
643 size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
644 size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
645 size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
647 size_t allocated_blocks_words() const { return _allocated_blocks_words; }
648 size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
649 size_t allocated_chunks_words() const { return _allocated_chunks_words; }
650 size_t allocated_chunks_count() const { return _allocated_chunks_count; }
652 bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
654 static Mutex* expand_lock() { return _expand_lock; }
656 // Increment the per Metaspace and global running sums for Metachunks
657 // by the given size. This is used when a Metachunk is added to
658 // the in-use list.
659 void inc_size_metrics(size_t words);
660 // Increment the per Metaspace and global running sums for Metablocks by the given
661 // size. This is used when a Metablock is allocated.
662 void inc_used_metrics(size_t words);
663 // Remove this SpaceManager's portion of the running sums. That is,
664 // the global running sums for the Metachunks and Metablocks are
665 // decremented for all the Metachunks in use by this SpaceManager.
666 void dec_total_from_size_metrics();
668 // Set the sizes for the initial chunks.
669 void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
670 size_t* chunk_word_size,
671 size_t* class_chunk_word_size);
673 size_t sum_capacity_in_chunks_in_use() const;
674 size_t sum_used_in_chunks_in_use() const;
675 size_t sum_free_in_chunks_in_use() const;
676 size_t sum_waste_in_chunks_in_use() const;
677 size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;
679 size_t sum_count_in_chunks_in_use();
680 size_t sum_count_in_chunks_in_use(ChunkIndex i);
682 Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);
684 // Block allocation and deallocation.
685 // Allocates a block from the current chunk
686 MetaWord* allocate(size_t word_size);
688 // Helper for allocations
689 MetaWord* allocate_work(size_t word_size);
691 // Returns a block to the per manager freelist
692 void deallocate(MetaWord* p, size_t word_size);
694 // Based on the allocation size and a minimum chunk size,
695 // compute the chunk size to use when expanding space for chunk allocation.
696 size_t calc_chunk_size(size_t allocation_word_size);
698 // Called when an allocation from the current chunk fails.
699 // Gets a new chunk (may require getting a new virtual space),
700 // and allocates from that chunk.
701 MetaWord* grow_and_allocate(size_t word_size);
703 // debugging support.
705 void dump(outputStream* const out) const;
706 void print_on(outputStream* st) const;
707 void locked_print_chunks_in_use_on(outputStream* st) const;
709 void verify();
710 void verify_chunk_size(Metachunk* chunk);
711 NOT_PRODUCT(void mangle_freed_chunks();)
712 #ifdef ASSERT
713 void verify_allocated_blocks_words();
714 #endif
716 size_t get_raw_word_size(size_t word_size) {
717 // If only the dictionary is going to be used (i.e., no
718 // indexed free list), then there is a minimum size requirement.
719 // MinChunkSize is a placeholder for the real minimum size JJJ
720 size_t byte_size = word_size * BytesPerWord;
722 size_t byte_size_with_overhead = byte_size + Metablock::overhead();
724 size_t raw_bytes_size = MAX2(byte_size_with_overhead,
725 Metablock::min_block_byte_size());
726 raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
727 size_t raw_word_size = raw_bytes_size / BytesPerWord;
728 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
730 return raw_word_size;
731 }
732 };
734 uint const SpaceManager::_small_chunk_limit = 4;
736 const char* SpaceManager::_expand_lock_name =
737 "SpaceManager chunk allocation lock";
738 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
739 Mutex* const SpaceManager::_expand_lock =
740 new Mutex(SpaceManager::_expand_lock_rank,
741 SpaceManager::_expand_lock_name,
742 Mutex::_allow_vm_block_flag);
744 void VirtualSpaceNode::inc_container_count() {
745 assert_lock_strong(SpaceManager::expand_lock());
746 _container_count++;
747 assert(_container_count == container_count_slow(),
748 err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
749 " container_count_slow() " SIZE_FORMAT,
750 _container_count, container_count_slow()));
751 }
753 void VirtualSpaceNode::dec_container_count() {
754 assert_lock_strong(SpaceManager::expand_lock());
755 _container_count--;
756 }
758 #ifdef ASSERT
759 void VirtualSpaceNode::verify_container_count() {
760 assert(_container_count == container_count_slow(),
761 err_msg("Inconsistency in container_count: _container_count " SIZE_FORMAT
762 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
763 }
764 #endif
766 // BlockFreelist methods
768 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
770 BlockFreelist::~BlockFreelist() {
771 if (_dictionary != NULL) {
772 if (Verbose && TraceMetadataChunkAllocation) {
773 _dictionary->print_free_lists(gclog_or_tty);
774 }
775 delete _dictionary;
776 }
777 }
779 Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
780 Metablock* block = (Metablock*) p;
781 block->set_word_size(word_size);
782 block->set_prev(NULL);
783 block->set_next(NULL);
785 return block;
786 }
788 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
789 Metablock* free_chunk = initialize_free_chunk(p, word_size);
790 if (dictionary() == NULL) {
791 _dictionary = new BlockTreeDictionary();
792 }
793 dictionary()->return_chunk(free_chunk);
794 }
796 MetaWord* BlockFreelist::get_block(size_t word_size) {
797 if (dictionary() == NULL) {
798 return NULL;
799 }
801 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
802 // Dark matter. Too small for dictionary.
803 return NULL;
804 }
806 Metablock* free_block =
807 dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
808 if (free_block == NULL) {
809 return NULL;
810 }
812 return (MetaWord*) free_block;
813 }
815 void BlockFreelist::print_on(outputStream* st) const {
816 if (dictionary() == NULL) {
817 return;
818 }
819 dictionary()->print_free_lists(st);
820 }
822 // VirtualSpaceNode methods
824 VirtualSpaceNode::~VirtualSpaceNode() {
825 _rs.release();
826 #ifdef ASSERT
827 size_t word_size = sizeof(*this) / BytesPerWord;
828 Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
829 #endif
830 }
832 size_t VirtualSpaceNode::used_words_in_vs() const {
833 return pointer_delta(top(), bottom(), sizeof(MetaWord));
834 }
836 // Space committed in the VirtualSpace
837 size_t VirtualSpaceNode::capacity_words_in_vs() const {
838 return pointer_delta(end(), bottom(), sizeof(MetaWord));
839 }
841 size_t VirtualSpaceNode::free_words_in_vs() const {
842 return pointer_delta(end(), top(), sizeof(MetaWord));
843 }
845 // Allocates the chunk from the virtual space only.
846 // This interface is also used internally for debugging. Not all
847 // chunks removed here are necessarily used for allocation.
848 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
849 // Bottom of the new chunk
850 MetaWord* chunk_limit = top();
851 assert(chunk_limit != NULL, "Not safe to call this method");
853 if (!is_available(chunk_word_size)) {
854 if (TraceMetadataChunkAllocation) {
855 tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
856 // Dump some information about the virtual space that is nearly full
857 print_on(tty);
858 }
859 return NULL;
860 }
862 // Take the space (bump top on the current virtual space).
863 inc_top(chunk_word_size);
865 // Initialize the chunk
866 Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
867 return result;
868 }
871 // Expand the virtual space (commit more of the reserved space)
872 bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
873 size_t bytes = words * BytesPerWord;
874 bool result = virtual_space()->expand_by(bytes, pre_touch);
875 if (TraceMetavirtualspaceAllocation && !result) {
876 gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
877 "for byte size " SIZE_FORMAT, bytes);
878 virtual_space()->print();
879 }
880 return result;
881 }
883 // Shrink the virtual space (uncommit part of the committed space)
884 bool VirtualSpaceNode::shrink_by(size_t words) {
885 size_t bytes = words * BytesPerWord;
886 virtual_space()->shrink_by(bytes);
887 return true;
888 }
890 // Allocate another chunk from this virtual space; the caller adds it to its chunk list.
892 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
893 assert_lock_strong(SpaceManager::expand_lock());
894 Metachunk* result = take_from_committed(chunk_word_size);
895 if (result != NULL) {
896 inc_container_count();
897 }
898 return result;
899 }
901 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
902 assert_lock_strong(SpaceManager::expand_lock());
904 Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
906 if (new_chunk == NULL) {
907 // Only a small part of the virtualspace is committed when first
908 // allocated so committing more here can be expected.
909 size_t page_size_words = os::vm_page_size() / BytesPerWord;
910 size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
911 page_size_words);
912 expand_by(aligned_expand_vs_by_words, false);
913 new_chunk = get_chunk_vs(chunk_word_size);
914 }
915 return new_chunk;
916 }
918 bool VirtualSpaceNode::initialize() {
920 if (!_rs.is_reserved()) {
921 return false;
922 }
924 // An allocation out of this Virtualspace that is larger
925 // than an initial commit size can waste that initial committed
926 // space.
927 size_t committed_byte_size = 0;
928 bool result = virtual_space()->initialize(_rs, committed_byte_size);
929 if (result) {
930 set_top((MetaWord*)virtual_space()->low());
931 set_reserved(MemRegion((HeapWord*)_rs.base(),
932 (HeapWord*)(_rs.base() + _rs.size())));
934 assert(reserved()->start() == (HeapWord*) _rs.base(),
935 err_msg("Reserved start was not set properly " PTR_FORMAT
936 " != " PTR_FORMAT, reserved()->start(), _rs.base()));
937 assert(reserved()->word_size() == _rs.size() / BytesPerWord,
938 err_msg("Reserved size was not set properly " SIZE_FORMAT
939 " != " SIZE_FORMAT, reserved()->word_size(),
940 _rs.size() / BytesPerWord));
941 }
943 return result;
944 }
946 void VirtualSpaceNode::print_on(outputStream* st) const {
947 size_t used = used_words_in_vs();
948 size_t capacity = capacity_words_in_vs();
949 VirtualSpace* vs = virtual_space();
950 st->print_cr(" space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
951 "[" PTR_FORMAT ", " PTR_FORMAT ", "
952 PTR_FORMAT ", " PTR_FORMAT ")",
953 vs, capacity / K,
954 capacity == 0 ? 0 : used * 100 / capacity,
955 bottom(), top(), end(),
956 vs->high_boundary());
957 }
959 #ifdef ASSERT
960 void VirtualSpaceNode::mangle() {
961 size_t word_size = capacity_words_in_vs();
962 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
963 }
964 #endif // ASSERT
966 // VirtualSpaceList methods
967 // Space allocated from the VirtualSpace
969 VirtualSpaceList::~VirtualSpaceList() {
970 VirtualSpaceListIterator iter(virtual_space_list());
971 while (iter.repeat()) {
972 VirtualSpaceNode* vsl = iter.get_next();
973 delete vsl;
974 }
975 }
977 void VirtualSpaceList::inc_virtual_space_total(size_t v) {
978 assert_lock_strong(SpaceManager::expand_lock());
979 _virtual_space_total = _virtual_space_total + v;
980 }
981 void VirtualSpaceList::dec_virtual_space_total(size_t v) {
982 assert_lock_strong(SpaceManager::expand_lock());
983 _virtual_space_total = _virtual_space_total - v;
984 }
986 void VirtualSpaceList::inc_virtual_space_count() {
987 assert_lock_strong(SpaceManager::expand_lock());
988 _virtual_space_count++;
989 }
990 void VirtualSpaceList::dec_virtual_space_count() {
991 assert_lock_strong(SpaceManager::expand_lock());
992 _virtual_space_count--;
993 }
995 void ChunkManager::remove_chunk(Metachunk* chunk) {
996 size_t word_size = chunk->word_size();
997 ChunkIndex index = list_index(word_size);
998 if (index != HumongousIndex) {
999 free_chunks(index)->remove_chunk(chunk);
1000 } else {
1001 humongous_dictionary()->remove_chunk(chunk);
1002 }
1004 // Chunk is being removed from the chunks free list.
1005 dec_free_chunks_total(chunk->capacity_word_size());
1006 }
1008 // Walk the list of VirtualSpaceNodes and delete
1009 // nodes with a 0 container_count. Remove Metachunks in
1010 // the node from their respective freelists.
1011 void VirtualSpaceList::purge() {
1012 assert_lock_strong(SpaceManager::expand_lock());
1013 // Don't use a VirtualSpaceListIterator because this
1014 // list is being changed and a straightforward use of an iterator is not safe.
1015 VirtualSpaceNode* purged_vsl = NULL;
1016 VirtualSpaceNode* prev_vsl = virtual_space_list();
1017 VirtualSpaceNode* next_vsl = prev_vsl;
1018 while (next_vsl != NULL) {
1019 VirtualSpaceNode* vsl = next_vsl;
1020 next_vsl = vsl->next();
1021 // Don't free the current virtual space since it will likely
1022 // be needed soon.
1023 if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
1024 // Unlink it from the list
1025 if (prev_vsl == vsl) {
1026 // This is the case of the current node being the first node.
1027 assert(vsl == virtual_space_list(), "Expected to be the first node");
1028 set_virtual_space_list(vsl->next());
1029 } else {
1030 prev_vsl->set_next(vsl->next());
1031 }
1033 vsl->purge(chunk_manager());
1034 dec_virtual_space_total(vsl->reserved()->word_size());
1035 dec_virtual_space_count();
1036 purged_vsl = vsl;
1037 delete vsl;
1038 } else {
1039 prev_vsl = vsl;
1040 }
1041 }
1042 #ifdef ASSERT
1043 if (purged_vsl != NULL) {
1044 // List should be stable enough to use an iterator here.
1045 VirtualSpaceListIterator iter(virtual_space_list());
1046 while (iter.repeat()) {
1047 VirtualSpaceNode* vsl = iter.get_next();
1048 assert(vsl != purged_vsl, "Purge of vsl failed");
1049 }
1050 }
1051 #endif
1052 }
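// Editor's note (assumption -- the caller is not shown in this excerpt):
// purge() is meant to run after a GC has freed class metadata, with the
// expand lock held, e.g.:
//
//   MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
//   space_list->purge();
//
// Deleting VirtualSpaceNodes whose container_count() has dropped to zero is
// how unused reserved space gets released, which appears to be the mechanism
// by which this change (8019845) reclaims memory leaked during class
// redefinition.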
1054 size_t VirtualSpaceList::used_words_sum() {
1055 size_t allocated_by_vs = 0;
1056 VirtualSpaceListIterator iter(virtual_space_list());
1057 while (iter.repeat()) {
1058 VirtualSpaceNode* vsl = iter.get_next();
1059 // Sum used region [bottom, top) in each virtualspace
1060 allocated_by_vs += vsl->used_words_in_vs();
1061 }
1062 assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
1063 err_msg("Total in free chunks " SIZE_FORMAT
1064 " greater than total from virtual_spaces " SIZE_FORMAT,
1065 allocated_by_vs, chunk_manager()->free_chunks_total()));
1066 size_t used =
1067 allocated_by_vs - chunk_manager()->free_chunks_total();
1068 return used;
1069 }
1071 // Space available in all MetadataVirtualspaces allocated
1072 // for metadata. This is the upper limit on the capacity
1073 // of chunks allocated out of all the MetadataVirtualspaces.
1074 size_t VirtualSpaceList::capacity_words_sum() {
1075 size_t capacity = 0;
1076 VirtualSpaceListIterator iter(virtual_space_list());
1077 while (iter.repeat()) {
1078 VirtualSpaceNode* vsl = iter.get_next();
1079 capacity += vsl->capacity_words_in_vs();
1080 }
1081 return capacity;
1082 }
1084 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
1085 _is_class(false),
1086 _virtual_space_list(NULL),
1087 _current_virtual_space(NULL),
1088 _virtual_space_total(0),
1089 _virtual_space_count(0) {
1090 MutexLockerEx cl(SpaceManager::expand_lock(),
1091 Mutex::_no_safepoint_check_flag);
1092 bool initialization_succeeded = grow_vs(word_size);
1094 _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
1095 _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
1096 _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
1097 assert(initialization_succeeded,
1098 " VirtualSpaceList initialization should not fail");
1099 }
1101 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
1102 _is_class(true),
1103 _virtual_space_list(NULL),
1104 _current_virtual_space(NULL),
1105 _virtual_space_total(0),
1106 _virtual_space_count(0) {
1107 MutexLockerEx cl(SpaceManager::expand_lock(),
1108 Mutex::_no_safepoint_check_flag);
1109 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
1110 bool succeeded = class_entry->initialize();
1111 _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
1112 _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
1113 _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
1114 assert(succeeded, " VirtualSpaceList initialization should not fail");
1115 link_vs(class_entry, rs.size()/BytesPerWord);
1116 }
1118 size_t VirtualSpaceList::free_bytes() {
1119 return virtual_space_list()->free_words_in_vs() * BytesPerWord;
1120 }
1122 // Allocate another meta virtual space and add it to the list.
1123 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
1124 assert_lock_strong(SpaceManager::expand_lock());
1125 if (vs_word_size == 0) {
1126 return false;
1127 }
1128 // Reserve the space
1129 size_t vs_byte_size = vs_word_size * BytesPerWord;
1130 assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
1132 // Allocate the meta virtual space and initialize it.
1133 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
1134 if (!new_entry->initialize()) {
1135 delete new_entry;
1136 return false;
1137 } else {
1138 // ensure lock-free iteration sees fully initialized node
1139 OrderAccess::storestore();
1140 link_vs(new_entry, vs_word_size);
1141 return true;
1142 }
1143 }
1145 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
1146 if (virtual_space_list() == NULL) {
1147 set_virtual_space_list(new_entry);
1148 } else {
1149 current_virtual_space()->set_next(new_entry);
1150 }
1151 set_current_virtual_space(new_entry);
1152 inc_virtual_space_total(vs_word_size);
1153 inc_virtual_space_count();
1154 #ifdef ASSERT
1155 new_entry->mangle();
1156 #endif
1157 if (TraceMetavirtualspaceAllocation && Verbose) {
1158 VirtualSpaceNode* vsl = current_virtual_space();
1159 vsl->print_on(tty);
1160 }
1161 }
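// Editor's summary of get_new_chunk() below (no behavior change): the lookup
// order is (1) the global chunk freelist, (2) the current virtual space,
// (3) the current virtual space after committing more memory, and finally
// (4) a brand-new virtual space if this list is allowed to grow; otherwise
// the request fails and the caller is expected to induce a GC.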
1163 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
1164 size_t grow_chunks_by_words,
1165 size_t medium_chunk_bunch) {
1167 // Get a chunk from the chunk freelist
1168 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
1170 if (next != NULL) {
1171 next->container()->inc_container_count();
1172 } else {
1173 // Allocate a chunk out of the current virtual space.
1174 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1175 }
1177 if (next == NULL) {
1178 // Not enough room in current virtual space. Try to commit
1179 // more space.
1180 size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
1181 grow_chunks_by_words);
1182 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1183 size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
1184 page_size_words);
1185 bool vs_expanded =
1186 current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
1187 if (!vs_expanded) {
1188 // Should the capacity of the metaspaces be expanded for
1189 // this allocation? If it's the virtual space for classes and is
1190 // being used for CompressedHeaders, don't allocate a new virtualspace.
1191 if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
1192 // Get another virtual space.
1193 size_t grow_vs_words =
1194 MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
1195 if (grow_vs(grow_vs_words)) {
1196 // Got it. It's on the list now. Get a chunk from it.
1197 next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
1198 }
1199 } else {
1200 // Allocation will fail and induce a GC
1201 if (TraceMetadataChunkAllocation && Verbose) {
1202 gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
1203 " Fail instead of expand the metaspace");
1204 }
1205 }
1206 } else {
1207 // The virtual space expanded, get a new chunk
1208 next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
1209 assert(next != NULL, "Just expanded, should succeed");
1210 }
1211 }
1213 assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
1214 "New chunk is still on some list");
1215 return next;
1216 }
1218 Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
1219 size_t chunk_bunch) {
1220 // Get a chunk from the chunk freelist
1221 Metachunk* new_chunk = get_new_chunk(chunk_word_size,
1222 chunk_word_size,
1223 chunk_bunch);
1224 return new_chunk;
1225 }
1227 void VirtualSpaceList::print_on(outputStream* st) const {
1228 if (TraceMetadataChunkAllocation && Verbose) {
1229 VirtualSpaceListIterator iter(virtual_space_list());
1230 while (iter.repeat()) {
1231 VirtualSpaceNode* node = iter.get_next();
1232 node->print_on(st);
1233 }
1234 }
1235 }
1237 bool VirtualSpaceList::contains(const void *ptr) {
1238 VirtualSpaceNode* list = virtual_space_list();
1239 VirtualSpaceListIterator iter(list);
1240 while (iter.repeat()) {
1241 VirtualSpaceNode* node = iter.get_next();
1242 if (node->reserved()->contains(ptr)) {
1243 return true;
1244 }
1245 }
1246 return false;
1247 }
1250 // MetaspaceGC methods
1252 // VM_CollectForMetadataAllocation is the vm operation used to GC.
1253 // Within the VM operation after the GC the attempt to allocate the metadata
1254 // should succeed. If the GC did not free enough space for the metaspace
1255 // allocation, the HWM is increased so that another virtualspace will be
1256 // allocated for the metadata. With perm gen the increase in the perm
1257 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
1258 // metaspace policy uses those as the small and large steps for the HWM.
1259 //
1260 // After the GC the compute_new_size() for MetaspaceGC is called to
1261 // resize the capacity of the metaspaces. The current implementation
1262 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
1263 // to resize the Java heap by some GC's. New flags can be implemented
1264 // if really needed. MinMetaspaceFreeRatio is used to calculate how much
1265 // free space is desirable in the metaspace capacity to decide how much
1266 // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
1267 // free space is desirable in the metaspace capacity before decreasing
1268 // the HWM.
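// Editor's worked example of the ratios above (illustrative flag values, not
// taken from this change): with MinMetaspaceFreeRatio == 40,
// compute_new_size() raises the HWM (capacity_until_GC) whenever it is below
// used_after_gc / (1 - 0.40), so at least 40% of the HWM is free after the
// expansion; with MaxMetaspaceFreeRatio == 70 it considers shrinking only
// when the HWM exceeds used_after_gc / (1 - 0.70).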
1270 // Calculate the amount to increase the high water mark (HWM).
1271 // Increase by a minimum amount (MinMetaspaceExpansion) so that
1272 // another expansion is not requested too soon. If that is not
1273 // enough to satisfy the allocation (i.e. big enough for a word_size
1274 // allocation), increase by MaxMetaspaceExpansion. If that is still
1275 // not enough, expand by the size of the allocation (word_size) plus
1276 // some.
1277 size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
1278 size_t before_inc = MetaspaceGC::capacity_until_GC();
1279 size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
1280 size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
1281 size_t page_size_words = os::vm_page_size() / BytesPerWord;
1282 size_t size_delta_words = align_size_up(word_size, page_size_words);
1283 size_t delta_words = MAX2(size_delta_words, min_delta_words);
1284 if (delta_words > min_delta_words) {
1285 // Don't want to hit the high water mark on the next
1286 // allocation so make the delta greater than just enough
1287 // for this allocation.
1288 delta_words = MAX2(delta_words, max_delta_words);
1289 if (delta_words > max_delta_words) {
1290 // This allocation is large but the next ones are probably not
1291 // so increase by the minimum.
1292 delta_words = delta_words + min_delta_words;
1293 }
1294 }
1295 return delta_words;
1296 }
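// Editor's summary of the step sizes above (no behavior change): once the
// request is converted to words, a request no larger than
// MinMetaspaceExpansion grows the HWM by MinMetaspaceExpansion; a request
// between MinMetaspaceExpansion and MaxMetaspaceExpansion grows it by
// MaxMetaspaceExpansion; a larger request grows it by the request size plus
// MinMetaspaceExpansion.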
1298 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
1300 // If the user wants a limit, impose one.
1301 // The reason for someone using this flag is to limit reserved space. So
1302 // for non-class virtual space, compare against virtual spaces that are reserved.
1303 // For class virtual space, we only compare against the committed space, not
1304 // reserved space, because this is a larger space prereserved for compressed
1305 // class pointers.
1306 if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
1307 size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
1308 MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
1309 if (real_allocated >= MaxMetaspaceSize) {
1310 return false;
1311 }
1312 }
1314 // Class virtual space should always be expanded. Call GC for the other
1315 // metadata virtual space.
1316 if (vsl == Metaspace::class_space_list()) return true;
1318 // If this is part of an allocation after a GC, expand
1319 // unconditionally.
1320 if (MetaspaceGC::expand_after_GC()) {
1321 return true;
1322 }
1325 // If the capacity is below the minimum capacity, allow the
1326 // expansion. Also set the high-water-mark (capacity_until_GC)
1327 // to that minimum capacity so that a GC will not be induced
1328 // until that minimum capacity is exceeded.
1329 size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
1330 size_t metaspace_size_bytes = MetaspaceSize;
1331 if (committed_capacity_bytes < metaspace_size_bytes ||
1332 capacity_until_GC() == 0) {
1333 set_capacity_until_GC(metaspace_size_bytes);
1334 return true;
1335 } else {
1336 if (committed_capacity_bytes < capacity_until_GC()) {
1337 return true;
1338 } else {
1339 if (TraceMetadataChunkAllocation && Verbose) {
1340 gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT
1341 " capacity_until_GC " SIZE_FORMAT
1342 " allocated_capacity_bytes " SIZE_FORMAT,
1343 word_size,
1344 capacity_until_GC(),
1345 MetaspaceAux::allocated_capacity_bytes());
1346 }
1347 return false;
1348 }
1349 }
1350 }
1354 void MetaspaceGC::compute_new_size() {
1355 assert(_shrink_factor <= 100, "invalid shrink factor");
1356 uint current_shrink_factor = _shrink_factor;
1357 _shrink_factor = 0;
1359 // Until a faster way of calculating the "used" quantity is implemented,
1360 // use "capacity".
1361 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
1362 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
1364 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
1365 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1367 const double min_tmp = used_after_gc / maximum_used_percentage;
1368 size_t minimum_desired_capacity =
1369 (size_t)MIN2(min_tmp, double(max_uintx));
1370 // Don't let the minimum desired capacity fall below the initial metaspace size
1371 minimum_desired_capacity = MAX2(minimum_desired_capacity,
1372 MetaspaceSize);
1374 if (PrintGCDetails && Verbose) {
1375 gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
1376 gclog_or_tty->print_cr(" "
1377 " minimum_free_percentage: %6.2f"
1378 " maximum_used_percentage: %6.2f",
1379 minimum_free_percentage,
1380 maximum_used_percentage);
1381 gclog_or_tty->print_cr(" "
1382 " used_after_gc : %6.1fKB",
1383 used_after_gc / (double) K);
1384 }
1387 size_t shrink_bytes = 0;
1388 if (capacity_until_GC < minimum_desired_capacity) {
1389 // If the HWM (capacity_until_GC) is below the minimum desired
1390 // capacity, raise the HWM.
1391 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1392 // Don't expand unless it's significant
1393 if (expand_bytes >= MinMetaspaceExpansion) {
1394 MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
1395 }
1396 if (PrintGCDetails && Verbose) {
1397 size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC();
1398 gclog_or_tty->print_cr(" expanding:"
1399 " minimum_desired_capacity: %6.1fKB"
1400 " expand_bytes: %6.1fKB"
1401 " MinMetaspaceExpansion: %6.1fKB"
1402 " new metaspace HWM: %6.1fKB",
1403 minimum_desired_capacity / (double) K,
1404 expand_bytes / (double) K,
1405 MinMetaspaceExpansion / (double) K,
1406 new_capacity_until_GC / (double) K);
1407 }
1408 return;
1409 }
1411 // No expansion, now see if we want to shrink
1412 // We would never want to shrink more than this
1413 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1414 assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
1415 max_shrink_bytes));
1417 // Should shrinking be considered?
1418 if (MaxMetaspaceFreeRatio < 100) {
1419 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1420 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1421 const double max_tmp = used_after_gc / minimum_used_percentage;
1422 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1423 maximum_desired_capacity = MAX2(maximum_desired_capacity,
1424 MetaspaceSize);
1425 if (PrintGCDetails && Verbose) {
1426 gclog_or_tty->print_cr(" "
1427 " maximum_free_percentage: %6.2f"
1428 " minimum_used_percentage: %6.2f",
1429 maximum_free_percentage,
1430 minimum_used_percentage);
1431 gclog_or_tty->print_cr(" "
1432 " minimum_desired_capacity: %6.1fKB"
1433 " maximum_desired_capacity: %6.1fKB",
1434 minimum_desired_capacity / (double) K,
1435 maximum_desired_capacity / (double) K);
1436 }
1438 assert(minimum_desired_capacity <= maximum_desired_capacity,
1439 "sanity check");
1441 if (capacity_until_GC > maximum_desired_capacity) {
1442 // Capacity too large, compute shrinking size
1443 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1444 // We don't want shrink all the way back to initSize if people call
1445 // System.gc(), because some programs do that between "phases" and then
1446 // we'd just have to grow the heap up again for the next phase. So we
1447 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1448 // on the third call, and 100% by the fourth call. But if we recompute
1449 // size without shrinking, it goes back to 0%.
1450 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
1451 assert(shrink_bytes <= max_shrink_bytes,
1452 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
1453 shrink_bytes, max_shrink_bytes));
1454 if (current_shrink_factor == 0) {
1455 _shrink_factor = 10;
1456 } else {
1457 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
1458 }
1459 if (PrintGCDetails && Verbose) {
1460 gclog_or_tty->print_cr(" "
1461 " shrinking:"
1462 " initSize: %.1fK"
1463 " maximum_desired_capacity: %.1fK",
1464 MetaspaceSize / (double) K,
1465 maximum_desired_capacity / (double) K);
1466 gclog_or_tty->print_cr(" "
1467 " shrink_bytes: %.1fK"
1468 " current_shrink_factor: %d"
1469 " new shrink factor: %d"
1470 " MinMetaspaceExpansion: %.1fK",
1471 shrink_bytes / (double) K,
1472 current_shrink_factor,
1473 _shrink_factor,
1474 MinMetaspaceExpansion / (double) K);
1475 }
1476 }
1477 }
1479 // Don't shrink unless it's significant
1480 if (shrink_bytes >= MinMetaspaceExpansion &&
1481 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
1482 MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
1483 }
1484 }
1486 // Metadebug methods
1488 void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
1489 size_t chunk_word_size){
1490 #ifdef ASSERT
1491 VirtualSpaceList* vsl = sm->vs_list();
1492 if (MetaDataDeallocateALot &&
1493 Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1494 Metadebug::reset_deallocate_chunk_a_lot_count();
1495 for (uint i = 0; i < metadata_deallocate_a_lot_chunk; i++) {
1496 Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
1497 if (dummy_chunk == NULL) {
1498 break;
1499 }
1500 vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
1502 if (TraceMetadataChunkAllocation && Verbose) {
1503 gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: " SIZE_FORMAT ") ",
1504 sm->sum_count_in_chunks_in_use());
1505 dummy_chunk->print_on(gclog_or_tty);
1506 gclog_or_tty->print_cr(" Free chunks total " SIZE_FORMAT " count " SIZE_FORMAT,
1507 vsl->chunk_manager()->free_chunks_total(),
1508 vsl->chunk_manager()->free_chunks_count());
1509 }
1510 }
1511 } else {
1512 Metadebug::inc_deallocate_chunk_a_lot_count();
1513 }
1514 #endif
1515 }
1517 void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
1518 size_t raw_word_size){
1519 #ifdef ASSERT
1520 if (MetaDataDeallocateALot &&
1521 Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
1522 Metadebug::set_deallocate_block_a_lot_count(0);
1523 for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
1524 MetaWord* dummy_block = sm->allocate_work(raw_word_size);
1525 if (dummy_block == 0) {
1526 break;
1527 }
1528 sm->deallocate(dummy_block, raw_word_size);
1529 }
1530 } else {
1531 Metadebug::inc_deallocate_block_a_lot_count();
1532 }
1533 #endif
1534 }
1536 void Metadebug::init_allocation_fail_alot_count() {
1537 if (MetadataAllocationFailALot) {
1538 _allocation_fail_alot_count =
1539 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1540 }
1541 }
1543 #ifdef ASSERT
1544 bool Metadebug::test_metadata_failure() {
1545 if (MetadataAllocationFailALot &&
1546 Threads::is_vm_complete()) {
1547 if (_allocation_fail_alot_count > 0) {
1548 _allocation_fail_alot_count--;
1549 } else {
1550 if (TraceMetadataChunkAllocation && Verbose) {
1551 gclog_or_tty->print_cr("Metadata allocation failing for "
1552 "MetadataAllocationFailALot");
1553 }
1554 init_allocation_fail_alot_count();
1555 return true;
1556 }
1557 }
1558 return false;
1559 }
1560 #endif
1562 // ChunkManager methods
1564 size_t ChunkManager::free_chunks_total() {
1565 return _free_chunks_total;
1566 }
1568 size_t ChunkManager::free_chunks_total_in_bytes() {
1569 return free_chunks_total() * BytesPerWord;
1570 }
1572 size_t ChunkManager::free_chunks_count() {
1573 #ifdef ASSERT
1574 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1575 MutexLockerEx cl(SpaceManager::expand_lock(),
1576 Mutex::_no_safepoint_check_flag);
1577 // This lock is only needed in debug because the verification
1578 // of the _free_chunks_totals walks the list of free chunks
1579 slow_locked_verify_free_chunks_count();
1580 }
1581 #endif
1582 return _free_chunks_count;
1583 }
1585 void ChunkManager::locked_verify_free_chunks_total() {
1586 assert_lock_strong(SpaceManager::expand_lock());
1587 assert(sum_free_chunks() == _free_chunks_total,
1588 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1589 " same as sum " SIZE_FORMAT, _free_chunks_total,
1590 sum_free_chunks()));
1591 }
1593 void ChunkManager::verify_free_chunks_total() {
1594 MutexLockerEx cl(SpaceManager::expand_lock(),
1595 Mutex::_no_safepoint_check_flag);
1596 locked_verify_free_chunks_total();
1597 }
1599 void ChunkManager::locked_verify_free_chunks_count() {
1600 assert_lock_strong(SpaceManager::expand_lock());
1601 assert(sum_free_chunks_count() == _free_chunks_count,
1602 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1603 " same as sum " SIZE_FORMAT, _free_chunks_count,
1604 sum_free_chunks_count()));
1605 }
1607 void ChunkManager::verify_free_chunks_count() {
1608 #ifdef ASSERT
1609 MutexLockerEx cl(SpaceManager::expand_lock(),
1610 Mutex::_no_safepoint_check_flag);
1611 locked_verify_free_chunks_count();
1612 #endif
1613 }
1615 void ChunkManager::verify() {
1616 MutexLockerEx cl(SpaceManager::expand_lock(),
1617 Mutex::_no_safepoint_check_flag);
1618 locked_verify();
1619 }
1621 void ChunkManager::locked_verify() {
1622 locked_verify_free_chunks_count();
1623 locked_verify_free_chunks_total();
1624 }
1626 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1627 assert_lock_strong(SpaceManager::expand_lock());
1628 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1629 _free_chunks_total, _free_chunks_count);
1630 }
1632 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1633 assert_lock_strong(SpaceManager::expand_lock());
1634 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1635 sum_free_chunks(), sum_free_chunks_count());
1636 }
1637 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1638 return &_free_chunks[index];
1639 }
1641 // The following methods, which sum the free chunk lists, are used by
1642 // printing methods that run in product builds.
1643 size_t ChunkManager::sum_free_chunks() {
1644 assert_lock_strong(SpaceManager::expand_lock());
1645 size_t result = 0;
1646 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1647 ChunkList* list = free_chunks(i);
1649 if (list == NULL) {
1650 continue;
1651 }
1653 result = result + list->count() * list->size();
1654 }
1655 result = result + humongous_dictionary()->total_size();
1656 return result;
1657 }
1659 size_t ChunkManager::sum_free_chunks_count() {
1660 assert_lock_strong(SpaceManager::expand_lock());
1661 size_t count = 0;
1662 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1663 ChunkList* list = free_chunks(i);
1664 if (list == NULL) {
1665 continue;
1666 }
1667 count = count + list->count();
1668 }
1669 count = count + humongous_dictionary()->total_free_blocks();
1670 return count;
1671 }
1673 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1674 ChunkIndex index = list_index(word_size);
1675 assert(index < HumongousIndex, "No humongous list");
1676 return free_chunks(index);
1677 }
1679 void ChunkManager::free_chunks_put(Metachunk* chunk) {
1680 assert_lock_strong(SpaceManager::expand_lock());
1681 ChunkList* free_list = find_free_chunks_list(chunk->word_size());
1682 chunk->set_next(free_list->head());
1683 free_list->set_head(chunk);
1684 // chunk is being returned to the chunk free list
1685 inc_free_chunks_total(chunk->capacity_word_size());
1686 slow_locked_verify();
1687 }
1689 void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
1690 // The deallocation of a chunk originates in the freelist
1691 // management code for a Metaspace and does not hold the
1692 // lock.
1693 assert(chunk != NULL, "Deallocating NULL");
1694 assert_lock_strong(SpaceManager::expand_lock());
1695 slow_locked_verify();
1696 if (TraceMetadataChunkAllocation) {
1697 tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
1698 PTR_FORMAT " size " SIZE_FORMAT,
1699 chunk, chunk->word_size());
1700 }
1701 free_chunks_put(chunk);
1702 }
1704 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1705 assert_lock_strong(SpaceManager::expand_lock());
1707 slow_locked_verify();
1709 Metachunk* chunk = NULL;
1710 if (list_index(word_size) != HumongousIndex) {
1711 ChunkList* free_list = find_free_chunks_list(word_size);
1712 assert(free_list != NULL, "Sanity check");
1714 chunk = free_list->head();
1715 debug_only(Metachunk* debug_head = chunk;)
1717 if (chunk == NULL) {
1718 return NULL;
1719 }
1721 // Remove the chunk as the head of the list.
1722 free_list->remove_chunk(chunk);
1724 // Chunk is being removed from the chunks free list.
1725 dec_free_chunks_total(chunk->capacity_word_size());
1727 if (TraceMetadataChunkAllocation && Verbose) {
1728 tty->print_cr("ChunkManager::free_chunks_get: free_list "
1729 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1730 free_list, chunk, chunk->word_size());
1731 }
1732 } else {
1733 chunk = humongous_dictionary()->get_chunk(
1734 word_size,
1735 FreeBlockDictionary<Metachunk>::atLeast);
1737 if (chunk != NULL) {
1738 if (TraceMetadataHumongousAllocation) {
1739 size_t waste = chunk->word_size() - word_size;
1740 tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
1741 " for requested size " SIZE_FORMAT
1742 " waste " SIZE_FORMAT,
1743 chunk->word_size(), word_size, waste);
1744 }
1745 // Chunk is being removed from the chunks free list.
1746 dec_free_chunks_total(chunk->capacity_word_size());
1747 } else {
1748 return NULL;
1749 }
1750 }
1752 // Remove it from the links to this freelist
1753 chunk->set_next(NULL);
1754 chunk->set_prev(NULL);
1755 #ifdef ASSERT
1756 // Chunk is no longer on any freelist. Setting it to false makes
1757 // container_count_slow() work.
1758 chunk->set_is_free(false);
1759 #endif
1760 slow_locked_verify();
1761 return chunk;
1762 }
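// Note on the two paths above: fixed-size requests (specialized, small,
// medium) are satisfied from the head of the matching free list, so no
// size search is needed.  Only humongous requests search the dictionary,
// and because the lookup uses FreeBlockDictionary::atLeast a larger chunk
// than requested may be handed out; the difference is reported as waste
// when TraceMetadataHumongousAllocation is enabled.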
1764 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1765 assert_lock_strong(SpaceManager::expand_lock());
1766 slow_locked_verify();
1768 // Take from the beginning of the list
1769 Metachunk* chunk = free_chunks_get(word_size);
1770 if (chunk == NULL) {
1771 return NULL;
1772 }
1774 assert((word_size <= chunk->word_size()) ||
1775 (list_index(chunk->word_size()) == HumongousIndex),
1776 "Non-humongous variable sized chunk");
1777 if (TraceMetadataChunkAllocation) {
1778 size_t list_count;
1779 if (list_index(word_size) < HumongousIndex) {
1780 ChunkList* list = find_free_chunks_list(word_size);
1781 list_count = list->count();
1782 } else {
1783 list_count = humongous_dictionary()->total_count();
1784 }
1785 tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1786 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1787 this, chunk, chunk->word_size(), list_count);
1788 locked_print_free_chunks(tty);
1789 }
1791 return chunk;
1792 }
1794 void ChunkManager::print_on(outputStream* out) {
1795 if (PrintFLSStatistics != 0) {
1796 humongous_dictionary()->report_statistics();
1797 }
1798 }
1800 // SpaceManager methods
1802 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1803 size_t* chunk_word_size,
1804 size_t* class_chunk_word_size) {
1805 switch (type) {
1806 case Metaspace::BootMetaspaceType:
1807 *chunk_word_size = Metaspace::first_chunk_word_size();
1808 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1809 break;
1810 case Metaspace::ROMetaspaceType:
1811 *chunk_word_size = SharedReadOnlySize / wordSize;
1812 *class_chunk_word_size = ClassSpecializedChunk;
1813 break;
1814 case Metaspace::ReadWriteMetaspaceType:
1815 *chunk_word_size = SharedReadWriteSize / wordSize;
1816 *class_chunk_word_size = ClassSpecializedChunk;
1817 break;
1818 case Metaspace::AnonymousMetaspaceType:
1819 case Metaspace::ReflectionMetaspaceType:
1820 *chunk_word_size = SpecializedChunk;
1821 *class_chunk_word_size = ClassSpecializedChunk;
1822 break;
1823 default:
1824 *chunk_word_size = SmallChunk;
1825 *class_chunk_word_size = ClassSmallChunk;
1826 break;
1827 }
1828 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1829 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1830 " class " SIZE_FORMAT,
1831 *chunk_word_size, *class_chunk_word_size));
1832 }
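// Illustration of the mapping above, using the ChunkSizes defined earlier:
// an ordinary (default case) metaspace starts with a 512-word data chunk
// and a 256-word class chunk, while anonymous and reflection metaspaces
// start with the 128-word specialized chunks, presumably because such
// metaspaces usually hold very little metadata.  The boot and shared
// archive (RO/RW) types use sizes derived from first_chunk_word_size()
// and the SharedReadOnly/ReadWrite sizes respectively.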
1834 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1835 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1836 size_t free = 0;
1837 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1838 Metachunk* chunk = chunks_in_use(i);
1839 while (chunk != NULL) {
1840 free += chunk->free_word_size();
1841 chunk = chunk->next();
1842 }
1843 }
1844 return free;
1845 }
1847 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1848 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1849 size_t result = 0;
1850 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1851 result += sum_waste_in_chunks_in_use(i);
1852 }
1854 return result;
1855 }
1857 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1858 size_t result = 0;
1859 Metachunk* chunk = chunks_in_use(index);
1860 // Count the free space in all the chunks but not the
1861 // current chunk, from which allocations are still being made.
1862 while (chunk != NULL) {
1863 if (chunk != current_chunk()) {
1864 result += chunk->free_word_size();
1865 }
1866 chunk = chunk->next();
1867 }
1868 return result;
1869 }
1871 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1872 // For CMS use "allocated_chunks_words()", which does not need the
1873 // Metaspace lock.  For the other collectors, sum over the
1874 // lists.  Summing over the lists (sum_capacity_in_chunks_in_use())
1875 // is too expensive to use in product builds, so
1876 // allocated_chunks_words() should be used there, but keeping both
1877 // paths allows checking that allocated_chunks_words() returns the
1878 // same value as sum_capacity_in_chunks_in_use(), which is the
1879 // definitive answer.
1880 if (UseConcMarkSweepGC) {
1881 return allocated_chunks_words();
1882 } else {
1883 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1884 size_t sum = 0;
1885 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1886 Metachunk* chunk = chunks_in_use(i);
1887 while (chunk != NULL) {
1888 sum += chunk->capacity_word_size();
1889 chunk = chunk->next();
1890 }
1891 }
1892 return sum;
1893 }
1894 }
1896 size_t SpaceManager::sum_count_in_chunks_in_use() {
1897 size_t count = 0;
1898 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1899 count = count + sum_count_in_chunks_in_use(i);
1900 }
1902 return count;
1903 }
1905 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1906 size_t count = 0;
1907 Metachunk* chunk = chunks_in_use(i);
1908 while (chunk != NULL) {
1909 count++;
1910 chunk = chunk->next();
1911 }
1912 return count;
1913 }
1916 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1917 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1918 size_t used = 0;
1919 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1920 Metachunk* chunk = chunks_in_use(i);
1921 while (chunk != NULL) {
1922 used += chunk->used_word_size();
1923 chunk = chunk->next();
1924 }
1925 }
1926 return used;
1927 }
1929 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1931 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1932 Metachunk* chunk = chunks_in_use(i);
1933 st->print("SpaceManager: %s " PTR_FORMAT,
1934 chunk_size_name(i), chunk);
1935 if (chunk != NULL) {
1936 st->print_cr(" free " SIZE_FORMAT,
1937 chunk->free_word_size());
1938 } else {
1939 st->print_cr("");
1940 }
1941 }
1943 vs_list()->chunk_manager()->locked_print_free_chunks(st);
1944 vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
1945 }
1947 size_t SpaceManager::calc_chunk_size(size_t word_size) {
1949 // Decide between a small chunk and a medium chunk. Up to
1950 // _small_chunk_limit small chunks can be allocated but
1951 // once a medium chunk has been allocated, no more small
1952 // chunks will be allocated.
1953 size_t chunk_word_size;
1954 if (chunks_in_use(MediumIndex) == NULL &&
1955 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
1956 chunk_word_size = (size_t) small_chunk_size();
1957 if (word_size + Metachunk::overhead() > small_chunk_size()) {
1958 chunk_word_size = medium_chunk_size();
1959 }
1960 } else {
1961 chunk_word_size = medium_chunk_size();
1962 }
1964 // Might still need a humongous chunk. Enforce an
1965 // eight word granularity to facilitate reuse (some
1966 // wastage but better chance of reuse).
1967 size_t if_humongous_sized_chunk =
1968 align_size_up(word_size + Metachunk::overhead(),
1969 HumongousChunkGranularity);
1970 chunk_word_size =
1971 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
1973 assert(!SpaceManager::is_humongous(word_size) ||
1974 chunk_word_size == if_humongous_sized_chunk,
1975 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
1976 " chunk_word_size " SIZE_FORMAT,
1977 word_size, chunk_word_size));
1978 if (TraceMetadataHumongousAllocation &&
1979 SpaceManager::is_humongous(word_size)) {
1980 gclog_or_tty->print_cr("Metadata humongous allocation:");
1981 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
1982 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
1983 chunk_word_size);
1984 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
1985 Metachunk::overhead());
1986 }
1987 return chunk_word_size;
1988 }
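// Worked example of the sizing policy above, assuming a non-class space
// manager where small_chunk_size() == SmallChunk (512 words) and
// medium_chunk_size() == MediumChunk (8 * K words):
//   - while no medium chunk exists and fewer than _small_chunk_limit small
//     chunks are in use, a request with word_size + overhead <= 512 words
//     gets a 512-word chunk;
//   - otherwise the request gets an 8K-word chunk;
//   - a humongous request gets word_size + overhead rounded up to the
//     8-word HumongousChunkGranularity.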
1990 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
1991 assert(vs_list()->current_virtual_space() != NULL,
1992 "Should have been set");
1993 assert(current_chunk() == NULL ||
1994 current_chunk()->allocate(word_size) == NULL,
1995 "Don't need to expand");
1996 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
1998 if (TraceMetadataChunkAllocation && Verbose) {
1999 size_t words_left = 0;
2000 size_t words_used = 0;
2001 if (current_chunk() != NULL) {
2002 words_left = current_chunk()->free_word_size();
2003 words_used = current_chunk()->used_word_size();
2004 }
2005 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2006 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2007 " words left",
2008 word_size, words_used, words_left);
2009 }
2011 // Get another chunk out of the virtual space
2012 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2013 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2015 // If a chunk was available, add it to the in-use chunk list
2016 // and do an allocation from it.
2017 if (next != NULL) {
2018 Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
2019 // Add to this manager's list of chunks in use.
2020 add_chunk(next, false);
2021 return next->allocate(word_size);
2022 }
2023 return NULL;
2024 }
2026 void SpaceManager::print_on(outputStream* st) const {
2028 for (ChunkIndex i = ZeroIndex;
2029 i < NumberOfInUseLists ;
2030 i = next_chunk_index(i) ) {
2031 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
2032 chunks_in_use(i),
2033 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2034 }
2035 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2036 " Humongous " SIZE_FORMAT,
2037 sum_waste_in_chunks_in_use(SmallIndex),
2038 sum_waste_in_chunks_in_use(MediumIndex),
2039 sum_waste_in_chunks_in_use(HumongousIndex));
2040 // block free lists
2041 if (block_freelists() != NULL) {
2042 st->print_cr("total in block free lists " SIZE_FORMAT,
2043 block_freelists()->total_size());
2044 }
2045 }
2047 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2048 Mutex* lock,
2049 VirtualSpaceList* vs_list) :
2050 _vs_list(vs_list),
2051 _mdtype(mdtype),
2052 _allocated_blocks_words(0),
2053 _allocated_chunks_words(0),
2054 _allocated_chunks_count(0),
2055 _lock(lock)
2056 {
2057 initialize();
2058 }
2060 void SpaceManager::inc_size_metrics(size_t words) {
2061 assert_lock_strong(SpaceManager::expand_lock());
2062 // Total of allocated Metachunks and allocated Metachunks count
2063 // for each SpaceManager
2064 _allocated_chunks_words = _allocated_chunks_words + words;
2065 _allocated_chunks_count++;
2066 // Global total of capacity in allocated Metachunks
2067 MetaspaceAux::inc_capacity(mdtype(), words);
2068 // Global total of allocated Metablocks.
2069 // used_words_slow() includes the overhead in each
2070 // Metachunk so include it in the used when the
2071 // Metachunk is first added (so only added once per
2072 // Metachunk).
2073 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2074 }
2076 void SpaceManager::inc_used_metrics(size_t words) {
2077 // Add to the per SpaceManager total
2078 Atomic::add_ptr(words, &_allocated_blocks_words);
2079 // Add to the global total
2080 MetaspaceAux::inc_used(mdtype(), words);
2081 }
2083 void SpaceManager::dec_total_from_size_metrics() {
2084 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2085 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2086 // Also deduct the overhead per Metachunk
2087 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2088 }
2090 void SpaceManager::initialize() {
2091 Metadebug::init_allocation_fail_alot_count();
2092 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2093 _chunks_in_use[i] = NULL;
2094 }
2095 _current_chunk = NULL;
2096 if (TraceMetadataChunkAllocation && Verbose) {
2097 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this);
2098 }
2099 }
2101 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2102 if (chunks == NULL) {
2103 return;
2104 }
2105 ChunkList* list = free_chunks(index);
2106 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2107 assert_lock_strong(SpaceManager::expand_lock());
2108 Metachunk* cur = chunks;
2110 // This returns chunks one at a time. If a new
2111 // class List can be created that is a base class
2112 // of FreeList then something like FreeList::prepend()
2113 // can be used in place of this loop
2114 while (cur != NULL) {
2115 assert(cur->container() != NULL, "Container should have been set");
2116 cur->container()->dec_container_count();
2117 // Capture the next link before it is changed
2118 // by the call to return_chunk_at_head();
2119 Metachunk* next = cur->next();
2120 cur->set_is_free(true);
2121 list->return_chunk_at_head(cur);
2122 cur = next;
2123 }
2124 }
2126 SpaceManager::~SpaceManager() {
2127 // This call takes this->_lock, which can't be done while holding the expand_lock()
2128 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2129 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2130 " allocated_chunks_words() " SIZE_FORMAT,
2131 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2133 MutexLockerEx fcl(SpaceManager::expand_lock(),
2134 Mutex::_no_safepoint_check_flag);
2136 ChunkManager* chunk_manager = vs_list()->chunk_manager();
2138 chunk_manager->slow_locked_verify();
2140 dec_total_from_size_metrics();
2142 if (TraceMetadataChunkAllocation && Verbose) {
2143 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2144 locked_print_chunks_in_use_on(gclog_or_tty);
2145 }
2147 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2148 // is still needed during the freeing of VirtualSpaceNodes.
2150 // Have to update before the chunks_in_use lists are emptied
2151 // below.
2152 chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
2153 sum_count_in_chunks_in_use());
2155 // Add all the chunks in use by this space manager
2156 // to the global list of free chunks.
2158 // Follow each list of chunks-in-use and add them to the
2159 // free lists. Each list is NULL terminated.
2161 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2162 if (TraceMetadataChunkAllocation && Verbose) {
2163 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2164 sum_count_in_chunks_in_use(i),
2165 chunk_size_name(i));
2166 }
2167 Metachunk* chunks = chunks_in_use(i);
2168 chunk_manager->return_chunks(i, chunks);
2169 set_chunks_in_use(i, NULL);
2170 if (TraceMetadataChunkAllocation && Verbose) {
2171 gclog_or_tty->print_cr("updated freelist count %d %s",
2172 chunk_manager->free_chunks(i)->count(),
2173 chunk_size_name(i));
2174 }
2175 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2176 }
2178 // The medium chunk case may be optimized by passing the head and
2179 // tail of the medium chunk list to add_at_head(). The tail is often
2180 // the current chunk but there are probably exceptions.
2182 // Humongous chunks
2183 if (TraceMetadataChunkAllocation && Verbose) {
2184 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2185 sum_count_in_chunks_in_use(HumongousIndex),
2186 chunk_size_name(HumongousIndex));
2187 gclog_or_tty->print("Humongous chunk dictionary: ");
2188 }
2189 // Humongous chunks are never the current chunk.
2190 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2192 while (humongous_chunks != NULL) {
2193 #ifdef ASSERT
2194 humongous_chunks->set_is_free(true);
2195 #endif
2196 if (TraceMetadataChunkAllocation && Verbose) {
2197 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2198 humongous_chunks,
2199 humongous_chunks->word_size());
2200 }
2201 assert(humongous_chunks->word_size() == (size_t)
2202 align_size_up(humongous_chunks->word_size(),
2203 HumongousChunkGranularity),
2204 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2205 " granularity %d",
2206 humongous_chunks->word_size(), HumongousChunkGranularity));
2207 Metachunk* next_humongous_chunks = humongous_chunks->next();
2208 humongous_chunks->container()->dec_container_count();
2209 chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
2210 humongous_chunks = next_humongous_chunks;
2211 }
2212 if (TraceMetadataChunkAllocation && Verbose) {
2213 gclog_or_tty->print_cr("");
2214 gclog_or_tty->print_cr("updated dictionary count %d %s",
2215 chunk_manager->humongous_dictionary()->total_count(),
2216 chunk_size_name(HumongousIndex));
2217 }
2218 chunk_manager->slow_locked_verify();
2219 }
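// A brief summary of the teardown above: the specialized, small and medium
// chunks go back onto the ChunkManager's fixed-size free lists, while
// humongous chunks, being variably sized, are returned to the
// BinaryTreeDictionary.  The free-chunk totals are incremented once up
// front (inc_free_chunks_total) rather than per chunk as the lists are
// walked.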
2221 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2222 switch (index) {
2223 case SpecializedIndex:
2224 return "Specialized";
2225 case SmallIndex:
2226 return "Small";
2227 case MediumIndex:
2228 return "Medium";
2229 case HumongousIndex:
2230 return "Humongous";
2231 default:
2232 return NULL;
2233 }
2234 }
2236 ChunkIndex ChunkManager::list_index(size_t size) {
2237 switch (size) {
2238 case SpecializedChunk:
2239 assert(SpecializedChunk == ClassSpecializedChunk,
2240 "Need branch for ClassSpecializedChunk");
2241 return SpecializedIndex;
2242 case SmallChunk:
2243 case ClassSmallChunk:
2244 return SmallIndex;
2245 case MediumChunk:
2246 case ClassMediumChunk:
2247 return MediumIndex;
2248 default:
2249 assert(size > MediumChunk || size > ClassMediumChunk,
2250 "Not a humongous chunk");
2251 return HumongousIndex;
2252 }
2253 }
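// In other words, the index is determined purely by the fixed ChunkSizes:
// 128-word chunks map to SpecializedIndex, 256- and 512-word chunks to
// SmallIndex, 4K- and 8K-word chunks to MediumIndex, and any other size is
// treated as humongous.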
2255 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2256 assert_lock_strong(_lock);
2257 size_t raw_word_size = get_raw_word_size(word_size);
2258 size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
2259 assert(raw_word_size >= min_size,
2260 err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
2261 block_freelists()->return_block(p, raw_word_size);
2262 }
2264 // Adds a chunk to the list of chunks in use.
2265 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2267 assert(new_chunk != NULL, "Should not be NULL");
2268 assert(new_chunk->next() == NULL, "Should not be on a list");
2270 new_chunk->reset_empty();
2272 // Find the correct list and set the current
2273 // chunk for that list.
2274 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2276 if (index != HumongousIndex) {
2277 set_current_chunk(new_chunk);
2278 new_chunk->set_next(chunks_in_use(index));
2279 set_chunks_in_use(index, new_chunk);
2280 } else {
2281 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2282 // small, so small will be null. Link this first chunk as the current
2283 // chunk.
2284 if (make_current) {
2285 // Set as the current chunk but otherwise treat as a humongous chunk.
2286 set_current_chunk(new_chunk);
2287 }
2288 // Link at head. The _current_chunk only points to a humongous chunk for
2289 // the null class loader metaspace (class and data virtual space managers)
2290 // that allocates humongous chunks, so it will not point to the tail
2291 // of the humongous chunks list.
2292 new_chunk->set_next(chunks_in_use(HumongousIndex));
2293 set_chunks_in_use(HumongousIndex, new_chunk);
2295 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2296 }
2298 // Add to the running sum of capacity
2299 inc_size_metrics(new_chunk->word_size());
2301 assert(new_chunk->is_empty(), "Not ready for reuse");
2302 if (TraceMetadataChunkAllocation && Verbose) {
2303 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2304 sum_count_in_chunks_in_use());
2305 new_chunk->print_on(gclog_or_tty);
2306 if (vs_list() != NULL) {
2307 vs_list()->chunk_manager()->locked_print_free_chunks(tty);
2308 }
2309 }
2310 }
2312 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2313 size_t grow_chunks_by_words) {
2315 Metachunk* next = vs_list()->get_new_chunk(word_size,
2316 grow_chunks_by_words,
2317 medium_chunk_bunch());
2319 if (TraceMetadataHumongousAllocation &&
2320 SpaceManager::is_humongous(next->word_size())) {
2321 gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT,
2322 next->word_size());
2323 }
2325 return next;
2326 }
2328 MetaWord* SpaceManager::allocate(size_t word_size) {
2329 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2331 size_t raw_word_size = get_raw_word_size(word_size);
2332 BlockFreelist* fl = block_freelists();
2333 MetaWord* p = NULL;
2334 // Allocation from the dictionary is expensive in the sense that
2335 // the dictionary has to be searched for a size. Don't allocate
2336 // from the dictionary until it starts to get fat. Is this
2337 // a reasonable policy? Maybe a skinny dictionary is fast enough
2338 // for allocations. Do some profiling. JJJ
2339 if (fl->total_size() > allocation_from_dictionary_limit) {
2340 p = fl->get_block(raw_word_size);
2341 }
2342 if (p == NULL) {
2343 p = allocate_work(raw_word_size);
2344 }
2345 Metadebug::deallocate_block_a_lot(this, raw_word_size);
2347 return p;
2348 }
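// Sketch of the policy above: the block freelist (dictionary) is ignored
// until the total size of freed blocks exceeds
// allocation_from_dictionary_limit (64 * K); below that threshold every
// request goes straight to allocate_work() and bumps the current chunk,
// and a dictionary miss above the threshold also falls through to
// allocate_work().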
2350 // Returns the address of the space allocated for "word_size".
2351 // This method does not know about blocks (Metablocks).
2352 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2353 assert_lock_strong(_lock);
2354 #ifdef ASSERT
2355 if (Metadebug::test_metadata_failure()) {
2356 return NULL;
2357 }
2358 #endif
2359 // Is there space in the current chunk?
2360 MetaWord* result = NULL;
2362 // For DumpSharedSpaces, only allocate out of the current chunk which is
2363 // never null because we gave it the size we wanted. Caller reports out
2364 // of memory if this returns null.
2365 if (DumpSharedSpaces) {
2366 assert(current_chunk() != NULL, "should never happen");
2367 inc_used_metrics(word_size);
2368 return current_chunk()->allocate(word_size); // caller handles null result
2369 }
2370 if (current_chunk() != NULL) {
2371 result = current_chunk()->allocate(word_size);
2372 }
2374 if (result == NULL) {
2375 result = grow_and_allocate(word_size);
2376 }
2377 if (result != NULL) {
2378 inc_used_metrics(word_size);
2379 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2380 "Head of the list is being allocated");
2381 }
2383 return result;
2384 }
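// Note: outside the DumpSharedSpaces path, inc_used_metrics() is only
// called when the allocation succeeds, so the per-manager
// _allocated_blocks_words and the global MetaspaceAux used totals account
// only for words actually handed out (plus the per-chunk overhead added in
// inc_size_metrics()).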
2386 void SpaceManager::verify() {
2387 // If there are blocks in the dictionary, then
2388 // verification of chunks does not work since
2389 // being in the dictionary alters a chunk.
2390 if (block_freelists()->total_size() == 0) {
2391 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2392 Metachunk* curr = chunks_in_use(i);
2393 while (curr != NULL) {
2394 curr->verify();
2395 verify_chunk_size(curr);
2396 curr = curr->next();
2397 }
2398 }
2399 }
2400 }
2402 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2403 assert(is_humongous(chunk->word_size()) ||
2404 chunk->word_size() == medium_chunk_size() ||
2405 chunk->word_size() == small_chunk_size() ||
2406 chunk->word_size() == specialized_chunk_size(),
2407 "Chunk size is wrong");
2408 return;
2409 }
2411 #ifdef ASSERT
2412 void SpaceManager::verify_allocated_blocks_words() {
2413 // Verification is only guaranteed at a safepoint.
2414 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2415 "Verification can fail if the applications is running");
2416 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2417 err_msg("allocation total is not consistent " SIZE_FORMAT
2418 " vs " SIZE_FORMAT,
2419 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2420 }
2422 #endif
2424 void SpaceManager::dump(outputStream* const out) const {
2425 size_t curr_total = 0;
2426 size_t waste = 0;
2427 uint i = 0;
2428 size_t used = 0;
2429 size_t capacity = 0;
2431 // Add up statistics for all chunks in this SpaceManager.
2432 for (ChunkIndex index = ZeroIndex;
2433 index < NumberOfInUseLists;
2434 index = next_chunk_index(index)) {
2435 for (Metachunk* curr = chunks_in_use(index);
2436 curr != NULL;
2437 curr = curr->next()) {
2438 out->print("%d) ", i++);
2439 curr->print_on(out);
2440 if (TraceMetadataChunkAllocation && Verbose) {
2441 block_freelists()->print_on(out);
2442 }
2443 curr_total += curr->word_size();
2444 used += curr->used_word_size();
2445 capacity += curr->capacity_word_size();
2446 waste += curr->free_word_size() + curr->overhead();
2447 }
2448 }
2450 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2451 // Free space isn't wasted.
2452 waste -= free;
2454 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2455 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2456 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2457 }
2459 #ifndef PRODUCT
2460 void SpaceManager::mangle_freed_chunks() {
2461 for (ChunkIndex index = ZeroIndex;
2462 index < NumberOfInUseLists;
2463 index = next_chunk_index(index)) {
2464 for (Metachunk* curr = chunks_in_use(index);
2465 curr != NULL;
2466 curr = curr->next()) {
2467 curr->mangle();
2468 }
2469 }
2470 }
2471 #endif // PRODUCT
2473 // MetaspaceAux
2476 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
2477 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
2479 size_t MetaspaceAux::free_bytes() {
2480 size_t result = 0;
2481 if (Metaspace::class_space_list() != NULL) {
2482 result = result + Metaspace::class_space_list()->free_bytes();
2483 }
2484 if (Metaspace::space_list() != NULL) {
2485 result = result + Metaspace::space_list()->free_bytes();
2486 }
2487 return result;
2488 }
2490 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2491 assert_lock_strong(SpaceManager::expand_lock());
2492 assert(words <= allocated_capacity_words(mdtype),
2493 err_msg("About to decrement below 0: words " SIZE_FORMAT
2494 " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
2495 words, mdtype, allocated_capacity_words(mdtype)));
2496 _allocated_capacity_words[mdtype] -= words;
2497 }
2499 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2500 assert_lock_strong(SpaceManager::expand_lock());
2501 // Needs to be atomic
2502 _allocated_capacity_words[mdtype] += words;
2503 }
2505 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2506 assert(words <= allocated_used_words(mdtype),
2507 err_msg("About to decrement below 0: words " SIZE_FORMAT
2508 " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
2509 words, mdtype, allocated_used_words(mdtype)));
2510 // For CMS, deallocation of the Metaspaces occurs during the
2511 // sweep, which is a concurrent phase. Protection by the expand_lock()
2512 // is not enough since allocation is done on a per-Metaspace basis
2513 // and is protected by the Metaspace lock.
2514 jlong minus_words = (jlong) - (jlong) words;
2515 Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
2516 }
2518 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2519 // _allocated_used_words tracks allocations for
2520 // each piece of metadata. Those allocations are
2521 // generally done concurrently by different application
2522 // threads so must be done atomically.
2523 Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
2524 }
2526 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2527 size_t used = 0;
2528 ClassLoaderDataGraphMetaspaceIterator iter;
2529 while (iter.repeat()) {
2530 Metaspace* msp = iter.get_next();
2531 // Sum allocated_blocks_words for each metaspace
2532 if (msp != NULL) {
2533 used += msp->used_words_slow(mdtype);
2534 }
2535 }
2536 return used * BytesPerWord;
2537 }
2539 size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
2540 size_t free = 0;
2541 ClassLoaderDataGraphMetaspaceIterator iter;
2542 while (iter.repeat()) {
2543 Metaspace* msp = iter.get_next();
2544 if (msp != NULL) {
2545 free += msp->free_words(mdtype);
2546 }
2547 }
2548 return free * BytesPerWord;
2549 }
2551 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2552 // Don't count the space in the freelists. That space will be
2553 // added to the capacity calculation as needed.
2554 size_t capacity = 0;
2555 ClassLoaderDataGraphMetaspaceIterator iter;
2556 while (iter.repeat()) {
2557 Metaspace* msp = iter.get_next();
2558 if (msp != NULL) {
2559 capacity += msp->capacity_words_slow(mdtype);
2560 }
2561 }
2562 return capacity * BytesPerWord;
2563 }
2565 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
2566 size_t reserved = (mdtype == Metaspace::ClassType) ?
2567 Metaspace::class_space_list()->virtual_space_total() :
2568 Metaspace::space_list()->virtual_space_total();
2569 return reserved * BytesPerWord;
2570 }
2572 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
2574 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
2575 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
2576 Metaspace::class_space_list()->chunk_manager() :
2577 Metaspace::space_list()->chunk_manager();
2578 chunk->slow_verify();
2579 return chunk->free_chunks_total();
2580 }
2582 size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
2583 return free_chunks_total(mdtype) * BytesPerWord;
2584 }
2586 size_t MetaspaceAux::free_chunks_total() {
2587 return free_chunks_total(Metaspace::ClassType) +
2588 free_chunks_total(Metaspace::NonClassType);
2589 }
2591 size_t MetaspaceAux::free_chunks_total_in_bytes() {
2592 return free_chunks_total() * BytesPerWord;
2593 }
2595 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2596 gclog_or_tty->print(", [Metaspace:");
2597 if (PrintGCDetails && Verbose) {
2598 gclog_or_tty->print(" " SIZE_FORMAT
2599 "->" SIZE_FORMAT
2600 "(" SIZE_FORMAT ")",
2601 prev_metadata_used,
2602 allocated_used_bytes(),
2603 reserved_in_bytes());
2604 } else {
2605 gclog_or_tty->print(" " SIZE_FORMAT "K"
2606 "->" SIZE_FORMAT "K"
2607 "(" SIZE_FORMAT "K)",
2608 prev_metadata_used / K,
2609 allocated_used_bytes() / K,
2610 reserved_in_bytes()/ K);
2611 }
2613 gclog_or_tty->print("]");
2614 }
2616 // This is printed when PrintGCDetails is enabled.
2617 void MetaspaceAux::print_on(outputStream* out) {
2618 Metaspace::MetadataType ct = Metaspace::ClassType;
2619 Metaspace::MetadataType nct = Metaspace::NonClassType;
2621 out->print_cr(" Metaspace total "
2622 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2623 " reserved " SIZE_FORMAT "K",
2624 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
2626 out->print_cr(" data space "
2627 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2628 " reserved " SIZE_FORMAT "K",
2629 allocated_capacity_bytes(nct)/K,
2630 allocated_used_bytes(nct)/K,
2631 reserved_in_bytes(nct)/K);
2632 out->print_cr(" class space "
2633 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
2634 " reserved " SIZE_FORMAT "K",
2635 allocated_capacity_bytes(ct)/K,
2636 allocated_used_bytes(ct)/K,
2637 reserved_in_bytes(ct)/K);
2638 }
2640 // Print information for class space and data space separately.
2641 // This is almost the same as above.
2642 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2643 size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
2644 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2645 size_t used_bytes = used_bytes_slow(mdtype);
2646 size_t free_bytes = free_in_bytes(mdtype);
2647 size_t used_and_free = used_bytes + free_bytes +
2648 free_chunks_capacity_bytes;
2649 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2650 "K + unused in chunks " SIZE_FORMAT "K + "
2651 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2652 "K capacity in allocated chunks " SIZE_FORMAT "K",
2653 used_bytes / K,
2654 free_bytes / K,
2655 free_chunks_capacity_bytes / K,
2656 used_and_free / K,
2657 capacity_bytes / K);
2658 // Accounting can only be correct if we got the values during a safepoint
2659 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2660 }
2662 // Print total fragmentation for class and data metaspaces separately
2663 void MetaspaceAux::print_waste(outputStream* out) {
2665 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2666 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2667 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2668 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2670 ClassLoaderDataGraphMetaspaceIterator iter;
2671 while (iter.repeat()) {
2672 Metaspace* msp = iter.get_next();
2673 if (msp != NULL) {
2674 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2675 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2676 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2677 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2678 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2679 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2680 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2682 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2683 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2684 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2685 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2686 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2687 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2688 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2689 }
2690 }
2691 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2692 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2693 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2694 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2695 "large count " SIZE_FORMAT,
2696 specialized_count, specialized_waste, small_count,
2697 small_waste, medium_count, medium_waste, humongous_count);
2698 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2699 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2700 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2701 "large count " SIZE_FORMAT,
2702 cls_specialized_count, cls_specialized_waste,
2703 cls_small_count, cls_small_waste,
2704 cls_medium_count, cls_medium_waste, cls_humongous_count);
2705 }
2707 // Dump global metaspace things from the end of ClassLoaderDataGraph
2708 void MetaspaceAux::dump(outputStream* out) {
2709 out->print_cr("All Metaspace:");
2710 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2711 out->print("class space: "); print_on(out, Metaspace::ClassType);
2712 print_waste(out);
2713 }
2715 void MetaspaceAux::verify_free_chunks() {
2716 Metaspace::space_list()->chunk_manager()->verify();
2717 Metaspace::class_space_list()->chunk_manager()->verify();
2718 }
2720 void MetaspaceAux::verify_capacity() {
2721 #ifdef ASSERT
2722 size_t running_sum_capacity_bytes = allocated_capacity_bytes();
2723 // For purposes of the running sum of capacity, verify against capacity
2724 size_t capacity_in_use_bytes = capacity_bytes_slow();
2725 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2726 err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
2727 " capacity_bytes_slow()" SIZE_FORMAT,
2728 running_sum_capacity_bytes, capacity_in_use_bytes));
2729 for (Metaspace::MetadataType i = Metaspace::ClassType;
2730 i < Metaspace::MetadataTypeCount;
2731 i = (Metaspace::MetadataType)(i + 1)) {
2732 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2733 assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
2734 err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
2735 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2736 i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
2737 }
2738 #endif
2739 }
2741 void MetaspaceAux::verify_used() {
2742 #ifdef ASSERT
2743 size_t running_sum_used_bytes = allocated_used_bytes();
2744 // For purposes of the running sum of used, verify against used
2745 size_t used_in_use_bytes = used_bytes_slow();
2746 assert(allocated_used_bytes() == used_in_use_bytes,
2747 err_msg("allocated_used_bytes() " SIZE_FORMAT
2748 " used_bytes_slow()" SIZE_FORMAT,
2749 allocated_used_bytes(), used_in_use_bytes));
2750 for (Metaspace::MetadataType i = Metaspace::ClassType;
2751 i < Metaspace::MetadataTypeCount;
2752 i = (Metaspace::MetadataType)(i + 1)) {
2753 size_t used_in_use_bytes = used_bytes_slow(i);
2754 assert(allocated_used_bytes(i) == used_in_use_bytes,
2755 err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
2756 " used_bytes_slow(%u)" SIZE_FORMAT,
2757 i, allocated_used_bytes(i), i, used_in_use_bytes));
2758 }
2759 #endif
2760 }
2762 void MetaspaceAux::verify_metrics() {
2763 verify_capacity();
2764 verify_used();
2765 }
2768 // Metaspace methods
2770 size_t Metaspace::_first_chunk_word_size = 0;
2771 size_t Metaspace::_first_class_chunk_word_size = 0;
2773 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2774 initialize(lock, type);
2775 }
2777 Metaspace::~Metaspace() {
2778 delete _vsm;
2779 delete _class_vsm;
2780 }
2782 VirtualSpaceList* Metaspace::_space_list = NULL;
2783 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2785 #define VIRTUALSPACEMULTIPLIER 2
2787 void Metaspace::global_initialize() {
2788 // Initialize the alignment for shared spaces.
2789 int max_alignment = os::vm_page_size();
2790 MetaspaceShared::set_max_alignment(max_alignment);
2792 if (DumpSharedSpaces) {
2793 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
2794 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
2795 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
2796 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
2798 // Initialize with the sum of the shared space sizes. The read-only
2799 // and read write metaspace chunks will be allocated out of this and the
2800 // remainder is the misc code and data chunks.
2801 size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
2802 SharedMiscDataSize + SharedMiscCodeSize,
2803 os::vm_allocation_granularity());
2804 size_t word_size = total/wordSize;
2805 _space_list = new VirtualSpaceList(word_size);
2806 } else {
2807 // If using shared space, open the file that contains the shared space
2808 // and map in the memory before initializing the rest of metaspace (so
2809 // the addresses don't conflict)
2810 if (UseSharedSpaces) {
2811 FileMapInfo* mapinfo = new FileMapInfo();
2812 memset(mapinfo, 0, sizeof(FileMapInfo));
2814 // Open the shared archive file, read and validate the header. If
2815 // initialization fails, shared spaces [UseSharedSpaces] are
2816 // disabled and the file is closed.
2817 // Map in spaces now also
2818 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
2819 FileMapInfo::set_current_info(mapinfo);
2820 } else {
2821 assert(!mapinfo->is_open() && !UseSharedSpaces,
2822 "archive file not closed or shared spaces not disabled.");
2823 }
2824 }
2826 // Initialize these before initializing the VirtualSpaceList
2827 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
2828 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
2829 // Make the first class chunk bigger than a medium chunk so it's not put
2830 // on the medium chunk list. The next chunk will be small and progress
2831 // from there.  This size was calculated by running -version.
2832 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
2833 (ClassMetaspaceSize/BytesPerWord)*2);
2834 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
2835 // Arbitrarily set the initial virtual space to a multiple
2836 // of the boot class loader size.
2837 size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
2838 // Initialize the list of virtual spaces.
2839 _space_list = new VirtualSpaceList(word_size);
2840 }
2841 }
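// To summarize the two start-up paths above: when dumping the shared
// archive the initial virtual space is sized to the sum of the four
// Shared*Size regions, while a normal VM sizes it to
// VIRTUALSPACEMULTIPLIER (2) times the boot class loader's first chunk,
// which is itself derived from InitialBootClassLoaderMetaspaceSize.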
2843 // For UseCompressedKlassPointers the class space is reserved as a piece of the
2844 // Java heap because the compression algorithm is the same for each. The
2845 // argument passed in is at the top of the compressed space
2846 void Metaspace::initialize_class_space(ReservedSpace rs) {
2847 // The reserved space size may be bigger because of alignment, esp with UseLargePages
2848 assert(rs.size() >= ClassMetaspaceSize,
2849 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
2850 _class_space_list = new VirtualSpaceList(rs);
2851 }
2853 void Metaspace::initialize(Mutex* lock,
2854 MetaspaceType type) {
2856 assert(space_list() != NULL,
2857 "Metadata VirtualSpaceList has not been initialized");
2859 _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
2860 if (_vsm == NULL) {
2861 return;
2862 }
2863 size_t word_size;
2864 size_t class_word_size;
2865 vsm()->get_initial_chunk_sizes(type,
2866 &word_size,
2867 &class_word_size);
2869 assert(class_space_list() != NULL,
2870 "Class VirtualSpaceList has not been initialized");
2872 // Allocate SpaceManager for classes.
2873 _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
2874 if (_class_vsm == NULL) {
2875 return;
2876 }
2878 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2880 // Allocate chunk for metadata objects
2881 Metachunk* new_chunk =
2882 space_list()->get_initialization_chunk(word_size,
2883 vsm()->medium_chunk_bunch());
2884 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
2885 if (new_chunk != NULL) {
2886 // Add to this manager's list of chunks in use and current_chunk().
2887 vsm()->add_chunk(new_chunk, true);
2888 }
2890 // Allocate chunk for class metadata objects
2891 Metachunk* class_chunk =
2892 class_space_list()->get_initialization_chunk(class_word_size,
2893 class_vsm()->medium_chunk_bunch());
2894 if (class_chunk != NULL) {
2895 class_vsm()->add_chunk(class_chunk, true);
2896 }
2898 _alloc_record_head = NULL;
2899 _alloc_record_tail = NULL;
2900 }
2902 size_t Metaspace::align_word_size_up(size_t word_size) {
2903 size_t byte_size = word_size * wordSize;
2904 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
2905 }
2907 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
2908 // DumpSharedSpaces doesn't use class metadata area (yet)
2909 if (mdtype == ClassType && !DumpSharedSpaces) {
2910 return class_vsm()->allocate(word_size);
2911 } else {
2912 return vsm()->allocate(word_size);
2913 }
2914 }
2916 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
2917 MetaWord* result;
2918 MetaspaceGC::set_expand_after_GC(true);
2919 size_t before_inc = MetaspaceGC::capacity_until_GC();
2920 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
2921 MetaspaceGC::inc_capacity_until_GC(delta_bytes);
2922 if (PrintGCDetails && Verbose) {
2923 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
2924 " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
2925 }
2927 result = allocate(word_size, mdtype);
2929 return result;
2930 }
2932 // Space allocated in the Metaspace. This may
2933 // be across several metadata virtual spaces.
2934 char* Metaspace::bottom() const {
2935 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
2936 return (char*)vsm()->current_chunk()->bottom();
2937 }
2939 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
2940 // return vsm()->allocated_used_words();
2941 return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
2942 vsm()->sum_used_in_chunks_in_use(); // includes overhead!
2943 }
2945 size_t Metaspace::free_words(MetadataType mdtype) const {
2946 return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
2947 vsm()->sum_free_in_chunks_in_use();
2948 }
2950 // Space capacity in the Metaspace.  It includes
2951 // space in the list of chunks from which allocations
2952 // have been made.  It does not include space in the global freelist,
2953 // and the space available in the dictionary is not added
2954 // because it is already counted in some chunk.
2955 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
2956 return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
2957 vsm()->sum_capacity_in_chunks_in_use();
2958 }
2960 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
2961 return used_words_slow(mdtype) * BytesPerWord;
2962 }
2964 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
2965 return capacity_words_slow(mdtype) * BytesPerWord;
2966 }
2968 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
2969 if (SafepointSynchronize::is_at_safepoint()) {
2970 assert(Thread::current()->is_VM_thread(), "should be the VM thread");
2971 // Don't take Heap_lock
2972 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2973 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2974 // Dark matter. Too small for dictionary.
2975 #ifdef ASSERT
2976 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2977 #endif
2978 return;
2979 }
2980 if (is_class) {
2981 class_vsm()->deallocate(ptr, word_size);
2982 } else {
2983 vsm()->deallocate(ptr, word_size);
2984 }
2985 } else {
2986 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
2988 if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
2989 // Dark matter. Too small for dictionary.
2990 #ifdef ASSERT
2991 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
2992 #endif
2993 return;
2994 }
2995 if (is_class) {
2996 class_vsm()->deallocate(ptr, word_size);
2997 } else {
2998 vsm()->deallocate(ptr, word_size);
2999 }
3000 }
3001 }
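// Note on the "dark matter" checks above: blocks smaller than
// TreeChunk<Metablock, FreeList>::min_size() cannot be linked into the
// block freelists, so they are simply dropped inside their chunk and,
// in debug builds, filled with 0xf5f5f5f5 to help catch stale references.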
3003 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3004 bool read_only, MetaspaceObj::Type type, TRAPS) {
3005 if (HAS_PENDING_EXCEPTION) {
3006 assert(false, "Should not allocate with exception pending");
3007 return NULL; // caller does a CHECK_NULL too
3008 }
3010 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3012 // SSS: Should we align the allocations and make sure the sizes are aligned?
3013 MetaWord* result = NULL;
3015 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3016 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3017 // Allocate in metaspaces without taking out a lock, because it deadlocks
3018 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
3019 // to revisit this for application class data sharing.
3020 if (DumpSharedSpaces) {
3021 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3022 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3023 result = space->allocate(word_size, NonClassType);
3024 if (result == NULL) {
3025 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3026 } else {
3027 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3028 }
3029 return Metablock::initialize(result, word_size);
3030 }
3032 result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3034 if (result == NULL) {
3035 // Try to clean out some memory and retry.
3036 result =
3037 Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3038 loader_data, word_size, mdtype);
3040 // If result is still null, we are out of memory.
3041 if (result == NULL) {
3042 if (Verbose && TraceMetadataChunkAllocation) {
3043 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3044 SIZE_FORMAT, word_size);
3045 if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
3046 MetaspaceAux::dump(gclog_or_tty);
3047 }
3048 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3049 const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
3050 "Metadata space";
3051 report_java_out_of_memory(space_string);
3053 if (JvmtiExport::should_post_resource_exhausted()) {
3054 JvmtiExport::post_resource_exhausted(
3055 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3056 space_string);
3057 }
3058 if (mdtype == ClassType) {
3059 THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
3060 } else {
3061 THROW_OOP_0(Universe::out_of_memory_error_metaspace());
3062 }
3063 }
3064 }
3065 return Metablock::initialize(result, word_size);
3066 }
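// Recap of the failure handling above: a first failed allocation gives the
// collector policy one chance to reclaim metadata via
// satisfy_failed_metadata_allocation() (which may trigger a collection);
// only if that retry also returns NULL is the OutOfMemoryError raised,
// after the optional heap dump / JVMTI resource-exhausted notifications.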
3068 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3069 assert(DumpSharedSpaces, "sanity");
3071 AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
3072 if (_alloc_record_head == NULL) {
3073 _alloc_record_head = _alloc_record_tail = rec;
3074 } else {
3075 _alloc_record_tail->_next = rec;
3076 _alloc_record_tail = rec;
3077 }
3078 }
3080 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3081 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3083 address last_addr = (address)bottom();
3085 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3086 address ptr = rec->_ptr;
3087 if (last_addr < ptr) {
3088 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3089 }
3090 closure->doit(ptr, rec->_type, rec->_byte_size);
3091 last_addr = ptr + rec->_byte_size;
3092 }
3094 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3095 if (last_addr < top) {
3096 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3097 }
3098 }
3100 void Metaspace::purge() {
3101 MutexLockerEx cl(SpaceManager::expand_lock(),
3102 Mutex::_no_safepoint_check_flag);
3103 space_list()->purge();
3104 class_space_list()->purge();
3105 }
3107 void Metaspace::print_on(outputStream* out) const {
3108 // Print both class virtual space counts and metaspace.
3109 if (Verbose) {
3110 vsm()->print_on(out);
3111 class_vsm()->print_on(out);
3112 }
3113 }
3115 bool Metaspace::contains(const void * ptr) {
3116 if (MetaspaceShared::is_in_shared_space(ptr)) {
3117 return true;
3118 }
3119 // This is checked while unlocked. As long as the virtualspaces are added
3120 // at the end, the pointer will be in one of them. The virtual spaces
3121 // aren't deleted presently. When they are, some sort of locking might
3122 // be needed. Note, locking this can cause inversion problems with the
3123 // caller in MetaspaceObj::is_metadata() function.
3124 return space_list()->contains(ptr) ||
3125 class_space_list()->contains(ptr);
3126 }
3128 void Metaspace::verify() {
3129 vsm()->verify();
3130 class_vsm()->verify();
3131 }
3133 void Metaspace::dump(outputStream* const out) const {
3134 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
3135 vsm()->dump(out);
3136 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
3137 class_vsm()->dump(out);
3138 }