/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

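// Size-keyed tree dictionaries: the first backs the per-SpaceManager block
// freelists, the second tracks free humongous chunks in the ChunkManager.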
typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
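// Note that humongous chunks are kept in a dictionary rather than in an
// indexed free list, which is why NumberOfFreeLists (3) is smaller than
// NumberOfInUseLists (4).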

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
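// The Class* sizes are used for the compressed class space, which mostly
// holds Klass structures and therefore gets by with smaller chunks.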

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Running totals (word size and count) over all chunks held
  // in this ChunkManager's free lists.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

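  // Remove one chunk of v words from the running totals. The updates are
  // atomic because the totals can be read without the expand_lock held.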
  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Allocate (remove) a chunk from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get a block from, or return a block to, the free list.
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         err_msg(PTR_FORMAT " is not aligned to " \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // the space at a configurable address, generally at the top of the Java
  // heap so other memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;


// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space allocated as blocks (handed out to callers)
  size_t _allocated_blocks_words;

  // Sum and count of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t)(is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t)(is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t)(is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()  { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Remove this SpaceManager's portion from the running sums.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to expand the space by for a chunk allocation.
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

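  // Convert a requested word size to the raw number of words actually taken
  // from a chunk: at least sizeof(Metablock), rounded up to the Metachunk
  // object alignment.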
  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);
791 |
|
792 void VirtualSpaceNode::inc_container_count() { |
|
793 assert_lock_strong(SpaceManager::expand_lock()); |
|
794 _container_count++; |
|
795 assert(_container_count == container_count_slow(), |
|
796 err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT |
|
797 " container_count_slow() " SIZE_FORMAT, |
|
798 _container_count, container_count_slow())); |
|
799 } |
|
800 |
|
801 void VirtualSpaceNode::dec_container_count() { |
|
802 assert_lock_strong(SpaceManager::expand_lock()); |
|
803 _container_count--; |
|
804 } |
|
805 |
|
806 #ifdef ASSERT |
|
807 void VirtualSpaceNode::verify_container_count() { |
|
808 assert(_container_count == container_count_slow(), |
|
809 err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT |
|
810 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); |
|
811 } |
|
812 #endif |
|

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

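// Find a block of at least word_size words in the dictionary, or return NULL.
// A candidate block more than WasteMultiplier times the requested size is put
// back rather than used; any sufficiently large unused tail of the chosen
// block is split off and returned to the free list.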
MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                                  \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,           \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT   \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

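// Carve the remaining committed space in this node into the largest chunks
// that fit (from medium down to specialized) and hand them to the
// ChunkManager, so the space is not lost when the node is retired.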
void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}
1237 |
|
1238 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) { |
|
1239 if (virtual_space_list() == NULL) { |
|
1240 set_virtual_space_list(new_entry); |
|
1241 } else { |
|
1242 current_virtual_space()->set_next(new_entry); |
|
1243 } |
|
1244 set_current_virtual_space(new_entry); |
|
1245 inc_reserved_words(new_entry->reserved_words()); |
|
1246 inc_committed_words(new_entry->committed_words()); |
|
1247 inc_virtual_space_count(); |
|
1248 #ifdef ASSERT |
|
1249 new_entry->mangle(); |
|
1250 #endif |
|
1251 if (TraceMetavirtualspaceAllocation && Verbose) { |
|
1252 VirtualSpaceNode* vsl = current_virtual_space(); |
|
1253 vsl->print_on(gclog_or_tty); |
|
1254 } |
|
1255 } |
|

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

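// Commit at least min_words (and preferably preferred_words) of memory for
// the list: first by expanding the current virtual space and, failing that,
// by retiring it and reserving a new virtual space.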
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

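// Atomically adjust the high-water mark and return its new value.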
size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}
1426 void MetaspaceGC::initialize() { |
|
1427 // Set the high-water mark to MaxMetapaceSize during VM initializaton since |
|
1428 // we can't do a GC during initialization. |
|
1429 _capacity_until_GC = MaxMetaspaceSize; |
|
1430 } |
|
1431 |
|
1432 void MetaspaceGC::post_initialize() { |
|
1433 // Reset the high-water mark once the VM initialization is done. |
|
1434 _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize); |
|
1435 } |
|

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

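// Number of words that can still be committed before hitting either the
// current GC threshold (capacity_until_GC) or MaxMetaspaceSize.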
1455 size_t MetaspaceGC::allowed_expansion() { |
|
1456 size_t committed_bytes = MetaspaceAux::committed_bytes(); |
|
1457 size_t capacity_until_gc = capacity_until_GC(); |
|
1458 |
|
1459 assert(capacity_until_gc >= committed_bytes, |
|
1460 err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT, |
|
1461 capacity_until_gc, committed_bytes)); |
|
1462 |
|
1463 size_t left_until_max = MaxMetaspaceSize - committed_bytes; |
|
1464 size_t left_until_GC = capacity_until_gc - committed_bytes; |
|
1465 size_t left_to_commit = MIN2(left_until_GC, left_until_max); |
|
1466 |
|
1467 return left_to_commit / BytesPerWord; |
|
1468 } |
|
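// Example with illustrative numbers: with 60M committed, a GC
// threshold (capacity_until_GC) of 64M and MaxMetaspaceSize = 100M,
// left_until_GC is 4M and left_until_max is 40M, so at most
// MIN2(4M, 40M) = 4M, i.e. 4M / BytesPerWord words, can still be
// committed without triggering a GC.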
1469 |
|
1470 void MetaspaceGC::compute_new_size() { |
|
1471 assert(_shrink_factor <= 100, "invalid shrink factor"); |
|
1472 uint current_shrink_factor = _shrink_factor; |
|
1473 _shrink_factor = 0; |
|
1474 |
|
1475 // Using committed_bytes() for used_after_gc is an overestimation, since the |
|
1476 // chunk free lists are included in committed_bytes() and the memory in an |
|
1477 // un-fragmented chunk free list is available for future allocations. |
|
1478 // However, if the chunk free lists becomes fragmented, then the memory may |
|
1479 // not be available for future allocations and the memory is therefore "in use". |
|
1480 // Including the chunk free lists in the definition of "in use" is therefore |
|
1481 // necessary. Not including the chunk free lists can cause capacity_until_GC to |
|
1482 // shrink below committed_bytes() and this has caused serious bugs in the past. |
|
1483 const size_t used_after_gc = MetaspaceAux::committed_bytes(); |
|
1484 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); |
|
1485 |
|
1486 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; |
|
1487 const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
|
1488 |
|
1489 const double min_tmp = used_after_gc / maximum_used_percentage; |
|
1490 size_t minimum_desired_capacity = |
|
1491 (size_t)MIN2(min_tmp, double(max_uintx)); |
|
1492 // Don't shrink less than the initial generation size |
|
1493 minimum_desired_capacity = MAX2(minimum_desired_capacity, |
|
1494 MetaspaceSize); |
|
1495 |
|
1496 if (PrintGCDetails && Verbose) { |
|
1497 gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: "); |
|
1498 gclog_or_tty->print_cr(" " |
|
1499 " minimum_free_percentage: %6.2f" |
|
1500 " maximum_used_percentage: %6.2f", |
|
1501 minimum_free_percentage, |
|
1502 maximum_used_percentage); |
|
1503 gclog_or_tty->print_cr(" " |
|
1504 " used_after_gc : %6.1fKB", |
|
1505 used_after_gc / (double) K); |
|
1506 } |
|
1507 |
|
1508 |
|
1509 size_t shrink_bytes = 0; |
|
1510 if (capacity_until_GC < minimum_desired_capacity) { |
|
1511 // If the current capacity target is below the minimum desired |
|
1512 // capacity, raise the HWM. |
|
1513 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; |
|
1514 expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); |
|
1515 // Don't expand unless it's significant |
|
1516 if (expand_bytes >= MinMetaspaceExpansion) { |
|
1517 size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes); |
|
1518 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, |
|
1519 new_capacity_until_GC, |
|
1520 MetaspaceGCThresholdUpdater::ComputeNewSize); |
|
1521 if (PrintGCDetails && Verbose) { |
|
1522 gclog_or_tty->print_cr(" expanding:" |
|
1523 " minimum_desired_capacity: %6.1fKB" |
|
1524 " expand_bytes: %6.1fKB" |
|
1525 " MinMetaspaceExpansion: %6.1fKB" |
|
1526 " new metaspace HWM: %6.1fKB", |
|
1527 minimum_desired_capacity / (double) K, |
|
1528 expand_bytes / (double) K, |
|
1529 MinMetaspaceExpansion / (double) K, |
|
1530 new_capacity_until_GC / (double) K); |
|
1531 } |
|
1532 } |
|
1533 return; |
|
1534 } |
|
1535 |
|
1536 // No expansion, now see if we want to shrink |
|
1537 // We would never want to shrink more than this |
|
1538 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; |
|
1539 assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT, |
|
1540 max_shrink_bytes)); |
|
1541 |
|
1542 // Should shrinking be considered? |
|
1543 if (MaxMetaspaceFreeRatio < 100) { |
|
1544 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0; |
|
1545 const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
|
1546 const double max_tmp = used_after_gc / minimum_used_percentage; |
|
1547 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); |
|
1548 maximum_desired_capacity = MAX2(maximum_desired_capacity, |
|
1549 MetaspaceSize); |
|
1550 if (PrintGCDetails && Verbose) { |
|
1551 gclog_or_tty->print_cr(" " |
|
1552 " maximum_free_percentage: %6.2f" |
|
1553 " minimum_used_percentage: %6.2f", |
|
1554 maximum_free_percentage, |
|
1555 minimum_used_percentage); |
|
1556 gclog_or_tty->print_cr(" " |
|
1557 " minimum_desired_capacity: %6.1fKB" |
|
1558 " maximum_desired_capacity: %6.1fKB", |
|
1559 minimum_desired_capacity / (double) K, |
|
1560 maximum_desired_capacity / (double) K); |
|
1561 } |
|
1562 |
|
1563 assert(minimum_desired_capacity <= maximum_desired_capacity, |
|
1564 "sanity check"); |
|
1565 |
|
1566 if (capacity_until_GC > maximum_desired_capacity) { |
|
1567 // Capacity too large, compute shrinking size |
|
1568 shrink_bytes = capacity_until_GC - maximum_desired_capacity; |
|
1569 // We don't want to shrink all the way back to initSize if people call |
|
1570 // System.gc(), because some programs do that between "phases" and then |
|
1571 // we'd just have to grow the metaspace again for the next phase. So we |
|
1572 // damp the shrinking: 0% on the first call, 10% on the second call, 40% |
|
1573 // on the third call, and 100% by the fourth call. But if we recompute |
|
1574 // size without shrinking, it goes back to 0%. |
|
1575 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; |
|
1576 |
|
1577 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); |
|
1578 |
|
1579 assert(shrink_bytes <= max_shrink_bytes, |
|
1580 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, |
|
1581 shrink_bytes, max_shrink_bytes)); |
|
1582 if (current_shrink_factor == 0) { |
|
1583 _shrink_factor = 10; |
|
1584 } else { |
|
1585 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); |
|
1586 } |
|
1587 if (PrintGCDetails && Verbose) { |
|
1588 gclog_or_tty->print_cr(" " |
|
1589 " shrinking:" |
|
1590 " initSize: %.1fK" |
|
1591 " maximum_desired_capacity: %.1fK", |
|
1592 MetaspaceSize / (double) K, |
|
1593 maximum_desired_capacity / (double) K); |
|
1594 gclog_or_tty->print_cr(" " |
|
1595 " shrink_bytes: %.1fK" |
|
1596 " current_shrink_factor: %d" |
|
1597 " new shrink factor: %d" |
|
1598 " MinMetaspaceExpansion: %.1fK", |
|
1599 shrink_bytes / (double) K, |
|
1600 current_shrink_factor, |
|
1601 _shrink_factor, |
|
1602 MinMetaspaceExpansion / (double) K); |
|
1603 } |
|
1604 } |
|
1605 } |
|
1606 |
|
1607 // Don't shrink unless it's significant |
|
1608 if (shrink_bytes >= MinMetaspaceExpansion && |
|
1609 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { |
|
1610 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); |
|
1611 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, |
|
1612 new_capacity_until_GC, |
|
1613 MetaspaceGCThresholdUpdater::ComputeNewSize); |
|
1614 } |
|
1615 } |
|
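// Shrink damping, illustrated: suppose capacity_until_GC stayed at
// 200M and maximum_desired_capacity at 120M over consecutive calls.
// The shrink potential is 80M each time, but the damped amounts are
// 0M (0%), 8M (10%), 32M (40%) and then the full remainder (100%),
// because _shrink_factor steps through 0, 10, 40, 100 and is reset
// to 0 by any recomputation that does not shrink.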
1616 |
|
1617 // Metadebug methods |
|
1618 |
|
1619 void Metadebug::init_allocation_fail_alot_count() { |
|
1620 if (MetadataAllocationFailALot) { |
|
1621 _allocation_fail_alot_count = |
|
1622 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); |
|
1623 } |
|
1624 } |
|
1625 |
|
1626 #ifdef ASSERT |
|
1627 bool Metadebug::test_metadata_failure() { |
|
1628 if (MetadataAllocationFailALot && |
|
1629 Threads::is_vm_complete()) { |
|
1630 if (_allocation_fail_alot_count > 0) { |
|
1631 _allocation_fail_alot_count--; |
|
1632 } else { |
|
1633 if (TraceMetadataChunkAllocation && Verbose) { |
|
1634 gclog_or_tty->print_cr("Metadata allocation failing for " |
|
1635 "MetadataAllocationFailALot"); |
|
1636 } |
|
1637 init_allocation_fail_alot_count(); |
|
1638 return true; |
|
1639 } |
|
1640 } |
|
1641 return false; |
|
1642 } |
|
1643 #endif |
|
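// Net effect: once the VM is fully started, a metadata allocation is
// forced to fail at pseudo-random intervals averaging roughly half of
// MetadataAllocationFailALotInterval, exercising the out-of-memory
// and retry paths in the callers.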
1644 |
|
1645 // ChunkManager methods |
|
1646 |
|
1647 size_t ChunkManager::free_chunks_total_words() { |
|
1648 return _free_chunks_total; |
|
1649 } |
|
1650 |
|
1651 size_t ChunkManager::free_chunks_total_bytes() { |
|
1652 return free_chunks_total_words() * BytesPerWord; |
|
1653 } |
|
1654 |
|
1655 size_t ChunkManager::free_chunks_count() { |
|
1656 #ifdef ASSERT |
|
1657 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { |
|
1658 MutexLockerEx cl(SpaceManager::expand_lock(), |
|
1659 Mutex::_no_safepoint_check_flag); |
|
1660 // This lock is only needed in debug because the verification |
|
1661 // of _free_chunks_total walks the list of free chunks |
|
1662 slow_locked_verify_free_chunks_count(); |
|
1663 } |
|
1664 #endif |
|
1665 return _free_chunks_count; |
|
1666 } |
|
1667 |
|
1668 void ChunkManager::locked_verify_free_chunks_total() { |
|
1669 assert_lock_strong(SpaceManager::expand_lock()); |
|
1670 assert(sum_free_chunks() == _free_chunks_total, |
|
1671 err_msg("_free_chunks_total " SIZE_FORMAT " is not the" |
|
1672 " same as sum " SIZE_FORMAT, _free_chunks_total, |
|
1673 sum_free_chunks())); |
|
1674 } |
|
1675 |
|
1676 void ChunkManager::verify_free_chunks_total() { |
|
1677 MutexLockerEx cl(SpaceManager::expand_lock(), |
|
1678 Mutex::_no_safepoint_check_flag); |
|
1679 locked_verify_free_chunks_total(); |
|
1680 } |
|
1681 |
|
1682 void ChunkManager::locked_verify_free_chunks_count() { |
|
1683 assert_lock_strong(SpaceManager::expand_lock()); |
|
1684 assert(sum_free_chunks_count() == _free_chunks_count, |
|
1685 err_msg("_free_chunks_count " SIZE_FORMAT " is not the" |
|
1686 " same as sum " SIZE_FORMAT, _free_chunks_count, |
|
1687 sum_free_chunks_count())); |
|
1688 } |
|
1689 |
|
1690 void ChunkManager::verify_free_chunks_count() { |
|
1691 #ifdef ASSERT |
|
1692 MutexLockerEx cl(SpaceManager::expand_lock(), |
|
1693 Mutex::_no_safepoint_check_flag); |
|
1694 locked_verify_free_chunks_count(); |
|
1695 #endif |
|
1696 } |
|
1697 |
|
1698 void ChunkManager::verify() { |
|
1699 MutexLockerEx cl(SpaceManager::expand_lock(), |
|
1700 Mutex::_no_safepoint_check_flag); |
|
1701 locked_verify(); |
|
1702 } |
|
1703 |
|
1704 void ChunkManager::locked_verify() { |
|
1705 locked_verify_free_chunks_count(); |
|
1706 locked_verify_free_chunks_total(); |
|
1707 } |
|
1708 |
|
1709 void ChunkManager::locked_print_free_chunks(outputStream* st) { |
|
1710 assert_lock_strong(SpaceManager::expand_lock()); |
|
1711 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, |
|
1712 _free_chunks_total, _free_chunks_count); |
|
1713 } |
|
1714 |
|
1715 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { |
|
1716 assert_lock_strong(SpaceManager::expand_lock()); |
|
1717 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, |
|
1718 sum_free_chunks(), sum_free_chunks_count()); |
|
1719 } |
|
1720 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { |
|
1721 return &_free_chunks[index]; |
|
1722 } |
|
1723 |
|
1724 // These methods, which sum the free chunk lists, are used by the |
|
1725 // printing methods that run in product builds. |
|
1726 size_t ChunkManager::sum_free_chunks() { |
|
1727 assert_lock_strong(SpaceManager::expand_lock()); |
|
1728 size_t result = 0; |
|
1729 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { |
|
1730 ChunkList* list = free_chunks(i); |
|
1731 |
|
1732 if (list == NULL) { |
|
1733 continue; |
|
1734 } |
|
1735 |
|
1736 result = result + list->count() * list->size(); |
|
1737 } |
|
1738 result = result + humongous_dictionary()->total_size(); |
|
1739 return result; |
|
1740 } |
|
1741 |
|
1742 size_t ChunkManager::sum_free_chunks_count() { |
|
1743 assert_lock_strong(SpaceManager::expand_lock()); |
|
1744 size_t count = 0; |
|
1745 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { |
|
1746 ChunkList* list = free_chunks(i); |
|
1747 if (list == NULL) { |
|
1748 continue; |
|
1749 } |
|
1750 count = count + list->count(); |
|
1751 } |
|
1752 count = count + humongous_dictionary()->total_free_blocks(); |
|
1753 return count; |
|
1754 } |
|
1755 |
|
1756 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { |
|
1757 ChunkIndex index = list_index(word_size); |
|
1758 assert(index < HumongousIndex, "No humongous list"); |
|
1759 return free_chunks(index); |
|
1760 } |
|
1761 |
|
1762 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { |
|
1763 assert_lock_strong(SpaceManager::expand_lock()); |
|
1764 |
|
1765 slow_locked_verify(); |
|
1766 |
|
1767 Metachunk* chunk = NULL; |
|
1768 if (list_index(word_size) != HumongousIndex) { |
|
1769 ChunkList* free_list = find_free_chunks_list(word_size); |
|
1770 assert(free_list != NULL, "Sanity check"); |
|
1771 |
|
1772 chunk = free_list->head(); |
|
1773 |
|
1774 if (chunk == NULL) { |
|
1775 return NULL; |
|
1776 } |
|
1777 |
|
1778 // Remove the chunk as the head of the list. |
|
1779 free_list->remove_chunk(chunk); |
|
1780 |
|
1781 if (TraceMetadataChunkAllocation && Verbose) { |
|
1782 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list " |
|
1783 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, |
|
1784 free_list, chunk, chunk->word_size()); |
|
1785 } |
|
1786 } else { |
|
1787 chunk = humongous_dictionary()->get_chunk( |
|
1788 word_size, |
|
1789 FreeBlockDictionary<Metachunk>::atLeast); |
|
1790 |
|
1791 if (chunk == NULL) { |
|
1792 return NULL; |
|
1793 } |
|
1794 |
|
1795 if (TraceMetadataHumongousAllocation) { |
|
1796 size_t waste = chunk->word_size() - word_size; |
|
1797 gclog_or_tty->print_cr("Free list allocate humongous chunk size " |
|
1798 SIZE_FORMAT " for requested size " SIZE_FORMAT |
|
1799 " waste " SIZE_FORMAT, |
|
1800 chunk->word_size(), word_size, waste); |
|
1801 } |
|
1802 } |
|
1803 |
|
1804 // Chunk is being removed from the chunks free list. |
|
1805 dec_free_chunks_total(chunk->word_size()); |
|
1806 |
|
1807 // Remove it from the links to this freelist |
|
1808 chunk->set_next(NULL); |
|
1809 chunk->set_prev(NULL); |
|
1810 #ifdef ASSERT |
|
1811 // Chunk is no longer on any freelist. Setting to false makes container_count_slow() |
|
1812 // work. |
|
1813 chunk->set_is_tagged_free(false); |
|
1814 #endif |
|
1815 chunk->container()->inc_container_count(); |
|
1816 |
|
1817 slow_locked_verify(); |
|
1818 return chunk; |
|
1819 } |
|
1820 |
|
1821 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { |
|
1822 assert_lock_strong(SpaceManager::expand_lock()); |
|
1823 slow_locked_verify(); |
|
1824 |
|
1825 // Take from the beginning of the list |
|
1826 Metachunk* chunk = free_chunks_get(word_size); |
|
1827 if (chunk == NULL) { |
|
1828 return NULL; |
|
1829 } |
|
1830 |
|
1831 assert((word_size <= chunk->word_size()) || |
|
1832 list_index(chunk->word_size()) == HumongousIndex, |
|
1833 "Non-humongous variable sized chunk"); |
|
1834 if (TraceMetadataChunkAllocation) { |
|
1835 size_t list_count; |
|
1836 if (list_index(word_size) < HumongousIndex) { |
|
1837 ChunkList* list = find_free_chunks_list(word_size); |
|
1838 list_count = list->count(); |
|
1839 } else { |
|
1840 list_count = humongous_dictionary()->total_count(); |
|
1841 } |
|
1842 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " |
|
1843 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ", |
|
1844 this, chunk, chunk->word_size(), list_count); |
|
1845 locked_print_free_chunks(gclog_or_tty); |
|
1846 } |
|
1847 |
|
1848 return chunk; |
|
1849 } |
|
1850 |
|
1851 void ChunkManager::print_on(outputStream* out) const { |
|
1852 if (PrintFLSStatistics != 0) { |
|
1853 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(); |
|
1854 } |
|
1855 } |
|
1856 |
|
1857 // SpaceManager methods |
|
1858 |
|
1859 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type, |
|
1860 size_t* chunk_word_size, |
|
1861 size_t* class_chunk_word_size) { |
|
1862 switch (type) { |
|
1863 case Metaspace::BootMetaspaceType: |
|
1864 *chunk_word_size = Metaspace::first_chunk_word_size(); |
|
1865 *class_chunk_word_size = Metaspace::first_class_chunk_word_size(); |
|
1866 break; |
|
1867 case Metaspace::ROMetaspaceType: |
|
1868 *chunk_word_size = SharedReadOnlySize / wordSize; |
|
1869 *class_chunk_word_size = ClassSpecializedChunk; |
|
1870 break; |
|
1871 case Metaspace::ReadWriteMetaspaceType: |
|
1872 *chunk_word_size = SharedReadWriteSize / wordSize; |
|
1873 *class_chunk_word_size = ClassSpecializedChunk; |
|
1874 break; |
|
1875 case Metaspace::AnonymousMetaspaceType: |
|
1876 case Metaspace::ReflectionMetaspaceType: |
|
1877 *chunk_word_size = SpecializedChunk; |
|
1878 *class_chunk_word_size = ClassSpecializedChunk; |
|
1879 break; |
|
1880 default: |
|
1881 *chunk_word_size = SmallChunk; |
|
1882 *class_chunk_word_size = ClassSmallChunk; |
|
1883 break; |
|
1884 } |
|
1885 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0, |
|
1886 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT |
|
1887 " class " SIZE_FORMAT, |
|
1888 *chunk_word_size, *class_chunk_word_size)); |
|
1889 } |
|
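// In summary: the boot metaspace gets the large initial chunks, the
// shared RO/RW metaspaces size their data chunk from the dump-time
// SharedReadOnlySize/SharedReadWriteSize settings, anonymous and
// reflection metaspaces (expected to stay tiny) start with
// specialized chunks, and everything else starts with small chunks.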
1890 |
|
1891 size_t SpaceManager::sum_free_in_chunks_in_use() const { |
|
1892 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); |
|
1893 size_t free = 0; |
|
1894 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
1895 Metachunk* chunk = chunks_in_use(i); |
|
1896 while (chunk != NULL) { |
|
1897 free += chunk->free_word_size(); |
|
1898 chunk = chunk->next(); |
|
1899 } |
|
1900 } |
|
1901 return free; |
|
1902 } |
|
1903 |
|
1904 size_t SpaceManager::sum_waste_in_chunks_in_use() const { |
|
1905 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); |
|
1906 size_t result = 0; |
|
1907 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
1908 result += sum_waste_in_chunks_in_use(i); |
|
1909 } |
|
1910 |
|
1911 return result; |
|
1912 } |
|
1913 |
|
1914 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { |
|
1915 size_t result = 0; |
|
1916 Metachunk* chunk = chunks_in_use(index); |
|
1917 // Count the free space in all the chunks but not the |
|
1918 // current chunk from which allocations are still being done. |
|
1919 while (chunk != NULL) { |
|
1920 if (chunk != current_chunk()) { |
|
1921 result += chunk->free_word_size(); |
|
1922 } |
|
1923 chunk = chunk->next(); |
|
1924 } |
|
1925 return result; |
|
1926 } |
|
1927 |
|
1928 size_t SpaceManager::sum_capacity_in_chunks_in_use() const { |
|
1929 // For CMS use "allocated_chunks_words()", which does not need the |
|
1930 // Metaspace lock. For the other collectors, sum over the |
|
1931 // chunks-in-use lists. sum_capacity_in_chunks_in_use() is too |
|
1932 // expensive to use in product builds, so allocated_chunks_words() |
|
1933 // is used there; summing the lists as well checks that |
|
1934 // allocated_chunks_words() returns the same value as |
|
1935 // sum_capacity_in_chunks_in_use(), which is the definitive |
|
1936 // answer. |
|
1937 if (UseConcMarkSweepGC) { |
|
1938 return allocated_chunks_words(); |
|
1939 } else { |
|
1940 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); |
|
1941 size_t sum = 0; |
|
1942 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
1943 Metachunk* chunk = chunks_in_use(i); |
|
1944 while (chunk != NULL) { |
|
1945 sum += chunk->word_size(); |
|
1946 chunk = chunk->next(); |
|
1947 } |
|
1948 } |
|
1949 return sum; |
|
1950 } |
|
1951 } |
|
1952 |
|
1953 size_t SpaceManager::sum_count_in_chunks_in_use() { |
|
1954 size_t count = 0; |
|
1955 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
1956 count = count + sum_count_in_chunks_in_use(i); |
|
1957 } |
|
1958 |
|
1959 return count; |
|
1960 } |
|
1961 |
|
1962 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { |
|
1963 size_t count = 0; |
|
1964 Metachunk* chunk = chunks_in_use(i); |
|
1965 while (chunk != NULL) { |
|
1966 count++; |
|
1967 chunk = chunk->next(); |
|
1968 } |
|
1969 return count; |
|
1970 } |
|
1971 |
|
1972 |
|
1973 size_t SpaceManager::sum_used_in_chunks_in_use() const { |
|
1974 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); |
|
1975 size_t used = 0; |
|
1976 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
1977 Metachunk* chunk = chunks_in_use(i); |
|
1978 while (chunk != NULL) { |
|
1979 used += chunk->used_word_size(); |
|
1980 chunk = chunk->next(); |
|
1981 } |
|
1982 } |
|
1983 return used; |
|
1984 } |
|
1985 |
|
1986 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { |
|
1987 |
|
1988 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
1989 Metachunk* chunk = chunks_in_use(i); |
|
1990 st->print("SpaceManager: %s " PTR_FORMAT, |
|
1991 chunk_size_name(i), chunk); |
|
1992 if (chunk != NULL) { |
|
1993 st->print_cr(" free " SIZE_FORMAT, |
|
1994 chunk->free_word_size()); |
|
1995 } else { |
|
1996 st->cr(); |
|
1997 } |
|
1998 } |
|
1999 |
|
2000 chunk_manager()->locked_print_free_chunks(st); |
|
2001 chunk_manager()->locked_print_sum_free_chunks(st); |
|
2002 } |
|
2003 |
|
2004 size_t SpaceManager::calc_chunk_size(size_t word_size) { |
|
2005 |
|
2006 // Decide between a small chunk and a medium chunk. Up to |
|
2007 // _small_chunk_limit small chunks can be allocated but |
|
2008 // once a medium chunk has been allocated, no more small |
|
2009 // chunks will be allocated. |
|
2010 size_t chunk_word_size; |
|
2011 if (chunks_in_use(MediumIndex) == NULL && |
|
2012 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { |
|
2013 chunk_word_size = (size_t) small_chunk_size(); |
|
2014 if (word_size + Metachunk::overhead() > small_chunk_size()) { |
|
2015 chunk_word_size = medium_chunk_size(); |
|
2016 } |
|
2017 } else { |
|
2018 chunk_word_size = medium_chunk_size(); |
|
2019 } |
|
2020 |
|
2021 // Might still need a humongous chunk. Enforce |
|
2022 // humongous allocations sizes to be aligned up to |
|
2023 // the smallest chunk size. |
|
2024 size_t if_humongous_sized_chunk = |
|
2025 align_size_up(word_size + Metachunk::overhead(), |
|
2026 smallest_chunk_size()); |
|
2027 chunk_word_size = |
|
2028 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); |
|
2029 |
|
2030 assert(!SpaceManager::is_humongous(word_size) || |
|
2031 chunk_word_size == if_humongous_sized_chunk, |
|
2032 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT |
|
2033 " chunk_word_size " SIZE_FORMAT, |
|
2034 word_size, chunk_word_size)); |
|
2035 if (TraceMetadataHumongousAllocation && |
|
2036 SpaceManager::is_humongous(word_size)) { |
|
2037 gclog_or_tty->print_cr("Metadata humongous allocation:"); |
|
2038 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); |
|
2039 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, |
|
2040 chunk_word_size); |
|
2041 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT, |
|
2042 Metachunk::overhead()); |
|
2043 } |
|
2044 return chunk_word_size; |
|
2045 } |
|
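// Example: a small request is served from small chunks until
// _small_chunk_limit of them are in use, after which medium chunks
// are handed out. A humongous request (one that does not fit the
// chosen chunk) instead gets word_size plus overhead rounded up to
// smallest_chunk_size().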
2046 |
|
2047 void SpaceManager::track_metaspace_memory_usage() { |
|
2048 if (is_init_completed()) { |
|
2049 if (is_class()) { |
|
2050 MemoryService::track_compressed_class_memory_usage(); |
|
2051 } |
|
2052 MemoryService::track_metaspace_memory_usage(); |
|
2053 } |
|
2054 } |
|
2055 |
|
2056 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { |
|
2057 assert(vs_list()->current_virtual_space() != NULL, |
|
2058 "Should have been set"); |
|
2059 assert(current_chunk() == NULL || |
|
2060 current_chunk()->allocate(word_size) == NULL, |
|
2061 "Don't need to expand"); |
|
2062 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); |
|
2063 |
|
2064 if (TraceMetadataChunkAllocation && Verbose) { |
|
2065 size_t words_left = 0; |
|
2066 size_t words_used = 0; |
|
2067 if (current_chunk() != NULL) { |
|
2068 words_left = current_chunk()->free_word_size(); |
|
2069 words_used = current_chunk()->used_word_size(); |
|
2070 } |
|
2071 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT |
|
2072 " words " SIZE_FORMAT " words used " SIZE_FORMAT |
|
2073 " words left", |
|
2074 word_size, words_used, words_left); |
|
2075 } |
|
2076 |
|
2077 // Get another chunk out of the virtual space |
|
2078 size_t grow_chunks_by_words = calc_chunk_size(word_size); |
|
2079 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); |
|
2080 |
|
2081 MetaWord* mem = NULL; |
|
2082 |
|
2083 // If a chunk was available, add it to the in-use chunk list |
|
2084 // and do an allocation from it. |
|
2085 if (next != NULL) { |
|
2086 // Add to this manager's list of chunks in use. |
|
2087 add_chunk(next, false); |
|
2088 mem = next->allocate(word_size); |
|
2089 } |
|
2090 |
|
2091 // Track metaspace memory usage statistic. |
|
2092 track_metaspace_memory_usage(); |
|
2093 |
|
2094 return mem; |
|
2095 } |
|
2096 |
|
2097 void SpaceManager::print_on(outputStream* st) const { |
|
2098 |
|
2099 for (ChunkIndex i = ZeroIndex; |
|
2100 i < NumberOfInUseLists; |
|
2101 i = next_chunk_index(i)) { |
|
2102 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT, |
|
2103 chunks_in_use(i), |
|
2104 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size()); |
|
2105 } |
|
2106 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT |
|
2107 " Humongous " SIZE_FORMAT, |
|
2108 sum_waste_in_chunks_in_use(SmallIndex), |
|
2109 sum_waste_in_chunks_in_use(MediumIndex), |
|
2110 sum_waste_in_chunks_in_use(HumongousIndex)); |
|
2111 // block free lists |
|
2112 if (block_freelists() != NULL) { |
|
2113 st->print_cr("total in block free lists " SIZE_FORMAT, |
|
2114 block_freelists()->total_size()); |
|
2115 } |
|
2116 } |
|
2117 |
|
2118 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, |
|
2119 Mutex* lock) : |
|
2120 _mdtype(mdtype), |
|
2121 _allocated_blocks_words(0), |
|
2122 _allocated_chunks_words(0), |
|
2123 _allocated_chunks_count(0), |
|
2124 _lock(lock) |
|
2125 { |
|
2126 initialize(); |
|
2127 } |
|
2128 |
|
2129 void SpaceManager::inc_size_metrics(size_t words) { |
|
2130 assert_lock_strong(SpaceManager::expand_lock()); |
|
2131 // Per-SpaceManager totals: words in allocated Metachunks and the |
|
2132 // count of allocated Metachunks |
|
2133 _allocated_chunks_words = _allocated_chunks_words + words; |
|
2134 _allocated_chunks_count++; |
|
2135 // Global total of capacity in allocated Metachunks |
|
2136 MetaspaceAux::inc_capacity(mdtype(), words); |
|
2137 // Global total of allocated Metablocks. |
|
2138 // used_words_slow() includes the overhead in each |
|
2139 // Metachunk so include it in the used when the |
|
2140 // Metachunk is first added (so only added once per |
|
2141 // Metachunk). |
|
2142 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); |
|
2143 } |
|
2144 |
|
2145 void SpaceManager::inc_used_metrics(size_t words) { |
|
2146 // Add to the per SpaceManager total |
|
2147 Atomic::add_ptr(words, &_allocated_blocks_words); |
|
2148 // Add to the global total |
|
2149 MetaspaceAux::inc_used(mdtype(), words); |
|
2150 } |
|
2151 |
|
2152 void SpaceManager::dec_total_from_size_metrics() { |
|
2153 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); |
|
2154 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); |
|
2155 // Also deduct the overhead per Metachunk |
|
2156 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); |
|
2157 } |
|
2158 |
|
2159 void SpaceManager::initialize() { |
|
2160 Metadebug::init_allocation_fail_alot_count(); |
|
2161 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
2162 _chunks_in_use[i] = NULL; |
|
2163 } |
|
2164 _current_chunk = NULL; |
|
2165 if (TraceMetadataChunkAllocation && Verbose) { |
|
2166 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this); |
|
2167 } |
|
2168 } |
|
2169 |
|
2170 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { |
|
2171 if (chunks == NULL) { |
|
2172 return; |
|
2173 } |
|
2174 ChunkList* list = free_chunks(index); |
|
2175 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); |
|
2176 assert_lock_strong(SpaceManager::expand_lock()); |
|
2177 Metachunk* cur = chunks; |
|
2178 |
|
2179 // This returns chunks one at a time. If a new |
|
2180 // class List can be created that is a base class |
|
2181 // of FreeList then something like FreeList::prepend() |
|
2182 // can be used in place of this loop |
|
2183 while (cur != NULL) { |
|
2184 assert(cur->container() != NULL, "Container should have been set"); |
|
2185 cur->container()->dec_container_count(); |
|
2186 // Capture the next link before it is changed |
|
2187 // by the call to return_chunk_at_head(); |
|
2188 Metachunk* next = cur->next(); |
|
2189 DEBUG_ONLY(cur->set_is_tagged_free(true);) |
|
2190 list->return_chunk_at_head(cur); |
|
2191 cur = next; |
|
2192 } |
|
2193 } |
|
2194 |
|
2195 SpaceManager::~SpaceManager() { |
|
2196 // The assert below takes this->_lock, which can't be done while holding expand_lock(). |
|
2197 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), |
|
2198 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT |
|
2199 " allocated_chunks_words() " SIZE_FORMAT, |
|
2200 sum_capacity_in_chunks_in_use(), allocated_chunks_words())); |
|
2201 |
|
2202 MutexLockerEx fcl(SpaceManager::expand_lock(), |
|
2203 Mutex::_no_safepoint_check_flag); |
|
2204 |
|
2205 chunk_manager()->slow_locked_verify(); |
|
2206 |
|
2207 dec_total_from_size_metrics(); |
|
2208 |
|
2209 if (TraceMetadataChunkAllocation && Verbose) { |
|
2210 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this); |
|
2211 locked_print_chunks_in_use_on(gclog_or_tty); |
|
2212 } |
|
2213 |
|
2214 // Do not mangle freed Metachunks. The chunk size inside a Metachunk |
|
2215 // is still read while VirtualSpaceNodes are being freed. |
|
2216 |
|
2217 // Have to update before the chunks_in_use lists are emptied |
|
2218 // below. |
|
2219 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(), |
|
2220 sum_count_in_chunks_in_use()); |
|
2221 |
|
2222 // Add all the chunks in use by this space manager |
|
2223 // to the global list of free chunks. |
|
2224 |
|
2225 // Follow each list of chunks-in-use and add them to the |
|
2226 // free lists. Each list is NULL terminated. |
|
2227 |
|
2228 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) { |
|
2229 if (TraceMetadataChunkAllocation && Verbose) { |
|
2230 gclog_or_tty->print_cr("returned %d %s chunks to freelist", |
|
2231 sum_count_in_chunks_in_use(i), |
|
2232 chunk_size_name(i)); |
|
2233 } |
|
2234 Metachunk* chunks = chunks_in_use(i); |
|
2235 chunk_manager()->return_chunks(i, chunks); |
|
2236 set_chunks_in_use(i, NULL); |
|
2237 if (TraceMetadataChunkAllocation && Verbose) { |
|
2238 gclog_or_tty->print_cr("updated freelist count %d %s", |
|
2239 chunk_manager()->free_chunks(i)->count(), |
|
2240 chunk_size_name(i)); |
|
2241 } |
|
2242 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); |
|
2243 } |
|
2244 |
|
2245 // The medium chunk case may be optimized by passing the head and |
|
2246 // tail of the medium chunk list to add_at_head(). The tail is often |
|
2247 // the current chunk but there are probably exceptions. |
|
2248 |
|
2249 // Humongous chunks |
|
2250 if (TraceMetadataChunkAllocation && Verbose) { |
|
2251 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary", |
|
2252 sum_count_in_chunks_in_use(HumongousIndex), |
|
2253 chunk_size_name(HumongousIndex)); |
|
2254 gclog_or_tty->print("Humongous chunk dictionary: "); |
|
2255 } |
|
2256 // Humongous chunks are never the current chunk. |
|
2257 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); |
|
2258 |
|
2259 while (humongous_chunks != NULL) { |
|
2260 #ifdef ASSERT |
|
2261 humongous_chunks->set_is_tagged_free(true); |
|
2262 #endif |
|
2263 if (TraceMetadataChunkAllocation && Verbose) { |
|
2264 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", |
|
2265 humongous_chunks, |
|
2266 humongous_chunks->word_size()); |
|
2267 } |
|
2268 assert(humongous_chunks->word_size() == (size_t) |
|
2269 align_size_up(humongous_chunks->word_size(), |
|
2270 smallest_chunk_size()), |
|
2271 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT |
|
2272 " granularity %d", |
|
2273 humongous_chunks->word_size(), smallest_chunk_size())); |
|
2274 Metachunk* next_humongous_chunks = humongous_chunks->next(); |
|
2275 humongous_chunks->container()->dec_container_count(); |
|
2276 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); |
|
2277 humongous_chunks = next_humongous_chunks; |
|
2278 } |
|
2279 if (TraceMetadataChunkAllocation && Verbose) { |
|
2280 gclog_or_tty->cr(); |
|
2281 gclog_or_tty->print_cr("updated dictionary count %d %s", |
|
2282 chunk_manager()->humongous_dictionary()->total_count(), |
|
2283 chunk_size_name(HumongousIndex)); |
|
2284 } |
|
2285 chunk_manager()->slow_locked_verify(); |
|
2286 } |
|
2287 |
|
2288 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { |
|
2289 switch (index) { |
|
2290 case SpecializedIndex: |
|
2291 return "Specialized"; |
|
2292 case SmallIndex: |
|
2293 return "Small"; |
|
2294 case MediumIndex: |
|
2295 return "Medium"; |
|
2296 case HumongousIndex: |
|
2297 return "Humongous"; |
|
2298 default: |
|
2299 return NULL; |
|
2300 } |
|
2301 } |
|
2302 |
|
2303 ChunkIndex ChunkManager::list_index(size_t size) { |
|
2304 switch (size) { |
|
2305 case SpecializedChunk: |
|
2306 assert(SpecializedChunk == ClassSpecializedChunk, |
|
2307 "Need branch for ClassSpecializedChunk"); |
|
2308 return SpecializedIndex; |
|
2309 case SmallChunk: |
|
2310 case ClassSmallChunk: |
|
2311 return SmallIndex; |
|
2312 case MediumChunk: |
|
2313 case ClassMediumChunk: |
|
2314 return MediumIndex; |
|
2315 default: |
|
2316 assert(size > MediumChunk || size > ClassMediumChunk, |
|
2317 "Not a humongous chunk"); |
|
2318 return HumongousIndex; |
|
2319 } |
|
2320 } |
|
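// For example, a request of exactly SmallChunk or ClassSmallChunk
// words maps to SmallIndex; any size that matches none of the fixed
// chunk sizes falls through to HumongousIndex.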
2321 |
|
2322 void SpaceManager::deallocate(MetaWord* p, size_t word_size) { |
|
2323 assert_lock_strong(_lock); |
|
2324 size_t raw_word_size = get_raw_word_size(word_size); |
|
2325 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size(); |
|
2326 assert(raw_word_size >= min_size, |
|
2327 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size)); |
|
2328 block_freelists()->return_block(p, raw_word_size); |
|
2329 } |
|
2330 |
|
2331 // Adds a chunk to the list of chunks in use. |
|
2332 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { |
|
2333 |
|
2334 assert(new_chunk != NULL, "Should not be NULL"); |
|
2335 assert(new_chunk->next() == NULL, "Should not be on a list"); |
|
2336 |
|
2337 new_chunk->reset_empty(); |
|
2338 |
|
2339 // Find the correct list and set the current |
|
2340 // chunk for that list. |
|
2341 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); |
|
2342 |
|
2343 if (index != HumongousIndex) { |
|
2344 retire_current_chunk(); |
|
2345 set_current_chunk(new_chunk); |
|
2346 new_chunk->set_next(chunks_in_use(index)); |
|
2347 set_chunks_in_use(index, new_chunk); |
|
2348 } else { |
|
2349 // For null class loader data and DumpSharedSpaces, the first chunk isn't |
|
2350 // small, so small will be null. Link this first chunk as the current |
|
2351 // chunk. |
|
2352 if (make_current) { |
|
2353 // Set as the current chunk but otherwise treat as a humongous chunk. |
|
2354 set_current_chunk(new_chunk); |
|
2355 } |
|
2356 // Link at head. The _current_chunk only points to a humongous chunk |
|
2357 // for the null class loader metaspace (class and data virtual space |
|
2358 // managers); since new humongous chunks are linked at the head, it |
|
2359 // will not point to the tail of the humongous chunks list. |
|
2360 new_chunk->set_next(chunks_in_use(HumongousIndex)); |
|
2361 set_chunks_in_use(HumongousIndex, new_chunk); |
|
2362 |
|
2363 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); |
|
2364 } |
|
2365 |
|
2366 // Add to the running sum of capacity |
|
2367 inc_size_metrics(new_chunk->word_size()); |
|
2368 |
|
2369 assert(new_chunk->is_empty(), "Not ready for reuse"); |
|
2370 if (TraceMetadataChunkAllocation && Verbose) { |
|
2371 gclog_or_tty->print("SpaceManager::add_chunk: %d) ", |
|
2372 sum_count_in_chunks_in_use()); |
|
2373 new_chunk->print_on(gclog_or_tty); |
|
2374 chunk_manager()->locked_print_free_chunks(gclog_or_tty); |
|
2375 } |
|
2376 } |
|
2377 |
|
2378 void SpaceManager::retire_current_chunk() { |
|
2379 if (current_chunk() != NULL) { |
|
2380 size_t remaining_words = current_chunk()->free_word_size(); |
|
2381 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { |
|
2382 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words); |
|
2383 inc_used_metrics(remaining_words); |
|
2384 } |
|
2385 } |
|
2386 } |
|
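// Note: leftover space smaller than
// TreeChunk<Metablock, FreeList<Metablock> >::min_size() cannot be
// put on the block free list and is simply abandoned ("dark matter")
// until the whole chunk is eventually returned.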
2387 |
|
2388 Metachunk* SpaceManager::get_new_chunk(size_t word_size, |
|
2389 size_t grow_chunks_by_words) { |
|
2390 // Get a chunk from the chunk freelist |
|
2391 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); |
|
2392 |
|
2393 if (next == NULL) { |
|
2394 next = vs_list()->get_new_chunk(word_size, |
|
2395 grow_chunks_by_words, |
|
2396 medium_chunk_bunch()); |
|
2397 } |
|
2398 |
|
2399 if (TraceMetadataHumongousAllocation && next != NULL && |
|
2400 SpaceManager::is_humongous(next->word_size())) { |
|
2401 gclog_or_tty->print_cr(" new humongous chunk word size " |
|
2402 PTR_FORMAT, next->word_size()); |
|
2403 } |
|
2404 |
|
2405 return next; |
|
2406 } |
|
2407 |
|
2408 MetaWord* SpaceManager::allocate(size_t word_size) { |
|
2409 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); |
|
2410 |
|
2411 size_t raw_word_size = get_raw_word_size(word_size); |
|
2412 BlockFreelist* fl = block_freelists(); |
|
2413 MetaWord* p = NULL; |
|
2414 // Allocation from the dictionary is expensive in the sense that |
|
2415 // the dictionary has to be searched for a size. Don't allocate |
|
2416 // from the dictionary until it starts to get fat. Is this |
|
2417 // a reasonable policy? Maybe a skinny dictionary is fast enough |
|
2418 // for allocations. Do some profiling. JJJ |
|
2419 if (fl->total_size() > allocation_from_dictionary_limit) { |
|
2420 p = fl->get_block(raw_word_size); |
|
2421 } |
|
2422 if (p == NULL) { |
|
2423 p = allocate_work(raw_word_size); |
|
2424 } |
|
2425 |
|
2426 return p; |
|
2427 } |
|
2428 |
|
2429 // Returns the address of the space allocated for "word_size". |
|
2430 // This method does not know about blocks (Metablocks). |
|
2431 MetaWord* SpaceManager::allocate_work(size_t word_size) { |
|
2432 assert_lock_strong(_lock); |
|
2433 #ifdef ASSERT |
|
2434 if (Metadebug::test_metadata_failure()) { |
|
2435 return NULL; |
|
2436 } |
|
2437 #endif |
|
2438 // Is there space in the current chunk? |
|
2439 MetaWord* result = NULL; |
|
2440 |
|
2441 // For DumpSharedSpaces, only allocate out of the current chunk which is |
|
2442 // never null because we gave it the size we wanted. Caller reports out |
|
2443 // of memory if this returns null. |
|
2444 if (DumpSharedSpaces) { |
|
2445 assert(current_chunk() != NULL, "should never happen"); |
|
2446 inc_used_metrics(word_size); |
|
2447 return current_chunk()->allocate(word_size); // caller handles null result |
|
2448 } |
|
2449 |
|
2450 if (current_chunk() != NULL) { |
|
2451 result = current_chunk()->allocate(word_size); |
|
2452 } |
|
2453 |
|
2454 if (result == NULL) { |
|
2455 result = grow_and_allocate(word_size); |
|
2456 } |
|
2457 |
|
2458 if (result != NULL) { |
|
2459 inc_used_metrics(word_size); |
|
2460 assert(result != (MetaWord*) chunks_in_use(MediumIndex), |
|
2461 "Head of the list is being allocated"); |
|
2462 } |
|
2463 |
|
2464 return result; |
|
2465 } |
|
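// Allocation therefore falls through three levels: the deallocated-
// block free list (once it is fat enough), then the current chunk,
// and finally grow_and_allocate(), which takes a fresh chunk from
// the chunk manager or the virtual space list.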
2466 |
|
2467 void SpaceManager::verify() { |
|
2468 // If there are blocks in the dictionary, then |
|
2469 // verification of chunks does not work since |
|
2470 // being in the dictionary alters a chunk. |
|
2471 if (block_freelists()->total_size() == 0) { |
|
2472 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { |
|
2473 Metachunk* curr = chunks_in_use(i); |
|
2474 while (curr != NULL) { |
|
2475 curr->verify(); |
|
2476 verify_chunk_size(curr); |
|
2477 curr = curr->next(); |
|
2478 } |
|
2479 } |
|
2480 } |
|
2481 } |
|
2482 |
|
2483 void SpaceManager::verify_chunk_size(Metachunk* chunk) { |
|
2484 assert(is_humongous(chunk->word_size()) || |
|
2485 chunk->word_size() == medium_chunk_size() || |
|
2486 chunk->word_size() == small_chunk_size() || |
|
2487 chunk->word_size() == specialized_chunk_size(), |
|
2488 "Chunk size is wrong"); |
|
2489 return; |
|
2490 } |
|
2491 |
|
2492 #ifdef ASSERT |
|
2493 void SpaceManager::verify_allocated_blocks_words() { |
|
2494 // Verification is only guaranteed at a safepoint. |
|
2495 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), |
|
2496 "Verification can fail if the applications is running"); |
|
2497 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), |
|
2498 err_msg("allocation total is not consistent " SIZE_FORMAT |
|
2499 " vs " SIZE_FORMAT, |
|
2500 allocated_blocks_words(), sum_used_in_chunks_in_use())); |
|
2501 } |
|
2502 |
|
2503 #endif |
|
2504 |
|
2505 void SpaceManager::dump(outputStream* const out) const { |
|
2506 size_t curr_total = 0; |
|
2507 size_t waste = 0; |
|
2508 uint i = 0; |
|
2509 size_t used = 0; |
|
2510 size_t capacity = 0; |
|
2511 |
|
2512 // Add up statistics for all chunks in this SpaceManager. |
|
2513 for (ChunkIndex index = ZeroIndex; |
|
2514 index < NumberOfInUseLists; |
|
2515 index = next_chunk_index(index)) { |
|
2516 for (Metachunk* curr = chunks_in_use(index); |
|
2517 curr != NULL; |
|
2518 curr = curr->next()) { |
|
2519 out->print("%d) ", i++); |
|
2520 curr->print_on(out); |
|
2521 curr_total += curr->word_size(); |
|
2522 used += curr->used_word_size(); |
|
2523 capacity += curr->word_size(); |
|
2524 waste += curr->free_word_size() + curr->overhead(); |
|
2525 } |
|
2526 } |
|
2527 |
|
2528 if (TraceMetadataChunkAllocation && Verbose) { |
|
2529 block_freelists()->print_on(out); |
|
2530 } |
|
2531 |
|
2532 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); |
|
2533 // Free space isn't wasted. |
|
2534 waste -= free; |
|
2535 |
|
2536 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT |
|
2537 " free " SIZE_FORMAT " capacity " SIZE_FORMAT |
|
2538 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); |
|
2539 } |
|
2540 |
|
2541 #ifndef PRODUCT |
|
2542 void SpaceManager::mangle_freed_chunks() { |
|
2543 for (ChunkIndex index = ZeroIndex; |
|
2544 index < NumberOfInUseLists; |
|
2545 index = next_chunk_index(index)) { |
|
2546 for (Metachunk* curr = chunks_in_use(index); |
|
2547 curr != NULL; |
|
2548 curr = curr->next()) { |
|
2549 curr->mangle(); |
|
2550 } |
|
2551 } |
|
2552 } |
|
2553 #endif // PRODUCT |
|
2554 |
|
2555 // MetaspaceAux |
|
2556 |
|
2557 |
|
2558 size_t MetaspaceAux::_capacity_words[] = {0, 0}; |
|
2559 size_t MetaspaceAux::_used_words[] = {0, 0}; |
|
2560 |
|
2561 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { |
|
2562 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); |
|
2563 return list == NULL ? 0 : list->free_bytes(); |
|
2564 } |
|
2565 |
|
2566 size_t MetaspaceAux::free_bytes() { |
|
2567 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); |
|
2568 } |
|
2569 |
|
2570 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { |
|
2571 assert_lock_strong(SpaceManager::expand_lock()); |
|
2572 assert(words <= capacity_words(mdtype), |
|
2573 err_msg("About to decrement below 0: words " SIZE_FORMAT |
|
2574 " is greater than _capacity_words[%u] " SIZE_FORMAT, |
|
2575 words, mdtype, capacity_words(mdtype))); |
|
2576 _capacity_words[mdtype] -= words; |
|
2577 } |
|
2578 |
|
2579 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { |
|
2580 assert_lock_strong(SpaceManager::expand_lock()); |
|
2581 // Needs to be atomic |
|
2582 _capacity_words[mdtype] += words; |
|
2583 } |
|
2584 |
|
2585 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { |
|
2586 assert(words <= used_words(mdtype), |
|
2587 err_msg("About to decrement below 0: words " SIZE_FORMAT |
|
2588 " is greater than _used_words[%u] " SIZE_FORMAT, |
|
2589 words, mdtype, used_words(mdtype))); |
|
2590 // For CMS deallocation of the Metaspaces occurs during the |
|
2591 // sweep which is a concurrent phase. Protection by the expand_lock() |
|
2592 // is not enough since allocation is on a per Metaspace basis |
|
2593 // and protected by the Metaspace lock. |
|
2594 jlong minus_words = -(jlong)words; |
|
2595 Atomic::add_ptr(minus_words, &_used_words[mdtype]); |
|
2596 } |
|
2597 |
|
2598 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { |
|
2599 // _used_words tracks allocations for |
|
2600 // each piece of metadata. Those allocations are |
|
2601 // generally done concurrently by different application |
|
2602 // threads so must be done atomically. |
|
2603 Atomic::add_ptr(words, &_used_words[mdtype]); |
|
2604 } |
|
2605 |
|
2606 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { |
|
2607 size_t used = 0; |
|
2608 ClassLoaderDataGraphMetaspaceIterator iter; |
|
2609 while (iter.repeat()) { |
|
2610 Metaspace* msp = iter.get_next(); |
|
2611 // Sum allocated_blocks_words for each metaspace |
|
2612 if (msp != NULL) { |
|
2613 used += msp->used_words_slow(mdtype); |
|
2614 } |
|
2615 } |
|
2616 return used * BytesPerWord; |
|
2617 } |
|
2618 |
|
2619 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { |
|
2620 size_t free = 0; |
|
2621 ClassLoaderDataGraphMetaspaceIterator iter; |
|
2622 while (iter.repeat()) { |
|
2623 Metaspace* msp = iter.get_next(); |
|
2624 if (msp != NULL) { |
|
2625 free += msp->free_words_slow(mdtype); |
|
2626 } |
|
2627 } |
|
2628 return free * BytesPerWord; |
|
2629 } |
|
2630 |
|
2631 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { |
|
2632 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { |
|
2633 return 0; |
|
2634 } |
|
2635 // Don't count the space in the freelists. That space will be |
|
2636 // added to the capacity calculation as needed. |
|
2637 size_t capacity = 0; |
|
2638 ClassLoaderDataGraphMetaspaceIterator iter; |
|
2639 while (iter.repeat()) { |
|
2640 Metaspace* msp = iter.get_next(); |
|
2641 if (msp != NULL) { |
|
2642 capacity += msp->capacity_words_slow(mdtype); |
|
2643 } |
|
2644 } |
|
2645 return capacity * BytesPerWord; |
|
2646 } |
|
2647 |
|
2648 size_t MetaspaceAux::capacity_bytes_slow() { |
|
2649 #ifdef PRODUCT |
|
2650 // Use capacity_bytes() in PRODUCT instead of this function. |
|
2651 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); |
|
2652 #endif |
|
2653 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); |
|
2654 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); |
|
2655 assert(capacity_bytes() == class_capacity + non_class_capacity, |
|
2656 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT |
|
2657 " class_capacity + non_class_capacity " SIZE_FORMAT |
|
2658 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, |
|
2659 capacity_bytes(), class_capacity + non_class_capacity, |
|
2660 class_capacity, non_class_capacity)); |
|
2661 |
|
2662 return class_capacity + non_class_capacity; |
|
2663 } |
|
2664 |
|
2665 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { |
|
2666 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); |
|
2667 return list == NULL ? 0 : list->reserved_bytes(); |
|
2668 } |
|
2669 |
|
2670 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { |
|
2671 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); |
|
2672 return list == NULL ? 0 : list->committed_bytes(); |
|
2673 } |
|
2674 |
|
2675 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } |
|
2676 |
|
2677 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { |
|
2678 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); |
|
2679 if (chunk_manager == NULL) { |
|
2680 return 0; |
|
2681 } |
|
2682 chunk_manager->slow_verify(); |
|
2683 return chunk_manager->free_chunks_total_words(); |
|
2684 } |
|
2685 |
|
2686 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { |
|
2687 return free_chunks_total_words(mdtype) * BytesPerWord; |
|
2688 } |
|
2689 |
|
2690 size_t MetaspaceAux::free_chunks_total_words() { |
|
2691 return free_chunks_total_words(Metaspace::ClassType) + |
|
2692 free_chunks_total_words(Metaspace::NonClassType); |
|
2693 } |
|
2694 |
|
2695 size_t MetaspaceAux::free_chunks_total_bytes() { |
|
2696 return free_chunks_total_words() * BytesPerWord; |
|
2697 } |
|
2698 |
|
2699 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { |
|
2700 return Metaspace::get_chunk_manager(mdtype) != NULL; |
|
2701 } |
|
2702 |
|
2703 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { |
|
2704 if (!has_chunk_free_list(mdtype)) { |
|
2705 return MetaspaceChunkFreeListSummary(); |
|
2706 } |
|
2707 |
|
2708 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); |
|
2709 return cm->chunk_free_list_summary(); |
|
2710 } |
|
2711 |
|
2712 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { |
|
2713 gclog_or_tty->print(", [Metaspace:"); |
|
2714 if (PrintGCDetails && Verbose) { |
|
2715 gclog_or_tty->print(" " SIZE_FORMAT |
|
2716 "->" SIZE_FORMAT |
|
2717 "(" SIZE_FORMAT ")", |
|
2718 prev_metadata_used, |
|
2719 used_bytes(), |
|
2720 reserved_bytes()); |
|
2721 } else { |
|
2722 gclog_or_tty->print(" " SIZE_FORMAT "K" |
|
2723 "->" SIZE_FORMAT "K" |
|
2724 "(" SIZE_FORMAT "K)", |
|
2725 prev_metadata_used/K, |
|
2726 used_bytes()/K, |
|
2727 reserved_bytes()/K); |
|
2728 } |
|
2729 |
|
2730 gclog_or_tty->print("]"); |
|
2731 } |
|
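// Produces output of the form (numbers are illustrative):
//   , [Metaspace: 40960K->40960K(1086464K)]
// i.e. used-before -> used-after (reserved), in the same style as
// the heap change lines printed by the collectors.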
2732 |
|
2733 // This is printed when PrintGCDetails is enabled |
|
2734 void MetaspaceAux::print_on(outputStream* out) { |
|
2735 Metaspace::MetadataType nct = Metaspace::NonClassType; |
|
2736 |
|
2737 out->print_cr(" Metaspace " |
|
2738 "used " SIZE_FORMAT "K, " |
|
2739 "capacity " SIZE_FORMAT "K, " |
|
2740 "committed " SIZE_FORMAT "K, " |
|
2741 "reserved " SIZE_FORMAT "K", |
|
2742 used_bytes()/K, |
|
2743 capacity_bytes()/K, |
|
2744 committed_bytes()/K, |
|
2745 reserved_bytes()/K); |
|
2746 |
|
2747 if (Metaspace::using_class_space()) { |
|
2748 Metaspace::MetadataType ct = Metaspace::ClassType; |
|
2749 out->print_cr(" class space " |
|
2750 "used " SIZE_FORMAT "K, " |
|
2751 "capacity " SIZE_FORMAT "K, " |
|
2752 "committed " SIZE_FORMAT "K, " |
|
2753 "reserved " SIZE_FORMAT "K", |
|
2754 used_bytes(ct)/K, |
|
2755 capacity_bytes(ct)/K, |
|
2756 committed_bytes(ct)/K, |
|
2757 reserved_bytes(ct)/K); |
|
2758 } |
|
2759 } |
|
2760 |
|
2761 // Print information for class space and data space separately. |
|
2762 // This is almost the same as above. |
|
2763 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { |
|
2764 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); |
|
2765 size_t capacity_bytes = capacity_bytes_slow(mdtype); |
|
2766 size_t used_bytes = used_bytes_slow(mdtype); |
|
2767 size_t free_bytes = free_bytes_slow(mdtype); |
|
2768 size_t used_and_free = used_bytes + free_bytes + |
|
2769 free_chunks_capacity_bytes; |
|
2770 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT |
|
2771 "K + unused in chunks " SIZE_FORMAT "K + " |
|
2772 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT |
|
2773 "K capacity in allocated chunks " SIZE_FORMAT "K", |
|
2774 used_bytes / K, |
|
2775 free_bytes / K, |
|
2776 free_chunks_capacity_bytes / K, |
|
2777 used_and_free / K, |
|
2778 capacity_bytes / K); |
|
2779 // Accounting can only be correct if we got the values during a safepoint |
|
2780 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); |
|
2781 } |
|
2782 |
|
2783 // Print total fragmentation for class metaspaces |
|
2784 void MetaspaceAux::print_class_waste(outputStream* out) { |
|
2785 assert(Metaspace::using_class_space(), "class metaspace not used"); |
|
2786 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; |
|
2787 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; |
|
2788 ClassLoaderDataGraphMetaspaceIterator iter; |
|
2789 while (iter.repeat()) { |
|
2790 Metaspace* msp = iter.get_next(); |
|
2791 if (msp != NULL) { |
|
2792 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); |
|
2793 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); |
|
2794 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); |
|
2795 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); |
|
2796 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); |
|
2797 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); |
|
2798 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); |
|
2799 } |
|
2800 } |
|
2801 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " |
|
2802 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " |
|
2803 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " |
|
2804 "large count " SIZE_FORMAT, |
|
2805 cls_specialized_count, cls_specialized_waste, |
|
2806 cls_small_count, cls_small_waste, |
|
2807 cls_medium_count, cls_medium_waste, cls_humongous_count); |
|
2808 } |
|
2809 |
|
2810 // Print total fragmentation for data and class metaspaces separately |
|
2811 void MetaspaceAux::print_waste(outputStream* out) { |
|
2812 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; |
|
2813 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; |
|
2814 |
|
2815 ClassLoaderDataGraphMetaspaceIterator iter; |
|
2816 while (iter.repeat()) { |
|
2817 Metaspace* msp = iter.get_next(); |
|
2818 if (msp != NULL) { |
|
2819 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); |
|
2820 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); |
|
2821 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); |
|
2822 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); |
|
2823 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); |
|
2824 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); |
|
2825 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); |
|
2826 } |
|
2827 } |
|
2828 out->print_cr("Total fragmentation waste (words) doesn't count free space"); |
|
2829 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " |
|
2830 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " |
|
2831 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " |
|
2832 "large count " SIZE_FORMAT, |
|
2833 specialized_count, specialized_waste, small_count, |
|
2834 small_waste, medium_count, medium_waste, humongous_count); |
|
2835 if (Metaspace::using_class_space()) { |
|
2836 print_class_waste(out); |
|
2837 } |
|
2838 } |
|
2839 |
|
2840 // Dump global metaspace things from the end of ClassLoaderDataGraph |
|
2841 void MetaspaceAux::dump(outputStream* out) { |
|
2842 out->print_cr("All Metaspace:"); |
|
2843 out->print("data space: "); print_on(out, Metaspace::NonClassType); |
|
2844 out->print("class space: "); print_on(out, Metaspace::ClassType); |
|
2845 print_waste(out); |
|
2846 } |
|
2847 |
|
2848 void MetaspaceAux::verify_free_chunks() { |
|
2849 Metaspace::chunk_manager_metadata()->verify(); |
|
2850 if (Metaspace::using_class_space()) { |
|
2851 Metaspace::chunk_manager_class()->verify(); |
|
2852 } |
|
2853 } |

void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
  size_t running_sum_capacity_bytes = capacity_bytes();
  // Verify the running sum of capacity against the value computed
  // by walking all the metaspaces.
  size_t capacity_in_use_bytes = capacity_bytes_slow();
  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
    err_msg("capacity_bytes() " SIZE_FORMAT
            " capacity_bytes_slow() " SIZE_FORMAT,
            running_sum_capacity_bytes, capacity_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
    assert(capacity_bytes(i) == capacity_in_use_bytes,
      err_msg("capacity_bytes(%u) " SIZE_FORMAT
              " capacity_bytes_slow(%u) " SIZE_FORMAT,
              i, capacity_bytes(i), i, capacity_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_used() {
#ifdef ASSERT
  size_t running_sum_used_bytes = used_bytes();
  // Verify the running sum of used bytes against the value computed
  // by walking all the metaspaces.
  size_t used_in_use_bytes = used_bytes_slow();
  assert(running_sum_used_bytes == used_in_use_bytes,
    err_msg("used_bytes() " SIZE_FORMAT
            " used_bytes_slow() " SIZE_FORMAT,
            running_sum_used_bytes, used_in_use_bytes));
  for (Metaspace::MetadataType i = Metaspace::ClassType;
       i < Metaspace::MetadataTypeCount;
       i = (Metaspace::MetadataType)(i + 1)) {
    size_t used_in_use_bytes = used_bytes_slow(i);
    assert(used_bytes(i) == used_in_use_bytes,
      err_msg("used_bytes(%u) " SIZE_FORMAT
              " used_bytes_slow(%u) " SIZE_FORMAT,
              i, used_bytes(i), i, used_in_use_bytes));
  }
#endif
}

void MetaspaceAux::verify_metrics() {
  verify_capacity();
  verify_used();
}


// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}

Metaspace::~Metaspace() {
  delete _vsm;
  if (using_class_space()) {
    delete _class_vsm;
  }
}

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else {
    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;

    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // effectively lower base is zero.
    }
  }

  Universe::set_narrow_klass_base(lower_base);

  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
}
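
// Editor's note: a minimal sketch of the decode that the base/shift chosen
// above enables.  This is illustrative only; the real decode lives in the
// compressed klass pointer encoding code, not here, and `narrow` stands for
// a 32-bit narrowKlass value:
//
//   Klass* decode(narrowKlass narrow) {
//     return (Klass*)(Universe::narrow_klass_base() +
//                     ((uintptr_t)narrow << Universe::narrow_klass_shift()));
//   }
//
// With shift == 0 the 32-bit value reaches 4G above the base
// (UnscaledClassSpaceMax); with shift == LogKlassAlignmentInBytes (3) it
// reaches 32G.  The assert above reflects that CDS requires the unscaled
// (shift == 0) encoding.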

// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                (address)(metaspace_base + compressed_class_space_size()));
  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
  assert_is_ptr_aligned(cds_base, _reserve_alignment);
  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;

  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                             _reserve_alignment,
                                             large_pages,
                                             requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
    if (UseSharedSpaces) {
      size_t increment = align_size_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr, 0);
      }
    }

    // If no successful allocation then try to allocate the space anywhere.  If
    // that fails then OOM doom.  At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }

  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                           Universe::narrow_klass_base(), Universe::narrow_klass_shift());
    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so
  // would overwrite the record of whether MaxMetaspaceSize was set on the
  // command line.  This information is needed later to conform to the
  // specification of the java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }

  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
  set_compressed_class_space_size(CompressedClassSpaceSize);
}
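
// Editor's note: a worked example of the clamping above, assuming a 4K page
// size and a 64K allocation granularity (the Windows default), which gives
// _commit_alignment == 4K and _reserve_alignment == 64K:
//
//   -XX:MaxMetaspaceSize=100000K -> align_size_down_bounded(100000K, 64K)
//                                 = 99968K (rounded down to a 64K multiple)
//   -XX:MetaspaceSize=21808K     -> capped at MaxMetaspaceSize first, then
//                                   aligned down to a 4K multiple: 21808K.
//
// As its name suggests, align_size_down_bounded() also bounds the result
// from below, so a tiny flag value can never align down to zero.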

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting narrow_klass_base to Address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    Universe::set_narrow_klass_shift(0);
#endif

  } else {
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict).
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();
      memset(mapinfo, 0, sizeof(FileMapInfo));

      // Open the shared archive file, read and validate the header.  If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // The spaces are also mapped in now.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->region_base(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }

#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
        char* cds_end = (char*)(cds_address + cds_total);
        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      } else {
        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(base, 0);
      }
    }
#endif

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list.  The next chunk will be small and progress
    // from there.  This size was determined by running -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
    }
  }

  _tracer = new MetaspaceTracer();
}
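
// Editor's note: a worked example of the first-class-chunk sizing above,
// under assumed default 64-bit settings rather than a measured build: with
// MediumChunk == 8K words, MediumChunk*6 == 48K words; with the default
// CompressedClassSpaceSize of 1G and 8-byte words, (1G/8)*2 == 256M words.
// MIN2 therefore selects 48K words (384 KB) for _first_class_chunk_word_size;
// the second operand only wins, and so only shrinks the first chunk, when
// CompressedClassSpaceSize is configured extremely small.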

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
                                               size_t chunk_word_size,
                                               size_t chunk_bunch) {
  // Get a chunk from the chunk freelist
  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  if (chunk != NULL) {
    return chunk;
  }

  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {

  assert(space_list() != NULL,
         "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL,
         "Metadata ChunkManager has not been initialized");

  _vsm = new SpaceManager(NonClassType, lock);
  if (_vsm == NULL) {
    return;
  }
  size_t word_size;
  size_t class_word_size;
  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);

  if (using_class_space()) {
    assert(class_space_list() != NULL,
           "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL,
           "Class ChunkManager has not been initialized");

    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(ClassType, lock);
    if (_class_vsm == NULL) {
      return;
    }
  }

  MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
                                                  word_size,
                                                  vsm()->medium_chunk_bunch());
  assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  if (new_chunk != NULL) {
    // Add to this manager's list of chunks in use and current_chunk().
    vsm()->add_chunk(new_chunk, true);
  }

  // Allocate chunk for class metadata objects
  if (using_class_space()) {
    Metachunk* class_chunk = get_initialization_chunk(ClassType,
                                                      class_word_size,
                                                      class_vsm()->medium_chunk_bunch());
    if (class_chunk != NULL) {
      class_vsm()->add_chunk(class_chunk, true);
    }
  }

  _alloc_record_head = NULL;
  _alloc_record_tail = NULL;
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}
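
// Editor's note: a worked example of align_word_size_up(), assuming 8-byte
// words and that ReservedSpace::allocation_align_size_up() rounds up to
// os::vm_allocation_granularity(), here taken to be 64K (platform-dependent;
// 4K is common on Linux):
//
//   align_word_size_up(512)   // 512 words == 4096 bytes
//     -> allocation_align_size_up(4096) == 65536 bytes
//     -> 65536 / 8 == 8192 words
//
// i.e. the result is the word count rounded up to a whole allocation unit.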

MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  // DumpSharedSpaces doesn't use class metadata area (yet)
  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  if (is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);

  // capacity_until_GC might be updated concurrently, must calculate previous value.
  size_t before_inc = after_inc - delta_bytes;

  tracer()->report_gc_threshold(before_inc, after_inc,
                                MetaspaceGCThresholdUpdater::ExpandAndAllocate);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
                           " to " SIZE_FORMAT, before_inc, after_inc);
  }

  return allocate(word_size, mdtype);
}
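
// Editor's note: why before_inc is derived rather than read back.  If two
// threads race through inc_capacity_until_GC(), each sees a different
// "after" value but can still report a consistent, non-overlapping range.
// A sketch with invented values:
//
//   capacity_until_GC starts at 100M.
//   Thread A: inc(4M) returns 104M -> reports 100M -> 104M
//   Thread B: inc(8M) returns 112M -> reports 104M -> 112M
//
// Reading capacity_until_GC() separately before the increment could instead
// report overlapping or torn ranges when the two increments interleave.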

// Space allocated in the Metaspace.  This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
  assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
  return (char*)vsm()->current_chunk()->bottom();
}

size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  }
}

size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_free_in_chunks_in_use();
  }
}

// Space capacity in the Metaspace.  It includes
// space in the list of chunks from which allocations
// have been made.  It does not include space in the global
// freelist; space available in the block dictionary is
// already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  if (mdtype == ClassType) {
    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  } else {
    return vsm()->sum_capacity_in_chunks_in_use();
  }
}

size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  return used_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(), "should be the VM thread");
    // Don't take Heap_lock
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  } else {
    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
      // Dark matter.  Too small for dictionary.
#ifdef ASSERT
      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
      return;
    }
    if (is_class && using_class_space()) {
      class_vsm()->deallocate(ptr, word_size);
    } else {
      vsm()->deallocate(ptr, word_size);
    }
  }
}
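
// Editor's note: the "dark matter" threshold above is
// TreeChunk<Metablock, FreeList<Metablock> >::min_size(), the smallest block
// the freelist dictionary can track.  Blocks below it are not recycled at
// all; in debug builds they are filled with the 0xf5f5f5f5 pattern so stale
// uses are easier to spot.  Larger blocks are handed back to the owning
// SpaceManager for reuse by later allocations.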


MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }

    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));

    // Zero initialize.
    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

    return result;
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.

      // Try to clean out some memory and retry.
      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
          loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
  }

  // Zero initialize.
  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

  return result;
}
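
// Editor's note: a condensed view of the retry protocol above (a sketch of
// the control flow, not additional code):
//
//   result = metaspace->allocate(word_size, mdtype);     // fast path
//   if (result == NULL && is_init_completed())           // GC-assisted retry
//     result = satisfy_failed_metadata_allocation(...);  // may collect + expand
//   if (result == NULL)
//     report_metadata_oome(...);                         // throws OOME or exits
//
// The GC retry is deliberately skipped during bootstrap, so early failures go
// straight to report_metadata_oome(), which exits the VM before init completes.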

size_t Metaspace::class_chunk_size(size_t word_size) {
  assert(using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // If result is still null, we are out of memory.
  if (Verbose && TraceMetadataChunkAllocation) {
    gclog_or_tty->print_cr("Metaspace allocation failed for size "
                           SIZE_FORMAT, word_size);
    if (loader_data->metaspace_or_null() != NULL) {
      loader_data->dump(gclog_or_tty);
    }
    MetaspaceAux::dump(gclog_or_tty);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    Metaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}
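
// Editor's note: the out_of_compressed_class_space test above distinguishes
// the two OOME flavors by simple arithmetic.  An illustrative case (the
// numbers are invented): with -XX:CompressedClassSpaceSize=128M, committed
// class space of 127M, and a failed request whose chunk would be 2M,
// 127M + 2M > 128M, so the error is reported as "Compressed class space"
// rather than "Metaspace", steering users toward the right flag.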

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
      return NULL;
  }
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  }
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }
}

void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

void Metaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

bool Metaspace::contains(const void* ptr) {
  if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
    return true;
  }

  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

void Metaspace::verify() {
  vsm()->verify();
  if (using_class_space()) {
    class_vsm()->verify();
  }
}

void Metaspace::dump(outputStream* const out) const {
  out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  vsm()->dump(out);
  if (using_class_space()) {
    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
    class_vsm()->dump(out);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetaspaceAuxTest : AllStatic {
 public:
  static void test_reserved() {
    size_t reserved = MetaspaceAux::reserved_bytes();

    assert(reserved > 0, "assert");

    size_t committed = MetaspaceAux::committed_bytes();
    assert(committed <= reserved, "assert");

    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
    assert(reserved_metadata > 0, "assert");
    assert(reserved_metadata <= reserved, "assert");

    if (UseCompressedClassPointers) {
      size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
      assert(reserved_class > 0, "assert");
      assert(reserved_class < reserved, "assert");
    }
  }

  static void test_committed() {
    size_t committed = MetaspaceAux::committed_bytes();

    assert(committed > 0, "assert");

    size_t reserved = MetaspaceAux::reserved_bytes();
    assert(committed <= reserved, "assert");

    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
    assert(committed_metadata > 0, "assert");
    assert(committed_metadata <= committed, "assert");

    if (UseCompressedClassPointers) {
      size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
      assert(committed_class > 0, "assert");
      assert(committed_class < committed, "assert");
    }
  }

  static void test_virtual_space_list_large_chunk() {
    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    // Pick a size larger than the VirtualSpaceSize (256k) and add one page so
    // that the request is _not_ vm_allocation_granularity aligned on Windows.
    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
    large_size += (os::vm_page_size()/BytesPerWord);
    vs_list->get_new_chunk(large_size, large_size, 0);
  }

  static void test() {
    test_reserved();
    test_committed();
    test_virtual_space_list_large_chunk();
  }
};

void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
}

class TestVirtualSpaceNodeTest {
  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
                       size_t& num_small_chunks,
                       size_t& num_specialized_chunks) {
    num_medium_chunks = words_left / MediumChunk;
    words_left = words_left % MediumChunk;

    num_small_chunks = words_left / SmallChunk;
    words_left = words_left % SmallChunk;
    // how many specialized chunks can we get?
    num_specialized_chunks = words_left / SpecializedChunk;
    assert(words_left % SpecializedChunk == 0, "should be nothing left");
  }
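
  // Editor's note: a worked example of chunk_up() with the non-class chunk
  // sizes defined at the top of this file (SpecializedChunk == 128,
  // SmallChunk == 512, MediumChunk == 8K words):
  //
  //   chunk_up(1664, m, s, sp) -> m  == 0  (1664 < 8K)
  //                               s  == 3  (3 * 512 == 1536, remainder 128)
  //                               sp == 1  (128 / 128)
  //
  // The final assert holds because SmallChunk and MediumChunk are multiples
  // of SpecializedChunk, which the STATIC_ASSERTs in test() below check.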

 public:
  static void test() {
    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
    const size_t vsn_test_size_words = MediumChunk * 4;
    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;

    // The chunk sizes must be multiples of each other, or this will fail
    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);

    { // No committed memory in VSN
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
    }

    { // All of VSN is committed, half is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
      vsn.get_chunk_vs(MediumChunk);
      vsn.get_chunk_vs(MediumChunk);
      vsn.retire(&cm);
      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
    }

    { // 4 pages of VSN is committed, some is used by chunks
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
      vsn.initialize();
      vsn.expand_by(page_chunks, page_chunks);
      vsn.get_chunk_vs(SmallChunk);
      vsn.get_chunk_vs(SpecializedChunk);
      vsn.retire(&cm);

      // committed - used = words left to retire
      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;

      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

    { // Half of VSN is committed, a humongous chunk is used
      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
      VirtualSpaceNode vsn(vsn_test_size_bytes);
      vsn.initialize();
      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
      vsn.retire(&cm);

      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);

      assert(num_medium_chunks == 0, "should not get any medium chunks");
      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
    }

  }

#define assert_is_available_positive(word_size) \
  assert(vsn.is_available(word_size), \
    err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));

#define assert_is_available_negative(word_size) \
  assert(!vsn.is_available(word_size), \
    err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
            "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
            (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));

  static void test_is_available_positive() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available accepts the committed size.
    assert_is_available_positive(commit_word_size);

    // Check that is_available accepts half the committed size.
    size_t expand_word_size = commit_word_size / 2;
    assert_is_available_positive(expand_word_size);
  }

  static void test_is_available_negative() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Check that is_available doesn't accept a too large size.
    size_t two_times_commit_word_size = commit_word_size * 2;
    assert_is_available_negative(two_times_commit_word_size);
  }

  static void test_is_available_overflow() {
    // Reserve some memory.
    VirtualSpaceNode vsn(os::vm_allocation_granularity());
    assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");

    // Commit some memory.
    size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
    bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
    assert(expanded, "Failed to commit");

    // Calculate a size that will overflow the virtual space size.
    void* virtual_space_max = (void*)(uintptr_t)-1;
    size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
    size_t overflow_size = bottom_to_max + BytesPerWord;
    size_t overflow_word_size = overflow_size / BytesPerWord;

    // Check that is_available can handle the overflow.
    assert_is_available_negative(overflow_word_size);
  }
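
  // Editor's note on the overflow case above: overflow_word_size is chosen
  // so that vsn.bottom() + overflow_word_size * BytesPerWord lands one word
  // past the top of the address space.  For instance (an illustration, not
  // a real trace), with vsn.bottom() == 0x7f0000000000 on a 64-bit machine,
  // bottom_to_max is roughly 2^64 - 0x7f0000000000 bytes; asking for one
  // word more than that must make is_available() return false rather than
  // wrapping around and "succeeding".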

  static void test_is_available() {
    TestVirtualSpaceNodeTest::test_is_available_positive();
    TestVirtualSpaceNodeTest::test_is_available_negative();
    TestVirtualSpaceNodeTest::test_is_available_overflow();
  }
};

void TestVirtualSpaceNode_test() {
  TestVirtualSpaceNodeTest::test();
  TestVirtualSpaceNodeTest::test_is_available();
}
#endif