Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
}
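
// The segment map (_segmap) has one byte per segment. A free segment is
// marked with 0xFF. Within an allocated block the bytes count up from 0 at
// the block's first segment (wrapping back to 1 before reaching 0xFF), so
// find_start() can locate the enclosing block header from any address by
// repeatedly stepping backwards by the stored value until it reads 0.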
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = 0xFF;
}

void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == 0xFF) i = 1;
  }
}

static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}

void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
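
// Reserves the address range for the code heap, commits the initial portion,
// and sets up a segment map large enough to cover the entire reserved range.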
bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(committed_size, reserved_size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);

  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");

  // reserve space for _segmap
  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()              , "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}

void CodeHeap::release() {
  Unimplemented();
}

bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}

void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}

void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}
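
// Allocation first tries a best-fit search of the freelist; if no suitable
// free block is found, the request is served by bumping _next_segment into
// committed but not yet used space. Non-critical allocations must leave
// CodeCacheMinimumFreeSpace untouched, so that critical allocations can
// still succeed when the heap is nearly full.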
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy the request from the freelist
  debug_only(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  debug_only(if (VerifyCodeCacheOften) verify());
  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
#ifdef ASSERT
    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  if (number_of_segments < CodeCacheMinBlockLength) {
    number_of_segments = CodeCacheMinBlockLength;
  }

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using
    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      // Fail allocation
      return NULL;
    }
  }

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
#ifdef ASSERT
    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return b->allocated_space();
  } else {
    return NULL;
  }
}

void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         segments_to_size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
}

void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
    // Segment is free, so p is not inside an allocated block
    return NULL;
  }
  // Walk backwards through the segment map until the block's first
  // segment (marked with 0) is reached
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}

size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}
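
// The offset above follows from every block starting on a segment boundary,
// with user data beginning immediately after the HeapBlock header.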

// Finds the next free heapblock. If the current one is free, it is returned.
void* CodeHeap::next_free(HeapBlock *b) const {
  // Since free blocks are merged, there is at most one free block
  // between two used ones
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first block (used or free)
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock *CodeHeap::block_start(void *q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the block following b, or NULL if b is the last block
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}

// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns the size of the unallocated heap space
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management
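//
// The freelist is a singly-linked list of FreeBlocks, threaded through the
// free blocks themselves and kept sorted by increasing address. Adjacent
// free blocks are coalesced on insertion (see merge_right), so there is at
// most one free block between two used ones.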

FreeBlock *CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}

void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Scan for the right place to put b into the list. The list
  // is sorted by increasing addresses
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur  = cur->link();
  }

  assert((prev == NULL && b < _freelist) ||
         (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}

// Search the freelist for the entry with the best fit.
// Returns NULL if no suitable entry was found.
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev  = NULL;
  size_t best_length = 0;

  // Search for the smallest block that is at least as large as length
  FreeBlock *prev = NULL;
  FreeBlock *cur  = _freelist;
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {

      // Non-critical allocations are not allowed to use the last part of the code heap.
      if (!is_critical) {
        // Make sure the end of the allocation doesn't cross into the last part of the code heap
        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
          // the freelist is sorted by address - if one fails, all consecutive will also fail.
          break;
        }
      }

      // Remember best block, its previous element, and its length
      best_block  = cur;
      best_prev   = prev;
      best_length = best_block->length();
    }

    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (best_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      // Unlink the element from the list
      best_prev->set_link(best_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }

  best_block->set_used();
  _freelist_segments -= length;
  return best_block;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

#endif

void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }

  // Verify that the freelist contains the right amount of free space
  // guarantee(len == _freelist_segments, "wrong freelist");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit
    free_block_threshold *= 2;
  }

  // Verify that the freelist contains the same number of free blocks as are
  // found by walking the full list of blocks.
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  // guarantee(count == 0, "missing free blocks");
}