Mon, 12 Aug 2019 18:30:40 +0300
8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of CodeHeap

CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
}


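// The segment map (_segmap) holds one byte per segment. A free segment is
// marked 0xFF. Within an allocated block the first segment is marked 0 and
// each following segment stores a hop distance (1..0xFE) back towards the
// block start, so find_start() can recover the owning HeapBlock from any
// address inside the block.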
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = 0xFF;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == 0xFF) i = 1; // 0xFF marks free segments; wrap back to 1 instead
  }
}


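// For example, with a 4K page size align_to_page_size(5000) below returns
// 8192, while align_to_page_size(4096) returns 4096 unchanged.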
static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


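// reserve() backs the heap with two virtual spaces: _memory holds the code
// itself and _segmap holds one byte per segment of the reserved space.
// Both are reserved up front and committed incrementally (see expand_by).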
bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    page_size = os::page_size_for_region_unaligned(reserved_size, 8);
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);

  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
                          MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments, "could not reserve enough space for segment map");
  assert(_segmap.reserved_size() >= _segmap.committed_size(), "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}


void CodeHeap::release() {
  Unimplemented();
}


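// expand_by() grows the committed portion of _memory in page-aligned steps,
// grows _segmap to match, and marks the newly committed segments as free.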
bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}


void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}


void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


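// allocate() first tries to reuse a block from the freelist (best fit);
// only if that fails does it carve fresh segments off the committed space
// at _next_segment. Non-critical allocations additionally keep
// CodeCacheMinimumFreeSpace bytes in reserve for critical ones.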
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy the request from the freelist
  debug_only(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  debug_only(if (VerifyCodeCacheOften) verify());
  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
#ifdef ASSERT
    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  if (number_of_segments < CodeCacheMinBlockLength) {
    number_of_segments = CodeCacheMinBlockLength;
  }

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using
    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      // Fail allocation
      return NULL;
    }
  }

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
#ifdef ASSERT
    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return b->allocated_space();
  } else {
    return NULL;
  }
}


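// deallocate() returns a block to the address-sorted freelist, merging it
// with adjacent free neighbors where possible. In debug builds the freed
// space is poisoned with badCodeHeapFreeVal to help catch stale references.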
void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         segments_to_size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
}


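// find_start() maps an arbitrary address to the start of its block by
// walking the segment map backwards. For example, for an address in
// segment 300 of a block starting at segment 0, the map holds 46 at
// segment 300 (counters restart at 1 after 0xFE), 254 at segment 254, and
// 0 at segment 0, so the start is found in two hops.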
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
    return NULL;
  }
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}


size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the allocated space of the next used heap block. If the passed
// block is free it is skipped; since free blocks are merged, there is at
// most one free block between two used ones.
void* CodeHeap::next_free(HeapBlock *b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first block in the heap (which may be free), or NULL if
// nothing has been allocated yet
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock *CodeHeap::block_start(void *q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the heap block following b, or NULL if b is the last block
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns the size of the unallocated part of the heap
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

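// The freelist is a singly linked list of FreeBlocks, sorted by increasing
// address. Adjacent free blocks are always merged (see merge_right), so
// two neighboring list entries are never contiguous in memory.
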
FreeBlock *CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}

void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  assert(b != _freelist, "cannot be added twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Scan for the right place to put into the list. The list
  // is sorted by increasing addresses
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  while (cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur  = cur->link();
  }

  assert((prev == NULL && b < _freelist) ||
         (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}

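// search_freelist() performs a best-fit scan: the smallest free block with
// at least `length` segments wins. If splitting the winner would leave a
// fragment smaller than CodeCacheMinBlockLength, the whole block is handed
// out; otherwise the block is truncated and its tail is returned.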
// Search the freelist for the entry with the best fit.
// Returns NULL if none was found.
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev  = NULL;
  size_t best_length = 0;

  // Search for the smallest block which is at least as large as length
  FreeBlock *prev = NULL;
  FreeBlock *cur  = _freelist;
  while (cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {

      // Non-critical allocations are not allowed to use the last part of the code heap.
      if (!is_critical) {
        // Make sure the end of the allocation doesn't cross into the last part of the code
        // heap. Note that length is in segments and must be converted to bytes here.
        if (((size_t)cur + segments_to_size(length)) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
          // the freelist is sorted by address - if one fails, all consecutive will also fail.
          break;
        }
      }

      // Remember best block, its previous element, and its length
      best_block  = cur;
      best_prev   = prev;
      best_length = best_block->length();
    }

    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (best_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      // Unlink element
      best_prev->set_link(best_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }

  best_block->set_used();
  _freelist_segments -= length;
  return best_block;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

#endif

void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;
  for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }

  // Verify that freelist contains the right amount of free space
  // guarantee(len == _freelist_segments, "wrong freelist");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit
    free_block_threshold *= 2;
  }

  // Verify that the freelist contains the same number of free blocks that is
  // found on the full list.
  for (HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  // guarantee(count == 0, "missing free blocks");
}