Mon, 26 Jan 2009 12:47:21 -0800
6786503: Overflow list performance can be improved
Summary: Avoid overflow list walk in CMS & ParNew when it is unnecessary. Fix a couple of correctness issues, including a C-heap leak, in ParNew at the intersection of promotion failure, work queue overflow and object array chunking. Add stress testing option and related assertion checking.
Reviewed-by: jmasa
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_heap.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | |
duke@435 | 29 | size_t CodeHeap::header_size() { |
duke@435 | 30 | return sizeof(HeapBlock); |
duke@435 | 31 | } |
duke@435 | 32 | |
duke@435 | 33 | |
duke@435 | 34 | // Implementation of Heap |
duke@435 | 35 | |
duke@435 | 36 | CodeHeap::CodeHeap() { |
duke@435 | 37 | _number_of_committed_segments = 0; |
duke@435 | 38 | _number_of_reserved_segments = 0; |
duke@435 | 39 | _segment_size = 0; |
duke@435 | 40 | _log2_segment_size = 0; |
duke@435 | 41 | _next_segment = 0; |
duke@435 | 42 | _freelist = NULL; |
duke@435 | 43 | _free_segments = 0; |
duke@435 | 44 | } |
duke@435 | 45 | |
duke@435 | 46 | |
duke@435 | 47 | void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) { |
duke@435 | 48 | assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds"); |
duke@435 | 49 | assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds"); |
duke@435 | 50 | // setup _segmap pointers for faster indexing |
duke@435 | 51 | address p = (address)_segmap.low() + beg; |
duke@435 | 52 | address q = (address)_segmap.low() + end; |
duke@435 | 53 | // initialize interval |
duke@435 | 54 | while (p < q) *p++ = 0xFF; |
duke@435 | 55 | } |
duke@435 | 56 | |
duke@435 | 57 | |
duke@435 | 58 | void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) { |
duke@435 | 59 | assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds"); |
duke@435 | 60 | assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds"); |
duke@435 | 61 | // setup _segmap pointers for faster indexing |
duke@435 | 62 | address p = (address)_segmap.low() + beg; |
duke@435 | 63 | address q = (address)_segmap.low() + end; |
duke@435 | 64 | // initialize interval |
duke@435 | 65 | int i = 0; |
duke@435 | 66 | while (p < q) { |
duke@435 | 67 | *p++ = i++; |
duke@435 | 68 | if (i == 0xFF) i = 1; |
duke@435 | 69 | } |
duke@435 | 70 | } |
duke@435 | 71 | |
duke@435 | 72 | |
duke@435 | 73 | static size_t align_to_page_size(size_t size) { |
duke@435 | 74 | const size_t alignment = (size_t)os::vm_page_size(); |
duke@435 | 75 | assert(is_power_of_2(alignment), "no kidding ???"); |
duke@435 | 76 | return (size + alignment - 1) & ~(alignment - 1); |
duke@435 | 77 | } |
duke@435 | 78 | |
duke@435 | 79 | |
duke@435 | 80 | static size_t align_to_allocation_size(size_t size) { |
duke@435 | 81 | const size_t alignment = (size_t)os::vm_allocation_granularity(); |
duke@435 | 82 | assert(is_power_of_2(alignment), "no kidding ???"); |
duke@435 | 83 | return (size + alignment - 1) & ~(alignment - 1); |
duke@435 | 84 | } |
duke@435 | 85 | |
duke@435 | 86 | |
// Hook invoked whenever a new range of code memory becomes available
// (from the initial reserve() and from each expand_by()).
// NOTE(review): linux_wrap_code's contract is not visible from this
// file -- presumably it registers the executable range with a
// Linux-specific observer; confirm against its definition.
void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
duke@435 | 93 | |
duke@435 | 94 | |
// One-time setup: reserves 'reserved_size' bytes of address space for
// the code heap, commits the first 'committed_size' bytes, and sets up
// the parallel segment map (one byte per segment).  'segment_size' is
// the allocation granularity; it must be a power of 2 and at least
// sizeof(FreeBlock).  Returns false if either the backing memory or
// the segment map cannot be initialized.
bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  // Prefer large pages when the platform can execute code from them.
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(committed_size, reserved_size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);

  // A non-default page size forces the reservation itself to be aligned
  // to that page size (rs_align == 0 means "default alignment").
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = number_of_segments(_memory.committed_size());
  _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");

  // Reserve space for _segmap: one byte per reserved segment, with the
  // committed prefix covering all committed segments.
  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
    return false;
  }
  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}
duke@435 | 139 | |
duke@435 | 140 | |
// Releasing the code heap's backing memory is not supported; reaching
// this is a VM error.
void CodeHeap::release() {
  Unimplemented();
}
duke@435 | 144 | |
duke@435 | 145 | |
// Grows the committed part of the heap by at least 'size' bytes
// (rounded up to a page multiple) and extends the segment map to
// match.  Returns false if either commit fails.
bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // Capture the old high-water mark before expanding: it is the base
    // of the newly mapped range passed to on_code_mapping().
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    // Remember the old segment count so only the new entries get
    // initialized below.
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = number_of_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}
duke@435 | 168 | |
duke@435 | 169 | |
// Shrinking the committed region is not supported; reaching this is a
// VM error.
void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}
duke@435 | 173 | |
duke@435 | 174 | |
// Resets the heap to the empty state: no segment handed out and the
// whole committed range marked free in the segment map.
// NOTE(review): _freelist and _free_segments are NOT reset here; the
// only caller visible in this file is reserve(), where both still hold
// their initial values -- confirm before calling clear() elsewhere.
void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}
duke@435 | 179 | |
duke@435 | 180 | |
duke@435 | 181 | void* CodeHeap::allocate(size_t size) { |
duke@435 | 182 | size_t length = number_of_segments(size + sizeof(HeapBlock)); |
duke@435 | 183 | assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList"); |
duke@435 | 184 | |
duke@435 | 185 | // First check if we can satify request from freelist |
duke@435 | 186 | debug_only(verify()); |
duke@435 | 187 | HeapBlock* block = search_freelist(length); |
duke@435 | 188 | debug_only(if (VerifyCodeCacheOften) verify()); |
duke@435 | 189 | if (block != NULL) { |
duke@435 | 190 | assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check"); |
duke@435 | 191 | assert(!block->free(), "must be marked free"); |
duke@435 | 192 | #ifdef ASSERT |
duke@435 | 193 | memset((void *)block->allocated_space(), badCodeHeapNewVal, size); |
duke@435 | 194 | #endif |
duke@435 | 195 | return block->allocated_space(); |
duke@435 | 196 | } |
duke@435 | 197 | |
duke@435 | 198 | if (length < CodeCacheMinBlockLength) { |
duke@435 | 199 | length = CodeCacheMinBlockLength; |
duke@435 | 200 | } |
duke@435 | 201 | if (_next_segment + length <= _number_of_committed_segments) { |
duke@435 | 202 | mark_segmap_as_used(_next_segment, _next_segment + length); |
duke@435 | 203 | HeapBlock* b = block_at(_next_segment); |
duke@435 | 204 | b->initialize(length); |
duke@435 | 205 | _next_segment += length; |
duke@435 | 206 | #ifdef ASSERT |
duke@435 | 207 | memset((void *)b->allocated_space(), badCodeHeapNewVal, size); |
duke@435 | 208 | #endif |
duke@435 | 209 | return b->allocated_space(); |
duke@435 | 210 | } else { |
duke@435 | 211 | return NULL; |
duke@435 | 212 | } |
duke@435 | 213 | } |
duke@435 | 214 | |
duke@435 | 215 | |
// Returns the block whose usable space starts at 'p' to the freelist.
// 'p' must be a pointer previously handed out by allocate().
void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  // Poison the freed payload (header excluded) to catch use-after-free.
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);

  debug_only(if (VerifyCodeCacheOften) verify());
}
duke@435 | 230 | |
duke@435 | 231 | |
// Maps an arbitrary pointer 'p' into the start of the usable space of
// the allocated block containing it.  Returns NULL when 'p' lies
// outside the heap or inside a free block.
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  // 0xFF is the "free segment" sentinel written by mark_segmap_as_free().
  if (b[i] == 0xFF) {
    return NULL;
  }
  // Used segments are distance-coded (0, 1, ..., 254, 1, 2, ...) by
  // mark_segmap_as_used(); stepping back by the stored value repeatedly
  // reaches the entry holding 0, i.e. the block's first segment.
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}
duke@435 | 248 | |
duke@435 | 249 | |
duke@435 | 250 | size_t CodeHeap::alignment_unit() const { |
duke@435 | 251 | // this will be a power of two |
duke@435 | 252 | return _segment_size; |
duke@435 | 253 | } |
duke@435 | 254 | |
duke@435 | 255 | |
duke@435 | 256 | size_t CodeHeap::alignment_offset() const { |
duke@435 | 257 | // The lowest address in any allocated block will be |
duke@435 | 258 | // equal to alignment_offset (mod alignment_unit). |
duke@435 | 259 | return sizeof(HeapBlock) & (_segment_size - 1); |
duke@435 | 260 | } |
duke@435 | 261 | |
// Returns the usable space of the next used heap block at or after 'b';
// if 'b' itself is in use, 'b's own space is returned.  NULL at end of
// heap.
void* CodeHeap::next_free(HeapBlock *b) const {
  // Since free blocks are merged, there is at most one free block
  // between two used ones.
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}
duke@435 | 270 | |
duke@435 | 271 | // Returns the first used HeapBlock |
duke@435 | 272 | HeapBlock* CodeHeap::first_block() const { |
duke@435 | 273 | if (_next_segment > 0) |
duke@435 | 274 | return block_at(0); |
duke@435 | 275 | return NULL; |
duke@435 | 276 | } |
duke@435 | 277 | |
duke@435 | 278 | HeapBlock *CodeHeap::block_start(void *q) const { |
duke@435 | 279 | HeapBlock* b = (HeapBlock*)find_start(q); |
duke@435 | 280 | if (b == NULL) return NULL; |
duke@435 | 281 | return b - 1; |
duke@435 | 282 | } |
duke@435 | 283 | |
duke@435 | 284 | // Returns the next Heap block an offset into one |
duke@435 | 285 | HeapBlock* CodeHeap::next_block(HeapBlock *b) const { |
duke@435 | 286 | if (b == NULL) return NULL; |
duke@435 | 287 | size_t i = segment_for(b) + b->length(); |
duke@435 | 288 | if (i < _next_segment) |
duke@435 | 289 | return block_at(i); |
duke@435 | 290 | return NULL; |
duke@435 | 291 | } |
duke@435 | 292 | |
duke@435 | 293 | |
duke@435 | 294 | // Returns current capacity |
duke@435 | 295 | size_t CodeHeap::capacity() const { |
duke@435 | 296 | return _memory.committed_size(); |
duke@435 | 297 | } |
duke@435 | 298 | |
duke@435 | 299 | size_t CodeHeap::max_capacity() const { |
duke@435 | 300 | return _memory.reserved_size(); |
duke@435 | 301 | } |
duke@435 | 302 | |
duke@435 | 303 | size_t CodeHeap::allocated_capacity() const { |
duke@435 | 304 | // Start with the committed size in _memory; |
duke@435 | 305 | size_t l = _memory.committed_size(); |
duke@435 | 306 | |
duke@435 | 307 | // Subtract the committed, but unused, segments |
duke@435 | 308 | l -= size(_number_of_committed_segments - _next_segment); |
duke@435 | 309 | |
duke@435 | 310 | // Subtract the size of the freelist |
duke@435 | 311 | l -= size(_free_segments); |
duke@435 | 312 | |
duke@435 | 313 | return l; |
duke@435 | 314 | } |
duke@435 | 315 | |
duke@435 | 316 | // Free list management |
duke@435 | 317 | |
duke@435 | 318 | FreeBlock *CodeHeap::following_block(FreeBlock *b) { |
duke@435 | 319 | return (FreeBlock*)(((address)b) + _segment_size * b->length()); |
duke@435 | 320 | } |
duke@435 | 321 | |
// Inserts block b into the freelist immediately after a, then coalesces
// physically adjacent free blocks.
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks.  b is merged with its successor first
  // so that a triple (a, b, b's successor) can collapse into one block:
  // merge_right(a) then absorbs the already-enlarged b.
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}
duke@435 | 334 | |
// Try to merge this block with the following block.  The merge happens
// only when a's successor on the (address-ordered) freelist is also its
// physical neighbor, i.e. the two free blocks are adjacent in memory.
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map: re-encode the merged range so backward
    // walks land on a's first segment.
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}
duke@435 | 348 | |
// Inserts block a into the address-ordered freelist, coalescing it with
// physically adjacent free neighbors where possible.
void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  // Guard against freeing the same block twice in a row.
  assert(b != _freelist, "cannot be added twice");

  // Mark as free and update free space count
  _free_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Scan for the right place to put b into the list.  The list is
  // sorted by increasing addresses.
  FreeBlock* prev = NULL;
  FreeBlock* cur = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur = cur->link();
  }

  assert( (prev == NULL && b < _freelist) ||
          (prev < b && (cur == NULL || b < cur)), "list must be ordered");

  if (prev == NULL) {
    // Insert first in list; merge_right covers the case where b is
    // physically adjacent to the old head.
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}
duke@435 | 386 | |
// Best-fit search of the freelist for a block of at least 'length'
// segments.  Returns the block marked as used, or NULL if no block on
// the freelist is large enough.
FreeBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev = NULL;
  size_t best_length = 0;

  // Search for the smallest block which is bigger than length.
  FreeBlock *prev = NULL;
  FreeBlock *cur = _freelist;
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {
      // Remember best block, its previous element, and its length
      best_block = cur;
      best_prev = prev;
      best_length = best_block->length();
    }

    // Next element in list
    prev = cur;
    cur = cur->link();
  }

  if (best_block == NULL) {
    // None found
    return NULL;
  }

  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (best_length < length + CodeCacheMinBlockLength) {
    // Hand out the whole block: unlink it from the freelist.
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      // Unmap element
      best_prev->set_link(best_block->link());
    }
  } else {
    // Truncate block: the front part stays on the freelist and the tail
    // becomes the allocation.  The tail's segmap range is re-encoded so
    // it forms a block of its own.
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    // Set used bit and length on new block
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }

  best_block->set_used();
  _free_segments -= length;
  return best_block;
}
duke@435 | 444 | |
duke@435 | 445 | //---------------------------------------------------------------------------- |
duke@435 | 446 | // Non-product code |
duke@435 | 447 | |
duke@435 | 448 | #ifndef PRODUCT |
duke@435 | 449 | |
// Minimal debugging aid (non-product builds only): identify the heap on
// the tty.
void CodeHeap::print() {
  tty->print_cr("The Heap");
}
duke@435 | 453 | |
duke@435 | 454 | #endif |
duke@435 | 455 | |
// Consistency check: the freelist's total length must match
// _free_segments, and the number of free blocks reachable by walking
// all blocks must match the freelist's node count.  Callers in this
// file invoke it via debug_only().
void CodeHeap::verify() {
  // Count the number of blocks on the freelist, and the amount of space
  // represented.
  int count = 0;
  size_t len = 0;
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }

  // Verify that freelist contains the right amount of free space
  guarantee(len == _free_segments, "wrong freelist");

  // Verify that the number of free blocks is not out of hand.
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    // Double the warning limit so repeated growth does not spam the log.
    free_block_threshold *= 2;
  }

  // Verify that the freelist contains the same number of free blocks that is
  // found on the full list.
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
  guarantee(count == 0, "missing free blocks");
}