src/share/vm/memory/heap.cpp

changeset 4952:a7fb14888912
parent    4889:cc32ccaaf47f
child     5528:740e263c80c6
     1.1 --- a/src/share/vm/memory/heap.cpp	Tue Apr 16 10:04:01 2013 -0700
     1.2 +++ b/src/share/vm/memory/heap.cpp	Thu Apr 11 13:57:44 2013 +0200
     1.3 @@ -42,7 +42,7 @@
     1.4    _log2_segment_size            = 0;
     1.5    _next_segment                 = 0;
     1.6    _freelist                     = NULL;
     1.7 -  _free_segments                = 0;
     1.8 +  _freelist_segments            = 0;
     1.9  }
    1.10  
    1.11  
    1.12 @@ -115,8 +115,8 @@
    1.13    }
    1.14  
    1.15    on_code_mapping(_memory.low(), _memory.committed_size());
    1.16 -  _number_of_committed_segments = number_of_segments(_memory.committed_size());
    1.17 -  _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
    1.18 +  _number_of_committed_segments = size_to_segments(_memory.committed_size());
    1.19 +  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
    1.20    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    1.21  
    1.22    // reserve space for _segmap
    1.23 @@ -149,8 +149,8 @@
    1.24      if (!_memory.expand_by(dm)) return false;
    1.25      on_code_mapping(base, dm);
    1.26      size_t i = _number_of_committed_segments;
    1.27 -    _number_of_committed_segments = number_of_segments(_memory.committed_size());
    1.28 -    assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change");
    1.29 +    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    1.30 +    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    1.31      assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    1.32      // expand _segmap space
    1.33      size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
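
For reference, the helpers introduced by this rename, size_to_segments() and segments_to_size(), are the byte/segment conversions declared in heap.hpp. A minimal standalone sketch of that arithmetic, assuming the usual power-of-two segment size behind the existing _log2_segment_size field (field names and rounding details here are an approximation, not copied from the header):

    #include <cstddef>

    // Sketch of the CodeHeap segment arithmetic assumed by this patch; the real
    // helpers live in heap.hpp and operate on the CodeHeap's own fields.
    struct SegmentMath {
      size_t segment_size;       // bytes per segment, a power of two
      size_t log2_segment_size;  // log2(segment_size)

      // Round a byte size up to whole segments (replaces number_of_segments()).
      size_t size_to_segments(size_t size) const {
        return (size + segment_size - 1) >> log2_segment_size;
      }

      // Convert a segment count back to bytes (replaces size()).
      size_t segments_to_size(size_t segments) const {
        return segments << log2_segment_size;
      }
    };
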
    1.34 @@ -176,33 +176,44 @@
    1.35  }
    1.36  
    1.37  
    1.38 -void* CodeHeap::allocate(size_t size) {
    1.39 -  size_t length = number_of_segments(size + sizeof(HeapBlock));
    1.40 -  assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList");
    1.41 +void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
    1.42 +  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
    1.43 +  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
    1.44  
     1.45    // First check if we can satisfy request from freelist
    1.46    debug_only(verify());
    1.47 -  HeapBlock* block = search_freelist(length);
    1.48 +  HeapBlock* block = search_freelist(number_of_segments, is_critical);
    1.49    debug_only(if (VerifyCodeCacheOften) verify());
    1.50    if (block != NULL) {
    1.51 -    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");
    1.52 +    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    1.53      assert(!block->free(), "must be marked free");
    1.54  #ifdef ASSERT
    1.55 -    memset((void *)block->allocated_space(), badCodeHeapNewVal, size);
    1.56 +    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
    1.57  #endif
    1.58      return block->allocated_space();
    1.59    }
    1.60  
    1.61 -  if (length < CodeCacheMinBlockLength) {
    1.62 -    length = CodeCacheMinBlockLength;
    1.63 +  // Ensure minimum size for allocation to the heap.
    1.64 +  if (number_of_segments < CodeCacheMinBlockLength) {
    1.65 +    number_of_segments = CodeCacheMinBlockLength;
    1.66    }
    1.67 -  if (_next_segment + length <= _number_of_committed_segments) {
    1.68 -    mark_segmap_as_used(_next_segment, _next_segment + length);
    1.69 +
    1.70 +  if (!is_critical) {
    1.71 +    // Make sure the allocation fits in the unallocated heap without using
     1.72 +    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    1.73 +    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
    1.74 +      // Fail allocation
    1.75 +      return NULL;
    1.76 +    }
    1.77 +  }
    1.78 +
    1.79 +  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    1.80 +    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    1.81      HeapBlock* b =  block_at(_next_segment);
    1.82 -    b->initialize(length);
    1.83 -    _next_segment += length;
    1.84 +    b->initialize(number_of_segments);
    1.85 +    _next_segment += number_of_segments;
    1.86  #ifdef ASSERT
    1.87 -    memset((void *)b->allocated_space(), badCodeHeapNewVal, size);
    1.88 +    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
    1.89  #endif
    1.90      return b->allocated_space();
    1.91    } else {
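
The new is_critical parameter lets a caller flag allocations that may consume the tail of the code heap reserved via CodeCacheMinimumFreeSpace; ordinary requests must leave that reserve untouched so later critical allocations can still succeed. A small self-contained illustration of that rule (the helper name and byte counts are invented for the example; only CodeCacheMinimumFreeSpace is a real flag):

    #include <cstddef>
    #include <cstdio>

    // Illustration only: the fit test the patch applies to non-critical requests.
    static bool non_critical_request_fits(size_t request_bytes,
                                          size_t unallocated_bytes,
                                          size_t minimum_free_bytes) {
      // Ordinary allocations must leave minimum_free_bytes of the unallocated
      // heap untouched for critical allocations.
      if (unallocated_bytes < minimum_free_bytes) return false;
      return request_bytes <= unallocated_bytes - minimum_free_bytes;
    }

    int main() {
      const size_t minimum_free = 500 * 1024;  // illustrative reserve size
      printf("%d\n", non_critical_request_fits(64 * 1024, 600 * 1024, minimum_free)); // 1: fits
      printf("%d\n", non_critical_request_fits(64 * 1024, 520 * 1024, minimum_free)); // 0: would eat the reserve
      return 0;
    }
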
    1.92 @@ -219,7 +230,7 @@
    1.93  #ifdef ASSERT
    1.94    memset((void *)b->allocated_space(),
    1.95           badCodeHeapFreeVal,
    1.96 -         size(b->length()) - sizeof(HeapBlock));
    1.97 +         segments_to_size(b->length()) - sizeof(HeapBlock));
    1.98  #endif
    1.99    add_to_freelist(b);
   1.100  
   1.101 @@ -299,32 +310,14 @@
   1.102  }
   1.103  
   1.104  size_t CodeHeap::allocated_capacity() const {
   1.105 -  // Start with the committed size in _memory;
   1.106 -  size_t l = _memory.committed_size();
   1.107 -
   1.108 -  // Subtract the committed, but unused, segments
   1.109 -  l -= size(_number_of_committed_segments - _next_segment);
   1.110 -
   1.111 -  // Subtract the size of the freelist
   1.112 -  l -= size(_free_segments);
   1.113 -
   1.114 -  return l;
   1.115 +  // size of used heap - size on freelist
   1.116 +  return segments_to_size(_next_segment - _freelist_segments);
   1.117  }
   1.118  
   1.119 -size_t CodeHeap::largest_free_block() const {
   1.120 -  // First check unused space excluding free blocks.
   1.121 -  size_t free_sz = size(_free_segments);
   1.122 -  size_t unused  = max_capacity() - allocated_capacity() - free_sz;
   1.123 -  if (unused >= free_sz)
   1.124 -    return unused;
   1.125 -
   1.126 -  // Now check largest free block.
   1.127 -  size_t len = 0;
   1.128 -  for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
   1.129 -    if (b->length() > len)
   1.130 -      len = b->length();
   1.131 -  }
   1.132 -  return MAX2(unused, size(len));
   1.133 +// Returns size of the unallocated heap block
   1.134 +size_t CodeHeap::heap_unallocated_capacity() const {
   1.135 +  // Total number of segments - number currently used
   1.136 +  return segments_to_size(_number_of_reserved_segments - _next_segment);
   1.137  }
   1.138  
   1.139  // Free list management
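
With the rename in place, the accounting above reduces to two subtractions: allocated_capacity() is the used part of the heap minus whatever sits on the freelist, and heap_unallocated_capacity() is the reserved space that has never been handed out. A worked example with made-up numbers (segment size and counts are illustrative only):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Made-up values purely to illustrate the two accessors above.
      const size_t log2_segment_size       = 6;      // 64-byte segments
      const size_t number_of_reserved_segs = 32768;  // 2 MB reserved in total
      const size_t next_segment            = 10000;  // segments handed out so far
      const size_t freelist_segments       = 1500;   // handed out, then freed

      // allocated_capacity(): segments in use minus those back on the freelist.
      size_t allocated   = (next_segment - freelist_segments) << log2_segment_size;
      // heap_unallocated_capacity(): reserved segments never handed out.
      size_t unallocated = (number_of_reserved_segs - next_segment) << log2_segment_size;

      printf("allocated   = %zu bytes\n", allocated);    // 8500 * 64 = 544000
      printf("unallocated = %zu bytes\n", unallocated);  // 22768 * 64 = 1457152
      return 0;
    }
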
   1.140 @@ -365,7 +358,7 @@
   1.141    assert(b != _freelist, "cannot be removed twice");
   1.142  
   1.143    // Mark as free and update free space count
   1.144 -  _free_segments += b->length();
   1.145 +  _freelist_segments += b->length();
   1.146    b->set_free();
   1.147  
   1.148    // First element in list?
   1.149 @@ -400,7 +393,7 @@
   1.150  
   1.151  // Search freelist for an entry on the list with the best fit
   1.152  // Return NULL if no one was found
   1.153 -FreeBlock* CodeHeap::search_freelist(size_t length) {
   1.154 +FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
   1.155    FreeBlock *best_block = NULL;
   1.156    FreeBlock *best_prev  = NULL;
   1.157    size_t best_length = 0;
   1.158 @@ -411,6 +404,16 @@
   1.159    while(cur != NULL) {
   1.160      size_t l = cur->length();
   1.161      if (l >= length && (best_block == NULL || best_length > l)) {
   1.162 +
   1.163 +      // Non critical allocations are not allowed to use the last part of the code heap.
   1.164 +      if (!is_critical) {
   1.165 +        // Make sure the end of the allocation doesn't cross into the last part of the code heap
   1.166 +        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
   1.167 +          // the freelist is sorted by address - if one fails, all consecutive will also fail.
   1.168 +          break;
   1.169 +        }
   1.170 +      }
   1.171 +
   1.172        // Remember best block, its previous element, and its length
   1.173        best_block = cur;
   1.174        best_prev  = prev;
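
The early break added above is only valid because the freelist is kept sorted by address: once one candidate's end would cross into the reserved tail of the heap, every later candidate starts at a higher address and must cross it as well. A minimal sketch of that cut-off on a hypothetical address-sorted list (the node type and names are not from heap.cpp):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for FreeBlock: nodes appear in increasing address order.
    struct Node {
      uintptr_t addr;    // start address of the free block
      size_t    length;  // usable bytes in the block
      Node*     next;    // next block, at a strictly higher address
    };

    // Return the first block that fits without reaching past reserve_start;
    // stop scanning as soon as one block would cross that boundary.
    static Node* first_usable(Node* head, size_t alloc_bytes, uintptr_t reserve_start) {
      for (Node* cur = head; cur != NULL; cur = cur->next) {
        if (cur->addr + alloc_bytes > reserve_start) {
          // Address-sorted list: every remaining block would also cross the
          // boundary, so give up early, as the patch does for !is_critical.
          return NULL;
        }
        if (cur->length >= alloc_bytes) {
          return cur;  // lowest-address block that satisfies the request
        }
      }
      return NULL;
    }
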
   1.175 @@ -452,7 +455,7 @@
   1.176    }
   1.177  
   1.178    best_block->set_used();
   1.179 -  _free_segments -= length;
   1.180 +  _freelist_segments -= length;
   1.181    return best_block;
   1.182  }
   1.183  
   1.184 @@ -478,7 +481,7 @@
   1.185    }
   1.186  
   1.187    // Verify that freelist contains the right amount of free space
   1.188 -  //  guarantee(len == _free_segments, "wrong freelist");
   1.189 +  //  guarantee(len == _freelist_segments, "wrong freelist");
   1.190  
   1.191    // Verify that the number of free blocks is not out of hand.
   1.192    static int free_block_threshold = 10000;
