src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

changeset 2963:c3f1170908be
parent    2643:1216415d8e35
child     3713:720b6a76dd9d
     1.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jun 08 21:48:38 2011 -0400
     1.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Jun 10 13:16:40 2011 -0400
     1.3 @@ -23,259 +23,182 @@
     1.4   */
     1.5  
     1.6  #include "precompiled.hpp"
     1.7 +#include "gc_implementation/g1/heapRegion.hpp"
     1.8 +#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
     1.9 +#include "gc_implementation/g1/heapRegionSets.hpp"
    1.10  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.11 -#include "gc_implementation/g1/heapRegionSeq.hpp"
    1.12  #include "memory/allocation.hpp"
    1.13  
    1.14 -// Local to this file.
    1.15 +// Private
    1.16  
    1.17 -static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
    1.18 -  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
    1.19 -  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
    1.20 -  else if (*hr1p == *hr2p) return 0;
    1.21 -  else {
    1.22 -    assert(false, "We should never compare distinct overlapping regions.");
    1.23 -  }
    1.24 -  return 0;
    1.25 -}
    1.26 +size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
    1.27 +  size_t len = length();
    1.28 +  assert(num > 1, "use this only for sequences of length 2 or greater");
    1.29 +  assert(from <= len,
    1.30 +         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
    1.31 +                 from, len));
    1.32  
    1.33 -HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
    1.34 -  _alloc_search_start(0),
    1.35 -  // The line below is the worst bit of C++ hackery I've ever written
    1.36 -  // (Detlefs, 11/23).  You should think of it as equivalent to
    1.37 -  // "_regions(100, true)": initialize the growable array and inform it
    1.38 -  // that it should allocate its elem array(s) on the C heap.
    1.39 -  //
    1.40 -  // The first argument, however, is actually a comma expression
    1.41 -  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
    1.42 -  // set_allocation_type() call is to replace the default allocation
    1.43 -  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
    1.44 -  // allow to pass the assert in GenericGrowableArray() which checks
    1.45 -  // that a growable array object must be on C heap if elements are.
    1.46 -  //
    1.47 -  // Note: containing object is allocated on C heap since it is CHeapObj.
    1.48 -  //
    1.49 -  _regions((ResourceObj::set_allocation_type((address)&_regions,
    1.50 -                                             ResourceObj::C_HEAP),
    1.51 -            (int)max_size),
    1.52 -           true),
    1.53 -  _next_rr_candidate(0),
    1.54 -  _seq_bottom(NULL)
    1.55 -{}
    1.56 -
    1.57 -// Private methods.
    1.58 -
    1.59 -void HeapRegionSeq::print_empty_runs() {
    1.60 -  int empty_run = 0;
    1.61 -  int n_empty = 0;
    1.62 -  int empty_run_start;
    1.63 -  for (int i = 0; i < _regions.length(); i++) {
    1.64 -    HeapRegion* r = _regions.at(i);
    1.65 -    if (r->continuesHumongous()) continue;
    1.66 -    if (r->is_empty()) {
    1.67 -      assert(!r->isHumongous(), "H regions should not be empty.");
    1.68 -      if (empty_run == 0) empty_run_start = i;
    1.69 -      empty_run++;
    1.70 -      n_empty++;
    1.71 -    } else {
    1.72 -      if (empty_run > 0) {
    1.73 -        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
    1.74 -        empty_run = 0;
    1.75 -      }
    1.76 -    }
    1.77 -  }
    1.78 -  if (empty_run > 0) {
    1.79 -    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
    1.80 -  }
    1.81 -  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
    1.82 -}
    1.83 -
    1.84 -int HeapRegionSeq::find(HeapRegion* hr) {
    1.85 -  // FIXME: optimized for adjacent regions of fixed size.
    1.86 -  int ind = hr->hrs_index();
    1.87 -  if (ind != -1) {
    1.88 -    assert(_regions.at(ind) == hr, "Mismatch");
    1.89 -  }
    1.90 -  return ind;
    1.91 -}
    1.92 -
    1.93 -
    1.94 -// Public methods.
    1.95 -
    1.96 -void HeapRegionSeq::insert(HeapRegion* hr) {
    1.97 -  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
    1.98 -  if (_regions.length() == 0
    1.99 -      || _regions.top()->end() <= hr->bottom()) {
   1.100 -    hr->set_hrs_index(_regions.length());
   1.101 -    _regions.append(hr);
   1.102 -  } else {
   1.103 -    _regions.append(hr);
   1.104 -    _regions.sort(orderRegions);
   1.105 -    for (int i = 0; i < _regions.length(); i++) {
   1.106 -      _regions.at(i)->set_hrs_index(i);
   1.107 -    }
   1.108 -  }
   1.109 -  char* bot = (char*)_regions.at(0)->bottom();
   1.110 -  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
   1.111 -}
   1.112 -
   1.113 -size_t HeapRegionSeq::length() {
   1.114 -  return _regions.length();
   1.115 -}
   1.116 -
   1.117 -size_t HeapRegionSeq::free_suffix() {
   1.118 -  size_t res = 0;
   1.119 -  int first = _regions.length() - 1;
   1.120 -  int cur = first;
   1.121 -  while (cur >= 0 &&
   1.122 -         (_regions.at(cur)->is_empty()
   1.123 -          && (first == cur
   1.124 -              || (_regions.at(cur+1)->bottom() ==
   1.125 -                  _regions.at(cur)->end())))) {
   1.126 -      res++;
   1.127 -      cur--;
   1.128 -  }
   1.129 -  return res;
   1.130 -}
   1.131 -
   1.132 -int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
   1.133 -  assert(num > 1, "pre-condition");
   1.134 -  assert(0 <= from && from <= _regions.length(),
   1.135 -         err_msg("from: %d should be valid and <= than %d",
   1.136 -                 from, _regions.length()));
   1.137 -
   1.138 -  int curr = from;
   1.139 -  int first = -1;
   1.140 +  size_t curr = from;
   1.141 +  size_t first = G1_NULL_HRS_INDEX;
   1.142    size_t num_so_far = 0;
   1.143 -  while (curr < _regions.length() && num_so_far < num) {
   1.144 -    HeapRegion* curr_hr = _regions.at(curr);
   1.145 -    if (curr_hr->is_empty()) {
   1.146 -      if (first == -1) {
   1.147 +  while (curr < len && num_so_far < num) {
   1.148 +    if (at(curr)->is_empty()) {
   1.149 +      if (first == G1_NULL_HRS_INDEX) {
   1.150          first = curr;
   1.151          num_so_far = 1;
   1.152        } else {
   1.153          num_so_far += 1;
   1.154        }
   1.155      } else {
   1.156 -      first = -1;
   1.157 +      first = G1_NULL_HRS_INDEX;
   1.158        num_so_far = 0;
   1.159      }
   1.160      curr += 1;
   1.161    }
   1.162 -
   1.163    assert(num_so_far <= num, "post-condition");
   1.164    if (num_so_far == num) {
   1.165      // we found enough space for the humongous object
   1.166 -    assert(from <= first && first < _regions.length(), "post-condition");
   1.167 -    assert(first < curr && (curr - first) == (int) num, "post-condition");
   1.168 -    for (int i = first; i < first + (int) num; ++i) {
   1.169 -      assert(_regions.at(i)->is_empty(), "post-condition");
   1.170 +    assert(from <= first && first < len, "post-condition");
   1.171 +    assert(first < curr && (curr - first) == num, "post-condition");
   1.172 +    for (size_t i = first; i < first + num; ++i) {
   1.173 +      assert(at(i)->is_empty(), "post-condition");
   1.174      }
   1.175      return first;
   1.176    } else {
   1.177      // we failed to find enough space for the humongous object
   1.178 -    return -1;
   1.179 +    return G1_NULL_HRS_INDEX;
   1.180    }
   1.181  }
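
The loop above walks the region array starting at `from`, remembers where the current run of empty regions began, resets that run whenever it meets a non-empty region, and returns either the index of the first run of length `num` or `G1_NULL_HRS_INDEX`. For reference, here is a minimal standalone sketch of the same run-finding idea (illustration only, not HotSpot code; the container and names are made up):

    // Scan a boolean "is_empty" map and return the start index of the first
    // run of `num` consecutive empty slots at or after `from`, or `npos`.
    #include <cstddef>
    #include <vector>

    static const size_t npos = (size_t) -1;   // stand-in for G1_NULL_HRS_INDEX

    size_t find_empty_run(const std::vector<bool>& is_empty, size_t from, size_t num) {
      size_t first  = npos;   // start of the current run of empty slots
      size_t so_far = 0;      // length of the current run
      for (size_t curr = from; curr < is_empty.size() && so_far < num; ++curr) {
        if (is_empty[curr]) {
          if (first == npos) { first = curr; so_far = 1; }
          else               { so_far += 1; }
        } else {
          first  = npos;      // run broken, start over
          so_far = 0;
        }
      }
      return (so_far == num) ? first : npos;
    }
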
   1.182  
   1.183 -int HeapRegionSeq::find_contiguous(size_t num) {
   1.184 -  assert(num > 1, "otherwise we should not be calling this");
   1.185 -  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
   1.186 -         err_msg("_alloc_search_start: %d should be valid and <= than %d",
   1.187 -                 _alloc_search_start, _regions.length()));
   1.188 +// Public
   1.189  
   1.190 -  int start = _alloc_search_start;
   1.191 -  int res = find_contiguous_from(start, num);
   1.192 -  if (res == -1 && start != 0) {
   1.193 -    // Try starting from the beginning. If _alloc_search_start was 0,
   1.194 -    // no point in doing this again.
   1.195 -    res = find_contiguous_from(0, num);
   1.196 +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
   1.197 +                               size_t max_length) {
   1.198 +  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
   1.199 +         "bottom should be heap region aligned");
   1.200 +  assert((size_t) end % HeapRegion::GrainBytes == 0,
   1.201 +         "end should be heap region aligned");
   1.202 +
   1.203 +  _length = 0;
   1.204 +  _heap_bottom = bottom;
   1.205 +  _heap_end = end;
   1.206 +  _region_shift = HeapRegion::LogOfHRGrainBytes;
   1.207 +  _next_search_index = 0;
   1.208 +  _allocated_length = 0;
   1.209 +  _max_length = max_length;
   1.210 +
   1.211 +  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
   1.212 +  memset(_regions, 0, max_length * sizeof(HeapRegion*));
   1.213 +  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
   1.214 +
   1.215 +  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
   1.216 +         "bottom should be included in the region with index 0");
   1.217 +}
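
The `_regions_biased` assignment above is the key trick: the base pointer is biased by `bottom >> _region_shift`, so mapping an address to its region needs only a shift and an index, `_regions_biased[addr >> _region_shift]`, instead of first subtracting the heap bottom. The assert that follows checks exactly this for `bottom` itself. A minimal sketch of the same idea, with hypothetical names (not the HotSpot API):

    #include <cstdint>

    struct Region;   // opaque for the sketch

    struct RegionMap {
      Region**  _map;          // one slot per region; index 0 covers the heap bottom
      Region**  _map_biased;   // _map - (heap_bottom >> shift), as in the patch above
      uintptr_t _heap_bottom;
      int       _shift;        // log2 of the region size in bytes

      void initialize(Region** map, uintptr_t heap_bottom, int shift) {
        _map         = map;
        _heap_bottom = heap_bottom;
        _shift       = shift;
        // Bias the base pointer once, so lookups need no subtraction.
        _map_biased  = _map - (heap_bottom >> shift);
      }

      // Roughly what addr_to_region_unsafe() does: a shift and an index,
      // with no bounds check.
      Region* region_for(uintptr_t addr) const {
        return _map_biased[addr >> _shift];
      }
    };
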
   1.218 +
   1.219 +MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
   1.220 +                                   HeapWord* new_end,
   1.221 +                                   FreeRegionList* list) {
   1.222 +  assert(old_end < new_end, "don't call it otherwise");
   1.223 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   1.224 +
   1.225 +  HeapWord* next_bottom = old_end;
   1.226 +  assert(_heap_bottom <= next_bottom, "invariant");
   1.227 +  while (next_bottom < new_end) {
   1.228 +    assert(next_bottom < _heap_end, "invariant");
   1.229 +    size_t index = length();
   1.230 +
   1.231 +    assert(index < _max_length, "otherwise we cannot expand further");
   1.232 +    if (index == 0) {
   1.233 +      // We have not allocated any regions so far
   1.234 +      assert(next_bottom == _heap_bottom, "invariant");
   1.235 +    } else {
   1.236 +      // next_bottom should match the end of the last/previous region
   1.237 +      assert(next_bottom == at(index - 1)->end(), "invariant");
   1.238 +    }
   1.239 +
   1.240 +    if (index == _allocated_length) {
   1.241 +      // We have to allocate a new HeapRegion.
   1.242 +      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
   1.243 +      if (new_hr == NULL) {
   1.244 +        // allocation failed, we bail out and return what we have done so far
   1.245 +        return MemRegion(old_end, next_bottom);
   1.246 +      }
   1.247 +      assert(_regions[index] == NULL, "invariant");
   1.248 +      _regions[index] = new_hr;
   1.249 +      increment_length(&_allocated_length);
   1.250 +    }
   1.251 +    // Have to increment the length first, otherwise we will get an
   1.252 +    // assert failure at(index) below.
   1.253 +    increment_length(&_length);
   1.254 +    HeapRegion* hr = at(index);
   1.255 +    list->add_as_tail(hr);
   1.256 +
   1.257 +    next_bottom = hr->end();
   1.258    }
   1.259 -  if (res != -1) {
   1.260 -    assert(0 <= res && res < _regions.length(),
   1.261 -           err_msg("res: %d should be valid", res));
   1.262 -    _alloc_search_start = res + (int) num;
   1.263 -    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
   1.264 -           err_msg("_alloc_search_start: %d should be valid",
   1.265 -                   _alloc_search_start));
   1.266 +  assert(next_bottom == new_end, "post-condition");
   1.267 +  return MemRegion(old_end, next_bottom);
   1.268 +}

   1.269 +
   1.270 +size_t HeapRegionSeq::free_suffix() {
   1.271 +  size_t res = 0;
   1.272 +  size_t index = length();
   1.273 +  while (index > 0) {
   1.274 +    index -= 1;
   1.275 +    if (!at(index)->is_empty()) {
   1.276 +      break;
   1.277 +    }
   1.278 +    res += 1;
   1.279    }
   1.280    return res;
   1.281  }
   1.282  
   1.283 -void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
   1.284 -  iterate_from((HeapRegion*)NULL, blk);
   1.285 +size_t HeapRegionSeq::find_contiguous(size_t num) {
   1.286 +  assert(num > 1, "use this only for sequences of length 2 or greater");
   1.287 +  assert(_next_search_index <= length(),
    1.288 +         err_msg("_next_search_index: "SIZE_FORMAT" "
   1.289 +                 "should be valid and <= than "SIZE_FORMAT,
   1.290 +                 _next_search_index, length()));
   1.291 +
   1.292 +  size_t start = _next_search_index;
   1.293 +  size_t res = find_contiguous_from(start, num);
   1.294 +  if (res == G1_NULL_HRS_INDEX && start > 0) {
   1.295 +    // Try starting from the beginning. If _next_search_index was 0,
   1.296 +    // no point in doing this again.
   1.297 +    res = find_contiguous_from(0, num);
   1.298 +  }
   1.299 +  if (res != G1_NULL_HRS_INDEX) {
   1.300 +    assert(res < length(),
   1.301 +           err_msg("res: "SIZE_FORMAT" should be valid", res));
   1.302 +    _next_search_index = res + num;
   1.303 +    assert(_next_search_index <= length(),
    1.304 +           err_msg("_next_search_index: "SIZE_FORMAT" "
   1.305 +                   "should be valid and <= than "SIZE_FORMAT,
   1.306 +                   _next_search_index, length()));
   1.307 +  }
   1.308 +  return res;
   1.309  }
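
`find_contiguous()` caches a search cursor (`_next_search_index`): it scans from the cursor first, falls back to scanning from index 0 if that fails and the cursor was non-zero, and on success bumps the cursor past the block it just handed out. A sketch of that wrapper, reusing `find_empty_run()` from the earlier sketch (hypothetical names, illustration only):

    #include <cstddef>
    #include <vector>

    static const size_t npos = (size_t) -1;                            // same sentinel as before
    size_t find_empty_run(const std::vector<bool>&, size_t, size_t);   // from the earlier sketch

    // Search from the cached cursor, retry once from the beginning,
    // and advance the cursor past the block just claimed.
    size_t find_with_cursor(const std::vector<bool>& is_empty, size_t* cursor, size_t num) {
      size_t res = find_empty_run(is_empty, *cursor, num);
      if (res == npos && *cursor > 0) {
        res = find_empty_run(is_empty, 0, num);   // wrap around once
      }
      if (res != npos) {
        *cursor = res + num;
      }
      return res;
    }
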
   1.310  
   1.311 -// The first argument r is the heap region at which iteration begins.
   1.312 -// This operation runs fastest when r is NULL, or the heap region for
   1.313 -// which a HeapRegionClosure most recently returned true, or the
   1.314 -// heap region immediately to its right in the sequence.  In all
   1.315 -// other cases a linear search is required to find the index of r.
   1.316 +void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
   1.317 +  iterate_from((HeapRegion*) NULL, blk);
   1.318 +}
   1.319  
   1.320 -void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
   1.321 +void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
   1.322 +  size_t hr_index = 0;
   1.323 +  if (hr != NULL) {
   1.324 +    hr_index = (size_t) hr->hrs_index();
   1.325 +  }
   1.326  
   1.327 -  // :::: FIXME ::::
   1.328 -  // Static cache value is bad, especially when we start doing parallel
   1.329 -  // remembered set update. For now just don't cache anything (the
   1.330 -  // code in the def'd out blocks).
   1.331 -
   1.332 -#if 0
   1.333 -  static int cached_j = 0;
   1.334 -#endif
   1.335 -  int len = _regions.length();
   1.336 -  int j = 0;
   1.337 -  // Find the index of r.
   1.338 -  if (r != NULL) {
   1.339 -#if 0
   1.340 -    assert(cached_j >= 0, "Invariant.");
   1.341 -    if ((cached_j < len) && (r == _regions.at(cached_j))) {
   1.342 -      j = cached_j;
   1.343 -    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
   1.344 -      j = cached_j + 1;
   1.345 -    } else {
   1.346 -      j = find(r);
   1.347 -#endif
   1.348 -      if (j < 0) {
   1.349 -        j = 0;
   1.350 -      }
   1.351 -#if 0
   1.352 -    }
   1.353 -#endif
   1.354 -  }
   1.355 -  int i;
   1.356 -  for (i = j; i < len; i += 1) {
   1.357 -    int res = blk->doHeapRegion(_regions.at(i));
   1.358 +  size_t len = length();
   1.359 +  for (size_t i = hr_index; i < len; i += 1) {
   1.360 +    bool res = blk->doHeapRegion(at(i));
   1.361      if (res) {
   1.362 -#if 0
   1.363 -      cached_j = i;
   1.364 -#endif
   1.365        blk->incomplete();
   1.366        return;
   1.367      }
   1.368    }
   1.369 -  for (i = 0; i < j; i += 1) {
   1.370 -    int res = blk->doHeapRegion(_regions.at(i));
   1.371 +  for (size_t i = 0; i < hr_index; i += 1) {
   1.372 +    bool res = blk->doHeapRegion(at(i));
   1.373      if (res) {
   1.374 -#if 0
   1.375 -      cached_j = i;
   1.376 -#endif
   1.377 -      blk->incomplete();
   1.378 -      return;
   1.379 -    }
   1.380 -  }
   1.381 -}
   1.382 -
   1.383 -void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
   1.384 -  int len = _regions.length();
   1.385 -  int i;
   1.386 -  for (i = idx; i < len; i++) {
   1.387 -    if (blk->doHeapRegion(_regions.at(i))) {
   1.388 -      blk->incomplete();
   1.389 -      return;
   1.390 -    }
   1.391 -  }
   1.392 -  for (i = 0; i < idx; i++) {
   1.393 -    if (blk->doHeapRegion(_regions.at(i))) {
   1.394        blk->incomplete();
   1.395        return;
   1.396      }
   1.397 @@ -283,54 +206,92 @@
   1.398  }
   1.399  
   1.400  MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
   1.401 -                                   size_t& num_regions_deleted) {
   1.402 +                                   size_t* num_regions_deleted) {
   1.403    // Reset this in case it's currently pointing into the regions that
   1.404    // we just removed.
   1.405 -  _alloc_search_start = 0;
   1.406 +  _next_search_index = 0;
   1.407  
   1.408    assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   1.409    assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
   1.410 +  assert(length() > 0, "the region sequence should not be empty");
   1.411 +  assert(length() <= _allocated_length, "invariant");
   1.412 +  assert(_allocated_length > 0, "we should have at least one region committed");
   1.413  
   1.414 -  if (_regions.length() == 0) {
   1.415 -    num_regions_deleted = 0;
   1.416 -    return MemRegion();
   1.417 -  }
   1.418 -  int j = _regions.length() - 1;
   1.419 -  HeapWord* end = _regions.at(j)->end();
   1.420 +  // around the loop, i will be the next region to be removed
   1.421 +  size_t i = length() - 1;
   1.422 +  assert(i > 0, "we should never remove all regions");
   1.423 +  // [last_start, end) is the MemRegion that covers the regions we will remove.
   1.424 +  HeapWord* end = at(i)->end();
   1.425    HeapWord* last_start = end;
   1.426 -  while (j >= 0 && shrink_bytes > 0) {
   1.427 -    HeapRegion* cur = _regions.at(j);
   1.428 -    // We have to leave humongous regions where they are,
   1.429 -    // and work around them.
   1.430 -    if (cur->isHumongous()) {
   1.431 -      return MemRegion(last_start, end);
   1.432 -    }
   1.433 -    assert(cur == _regions.top(), "Should be top");
   1.434 +  *num_regions_deleted = 0;
   1.435 +  while (shrink_bytes > 0) {
   1.436 +    HeapRegion* cur = at(i);
   1.437 +    // We should leave the humongous regions where they are.
   1.438 +    if (cur->isHumongous()) break;
   1.439 +    // We should stop shrinking if we come across a non-empty region.
   1.440      if (!cur->is_empty()) break;
   1.441 +
   1.442 +    i -= 1;
   1.443 +    *num_regions_deleted += 1;
   1.444      shrink_bytes -= cur->capacity();
   1.445 -    num_regions_deleted++;
   1.446 -    _regions.pop();
   1.447      last_start = cur->bottom();
   1.448 -    // We need to delete these somehow, but can't currently do so here: if
   1.449 -    // we do, the ZF thread may still access the deleted region.  We'll
   1.450 -    // leave this here as a reminder that we have to do something about
   1.451 -    // this.
   1.452 -    // delete cur;
   1.453 -    j--;
   1.454 +    decrement_length(&_length);
   1.455 +    // We will reclaim the HeapRegion. _allocated_length should be
   1.456 +    // covering this index. So, even though we removed the region from
   1.457 +    // the active set by decreasing _length, we still have it
   1.458 +    // available in the future if we need to re-use it.
   1.459 +    assert(i > 0, "we should never remove all regions");
   1.460 +    assert(length() > 0, "we should never remove all regions");
   1.461    }
   1.462    return MemRegion(last_start, end);
   1.463  }
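
Taken together, `expand_by()` and `shrink_by()` maintain two lengths: `_length` counts the active regions, while `_allocated_length` counts HeapRegion objects ever constructed. Shrinking only decrements `_length`, so the objects between the two stay available for a later expansion, which is what the comment above alludes to. A compact sketch of that bookkeeping, with hypothetical types and names (not the HotSpot code):

    #include <cstddef>
    #include <vector>

    struct Slot { bool constructed = false; };   // stands in for a HeapRegion*

    struct SeqSketch {
      std::vector<Slot> slots;       // backing array, sized to the maximum length up front
      size_t length = 0;             // regions currently active ("committed")
      size_t allocated_length = 0;   // regions ever constructed; length <= allocated_length

      explicit SeqSketch(size_t max_length) : slots(max_length) {}

      // Activate n more regions, constructing a slot only the first time its
      // index is used (index == allocated_length); otherwise reuse it as-is.
      void expand_by(size_t n) {
        for (size_t i = 0; i < n; ++i) {
          size_t index = length;
          if (index == allocated_length) {
            slots[index].constructed = true;
            allocated_length += 1;
          }
          length += 1;
        }
      }

      // Deactivate n regions from the end. Constructed slots are kept
      // (allocated_length is untouched) so a later expand_by() can reuse them.
      void shrink_by(size_t n) {
        length -= n;
      }
    };
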
   1.464  
   1.465 -class PrintHeapRegionClosure : public  HeapRegionClosure {
   1.466 -public:
   1.467 -  bool doHeapRegion(HeapRegion* r) {
   1.468 -    gclog_or_tty->print(PTR_FORMAT ":", r);
   1.469 -    r->print();
   1.470 -    return false;
   1.471 +#ifndef PRODUCT
   1.472 +void HeapRegionSeq::verify_optional() {
   1.473 +  guarantee(_length <= _allocated_length,
   1.474 +            err_msg("invariant: _length: "SIZE_FORMAT" "
   1.475 +                    "_allocated_length: "SIZE_FORMAT,
   1.476 +                    _length, _allocated_length));
   1.477 +  guarantee(_allocated_length <= _max_length,
   1.478 +            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
   1.479 +                    "_max_length: "SIZE_FORMAT,
   1.480 +                    _allocated_length, _max_length));
   1.481 +  guarantee(_next_search_index <= _length,
   1.482 +            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
   1.483 +                    "_length: "SIZE_FORMAT,
   1.484 +                    _next_search_index, _length));
   1.485 +
   1.486 +  HeapWord* prev_end = _heap_bottom;
   1.487 +  for (size_t i = 0; i < _allocated_length; i += 1) {
   1.488 +    HeapRegion* hr = _regions[i];
   1.489 +    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
   1.490 +    guarantee(hr->bottom() == prev_end,
   1.491 +              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
   1.492 +                      "prev_end: "PTR_FORMAT,
   1.493 +                      i, HR_FORMAT_PARAMS(hr), prev_end));
   1.494 +    guarantee(hr->hrs_index() == i,
   1.495 +              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
   1.496 +                      i, hr->hrs_index()));
   1.497 +    if (i < _length) {
   1.498 +      // Asserts will fire if i is >= _length
   1.499 +      HeapWord* addr = hr->bottom();
   1.500 +      guarantee(addr_to_region(addr) == hr, "sanity");
   1.501 +      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
   1.502 +    } else {
   1.503 +      guarantee(hr->is_empty(), "sanity");
   1.504 +      guarantee(!hr->isHumongous(), "sanity");
   1.505 +      // using assert instead of guarantee here since containing_set()
   1.506 +      // is only available in non-product builds.
   1.507 +      assert(hr->containing_set() == NULL, "sanity");
   1.508 +    }
   1.509 +    if (hr->startsHumongous()) {
   1.510 +      prev_end = hr->orig_end();
   1.511 +    } else {
   1.512 +      prev_end = hr->end();
   1.513 +    }
   1.514    }
   1.515 -};
   1.516 -
   1.517 -void HeapRegionSeq::print() {
   1.518 -  PrintHeapRegionClosure cl;
   1.519 -  iterate(&cl);
   1.520 +  for (size_t i = _allocated_length; i < _max_length; i += 1) {
   1.521 +    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
   1.522 +  }
   1.523  }
   1.524 +#endif // PRODUCT
