src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

changeset 6979:5255b195f828
parent    6978:30c99d8e0f02
child     6980:6c523f5d5440
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Mar 06 09:08:18 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Mar 12 15:22:45 2014 +0100
@@ -853,7 +853,58 @@
                                                    UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();   // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr    = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr    = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }
 
 // Callers of this iterator beware: The closure application should
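
For reference, below is a minimal standalone sketch of the resume-from-"previous" iteration pattern that the new object_iterate_mem implements: start from the closure's recorded high-water mark, walk the space block by block applying the closure only to object blocks, and record the minimum address to resume from next time. The types and fields here (Block, UpwardsClosure, SimpleSpace) are hypothetical simplifications for illustration, not the HotSpot types used in the change.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for a parsed block in the space.
struct Block {
  size_t start;   // word offset where the block begins
  size_t size;    // block size in words
  bool   is_obj;  // true if the block currently holds a live object
};

// Hypothetical stand-in for UpwardsObjectClosure's previous()/set_previous().
struct UpwardsClosure {
  size_t previous = 0;                    // max address already handled
  void do_object(const Block&) { /* process the object */ }
};

struct SimpleSpace {
  std::vector<Block> blocks;              // contiguous, address-ordered blocks

  // Apply cl to every object block overlapping [region_start, region_end),
  // skipping work already covered by cl->previous, then record where to
  // resume next time (mirrors cl->set_previous(blk_start_addr) above).
  void object_iterate_mem(size_t region_start, size_t region_end,
                          UpwardsClosure* cl) {
    if (cl->previous >= region_end) return;            // nothing new to do
    size_t start  = cl->previous > region_start ? cl->previous : region_start;
    size_t cursor = start;
    for (const Block& b : blocks) {
      if (b.start + b.size <= start) continue;          // below resume point
      if (b.start >= region_end) break;                 // past the region
      if (b.is_obj) cl->do_object(b);
      cursor = b.start + b.size;                        // end of last block seen
    }
    if (cursor > cl->previous) cl->previous = cursor;   // min address next time
  }
}; 

The real code additionally defers updating "previous" when the last block processed was an object array (do_object_bm returned true), since such objects may be scanned incrementally across regions.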
