src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

changeset 704:850fdf70db2b
parent    698:12eea04c8b06
child     706:818a18cd69a8
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri Jul 25 11:29:03 2008 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Mon Jul 28 15:30:23 2008 -0700
@@ -87,6 +87,15 @@
 
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
+  if (ZapUnusedHeapArea) {
+    // Mangle newly committed space immediately rather than
+    // waiting for the initialization of the space even though
+    // mangling is related to spaces.  Doing it here eliminates
+    // the need to carry along information that a complete mangling
+    // (bottom to end) needs to be done.
+    SpaceMangler::mangle_region(cmr);
+  }
+
   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 
   CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
@@ -112,7 +121,9 @@
   if (_object_space == NULL)
     vm_exit_during_initialization("Could not allocate an old gen space");
 
-  object_space()->initialize(cmr, true);
+  object_space()->initialize(cmr,
+                             SpaceDecorator::Clear,
+                             SpaceDecorator::Mangle);
 
   _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
 
@@ -232,6 +243,19 @@
   assert_locked_or_safepoint(Heap_lock);
   bool result = virtual_space()->expand_by(bytes);
   if (result) {
+    if (ZapUnusedHeapArea) {
+      // We need to mangle the newly expanded area. The memregion spans
+      // end -> new_end; we assume that top -> end is already mangled.
+      // Do the mangling before post_resize() is called because
+      // the space is available for allocation after post_resize().
+      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
+      assert(object_space()->end() < virtual_space_high,
+        "Should be true before post_resize()");
+      MemRegion mangle_region(object_space()->end(), virtual_space_high);
+      // Note that the object space has not yet been updated to
+      // coincide with the new underlying virtual space.
+      SpaceMangler::mangle_region(mangle_region);
+    }
     post_resize();
     if (UsePerfData) {
       _space_counters->update_capacity();
@@ -348,16 +372,7 @@
   start_array()->set_covered_region(new_memregion);
   Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
 
-  // Did we expand?
   HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
-  if (object_space()->end() < virtual_space_high) {
-    // We need to mangle the newly expanded area. The memregion spans
-    // end -> new_end, we assume that top -> end is already mangled.
-    // This cannot be safely tested for, as allocation may be taking
-    // place.
-    MemRegion mangle_region(object_space()->end(), virtual_space_high);
-    object_space()->mangle_region(mangle_region);
-  }
 
   // ALWAYS do this last!!
   object_space()->set_end(virtual_space_high);
@@ -462,3 +477,10 @@
   VerifyObjectStartArrayClosure check( this, &_start_array );
   object_iterate(&check);
 }
+
+#ifndef PRODUCT
+void PSOldGen::record_spaces_top() {
+  assert(ZapUnusedHeapArea, "Not mangling unused space");
+  object_space()->set_top_for_allocations();
+}
+#endif
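For context on ZapUnusedHeapArea: in debug builds the collector "mangles" committed-but-unused heap memory, filling it with a recognizable junk pattern so that code which reads or allocates from memory it does not own fails loudly during verification. The first hunk above mangles a freshly committed region immediately instead of waiting for the space initialization that would otherwise cover it. The helper below is a minimal standalone sketch of what a SpaceMangler::mangle_region(MemRegion)-style call boils down to; the type name and the pattern value are made up for illustration and are not HotSpot's.

#include <cstdint>

typedef uintptr_t WordType;                                    // one heap word
const WordType kJunkPattern = (WordType)0xBAADBABEBAADBABEULL; // made-up pattern

// Fill every word in [start, end) with the junk pattern.
inline void mangle_words(WordType* start, WordType* end) {
  for (WordType* p = start; p < end; ++p) {
    *p = kJunkPattern;
  }
}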

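The expand_by() hunk is careful about ordering: the newly committed delta [object_space()->end(), virtual_space()->high()) is mangled before post_resize() runs, because after post_resize() the enlarged space is available for allocation and writing the junk pattern there could clobber live data. Below is a minimal sketch of that invariant, using made-up names rather than the real PSOldGen members.

#include <cassert>
#include <cstdint>

typedef uintptr_t WordType;

struct OldGenSketch {
  WordType* _space_end;       // end of the space as seen by allocators
  WordType* _committed_high;  // high end of the committed virtual space
  bool      _zap_unused;      // stands in for the ZapUnusedHeapArea flag

  // Fill [from, to) with a junk pattern; plays the role of mangle_region().
  static void mangle(WordType* from, WordType* to) {
    for (WordType* p = from; p < to; ++p) *p = (WordType)0xDEADBEEF;
  }

  void after_successful_expand() {
    if (_zap_unused) {
      // The delta is mangled while it is still invisible to allocators:
      // _space_end has not yet been pushed out to _committed_high.
      assert(_space_end < _committed_high && "should run before post_resize()");
      mangle(_space_end, _committed_high);
    }
    post_resize();            // only now does allocation see the new memory
  }

  void post_resize() { _space_end = _committed_high; }
};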
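The remaining hunks replace the bare boolean in object_space()->initialize(cmr, true) with explicit SpaceDecorator::Clear and SpaceDecorator::Mangle requests, and add a non-product record_spaces_top() that snapshots the allocation top via set_top_for_allocations(), presumably so that later debug checks can verify the area above that top still carries the mangling pattern. The sketch below shows how such flags and bookkeeping could fit together; it is a simplified assumption, not the MutableSpace/SpaceDecorator API.

#include <cstdint>

typedef uintptr_t WordType;

struct SpaceSketch {
  WordType* _bottom = nullptr;
  WordType* _end    = nullptr;
  WordType* _top    = nullptr;
  WordType* _top_for_allocations = nullptr;  // debug-only bookkeeping

  enum ClearRequest  { DontClear, Clear };
  enum MangleRequest { DontMangle, Mangle };

  void initialize(WordType* bottom, WordType* end,
                  ClearRequest clear, MangleRequest mangle) {
    _bottom = bottom;
    _end    = end;
    if (clear == Clear || _top == nullptr) {
      _top = bottom;                 // empty the space for allocation
    }
    if (mangle == Mangle) {
      // Fill the unused part [_top, _end) with a junk pattern (made-up value)
      // so stray use of unallocated memory is easy to spot in a debug run.
      for (WordType* p = _top; p < _end; ++p) {
        *p = (WordType)0xDEADBEEF;
      }
    }
  }

  // In the spirit of record_spaces_top(): remember the current allocation top
  // so a later check can verify everything above it is still mangled.
  void set_top_for_allocations() { _top_for_allocations = _top; }
};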