src/share/vm/memory/defNewGeneration.cpp

changeset 791:1ee8caae33af
parent    782:60fb9c4db4e6
parent    704:850fdf70db2b
child     888:c96030fff130
     1.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Wed Aug 06 11:57:31 2008 -0400
     1.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Thu Aug 21 23:36:31 2008 -0400
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -172,15 +172,25 @@
    1.11    _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
    1.12                                      _gen_counters);
    1.13  
    1.14 -  compute_space_boundaries(0);
    1.15 +  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    1.16    update_counters();
    1.17    _next_gen = NULL;
    1.18    _tenuring_threshold = MaxTenuringThreshold;
    1.19    _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
    1.20  }
    1.21  
    1.22 -void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
    1.23 -  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
    1.24 +void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
    1.25 +                                                bool clear_space,
    1.26 +                                                bool mangle_space) {
    1.27 +  uintx alignment =
    1.28 +    GenCollectedHeap::heap()->collector_policy()->min_alignment();
    1.29 +
    1.30 +  // If the spaces are being cleared (only done at heap initialization
    1.31 +  // currently), the survivor spaces need not be empty.
     1.32 +  // Otherwise, no care is taken for used areas in the survivor spaces,
    1.33 +  // so check.
    1.34 +  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    1.35 +    "Initialization of the survivor spaces assumes these are empty");
    1.36  
    1.37    // Compute sizes
    1.38    uintx size = _virtual_space.committed_size();
    1.39 @@ -214,26 +224,45 @@
    1.40    MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
    1.41    MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
    1.42  
    1.43 -  eden()->set_bounds(edenMR);
    1.44 -  if (minimum_eden_size == 0) {
    1.45 -    // The "minimum_eden_size" is really the amount of eden occupied by
    1.46 -    // allocated objects -- if this is zero, then we can clear the space.
    1.47 -    eden()->clear();
    1.48 -  } else {
    1.49 -    // Otherwise, we will not have cleared eden. This can cause newly
    1.50 -    // expanded space not to be mangled if using ZapUnusedHeapArea.
    1.51 -    // We explicitly do such mangling here.
    1.52 +  // A minimum eden size implies that there is a part of eden that
    1.53 +  // is being used and that affects the initialization of any
    1.54 +  // newly formed eden.
    1.55 +  bool live_in_eden = minimum_eden_size > 0;
    1.56 +
    1.57 +  // If not clearing the spaces, do some checking to verify that
     1.58 +  // the spaces are already mangled.
    1.59 +  if (!clear_space) {
    1.60 +    // Must check mangling before the spaces are reshaped.  Otherwise,
     1.61 +    // the bottom or end of one space may have moved into another,
     1.62 +    // and a failure of the check may not correctly indicate which space
    1.63 +    // is not properly mangled.
    1.64      if (ZapUnusedHeapArea) {
    1.65 -      eden()->mangle_unused_area();
    1.66 +      HeapWord* limit = (HeapWord*) _virtual_space.high();
    1.67 +      eden()->check_mangled_unused_area(limit);
    1.68 +      from()->check_mangled_unused_area(limit);
    1.69 +        to()->check_mangled_unused_area(limit);
    1.70      }
    1.71    }
    1.72 -  from()->initialize(fromMR, true /* clear */);
    1.73 -    to()->initialize(  toMR, true /* clear */);
    1.74 -  // Make sure we compact eden, then from.
    1.75 +
    1.76 +  // Reset the spaces for their new regions.
    1.77 +  eden()->initialize(edenMR,
    1.78 +                     clear_space && !live_in_eden,
    1.79 +                     SpaceDecorator::Mangle);
    1.80 +  // If clear_space and live_in_eden, we will not have cleared any
    1.81 +  // portion of eden above its top. This can cause newly
    1.82 +  // expanded space not to be mangled if using ZapUnusedHeapArea.
    1.83 +  // We explicitly do such mangling here.
    1.84 +  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    1.85 +    eden()->mangle_unused_area();
    1.86 +  }
    1.87 +  from()->initialize(fromMR, clear_space, mangle_space);
    1.88 +  to()->initialize(toMR, clear_space, mangle_space);
    1.89 +
    1.90 +  // Set next compaction spaces.
    1.91 +  eden()->set_next_compaction_space(from());
    1.92    // The to-space is normally empty before a compaction so need
    1.93    // not be considered.  The exception is during promotion
    1.94    // failure handling when to-space can contain live objects.
    1.95 -  eden()->set_next_compaction_space(from());
    1.96    from()->set_next_compaction_space(NULL);
    1.97  }
    1.98  
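
The reworked compute_space_boundaries() above threads two named flags, SpaceDecorator::Clear and SpaceDecorator::Mangle, through eden/from/to initialization instead of a bare boolean. The sketch below is a minimal standalone model of that pattern; the flag class, the ToySpace type, and the 0xbaadbabe fill value are illustrative assumptions, not the actual HotSpot declarations.

    // Sketch only: simplified named clear/mangle flags and an
    // initialize(clear, mangle) step. Not the actual HotSpot classes.
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct SpaceFlags {                      // stand-in for SpaceDecorator
      static const bool Clear      = true;
      static const bool DontClear  = false;
      static const bool Mangle     = true;
      static const bool DontMangle = false;
    };

    struct ToySpace {                        // stand-in for a contiguous space
      uint32_t* bottom;
      uint32_t* top;                         // next free word
      uint32_t* end;
      static const uint32_t kBadWord = 0xbaadbabeu;  // assumed zap pattern

      void initialize(uint32_t* b, uint32_t* e, bool clear, bool mangle) {
        bottom = b; end = e;
        // first use passes Clear, so top is valid before any mangling
        if (clear)  top = bottom;            // "clear" resets the allocation pointer
        if (mangle) mangle_unused_area();    // fill [top, end) with the pattern
      }
      void mangle_unused_area() {
        for (uint32_t* p = top; p < end; ++p) *p = kBadWord;
      }
      bool is_empty() const { return top == bottom; }
    };

    int main() {
      uint32_t backing[64];
      ToySpace eden;
      eden.initialize(backing, backing + 64, SpaceFlags::Clear, SpaceFlags::Mangle);
      assert(eden.is_empty() && backing[10] == 0xbaadbabeu);
      printf("eden cleared and mangled\n");
      return 0;
    }
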
    1.99 @@ -256,7 +285,16 @@
   1.100  
   1.101  bool DefNewGeneration::expand(size_t bytes) {
   1.102    MutexLocker x(ExpandHeap_lock);
   1.103 +  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
   1.104    bool success = _virtual_space.expand_by(bytes);
   1.105 +  if (success && ZapUnusedHeapArea) {
   1.106 +    // Mangle newly committed space immediately because it
    1.107 +    // can be done here more simply than after the new
   1.108 +    // spaces have been computed.
   1.109 +    HeapWord* new_high = (HeapWord*) _virtual_space.high();
   1.110 +    MemRegion mangle_region(prev_high, new_high);
   1.111 +    SpaceMangler::mangle_region(mangle_region);
   1.112 +  }
   1.113  
   1.114    // Do not attempt an expand-to-the reserve size.  The
   1.115    // request should properly observe the maximum size of
   1.116 @@ -268,7 +306,8 @@
   1.117    // value.
   1.118    if (GC_locker::is_active()) {
   1.119      if (PrintGC && Verbose) {
   1.120 -      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
   1.121 +      gclog_or_tty->print_cr("Garbage collection disabled, "
   1.122 +        "expanded heap instead");
   1.123      }
   1.124    }
   1.125  
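
The expand() change above mangles only what the expansion just committed, i.e. the [prev_high, new_high) range, so later mangling checks can assume everything above top already carries the fill pattern. A hedged standalone sketch of that step follows; the names and the fill value are assumptions, not HotSpot's.

    // Sketch: zap exactly the words a heap expansion just committed,
    // mirroring the [prev_high, new_high) region handled in expand().
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <vector>

    static const uint32_t kBadWord = 0xbaadbabeu;     // assumed zap pattern

    struct ToyVirtualSpace {
      std::vector<uint32_t> words;                    // committed words
      size_t high() const { return words.size(); }    // one past committed end
      bool expand_by(size_t n) { words.resize(words.size() + n); return true; }
    };

    // Expand, then mangle only the newly committed range when zapping is on.
    bool expand_and_mangle(ToyVirtualSpace& vs, size_t n, bool zap_unused_heap_area) {
      size_t prev_high = vs.high();
      bool success = vs.expand_by(n);
      if (success && zap_unused_heap_area) {
        for (size_t i = prev_high; i < vs.high(); ++i) vs.words[i] = kBadWord;
      }
      return success;
    }

    int main() {
      ToyVirtualSpace vs;
      expand_and_mangle(vs, 16, /*zap_unused_heap_area=*/true);
      assert(vs.words.front() == kBadWord && vs.words.back() == kBadWord);
      return 0;
    }
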
   1.126 @@ -332,16 +371,24 @@
   1.127      changed = true;
   1.128    }
   1.129    if (changed) {
   1.130 -    compute_space_boundaries(eden()->used());
   1.131 -    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
   1.132 +    // The spaces have already been mangled at this point but
   1.133 +    // may not have been cleared (set top = bottom) and should be.
   1.134 +    // Mangling was done when the heap was being expanded.
   1.135 +    compute_space_boundaries(eden()->used(),
   1.136 +                             SpaceDecorator::Clear,
   1.137 +                             SpaceDecorator::DontMangle);
   1.138 +    MemRegion cmr((HeapWord*)_virtual_space.low(),
   1.139 +                  (HeapWord*)_virtual_space.high());
   1.140      Universe::heap()->barrier_set()->resize_covered_region(cmr);
   1.141      if (Verbose && PrintGC) {
   1.142        size_t new_size_after  = _virtual_space.committed_size();
   1.143        size_t eden_size_after = eden()->capacity();
   1.144        size_t survivor_size_after = from()->capacity();
   1.145 -      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
   1.146 +      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
   1.147 +        SIZE_FORMAT "K [eden="
   1.148          SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
   1.149 -        new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
   1.150 +        new_size_before/K, new_size_after/K,
   1.151 +        eden_size_after/K, survivor_size_after/K);
   1.152        if (WizardMode) {
   1.153          gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
   1.154            thread_increase_size/K, threads_count);
   1.155 @@ -486,7 +533,7 @@
   1.156    ScanWeakRefClosure scan_weak_ref(this);
   1.157  
   1.158    age_table()->clear();
   1.159 -  to()->clear();
   1.160 +  to()->clear(SpaceDecorator::Mangle);
   1.161  
   1.162    gch->rem_set()->prepare_for_younger_refs_iterate(false);
   1.163  
   1.164 @@ -531,8 +578,18 @@
   1.165      soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
   1.166    if (!promotion_failed()) {
   1.167      // Swap the survivor spaces.
   1.168 -    eden()->clear();
   1.169 -    from()->clear();
   1.170 +    eden()->clear(SpaceDecorator::Mangle);
   1.171 +    from()->clear(SpaceDecorator::Mangle);
   1.172 +    if (ZapUnusedHeapArea) {
   1.173 +      // This is now done here because of the piece-meal mangling which
   1.174 +      // can check for valid mangling at intermediate points in the
   1.175 +      // collection(s).  When a minor collection fails to collect
    1.176 +      // sufficient space, resizing of the young generation can occur
    1.177 +      // and redistribute the spaces in the young generation.  Mangle
   1.178 +      // here so that unzapped regions don't get distributed to
   1.179 +      // other spaces.
   1.180 +      to()->mangle_unused_area();
   1.181 +    }
   1.182      swap_spaces();
   1.183  
   1.184      assert(to()->is_empty(), "to space should be empty now");
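
The post-scavenge hunk clears eden and from with mangling and explicitly mangles the (already empty) to-space, so that a resize after a failed minor collection cannot hand unzapped words to another space. The kind of check this protects is sketched below; it is illustrative only, not the HotSpot check_mangled_unused_area() body.

    // Sketch: every word above a space's top must still carry the zap
    // pattern; report the first one that does not.
    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t kBadWord = 0xbaadbabeu;     // assumed zap pattern

    // Return the first word in [top, limit) that lost the pattern,
    // or nullptr if the unused area is intact.
    static const uint32_t* first_unmangled(const uint32_t* top, const uint32_t* limit) {
      for (const uint32_t* p = top; p < limit; ++p) {
        if (*p != kBadWord) return p;
      }
      return nullptr;
    }

    int main() {
      uint32_t mem[16];
      for (int i = 0; i < 16; ++i) mem[i] = kBadWord; // freshly mangled space
      mem[9] = 42;                                    // stray write above top
      const uint32_t* bad = first_unmangled(mem + 4, mem + 16);
      printf("first unmangled word at offset %ld\n", (long)(bad - mem));  // 9
      return 0;
    }
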
   1.185 @@ -759,6 +816,15 @@
   1.186    }
   1.187  }
   1.188  
   1.189 +void DefNewGeneration::reset_scratch() {
   1.190 +  // If contributing scratch in to_space, mangle all of
   1.191 +  // to_space if ZapUnusedHeapArea.  This is needed because
   1.192 +  // top is not maintained while using to-space as scratch.
   1.193 +  if (ZapUnusedHeapArea) {
   1.194 +    to()->mangle_unused_area_complete();
   1.195 +  }
   1.196 +}
   1.197 +
   1.198  bool DefNewGeneration::collection_attempt_is_safe() {
   1.199    if (!to()->is_empty()) {
   1.200      return false;
   1.201 @@ -812,11 +878,25 @@
   1.202      }
   1.203    }
   1.204  
   1.205 +  if (ZapUnusedHeapArea) {
   1.206 +    eden()->check_mangled_unused_area_complete();
   1.207 +    from()->check_mangled_unused_area_complete();
   1.208 +    to()->check_mangled_unused_area_complete();
   1.209 +  }
   1.210 +
   1.211    // update the generation and space performance counters
   1.212    update_counters();
   1.213    gch->collector_policy()->counters()->update_counters();
   1.214  }
   1.215  
   1.216 +void DefNewGeneration::record_spaces_top() {
   1.217 +  assert(ZapUnusedHeapArea, "Not mangling unused space");
   1.218 +  eden()->set_top_for_allocations();
   1.219 +  to()->set_top_for_allocations();
   1.220 +  from()->set_top_for_allocations();
   1.221 +}
   1.222 +
   1.223 +
   1.224  void DefNewGeneration::update_counters() {
   1.225    if (UsePerfData) {
   1.226      _eden_counters->update_all();
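
The later hunks add record_spaces_top(), which snapshots each space's top before allocations, and end-of-collection calls to check_mangled_unused_area_complete(). A small self-contained sketch of that record-then-verify flow, using stand-in types and names rather than HotSpot's:

    // Sketch: snapshot top before a phase, mangle above it, and afterwards
    // confirm nothing above the snapshot was written. Illustrative only.
    #include <assert.h>
    #include <stdint.h>

    static const uint32_t kBadWord = 0xbaadbabeu;     // assumed zap pattern

    struct ToySpace {
      uint32_t* bottom;
      uint32_t* top;                      // next free word
      uint32_t* end;
      uint32_t* top_for_allocations;      // recorded high-water mark

      void set_top_for_allocations() { top_for_allocations = top; }
      void mangle_unused_area() {
        for (uint32_t* p = top; p < end; ++p) *p = kBadWord;
      }
      bool check_mangled_unused_area_complete() const {
        for (uint32_t* p = top_for_allocations; p < end; ++p) {
          if (*p != kBadWord) return false;
        }
        return true;
      }
    };

    int main() {
      uint32_t mem[32];
      ToySpace eden = { mem, mem + 8, mem + 32, nullptr };
      eden.mangle_unused_area();          // zap everything above top
      eden.set_top_for_allocations();     // snapshot before the phase
      mem[3] = 7;                         // a write below top is legitimate
      assert(eden.check_mangled_unused_area_complete());
      return 0;
    }
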
