src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset 1075
ba50942c8138
parent 1063
7bb995fbd3c0
parent 1072
25e146966e7c
child 1082
bd441136a5ce
     1.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Mar 12 18:17:25 2009 -0700
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 18 11:37:48 2009 -0400
     1.3 @@ -786,6 +786,12 @@
     1.4    }
     1.5  }
     1.6  
     1.7 +void G1CollectedHeap::abandon_gc_alloc_regions() {
     1.8 +  // first, make sure that the GC alloc region list is empty (it should!)
     1.9 +  assert(_gc_alloc_region_list == NULL, "invariant");
    1.10 +  release_gc_alloc_regions(true /* totally */);
    1.11 +}
    1.12 +
    1.13  class PostMCRemSetClearClosure: public HeapRegionClosure {
    1.14    ModRefBarrierSet* _mr_bs;
    1.15  public:
    1.16 @@ -914,6 +920,7 @@
    1.17  
    1.18      // Make sure we'll choose a new allocation region afterwards.
    1.19      abandon_cur_alloc_region();
    1.20 +    abandon_gc_alloc_regions();
    1.21      assert(_cur_alloc_region == NULL, "Invariant.");
    1.22      g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
    1.23      tear_down_region_lists();
    1.24 @@ -954,6 +961,7 @@
    1.25      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
    1.26        HandleMark hm;  // Discard invalid handles created during verification
    1.27        gclog_or_tty->print(" VerifyAfterGC:");
    1.28 +      prepare_for_verify();
    1.29        Universe::verify(false);
    1.30      }
    1.31      NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    1.32 @@ -1306,7 +1314,7 @@
    1.33  }
    1.34  
    1.35  void G1CollectedHeap::shrink(size_t shrink_bytes) {
    1.36 -  release_gc_alloc_regions();
    1.37 +  release_gc_alloc_regions(true /* totally */);
    1.38    tear_down_region_lists();  // We will rebuild them in a moment.
    1.39    shrink_helper(shrink_bytes);
    1.40    rebuild_region_lists();
    1.41 @@ -1345,8 +1353,7 @@
    1.42    _gc_time_stamp(0),
    1.43    _surviving_young_words(NULL),
    1.44    _in_cset_fast_test(NULL),
    1.45 -  _in_cset_fast_test_base(NULL)
    1.46 -{
    1.47 +  _in_cset_fast_test_base(NULL) {
    1.48    _g1h = this; // To catch bugs.
    1.49    if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    1.50      vm_exit_during_initialization("Failed necessary allocation.");
    1.51 @@ -1371,9 +1378,19 @@
    1.52    }
    1.53  
    1.54    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    1.55 -    _gc_alloc_regions[ap]       = NULL;
    1.56 -    _gc_alloc_region_counts[ap] = 0;
    1.57 -  }
    1.58 +    _gc_alloc_regions[ap]          = NULL;
    1.59 +    _gc_alloc_region_counts[ap]    = 0;
    1.60 +    _retained_gc_alloc_regions[ap] = NULL;
    1.61 +    // by default, we do not retain a GC alloc region for each ap;
    1.62 +    // we'll override this, when appropriate, below
    1.63 +    _retain_gc_alloc_region[ap]    = false;
    1.64 +  }
    1.65 +
    1.66 +  // We will try to remember the last half-full tenured region we
    1.67 +  // allocated to at the end of a collection so that we can re-use it
    1.68 +  // during the next collection.
    1.69 +  _retain_gc_alloc_region[GCAllocForTenured]  = true;
    1.70 +
    1.71    guarantee(_task_queues != NULL, "task_queues allocation failure.");
    1.72  }
    1.73  
    1.74 @@ -2119,15 +2136,7 @@
    1.75    bool doHeapRegion(HeapRegion* r) {
    1.76      guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
    1.77                "Should be unclaimed at verify points.");
    1.78 -    if (r->isHumongous()) {
    1.79 -      if (r->startsHumongous()) {
    1.80 -        // Verify the single H object.
    1.81 -        oop(r->bottom())->verify();
    1.82 -        size_t word_sz = oop(r->bottom())->size();
    1.83 -        guarantee(r->top() == r->bottom() + word_sz,
    1.84 -                  "Only one object in a humongous region");
    1.85 -      }
    1.86 -    } else {
    1.87 +    if (!r->continuesHumongous()) {
    1.88        VerifyObjsInRegionClosure not_dead_yet_cl(r);
    1.89        r->verify(_allow_dirty);
    1.90        r->object_iterate(&not_dead_yet_cl);
    1.91 @@ -2179,6 +2188,7 @@
    1.92      _g1h(g1h), _allow_dirty(allow_dirty) { }
    1.93  
    1.94    void work(int worker_i) {
    1.95 +    HandleMark hm;
    1.96      VerifyRegionClosure blk(_allow_dirty, true);
    1.97      _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
    1.98                                            HeapRegion::ParVerifyClaimValue);
    1.99 @@ -2644,7 +2654,7 @@
   1.100          popular_region->set_popular_pending(false);
   1.101        }
   1.102  
   1.103 -      release_gc_alloc_regions();
   1.104 +      release_gc_alloc_regions(false /* totally */);
   1.105  
   1.106        cleanup_surviving_young_words();
   1.107  
   1.108 @@ -2697,6 +2707,7 @@
   1.109      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
   1.110        HandleMark hm;  // Discard invalid handles created during verification
   1.111        gclog_or_tty->print(" VerifyAfterGC:");
   1.112 +      prepare_for_verify();
   1.113        Universe::verify(false);
   1.114      }
   1.115  
   1.116 @@ -2735,6 +2746,10 @@
   1.117  
   1.118  void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   1.119    assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   1.120 +  // make sure we don't call set_gc_alloc_region() multiple times on
   1.121 +  // the same region
   1.122 +  assert(r == NULL || !r->is_gc_alloc_region(),
   1.123 +         "shouldn't already be a GC alloc region");
   1.124    HeapWord* original_top = NULL;
   1.125    if (r != NULL)
   1.126      original_top = r->top();
   1.127 @@ -2824,6 +2839,12 @@
   1.128    while (_gc_alloc_region_list != NULL) {
   1.129      HeapRegion* r = _gc_alloc_region_list;
   1.130      assert(r->is_gc_alloc_region(), "Invariant.");
   1.131 +    // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
   1.132 +    // newly allocated data in order to be able to apply deferred updates
   1.133 +    // before the GC is done for verification purposes (i.e to allow
    1.134 +    // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after
    1.135 +    // the collection.
   1.136 +    r->ContiguousSpace::set_saved_mark();
   1.137      _gc_alloc_region_list = r->next_gc_alloc_region();
   1.138      r->set_next_gc_alloc_region(NULL);
   1.139      r->set_is_gc_alloc_region(false);
   1.140 @@ -2851,23 +2872,55 @@
   1.141  }
   1.142  
   1.143  void G1CollectedHeap::get_gc_alloc_regions() {
    1.144 +  // First, let's check that the GC alloc region list is empty (it should be)
   1.145 +  assert(_gc_alloc_region_list == NULL, "invariant");
   1.146 +
   1.147    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
   1.148 +    assert(_gc_alloc_regions[ap] == NULL, "invariant");
   1.149 +
   1.150      // Create new GC alloc regions.
   1.151 -    HeapRegion* alloc_region = _gc_alloc_regions[ap];
   1.152 -    // Clear this alloc region, so that in case it turns out to be
   1.153 -    // unacceptable, we end up with no allocation region, rather than a bad
   1.154 -    // one.
   1.155 -    _gc_alloc_regions[ap] = NULL;
   1.156 -    if (alloc_region == NULL || alloc_region->in_collection_set()) {
   1.157 -      // Can't re-use old one.  Allocate a new one.
   1.158 +    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
   1.159 +    _retained_gc_alloc_regions[ap] = NULL;
   1.160 +
   1.161 +    if (alloc_region != NULL) {
   1.162 +      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
   1.163 +
   1.164 +      // let's make sure that the GC alloc region is not tagged as such
   1.165 +      // outside a GC operation
   1.166 +      assert(!alloc_region->is_gc_alloc_region(), "sanity");
   1.167 +
   1.168 +      if (alloc_region->in_collection_set() ||
   1.169 +          alloc_region->top() == alloc_region->end() ||
   1.170 +          alloc_region->top() == alloc_region->bottom()) {
   1.171 +        // we will discard the current GC alloc region if it's in the
   1.172 +        // collection set (it can happen!), if it's already full (no
   1.173 +        // point in using it), or if it's empty (this means that it
   1.174 +        // was emptied during a cleanup and it should be on the free
   1.175 +        // list now).
   1.176 +
   1.177 +        alloc_region = NULL;
   1.178 +      }
   1.179 +    }
   1.180 +
   1.181 +    if (alloc_region == NULL) {
   1.182 +      // we will get a new GC alloc region
   1.183        alloc_region = newAllocRegionWithExpansion(ap, 0);
   1.184      }
   1.185 +
   1.186      if (alloc_region != NULL) {
   1.187 +      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
   1.188        set_gc_alloc_region(ap, alloc_region);
   1.189      }
   1.190 +
   1.191 +    assert(_gc_alloc_regions[ap] == NULL ||
   1.192 +           _gc_alloc_regions[ap]->is_gc_alloc_region(),
   1.193 +           "the GC alloc region should be tagged as such");
   1.194 +    assert(_gc_alloc_regions[ap] == NULL ||
   1.195 +           _gc_alloc_regions[ap] == _gc_alloc_region_list,
   1.196 +           "the GC alloc region should be the same as the GC alloc list head");
   1.197    }
   1.198    // Set alternative regions for allocation purposes that have reached
   1.199 -  // thier limit.
   1.200 +  // their limit.
   1.201    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
   1.202      GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
   1.203      if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
   1.204 @@ -2877,27 +2930,55 @@
   1.205    assert(check_gc_alloc_regions(), "alloc regions messed up");
   1.206  }
   1.207  
   1.208 -void G1CollectedHeap::release_gc_alloc_regions() {
   1.209 +void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
   1.210    // We keep a separate list of all regions that have been alloc regions in
   1.211 -  // the current collection pause.  Forget that now.
   1.212 +  // the current collection pause. Forget that now. This method will
   1.213 +  // untag the GC alloc regions and tear down the GC alloc region
   1.214 +  // list. It's desirable that no regions are tagged as GC alloc
   1.215 +  // outside GCs.
   1.216    forget_alloc_region_list();
   1.217  
   1.218    // The current alloc regions contain objs that have survived
   1.219    // collection. Make them no longer GC alloc regions.
   1.220    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
   1.221      HeapRegion* r = _gc_alloc_regions[ap];
   1.222 -    if (r != NULL && r->is_empty()) {
   1.223 -      {
   1.224 +    _retained_gc_alloc_regions[ap] = NULL;
   1.225 +
   1.226 +    if (r != NULL) {
   1.227 +      // we retain nothing on _gc_alloc_regions between GCs
   1.228 +      set_gc_alloc_region(ap, NULL);
   1.229 +      _gc_alloc_region_counts[ap] = 0;
   1.230 +
   1.231 +      if (r->is_empty()) {
   1.232 +        // we didn't actually allocate anything in it; let's just put
   1.233 +        // it on the free list
   1.234          MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   1.235          r->set_zero_fill_complete();
   1.236          put_free_region_on_list_locked(r);
   1.237 +      } else if (_retain_gc_alloc_region[ap] && !totally) {
   1.238 +        // retain it so that we can use it at the beginning of the next GC
   1.239 +        _retained_gc_alloc_regions[ap] = r;
   1.240        }
   1.241      }
   1.242 -    // set_gc_alloc_region will also NULLify all aliases to the region
   1.243 -    set_gc_alloc_region(ap, NULL);
   1.244 -    _gc_alloc_region_counts[ap] = 0;
   1.245 -  }
   1.246 -}
   1.247 +  }
   1.248 +}
   1.249 +
   1.250 +#ifndef PRODUCT
   1.251 +// Useful for debugging
   1.252 +
   1.253 +void G1CollectedHeap::print_gc_alloc_regions() {
   1.254 +  gclog_or_tty->print_cr("GC alloc regions");
   1.255 +  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
   1.256 +    HeapRegion* r = _gc_alloc_regions[ap];
   1.257 +    if (r == NULL) {
   1.258 +      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
   1.259 +    } else {
   1.260 +      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
   1.261 +                             ap, r->bottom(), r->used());
   1.262 +    }
   1.263 +  }
   1.264 +}
   1.265 +#endif // PRODUCT
   1.266  
   1.267  void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   1.268    _drain_in_progress = false;
   1.269 @@ -3658,7 +3739,9 @@
   1.270    CardTableModRefBS* ctbs()                      { return _ct_bs; }
   1.271  
   1.272    void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
   1.273 -    _g1_rem->par_write_ref(from, p, tid);
   1.274 +    if (!from->is_survivor()) {
   1.275 +      _g1_rem->par_write_ref(from, p, tid);
   1.276 +    }
   1.277    }
   1.278  
   1.279    void deferred_rs_update(HeapRegion* from, oop* p, int tid) {

mercurial