src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

changeset 3416:2ace1c4ee8da
parent    3412:023652e49ac0
child     3463:d30fa85f9994
     1.1 --- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Jan 10 20:02:41 2012 +0100
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Jan 10 18:58:13 2012 -0500
     1.3 @@ -66,19 +66,22 @@
     1.4    G1CollectedHeap* _g1;
     1.5    ConcurrentMark* _cm;
     1.6    HeapRegion* _hr;
     1.7 -  size_t _prev_marked_bytes;
     1.8 -  size_t _next_marked_bytes;
     1.9 +  size_t _marked_bytes;
    1.10    OopsInHeapRegionClosure *_update_rset_cl;
    1.11 +  bool _during_initial_mark;
    1.12 +  bool _during_conc_mark;
    1.13  public:
    1.14    RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
    1.15                                   HeapRegion* hr,
    1.16 -                                 OopsInHeapRegionClosure* update_rset_cl) :
    1.17 -    _g1(g1), _cm(cm), _hr(hr),
    1.18 +                                 OopsInHeapRegionClosure* update_rset_cl,
    1.19 +                                 bool during_initial_mark,
    1.20 +                                 bool during_conc_mark) :
    1.21 +    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    1.22      _update_rset_cl(update_rset_cl),
    1.23 -    _prev_marked_bytes(0), _next_marked_bytes(0) {}
    1.24 +    _during_initial_mark(during_initial_mark),
    1.25 +    _during_conc_mark(during_conc_mark) { }
    1.26  
    1.27 -  size_t prev_marked_bytes() { return _prev_marked_bytes; }
    1.28 -  size_t next_marked_bytes() { return _next_marked_bytes; }
    1.29 +  size_t marked_bytes() { return _marked_bytes; }
    1.30  
    1.31    // <original comment>
    1.32    // The original idea here was to coalesce evacuated and dead objects.
    1.33 @@ -100,18 +103,29 @@
    1.34      HeapWord* obj_addr = (HeapWord*) obj;
    1.35      assert(_hr->is_in(obj_addr), "sanity");
    1.36      size_t obj_size = obj->size();
    1.37 -
    1.38      _hr->update_bot_for_object(obj_addr, obj_size);
    1.39  
    1.40      if (obj->is_forwarded() && obj->forwardee() == obj) {
    1.41        // The object failed to move.
    1.42 -      assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
    1.43 +
    1.44 +      // We consider all objects that we find self-forwarded to be
     1.45 +      // live. We'll update the prev marking info so that they
     1.46 +      // are all under PTAMS and explicitly marked.
    1.47        _cm->markPrev(obj);
    1.48 -      assert(_cm->isPrevMarked(obj), "Should be marked!");
    1.49 -      _prev_marked_bytes += (obj_size * HeapWordSize);
    1.50 -      if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
    1.51 -        _cm->markAndGrayObjectIfNecessary(obj);
    1.52 +      if (_during_initial_mark) {
    1.53 +        // For the next marking info we'll only mark the
     1.54 +        // self-forwarded objects explicitly during an initial-mark
     1.55 +        // pause (since, normally, we only mark objects pointed to
     1.56 +        // by roots if we succeed in copying them). By marking all
     1.57 +        // self-forwarded objects we ensure that we mark any that are
     1.58 +        // still pointed to by roots. During concurrent marking, and
     1.59 +        // after initial-mark, we don't need to mark any objects
     1.60 +        // explicitly and all objects in the CSet are considered
     1.61 +        // (implicitly) live. So, we won't mark them explicitly and
     1.62 +        // we'll leave them over NTAMS.
    1.63 +        _cm->markNext(obj);
    1.64        }
    1.65 +      _marked_bytes += (obj_size * HeapWordSize);
    1.66        obj->set_mark(markOopDesc::prototype());
    1.67  
    1.68        // While we were processing RSet buffers during the collection,
    1.69 @@ -126,15 +140,13 @@
    1.70        // The problem is that, if evacuation fails, we might have
    1.71        // remembered set entries missing given that we skipped cards on
    1.72        // the collection set. So, we'll recreate such entries now.
    1.73 -
    1.74        obj->oop_iterate(_update_rset_cl);
    1.75        assert(_cm->isPrevMarked(obj), "Should be marked!");
    1.76      } else {
    1.77        // The object has been either evacuated or is dead. Fill it with a
    1.78        // dummy object.
    1.79 -      MemRegion mr((HeapWord*)obj, obj_size);
    1.80 +      MemRegion mr((HeapWord*) obj, obj_size);
    1.81        CollectedHeap::fill_with_object(mr);
    1.82 -      _cm->clearRangeBothMaps(mr);
    1.83      }
    1.84    }
    1.85  };
    1.86 @@ -151,12 +163,27 @@
    1.87      _cm(_g1h->concurrent_mark()) { }
    1.88  
    1.89    bool doHeapRegion(HeapRegion *hr) {
    1.90 +    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    1.91 +    bool during_conc_mark = _g1h->mark_in_progress();
    1.92 +
    1.93      assert(!hr->isHumongous(), "sanity");
    1.94      assert(hr->in_collection_set(), "bad CS");
    1.95  
    1.96      if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
    1.97        if (hr->evacuation_failed()) {
    1.98 -        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl);
    1.99 +        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
   1.100 +                                            during_initial_mark,
   1.101 +                                            during_conc_mark);
   1.102 +
   1.103 +        MemRegion mr(hr->bottom(), hr->end());
    1.104 +        // We'll recreate the prev marking info, so we first clear
    1.105 +        // the prev bitmap range for this region. Concurrent marking
    1.106 +        // never explicitly marks CSet objects, so the next bitmap
    1.107 +        // range should already be clear.
   1.108 +        _cm->clearRangePrevBitmap(mr);
   1.109 +
   1.110 +        hr->note_self_forwarding_removal_start(during_initial_mark,
   1.111 +                                               during_conc_mark);
   1.112  
   1.113          // In the common case (i.e. when there is no evacuation
   1.114          // failure) we make sure that the following is done when
   1.115 @@ -171,28 +198,9 @@
   1.116          _update_rset_cl->set_region(hr);
   1.117          hr->object_iterate(&rspc);
   1.118  
   1.119 -        // A number of manipulations to make the TAMS for this region
   1.120 -        // be the current top, and the marked bytes be the ones observed
   1.121 -        // in the iteration.
   1.122 -        if (_cm->at_least_one_mark_complete()) {
   1.123 -          // The comments below are the postconditions achieved by the
   1.124 -          // calls.  Note especially the last such condition, which says that
   1.125 -          // the count of marked bytes has been properly restored.
   1.126 -          hr->note_start_of_marking(false);
   1.127 -          // _next_top_at_mark_start == top, _next_marked_bytes == 0
   1.128 -          hr->add_to_marked_bytes(rspc.prev_marked_bytes());
   1.129 -          // _next_marked_bytes == prev_marked_bytes.
   1.130 -          hr->note_end_of_marking();
   1.131 -          // _prev_top_at_mark_start == top(),
   1.132 -          // _prev_marked_bytes == prev_marked_bytes
   1.133 -        }
   1.134 -        // If there is no mark in progress, we modified the _next variables
   1.135 -        // above needlessly, but harmlessly.
   1.136 -        if (_g1h->mark_in_progress()) {
   1.137 -          hr->note_start_of_marking(false);
   1.138 -          // _next_top_at_mark_start == top, _next_marked_bytes == 0
   1.139 -          // _next_marked_bytes == next_marked_bytes.
   1.140 -        }
   1.141 +        hr->note_self_forwarding_removal_end(during_initial_mark,
   1.142 +                                             during_conc_mark,
   1.143 +                                             rspc.marked_bytes());
   1.144        }
   1.145      }
   1.146      return false;
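
Note on the replaced bookkeeping: the old inline TAMS fix-up that this change deletes (note_start_of_marking(), add_to_marked_bytes(), note_end_of_marking()) is superseded by the two new HeapRegion hooks, note_self_forwarding_removal_start() and note_self_forwarding_removal_end(). Their bodies are added outside this file (presumably in heapRegion.hpp/cpp) and are not part of this hunk. The sketch below is only a simplified, self-contained model of the bookkeeping the comments in the diff describe, not the HotSpot implementation; the RegionMarkingInfo type and all of its field names are invented for illustration.

// Simplified model of the prev/next TAMS and marked-bytes bookkeeping.
// All names here are illustrative; they are not HotSpot's HeapRegion fields.
#include <cstddef>
#include <cstdio>

struct RegionMarkingInfo {
  size_t bottom;             // start of the region (word index, illustrative)
  size_t top;                // current allocation top
  size_t prev_tams;          // "prev" top-at-mark-start (PTAMS)
  size_t next_tams;          // "next" top-at-mark-start (NTAMS)
  size_t prev_marked_bytes;  // bytes accounted as marked on the prev bitmap
  size_t next_marked_bytes;  // bytes accounted as marked on the next bitmap

  // Before iterating a region that had an evacuation failure.
  void note_self_forwarding_removal_start(bool during_initial_mark,
                                          bool during_conc_mark) {
    // The prev marking info is always recreated: every self-forwarded
    // object will be explicitly marked on the prev bitmap, so they all
    // have to end up under PTAMS.
    prev_tams = top;
    prev_marked_bytes = 0;

    if (during_initial_mark) {
      // Self-forwarded objects are also marked on the next bitmap, so
      // they have to be under NTAMS as well.
      next_tams = top;
      next_marked_bytes = 0;
    } else if (during_conc_mark) {
      // CSet objects are implicitly live during concurrent marking;
      // leaving NTAMS at bottom keeps them "over NTAMS" with no
      // explicit next-bitmap marks.
      next_tams = bottom;
      next_marked_bytes = 0;
    }
  }

  // After the iteration; marked_bytes is what the object closure
  // accumulated for the self-forwarded (live) objects. The two flags
  // mirror the call site in the diff but are unused in this model.
  void note_self_forwarding_removal_end(bool /*during_initial_mark*/,
                                        bool /*during_conc_mark*/,
                                        size_t marked_bytes) {
    prev_marked_bytes = marked_bytes;
  }
};

int main() {
  RegionMarkingInfo hr = {0, 512, 0, 0, 0, 0};
  hr.note_self_forwarding_removal_start(true /*initial-mark*/, false);
  hr.note_self_forwarding_removal_end(true, false, 64 * sizeof(void*));
  std::printf("PTAMS=%zu NTAMS=%zu prev_marked_bytes=%zu\n",
              hr.prev_tams, hr.next_tams, hr.prev_marked_bytes);
  return 0;
}

Under this model, keeping the policy in two HeapRegion hooks means the per-pause decision (initial-mark pause vs. concurrent mark in progress vs. neither) is made once in doHeapRegion(), and the region-local TAMS/marked-bytes updates sit next to the fields they modify rather than being open-coded at the evac-failure removal site.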
