src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

author       tonyp
date         Wed, 25 Jan 2012 12:58:23 -0500
changeset    3464:eff609af17d7
parent       3463:d30fa85f9994
child        5811:d55c004e1d4d
permissions  -rw-r--r--

7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the concurrent marking threads have to scan them and mark everything reachable from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"

// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.
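
// UpdateRSetDeferred defers remembered-set updates: instead of updating
// a remembered set directly, it marks the card spanning the updated
// field as deferred and enqueues it on a dirty card queue so that the
// update can be applied later.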
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue *_dcq;
  CardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
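    // A remembered-set entry is only needed if the reference points out
    // of this region. Cards in survivor regions are also skipped:
    // survivors, being young, are scanned in full at the next GC, so
    // their outgoing references need no remembered-set entries.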
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
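
// Applied to every object in a region on which evacuation failed:
// self-forwarded (i.e. failed) objects are kept live and have their
// marking info and remembered-set entries fixed up, while the space
// occupied by successfully evacuated or dead objects is filled with
// dummy objects.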
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure *_update_rset_cl;
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id) { }

  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    _hr->update_bot_for_object(obj_addr, obj_size);

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      _cm->markPrev(obj);
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly during an initial-mark
        // pause (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
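      // Restore the default mark word, removing the self-forwarding
      // pointer that was installed when the evacuation failed.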
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards in the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards in
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
    } else {
      // The object has either been evacuated or is dead. Fill it with
      // a dummy object.
      MemRegion mr((HeapWord*) obj, obj_size);
      CollectedHeap::fill_with_object(mr);
    }
  }
};
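
// Applied to the collection-set regions by each worker: it claims each
// region, and for regions on which evacuation failed it clears the
// prev bitmap, resets the BOT and remembered set, and applies
// RemoveSelfForwardPtrObjClosure to every object in the region.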
class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  OopsInHeapRegionClosure *_update_rset_cl;
  uint _worker_id;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                OopsInHeapRegionClosure* update_rset_cl,
                                uint worker_id) :
    _g1h(g1h), _update_rset_cl(update_rset_cl),
    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }

  bool doHeapRegion(HeapRegion *hr) {
    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();

    assert(!hr->isHumongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");
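
    // Each worker starts iterating the collection set at a different
    // region, so a region may be visited by more than one worker; the
    // claim ensures that each region is processed exactly once.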
    if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
                                            during_initial_mark,
                                            during_conc_mark,
                                            _worker_id);

        MemRegion mr(hr->bottom(), hr->end());
        // We'll recreate the prev marking info so we'll first clear
        // the prev bitmap range for this region. We never mark any
        // CSet objects explicitly so the next bitmap range should be
        // cleared anyway.
        _cm->clearRangePrevBitmap(mr);

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl->set_region(hr);
        hr->object_iterate(&rspc);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             rspc.marked_bytes());
      }
    }
    return false;
  }
};
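
// The parallel task that drives the removal of self-forwarding
// pointers. Each worker walks its share of the collection set with a
// RemoveSelfForwardPtrHRClosure, recreating remembered-set entries
// either immediately or through the deferred dirty-card-queue
// mechanism, depending on G1DeferredRSUpdate.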
class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;

public:
  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"),
    _g1h(g1h) { }

  void work(uint worker_id) {
    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
    UpdateRSetDeferred deferred_update(_g1h, &dcq);

    OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
    if (!G1DeferredRSUpdate) {
      update_rset_cl = &immediate_update;
    }

    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);

    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
  }
};
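
// A minimal sketch of how this task is typically dispatched after an
// evacuation failure (assuming the surrounding code in
// G1CollectedHeap::remove_self_forwarding_pointers(); names outside
// this file are illustrative):
//
//   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
//   if (G1CollectedHeap::use_parallel_gc_threads()) {
//     set_par_threads();
//     workers()->run_task(&rsfp_task);  // one work(worker_id) per GC worker
//     set_par_threads(0);
//   } else {
//     rsfp_task.work(0);                // serial fallback
//   }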

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
