src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

author:      tschatzl
date:        Tue, 30 Sep 2014 09:44:36 +0200
changeset:   7218:6948da6d7c13
parent:      7208:7baf47cb97cb
child:       7535:7ae4e26cb1e0
permissions: -rw-r--r--

8052172: Evacuation failure handling in G1 does not evacuate all objects if -XX:-G1DeferredRSUpdate is set
Summary: Remove the -XX:-G1DeferredRSUpdate functionality as it is racy. During evacuation failure handling, threads that encountered an evacuation failure may try to add remembered set entries to regions whose remembered sets are currently being scanned. The remembered set iterator does not support adding entries during a scan, and so may skip valid references.
Reviewed-by: iveresov, brutisso, mgerdin
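
The underlying problem is the classic race of inserting into a data structure while one thread iterates it without synchronization. A minimal sketch of that failure mode, using hypothetical names rather than the actual HeapRegionRemSet API:

    // Illustrative only; every name here is hypothetical, not HotSpot API.
    struct CardSet {
      static const size_t Capacity = 1024;
      size_t _cards[Capacity];
      volatile size_t _len;

      CardSet() : _len(0) {}

      // Called by threads handling an evacuation failure.
      void add(size_t card_index) {
        _cards[_len] = card_index;  // unsynchronized store...
        _len = _len + 1;            // ...racing with the snapshot below
      }

      // Called by the thread scanning the remembered set.
      template <typename Closure>
      void iterate(Closure* cl) {
        size_t snapshot = _len;     // entries added after this read are
        for (size_t i = 0; i < snapshot; i++) {
          cl->do_card(_cards[i]);   // never visited, i.e. skipped
        }
      }
    };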

/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"

// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.

class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  G1SATBCardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _dcq(dcq), _ct_bs(_g1->g1_barrier_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
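
// Note on usage: a caller is expected to point this closure at the failed
// region via set_region(hr) (inherited from OopsInHeapRegionClosure, which
// sets _from) and then apply it with obj->oop_iterate(...), as
// RemoveSelfForwardPtrHRClosure does below. Cards marked deferred here land
// in the DirtyCardQueue and are processed once evacuation is complete,
// recreating the remembered set entries that were skipped during the
// collection.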

class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;
  HeapWord* _end_of_last_gap;
  HeapWord* _last_gap_threshold;
  HeapWord* _last_obj_threshold;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id),
    _end_of_last_gap(hr->bottom()),
    _last_gap_threshold(hr->bottom()),
    _last_obj_threshold(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (_end_of_last_gap != obj_addr) {
      // there was a gap before obj_addr
      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
    }

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
    } else {

      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr(obj_addr, obj_size);
      CollectedHeap::fill_with_object(mr);

      // must nuke all dead objects which we skipped when iterating over the region
      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
    }
    _end_of_last_gap = obj_end;
    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
  }
};
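
// In short, once do_object() has visited every object, the region is fully
// parsable again: each self-forwarded object is kept live (re-marked below
// PTAMS, header restored, remembered set entries recreated), while every
// other range has been overwritten with filler objects and cleared in the
// prev bitmap.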

class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;

  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                uint worker_id) :
    _g1h(g1h), _cm(_g1h->concurrent_mark()), _worker_id(worker_id),
    _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq) {
    }

  bool doHeapRegion(HeapRegion* hr) {
    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();

    assert(!hr->isHumongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");

    if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                            during_initial_mark,
                                            during_conc_mark,
                                            _worker_id);

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl.set_region(hr);
        hr->object_iterate(&rspc);

        hr->rem_set()->clean_strong_code_roots(hr);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             rspc.marked_bytes());
      }
    }
    return false;
  }
};

class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;

public:
  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"),
    _g1h(g1h) { }

  void work(uint worker_id) {
    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);

    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
  }
};
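
// A sketch of how this task is typically dispatched after an evacuation
// failure; the actual call site lives in G1CollectedHeap (its
// remove_self_forwarding_pointers()), so the lines below are illustrative
// rather than a copy of that code:
//
//   G1ParRemoveSelfForwardPtrsTask rsfp_task(g1h);
//   g1h->workers()->run_task(&rsfp_task);  // each thread runs work(worker_id)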

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
