src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,243 @@
     1.4 +/*
     1.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
    1.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
    1.30 +
    1.31 +#include "gc_implementation/g1/concurrentMark.inline.hpp"
    1.32 +#include "gc_implementation/g1/dirtyCardQueue.hpp"
    1.33 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.34 +#include "gc_implementation/g1/g1_globals.hpp"
    1.35 +#include "gc_implementation/g1/g1OopClosures.inline.hpp"
    1.36 +#include "gc_implementation/g1/heapRegion.hpp"
    1.37 +#include "gc_implementation/g1/heapRegionRemSet.hpp"
    1.38 +#include "utilities/workgroup.hpp"
    1.39 +
    1.40 +// Closures and tasks associated with any self-forwarding pointers
    1.41 +// installed as a result of an evacuation failure.
    1.42 +
    1.43 +class UpdateRSetDeferred : public OopsInHeapRegionClosure {
    1.44 +private:
    1.45 +  G1CollectedHeap* _g1;
    1.46 +  DirtyCardQueue *_dcq;
    1.47 +  G1SATBCardTableModRefBS* _ct_bs;
    1.48 +
    1.49 +public:
    1.50 +  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    1.51 +    _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
    1.52 +
    1.53 +  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    1.54 +  virtual void do_oop(      oop* p) { do_oop_work(p); }
    1.55 +  template <class T> void do_oop_work(T* p) {
    1.56 +    assert(_from->is_in_reserved(p), "paranoia");
    1.57 +    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
    1.58 +        !_from->is_survivor()) {
    1.59 +      size_t card_index = _ct_bs->index_for(p);
    1.60 +      if (_ct_bs->mark_card_deferred(card_index)) {
    1.61 +        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
    1.62 +      }
    1.63 +    }
    1.64 +  }
    1.65 +};
    1.66 +
// Object closure applied to every object in a region that experienced
// evacuation failure: self-forwarded (failed-to-move) objects are kept
// live and re-marked; all other objects are overwritten with filler.
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;                          // region being processed
  size_t _marked_bytes;                     // bytes of self-forwarded (live) objects seen
  OopsInHeapRegionClosure *_update_rset_cl; // applied to fields of live objects
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id) { }

  // Total size (in bytes) of the objects found self-forwarded so far.
  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    // Refine the (previously reset) BOT entry for this object.
    _hr->update_bot_for_object(obj_addr, obj_size);

    // A self-forwarded object (forwardee == itself) is one whose
    // evacuation failed; it stays in place and must be kept live.
    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      _cm->markPrev(obj);
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      // Clear the self-forwarding pointer by restoring the default mark.
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
    } else {
      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr((HeapWord*) obj, obj_size);
      CollectedHeap::fill_with_object(mr);
    }
  }
};
   1.160 +
   1.161 +class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   1.162 +  G1CollectedHeap* _g1h;
   1.163 +  ConcurrentMark* _cm;
   1.164 +  OopsInHeapRegionClosure *_update_rset_cl;
   1.165 +  uint _worker_id;
   1.166 +
   1.167 +public:
   1.168 +  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
   1.169 +                                OopsInHeapRegionClosure* update_rset_cl,
   1.170 +                                uint worker_id) :
   1.171 +    _g1h(g1h), _update_rset_cl(update_rset_cl),
   1.172 +    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
   1.173 +
   1.174 +  bool doHeapRegion(HeapRegion *hr) {
   1.175 +    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
   1.176 +    bool during_conc_mark = _g1h->mark_in_progress();
   1.177 +
   1.178 +    assert(!hr->isHumongous(), "sanity");
   1.179 +    assert(hr->in_collection_set(), "bad CS");
   1.180 +
   1.181 +    if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
   1.182 +      if (hr->evacuation_failed()) {
   1.183 +        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
   1.184 +                                            during_initial_mark,
   1.185 +                                            during_conc_mark,
   1.186 +                                            _worker_id);
   1.187 +
   1.188 +        MemRegion mr(hr->bottom(), hr->end());
   1.189 +        // We'll recreate the prev marking info so we'll first clear
   1.190 +        // the prev bitmap range for this region. We never mark any
   1.191 +        // CSet objects explicitly so the next bitmap range should be
   1.192 +        // cleared anyway.
   1.193 +        _cm->clearRangePrevBitmap(mr);
   1.194 +
   1.195 +        hr->note_self_forwarding_removal_start(during_initial_mark,
   1.196 +                                               during_conc_mark);
   1.197 +
   1.198 +        // In the common case (i.e. when there is no evacuation
   1.199 +        // failure) we make sure that the following is done when
   1.200 +        // the region is freed so that it is "ready-to-go" when it's
   1.201 +        // re-allocated. However, when evacuation failure happens, a
   1.202 +        // region will remain in the heap and might ultimately be added
   1.203 +        // to a CSet in the future. So we have to be careful here and
   1.204 +        // make sure the region's RSet is ready for parallel iteration
   1.205 +        // whenever this might be required in the future.
   1.206 +        hr->rem_set()->reset_for_par_iteration();
   1.207 +        hr->reset_bot();
   1.208 +        _update_rset_cl->set_region(hr);
   1.209 +        hr->object_iterate(&rspc);
   1.210 +
   1.211 +        hr->note_self_forwarding_removal_end(during_initial_mark,
   1.212 +                                             during_conc_mark,
   1.213 +                                             rspc.marked_bytes());
   1.214 +      }
   1.215 +    }
   1.216 +    return false;
   1.217 +  }
   1.218 +};
   1.219 +
   1.220 +class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
   1.221 +protected:
   1.222 +  G1CollectedHeap* _g1h;
   1.223 +
   1.224 +public:
   1.225 +  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
   1.226 +    AbstractGangTask("G1 Remove Self-forwarding Pointers"),
   1.227 +    _g1h(g1h) { }
   1.228 +
   1.229 +  void work(uint worker_id) {
   1.230 +    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
   1.231 +    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
   1.232 +    UpdateRSetDeferred deferred_update(_g1h, &dcq);
   1.233 +
   1.234 +    OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
   1.235 +    if (!G1DeferredRSUpdate) {
   1.236 +      update_rset_cl = &immediate_update;
   1.237 +    }
   1.238 +
   1.239 +    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
   1.240 +
   1.241 +    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
   1.242 +    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
   1.243 +  }
   1.244 +};
   1.245 +
   1.246 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

mercurial