src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

Wed, 25 Jan 2012 12:58:23 -0500

author
tonyp
date
Wed, 25 Jan 2012 12:58:23 -0500
changeset 3464
eff609af17d7
parent 3463
d30fa85f9994
child 5811
d55c004e1d4d
permissions
-rw-r--r--

7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the concurrent marking threads have to scan them and mark everything reachable from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc

johnc@3412 1 /*
johnc@3412 2 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
johnc@3412 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
johnc@3412 4 *
johnc@3412 5 * This code is free software; you can redistribute it and/or modify it
johnc@3412 6 * under the terms of the GNU General Public License version 2 only, as
johnc@3412 7 * published by the Free Software Foundation.
johnc@3412 8 *
johnc@3412 9 * This code is distributed in the hope that it will be useful, but WITHOUT
johnc@3412 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
johnc@3412 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
johnc@3412 12 * version 2 for more details (a copy is included in the LICENSE file that
johnc@3412 13 * accompanied this code).
johnc@3412 14 *
johnc@3412 15 * You should have received a copy of the GNU General Public License version
johnc@3412 16 * 2 along with this work; if not, write to the Free Software Foundation,
johnc@3412 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
johnc@3412 18 *
johnc@3412 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
johnc@3412 20 * or visit www.oracle.com if you need additional information or have any
johnc@3412 21 * questions.
johnc@3412 22 *
johnc@3412 23 */
johnc@3412 24
johnc@3412 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
johnc@3412 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
johnc@3412 27
johnc@3412 28 #include "gc_implementation/g1/concurrentMark.inline.hpp"
johnc@3412 29 #include "gc_implementation/g1/dirtyCardQueue.hpp"
johnc@3412 30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
johnc@3412 31 #include "gc_implementation/g1/g1_globals.hpp"
johnc@3412 32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
johnc@3412 33 #include "gc_implementation/g1/heapRegion.hpp"
johnc@3412 34 #include "gc_implementation/g1/heapRegionRemSet.hpp"
johnc@3412 35 #include "utilities/workgroup.hpp"
johnc@3412 36
johnc@3412 37 // Closures and tasks associated with any self-forwarding pointers
johnc@3412 38 // installed as a result of an evacuation failure.
johnc@3412 39
johnc@3412 40 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
johnc@3412 41 private:
johnc@3412 42 G1CollectedHeap* _g1;
johnc@3412 43 DirtyCardQueue *_dcq;
johnc@3412 44 CardTableModRefBS* _ct_bs;
johnc@3412 45
johnc@3412 46 public:
johnc@3412 47 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
johnc@3412 48 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
johnc@3412 49
johnc@3412 50 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
johnc@3412 51 virtual void do_oop( oop* p) { do_oop_work(p); }
johnc@3412 52 template <class T> void do_oop_work(T* p) {
johnc@3412 53 assert(_from->is_in_reserved(p), "paranoia");
johnc@3412 54 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
johnc@3412 55 !_from->is_survivor()) {
johnc@3412 56 size_t card_index = _ct_bs->index_for(p);
johnc@3412 57 if (_ct_bs->mark_card_deferred(card_index)) {
johnc@3412 58 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
johnc@3412 59 }
johnc@3412 60 }
johnc@3412 61 }
johnc@3412 62 };
johnc@3412 63
// Object closure applied to every object in an evacuation-failed
// region: it refines the BOT entry for each object, keeps the
// self-forwarded (live) ones — restoring their mark words and marking
// them in the prev bitmap — and overwrites the evacuated/dead ones
// with filler objects. It also accumulates the number of live
// (self-forwarded) bytes, retrievable via marked_bytes().
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;                            // the region being iterated
  size_t _marked_bytes;                       // bytes of self-forwarded objects seen so far
  OopsInHeapRegionClosure *_update_rset_cl;   // applied to fields of live objects to recreate RSet entries
  bool _during_initial_mark;
  bool _during_conc_mark;                     // NOTE(review): stored but not read in this closure's visible code
  uint _worker_id;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id) { }

  // Total size, in bytes, of the self-forwarded objects encountered.
  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    // Refine the BOT entry for this object (the region's BOT was reset
    // before iteration started — see the caller).
    _hr->update_bot_for_object(obj_addr, obj_size);

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move (its forwarding pointer points to
      // itself — the self-forwarding installed on evacuation failure).

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      _cm->markPrev(obj);
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      // Restore the mark word that the self-forwarding pointer
      // displaced.
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
    } else {
      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr((HeapWord*) obj, obj_size);
      CollectedHeap::fill_with_object(mr);
    }
  }
};
johnc@3412 157
johnc@3412 158 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
johnc@3412 159 G1CollectedHeap* _g1h;
johnc@3412 160 ConcurrentMark* _cm;
johnc@3412 161 OopsInHeapRegionClosure *_update_rset_cl;
johnc@3463 162 uint _worker_id;
johnc@3412 163
johnc@3412 164 public:
johnc@3412 165 RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
johnc@3463 166 OopsInHeapRegionClosure* update_rset_cl,
johnc@3463 167 uint worker_id) :
johnc@3412 168 _g1h(g1h), _update_rset_cl(update_rset_cl),
johnc@3463 169 _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
johnc@3412 170
johnc@3412 171 bool doHeapRegion(HeapRegion *hr) {
tonyp@3416 172 bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
tonyp@3416 173 bool during_conc_mark = _g1h->mark_in_progress();
tonyp@3416 174
johnc@3412 175 assert(!hr->isHumongous(), "sanity");
johnc@3412 176 assert(hr->in_collection_set(), "bad CS");
johnc@3412 177
johnc@3412 178 if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
johnc@3412 179 if (hr->evacuation_failed()) {
tonyp@3416 180 RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
tonyp@3416 181 during_initial_mark,
johnc@3463 182 during_conc_mark,
johnc@3463 183 _worker_id);
tonyp@3416 184
tonyp@3416 185 MemRegion mr(hr->bottom(), hr->end());
tonyp@3416 186 // We'll recreate the prev marking info so we'll first clear
tonyp@3416 187 // the prev bitmap range for this region. We never mark any
tonyp@3416 188 // CSet objects explicitly so the next bitmap range should be
tonyp@3416 189 // cleared anyway.
tonyp@3416 190 _cm->clearRangePrevBitmap(mr);
tonyp@3416 191
tonyp@3416 192 hr->note_self_forwarding_removal_start(during_initial_mark,
tonyp@3416 193 during_conc_mark);
johnc@3412 194
johnc@3412 195 // In the common case (i.e. when there is no evacuation
johnc@3412 196 // failure) we make sure that the following is done when
johnc@3412 197 // the region is freed so that it is "ready-to-go" when it's
johnc@3412 198 // re-allocated. However, when evacuation failure happens, a
johnc@3412 199 // region will remain in the heap and might ultimately be added
johnc@3412 200 // to a CSet in the future. So we have to be careful here and
johnc@3412 201 // make sure the region's RSet is ready for parallel iteration
johnc@3412 202 // whenever this might be required in the future.
johnc@3412 203 hr->rem_set()->reset_for_par_iteration();
johnc@3412 204 hr->reset_bot();
johnc@3412 205 _update_rset_cl->set_region(hr);
johnc@3412 206 hr->object_iterate(&rspc);
johnc@3412 207
tonyp@3416 208 hr->note_self_forwarding_removal_end(during_initial_mark,
tonyp@3416 209 during_conc_mark,
tonyp@3416 210 rspc.marked_bytes());
johnc@3412 211 }
johnc@3412 212 }
johnc@3412 213 return false;
johnc@3412 214 }
johnc@3412 215 };
johnc@3412 216
johnc@3412 217 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
johnc@3412 218 protected:
johnc@3412 219 G1CollectedHeap* _g1h;
johnc@3412 220
johnc@3412 221 public:
johnc@3412 222 G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
johnc@3412 223 AbstractGangTask("G1 Remove Self-forwarding Pointers"),
johnc@3412 224 _g1h(g1h) { }
johnc@3412 225
johnc@3412 226 void work(uint worker_id) {
johnc@3412 227 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
johnc@3412 228 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
johnc@3412 229 UpdateRSetDeferred deferred_update(_g1h, &dcq);
johnc@3412 230
johnc@3412 231 OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
johnc@3412 232 if (!G1DeferredRSUpdate) {
johnc@3412 233 update_rset_cl = &immediate_update;
johnc@3412 234 }
johnc@3412 235
johnc@3463 236 RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
johnc@3412 237
johnc@3412 238 HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
johnc@3412 239 _g1h->collection_set_iterate_from(hr, &rsfp_cl);
johnc@3412 240 }
johnc@3412 241 };
johnc@3412 242
johnc@3412 243 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

mercurial