src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7218:6948da6d7c13
child:       7535:7ae4e26cb1e0
permissions: -rw-r--r--
8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks: such benchmarks never touch most of that memory, so without the eager initialization the operating system would never actually commit the backing pages. The fix is to skip the initialization entirely whenever the initialization value of a data structure matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
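
A minimal sketch of the idea described in the summary (the helper name and signature are hypothetical; the actual change lives in G1's auxiliary data structure setup code, not in the header below). Freshly committed pages read as zero, so an explicit clear is only needed for non-zero initial values:

#include <string.h> // memset

// Hypothetical helper: 'base' points at just-committed virtual memory, which
// the operating system guarantees to read as zero on first use.
static void initialize_aux_data(void* base, size_t size_in_bytes, unsigned char init_value) {
  // If the requested initial value is zero, do nothing: the pages already
  // read as zero, and leaving them untouched lets the OS defer backing them
  // with physical memory until they are actually used.
  if (init_value != 0) {
    memset(base, init_value, size_in_bytes);
  }
}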

/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"
// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.

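// Background (illustrative note, not part of the original header): an object
// that fails to evacuate is "self-forwarded" by installing a forwarding
// pointer that points at the object itself, roughly
//
//   obj->forward_to(obj);  // the parallel path uses forward_to_atomic();
//                          // see G1CollectedHeap::handle_evacuation_failure_par()
//
// which is exactly the condition (obj->is_forwarded() && obj->forwardee() == obj)
// that RemoveSelfForwardPtrObjClosure::do_object() below tests for.
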
// Records, for deferred processing, the cards spanned by references that will
// need remembered set updates, by enqueueing them on a dirty card queue.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  G1SATBCardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _dcq(dcq), _ct_bs(_g1->g1_barrier_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    // Only references that point out of the region need an update, and no
    // entries are needed for references originating in survivor regions:
    // young regions are always collected in their entirety.
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};

// Walks the objects in a region after an evacuation failure: self-forwarded
// objects are kept live (marked in the prev bitmap and re-scanned to recreate
// remembered set entries), while the ranges covered by dead or already
// evacuated objects are filled with dummy objects; the region's BOT is
// refined along the way.
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;
  HeapWord* _end_of_last_gap;
  HeapWord* _last_gap_threshold;
  HeapWord* _last_obj_threshold;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id),
    _end_of_last_gap(hr->bottom()),
    _last_gap_threshold(hr->bottom()),
    _last_obj_threshold(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However, that caused complications with the block offset table (BOT),
  // in particular when there were two TLABs, one of them partially refined:
  //   |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of TLAB_1 and the first object of
  // TLAB_2 were coalesced, then the cards of the unrefined part would
  // point into the middle of the filler object.
  // The current approach is to not coalesce and to leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across, so
  // the comment above is no longer relevant and we should be able to
  // coalesce dead objects if we want to. (See the illustrative sketch
  // after this class for the refinement pattern.)
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (_end_of_last_gap != obj_addr) {
      // There was a gap before obj_addr.
      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
    }

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are in the
        // initial-mark pause (since, normally, we only mark objects
        // pointed to by roots if we succeed in copying them). By
        // marking all self-forwarded objects we ensure that we mark
        // any that are still pointed to by roots. During concurrent
        // marking, and after initial-mark, we don't need to mark any
        // objects explicitly and all objects in the CSet are
        // considered (implicitly) live. So, we won't mark them
        // explicitly and we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards in the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards in
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
    } else {
      // The object has either been evacuated or is dead. Fill the
      // memory it used to occupy with a dummy object.
      MemRegion mr(obj_addr, obj_size);
      CollectedHeap::fill_with_object(mr);

      // Clear, in the prev bitmap, the marks of this object and of any
      // dead objects we skipped over (the gap) while iterating over
      // the region.
      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
    }
    _end_of_last_gap = obj_end;
    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
  }
};
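
// An illustrative sketch (not part of the original header) of the BOT
// refinement mentioned in the comment inside RemoveSelfForwardPtrObjClosure:
// after HeapRegion::reset_bot(), walking the region object by object and
// calling HeapRegion::cross_threshold() for each live range (and each gap of
// dead objects) rebuilds the BOT entries so that later card scans can find
// the first object on each card. Simplified, the pattern is:
//
//   hr->reset_bot();
//   HeapWord* cur = hr->bottom();
//   while (cur < hr->top()) {
//     oop obj = oop(cur);
//     HeapWord* next = cur + obj->size();
//     hr->cross_threshold(cur, next); // refine BOT entries that [cur, next) crosses
//     cur = next;
//   }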

// Iterates over the regions of the collection set, removing self-forwarding
// pointers from any region that failed evacuation.
class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;

  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                uint worker_id) :
    _g1h(g1h), _cm(_g1h->concurrent_mark()), _worker_id(worker_id),
    _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq) {
  }

  bool doHeapRegion(HeapRegion* hr) {
    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();

    assert(!hr->isHumongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");

    if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                            during_initial_mark,
                                            during_conc_mark,
                                            _worker_id);

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl.set_region(hr);
        hr->object_iterate(&rspc);

        hr->rem_set()->clean_strong_code_roots(hr);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             rspc.marked_bytes());
      }
    }
    return false;
  }
};

// Parallel task that drives RemoveSelfForwardPtrHRClosure over the
// collection set.
class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;

public:
  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"),
    _g1h(g1h) { }

  void work(uint worker_id) {
    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);

    // Each worker starts its iteration at a different point in the
    // collection set so that the region-claiming work is spread out.
    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
  }
};
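
// For context (an illustrative sketch, not part of this header): the task is
// driven from G1CollectedHeap::remove_self_forwarding_pointers() in
// g1CollectedHeap.cpp, roughly along these lines (details may differ):
//
//   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
//   if (G1CollectedHeap::use_parallel_gc_threads()) {
//     set_par_threads();
//     workers()->run_task(&rsfp_task);
//     set_par_threads(0);
//   } else {
//     rsfp_task.work(0);
//   }
//   reset_cset_heap_region_claim_values();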

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
