src/share/vm/gc_implementation/g1/g1RemSet.cpp

changeset:   8280:f3f2f71d2dc8
author:      dbuck
date:        Tue, 19 Jan 2016 18:16:40 +0000
parent:      7990:1f646daf0d67
children:    8604:04d83ba48607, 9327:f96fcd9e1e1b
permissions: -rw-r--r--

8139424: SIGSEGV, Problematic frame: # V [libjvm.so+0xd0c0cc] void InstanceKlass::oop_oop_iterate_oop_maps_specialized<true,oopDesc*,MarkAndPushClosure>
Summary: The crash was caused by a faulty eager humongous reclaim. A live object was reclaimed because the call to cleanupHRRS was made after dirtying cards and clearing the remembered sets for the humongous object, which could cause one or more cards to be missed.
Reviewed-by: tbenson, kbarrett, tschatzl
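
A minimal sketch of the ordering described in the summary, assuming hypothetical helper names (the reclaim functions and dirty_cards_for() are illustrative, not the actual patch):

    // Illustrative only: hypothetical outline of eager humongous reclaim.
    // cleanupHRRS() and rem_set() are entities from this file;
    // dirty_cards_for() stands in for the card dirtying step.
    void reclaim_humongous_buggy(HeapRegion* r) {
      dirty_cards_for(r);     // dirty the cards covering the humongous object
      r->rem_set()->clear();  // clear its remembered set
      cleanupHRRS();          // too late: per the summary, cards can be missed
    }

    void reclaim_humongous_fixed(HeapRegion* r) {
      cleanupHRRS();          // complete remembered set cleanup first
      dirty_cards_for(r);     // then dirty the cards
      r->rem_set()->clear();  // and clear the remembered set
    }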

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/intHisto.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif

G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : _g1(g1), _conc_refine_cards(0),
    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _cset_rs_update_cl(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0),
    _prev_period_summary()
{
  guarantee(n_workers() > 0, "There should be some workers");
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
  if (G1SummarizeRSetStats) {
    _prev_period_summary.initialize(this);
  }
}

G1RemSet::~G1RemSet() {
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl, mtGC);
}

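// Counts the cards spanned by each non-clean MemRegion that lies in the
// G1 reserved heap, and remembers the start of the first such region.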
void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}

class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;

  G1ParPushHeapRSClosure* _oc;
  CodeBlobClosure* _code_root_cl;

  G1BlockOffsetSharedArray* _bot_shared;
  G1SATBCardTableModRefBS *_ct_bs;

  double _strong_code_root_scan_time_sec;
  uint   _worker_i;
  int    _block_size;
  bool   _try_claimed;

public:
  ScanRSClosure(G1ParPushHeapRSClosure* oc,
                CodeBlobClosure* code_root_cl,
                uint worker_i) :
    _oc(oc),
    _code_root_cl(code_root_cl),
    _strong_code_root_scan_time_sec(0.0),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = _g1h->g1_barrier_set();
    _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion *r) {
    // Stack allocate the DirtyCardToOopClosure instance
    HeapRegionDCTOC cl(_g1h, r, _oc,
                       CardTableModRefBS::Precise);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
    MemRegion mr = pre_gc_allocated.intersection(card_region);
    if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
      // We mark the card as "claimed" lazily (so races are possible
      // but they're benign), which reduces the number of duplicate
      // scans (the rsets of the regions in the cset can intersect).
      _ct_bs->set_card_claimed(index);
      _cards_done++;
      cl.do_MemRegion(mr);
    }
  }

  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    gclog_or_tty->print_cr("T " UINT32_FORMAT " Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card %p: "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start, card_start + G1BlockOffsetSharedArray::N_words);
  }

  void scan_strong_code_roots(HeapRegion* r) {
    double scan_start = os::elapsedTime();
    r->strong_code_roots_do(_code_root_cl);
    _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
  }

  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently, and therefore we won't need to
    // add regions of the collection set to the dirty cards region.
    _g1h->push_dirty_cards_region(r);
    // If we didn't return above, then
    //   _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.

    HeapRegionRemSetIterator iter(hrrs);
    size_t card_index;

    // We claim cards in blocks to reduce contention. The block size is
    // determined by the G1RSetScanBlockSize parameter.
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
      if (current_card >= jump_to_card + _block_size) {
        jump_to_card = hrrs->iter_claimed_next(_block_size);
      }
      if (current_card < jump_to_card) continue;
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif

      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      _cards++;

      if (!card_region->is_on_dirty_cards_region_list()) {
        _g1h->push_dirty_cards_region(card_region);
      }

      // If the card is dirty, then we will scan it during updateRS.
      if (!card_region->in_collection_set() &&
          !_ct_bs->is_card_dirty(card_index)) {
        scanCard(card_index, card_region);
      }
    }
    if (!_try_claimed) {
      // Scan the strong code root list attached to the current region
      scan_strong_code_roots(r);

      hrrs->set_iter_complete();
    }
    return false;
  }

  double strong_code_root_scan_time_sec() {
    return _strong_code_root_scan_time_sec;
  }

  size_t cards_done() { return _cards_done;}
  size_t cards_looked_up() { return _cards;}
};

void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
                      CodeBlobClosure* code_root_cl,
                      uint worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);

  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);

  _g1->collection_set_iterate_from(startRegion, &scanRScl);
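  // Second pass: allow this worker to help finish regions that other
  // workers have claimed but not yet completed (see the _try_claimed
  // handling in ScanRSClosure::doHeapRegion).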
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
                            - scanRScl.strong_code_root_scan_time_sec();

  assert(_cards_scanned != NULL, "invariant");
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec());
}

// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");

    if (_g1rs->refine_card(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (G1CollectedHeap::into_cset_dirty_card_queue_set())
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};

void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
  // Apply the given closure to all remaining log entries.
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
}

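// Completes pending remembered set cleanup work by delegating to
// HeapRegionRemSet::cleanup(). Per the summary above (8139424), the point
// at which this runs relative to card dirtying and remembered set clearing
// is what matters for eager humongous reclaim.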
void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
                                           CodeBlobClosure* code_root_cl,
                                           uint worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif

  // We cache the 'oc' closure in the appropriate _cset_rs_update_cl
  // slot for this worker.
  assert(worker_i < n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp).  Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are passed to the
  // DirtyCardQueueSet that is used to manage RSet updates
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");

  updateRS(&into_cset_dcq, worker_i);
  scanRS(oc, code_root_cl, worker_i);

  // We now clear the cached value of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  guarantee(_cards_scanned == NULL, "invariant");
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee(_cards_scanned != NULL, "invariant");
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i) {
    _total_cards_scanned += _cards_scanned[i];
  }
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC);
  _cards_scanned = NULL;
  // Cleanup after copy
  _g1->set_refine_cte_cl_concurrency(true);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    double restore_remembered_set_start = os::elapsedTime();

    // Restore remembered sets for the regions pointing into the collection set.
    // We just need to transfer the completed buffers from the DirtyCardQueueSet
    // used to hold cards that contain references that point into the collection set
    // to the DCQS used to hold the deferred RS updates.
    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
}

class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(_g1h->g1_barrier_set()) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

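// Parallel version of scrub(): workers claim chunks of the region sequence
// using claim_val, so each region is scrubbed exactly once.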
void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                         uint worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl,
                                       worker_num,
                                       n_workers(),
                                       claim_val);
}

G1TriggerClosure::G1TriggerClosure() :
  _triggered(false) { }

G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
                                                             OopClosure* oop_cl) :
  _trigger_cl(t_cl), _oop_cl(oop_cl) { }

G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
  _c1(c1), _c2(c2) { }

G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                              G1RemSet* rs,
                              G1ParPushHeapRSClosure* push_ref_cl,
                              bool record_refs_into_cset,
                              uint worker_i) :
  _g1(g1h), _g1_rem_set(rs), _from(NULL),
  _record_refs_into_cset(record_refs_into_cset),
  _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }

// Returns true if the given card contains references that point
// into the collection set, if we're checking for such references;
// false otherwise.

bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                           bool check_for_refs_into_cset) {
  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
                 p2i(card_ptr),
                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
                 _ct_bs->addr_for(card_ptr),
                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // but it can happen. So, the extra check below filters out those
  // cards.
  if (r->is_young()) {
    return false;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards in the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  if (hot_card_cache->use_cache()) {
    assert(!check_for_refs_into_cset, "sanity");
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    card_ptr = hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    }

    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);

    // Checking whether the region we got back from the cache
    // is young here is inappropriate. The region could have been
    // freed, reallocated and tagged as young while in the cache.
    // Hence we could see its young type change at any time.
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.  This is not safe without a perm
  // gen at the upper end of the heap.
  HeapWord* end   = start + CardTableModRefBS::card_size_in_words;
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
  if (check_for_refs_into_cset) {
    // ConcurrentG1RefineThreads have worker numbers larger than what
    // _cset_rs_update_cl[] is set up to handle. But those threads should
    // only be active outside of a collection which means that when they
    // reach here they should have check_for_refs_into_cset == false.
    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
  }
  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 _g1->g1_rem_set(),
                                                 oops_in_heap_closure,
                                                 check_for_refs_into_cset,
                                                 worker_i);
  update_rs_oop_cl.set_from(r);

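  // Detection chain: into_cs_cl sets trigger_cl when it encounters a
  // reference into the collection set, invoke_cl short-circuits further
  // checks once triggered, and mux applies both the detection chain and
  // the RS-update closure to every reference on the card.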
  G1TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region, making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young,
                                        card_ptr);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in.  Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _conc_refine_cards++;
  }

  // This gets set to true if the card being refined has
  // references that point into the collection set.
  bool has_refs_into_cset = trigger_cl.triggered();

  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
         "invalid result at non safepoint");

  return has_refs_into_cset;
}

void G1RemSet::print_periodic_summary_info(const char* header) {
  G1RemSetSummary current;
  current.initialize(this);

  _prev_period_summary.subtract_from(&current);
  print_summary_info(&_prev_period_summary, header);

  _prev_period_summary.set(&current);
}

void G1RemSet::print_summary_info() {
  G1RemSetSummary current;
  current.initialize(this);

  print_summary_info(&current, " Cumulative RS summary");
}

void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) {
  assert(summary != NULL, "just checking");

  if (header != NULL) {
    gclog_or_tty->print_cr("%s", header);
  }

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr("  # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  summary->print_on(gclog_or_tty);
}

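// Flush all pending refinement work (dirty card logs and the hot card
// cache) so that verification sees fully up-to-date remembered sets.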
void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC) &&
      (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }

    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    bool use_hot_card_cache = hot_card_cache->use_cache();
    hot_card_cache->set_use_cache(false);

    DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
    updateRS(&into_cset_dcq, 0);
    _g1->into_cset_dirty_card_queue_set().clear();

    hot_card_cache->set_use_cache(use_hot_card_cache);
    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}
