Fri, 10 Oct 2014 15:51:58 +0200
8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory backing G1's auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: those benchmarks never touch most of that memory, so without the explicit initialization the operating system never actually commits the backing pages. The fix: if the initialization value of a data structure matches the default contents of freshly committed memory (zero), do nothing (see the sketch below).
Reviewed-by: jwilhelm, brutisso
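
The idea behind the fix, as a minimal illustrative sketch rather than the actual HotSpot change: freshly committed anonymous memory is zero-filled by the operating system and is not backed by physical memory until first written, so an explicit fill is only needed when the requested initial value is non-zero. The commit_and_initialize() helper below is hypothetical and assumes a POSIX mmap()-based commit path.

  #include <cstddef>
  #include <cstring>
  #include <sys/mman.h>

  // Hypothetical helper (illustration only): commit 'bytes' of virtual memory
  // and initialize its contents to 'value'.
  static void* commit_and_initialize(size_t bytes, unsigned char value) {
    void* base = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
      return NULL;
    }
    // The freshly mapped pages already read as zero and stay uncommitted
    // until touched; writing zeros here would force the OS to commit every
    // page, so only touch the memory for a non-zero fill value.
    if (value != 0) {
      std::memset(base, value, bytes);
    }
    return base;
  }
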
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
27 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
28 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
31 #include "gc_implementation/g1/g1HotCardCache.hpp"
32 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
34 #include "gc_implementation/g1/g1RemSet.inline.hpp"
35 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
37 #include "memory/iterator.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "utilities/intHisto.hpp"
41 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
43 #define CARD_REPEAT_HISTO 0
45 #if CARD_REPEAT_HISTO
46 static size_t ct_freq_sz;
47 static jbyte* ct_freq = NULL;
49 void init_ct_freq_table(size_t heap_sz_bytes) {
50 if (ct_freq == NULL) {
51 ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
52 ct_freq = new jbyte[ct_freq_sz];
53 for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
54 }
55 }
57 void ct_freq_note_card(size_t index) {
58 assert(0 <= index && index < ct_freq_sz, "Bounds error.");
59 if (ct_freq[index] < 100) { ct_freq[index]++; }
60 }
62 static IntHistogram card_repeat_count(10, 10);
64 void ct_freq_update_histo_and_reset() {
65 for (size_t j = 0; j < ct_freq_sz; j++) {
66 card_repeat_count.add_entry(ct_freq[j]);
67 ct_freq[j] = 0;
68 }
70 }
71 #endif
73 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
74 : _g1(g1), _conc_refine_cards(0),
75 _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
76 _cg1r(g1->concurrent_g1_refine()),
77 _cset_rs_update_cl(NULL),
78 _cards_scanned(NULL), _total_cards_scanned(0),
79 _prev_period_summary()
80 {
81 _seq_task = new SubTasksDone(NumSeqTasks);
82 guarantee(n_workers() > 0, "There should be some workers");
83 _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
84 for (uint i = 0; i < n_workers(); i++) {
85 _cset_rs_update_cl[i] = NULL;
86 }
87 if (G1SummarizeRSetStats) {
88 _prev_period_summary.initialize(this);
89 }
90 }
92 G1RemSet::~G1RemSet() {
93 delete _seq_task;
94 for (uint i = 0; i < n_workers(); i++) {
95 assert(_cset_rs_update_cl[i] == NULL, "it should be");
96 }
97 FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
98 }
100 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
101 if (_g1->is_in_g1_reserved(mr.start())) {
102 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
103 if (_start_first == NULL) _start_first = mr.start();
104 }
105 }
107 class ScanRSClosure : public HeapRegionClosure {
108 size_t _cards_done, _cards;
109 G1CollectedHeap* _g1h;
111 OopsInHeapRegionClosure* _oc;
112 CodeBlobClosure* _code_root_cl;
114 G1BlockOffsetSharedArray* _bot_shared;
115 G1SATBCardTableModRefBS *_ct_bs;
117 double _strong_code_root_scan_time_sec;
118 uint _worker_i;
119 int _block_size;
120 bool _try_claimed;
122 public:
123 ScanRSClosure(OopsInHeapRegionClosure* oc,
124 CodeBlobClosure* code_root_cl,
125 uint worker_i) :
126 _oc(oc),
127 _code_root_cl(code_root_cl),
128 _strong_code_root_scan_time_sec(0.0),
129 _cards(0),
130 _cards_done(0),
131 _worker_i(worker_i),
132 _try_claimed(false)
133 {
134 _g1h = G1CollectedHeap::heap();
135 _bot_shared = _g1h->bot_shared();
136 _ct_bs = _g1h->g1_barrier_set();
137 _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
138 }
140 void set_try_claimed() { _try_claimed = true; }
142 void scanCard(size_t index, HeapRegion *r) {
143 // Stack allocate the DirtyCardToOopClosure instance
144 HeapRegionDCTOC cl(_g1h, r, _oc,
145 CardTableModRefBS::Precise,
146 HeapRegionDCTOC::IntoCSFilterKind);
148 // Set the "from" region in the closure.
149 _oc->set_region(r);
150 HeapWord* card_start = _bot_shared->address_for_index(index);
151 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
152 Space *sp = SharedHeap::heap()->space_containing(card_start);
153 MemRegion sm_region = sp->used_region_at_save_marks();
154 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
155 if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
156 // We mark the card as "claimed" lazily (so races are possible
157 // but they're benign), which reduces the number of duplicate
158 // scans (the rsets of the regions in the cset can intersect).
159 _ct_bs->set_card_claimed(index);
160 _cards_done++;
161 cl.do_MemRegion(mr);
162 }
163 }
165 void printCard(HeapRegion* card_region, size_t card_index,
166 HeapWord* card_start) {
167 gclog_or_tty->print_cr("T " UINT32_FORMAT " Region [" PTR_FORMAT ", " PTR_FORMAT ") "
168 "RS names card %p: "
169 "[" PTR_FORMAT ", " PTR_FORMAT ")",
170 _worker_i,
171 card_region->bottom(), card_region->end(),
172 card_index,
173 card_start, card_start + G1BlockOffsetSharedArray::N_words);
174 }
176 void scan_strong_code_roots(HeapRegion* r) {
177 double scan_start = os::elapsedTime();
178 r->strong_code_roots_do(_code_root_cl);
179 _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
180 }
182 bool doHeapRegion(HeapRegion* r) {
183 assert(r->in_collection_set(), "should only be called on elements of CS.");
184 HeapRegionRemSet* hrrs = r->rem_set();
185 if (hrrs->iter_is_complete()) return false; // All done.
186 if (!_try_claimed && !hrrs->claim_iter()) return false;
187 // If we ever free the collection set concurrently, we should also
189 // clear the card table concurrently; therefore we won't need to
190 // add regions of the collection set to the dirty cards region list.
190 _g1h->push_dirty_cards_region(r);
191 // If we didn't return above, then
192 // _try_claimed || r->claim_iter()
193 // is true: either we're supposed to work on claimed-but-not-complete
194 // regions, or we successfully claimed the region.
196 HeapRegionRemSetIterator iter(hrrs);
197 size_t card_index;
199 // We claim cards in blocks to reduce contention. The block size is determined by
200 // the G1RSetScanBlockSize parameter.
201 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
202 for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
203 if (current_card >= jump_to_card + _block_size) {
204 jump_to_card = hrrs->iter_claimed_next(_block_size);
205 }
206 if (current_card < jump_to_card) continue;
207 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
208 #if 0
209 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
210 card_start, card_start + CardTableModRefBS::card_size_in_words);
211 #endif
213 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
214 _cards++;
216 if (!card_region->is_on_dirty_cards_region_list()) {
217 _g1h->push_dirty_cards_region(card_region);
218 }
220 // If the card is dirty, then we will scan it during updateRS.
221 if (!card_region->in_collection_set() &&
222 !_ct_bs->is_card_dirty(card_index)) {
223 scanCard(card_index, card_region);
224 }
225 }
226 if (!_try_claimed) {
227 // Scan the strong code root list attached to the current region
228 scan_strong_code_roots(r);
230 hrrs->set_iter_complete();
231 }
232 return false;
233 }
235 double strong_code_root_scan_time_sec() {
236 return _strong_code_root_scan_time_sec;
237 }
239 size_t cards_done() { return _cards_done;}
240 size_t cards_looked_up() { return _cards;}
241 };
243 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
244 CodeBlobClosure* code_root_cl,
245 uint worker_i) {
246 double rs_time_start = os::elapsedTime();
247 HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
249 ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
251 _g1->collection_set_iterate_from(startRegion, &scanRScl);
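  // Second pass: with _try_claimed set, also help with regions that were
  // claimed by another worker but whose iteration is not yet complete.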
252 scanRScl.set_try_claimed();
253 _g1->collection_set_iterate_from(startRegion, &scanRScl);
255 double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
256 - scanRScl.strong_code_root_scan_time_sec();
258 assert(_cards_scanned != NULL, "invariant");
259 _cards_scanned[worker_i] = scanRScl.cards_done();
261 _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
262 _g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
263 scanRScl.strong_code_root_scan_time_sec() * 1000.0);
264 }
266 // Closure used for updating RSets and recording references that
267 // point into the collection set. Only called during an
268 // evacuation pause.
270 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
271 G1RemSet* _g1rs;
272 DirtyCardQueue* _into_cset_dcq;
273 public:
274 RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
275 DirtyCardQueue* into_cset_dcq) :
276 _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
277 {}
278 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
279 // The only time we care about recording cards that
280 // contain references that point into the collection set
281 // is during RSet updating within an evacuation pause.
282 // In this case worker_i should be the id of a GC worker thread.
283 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
284 assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
286 if (_g1rs->refine_card(card_ptr, worker_i, true)) {
287 // 'card_ptr' contains references that point into the collection
288 // set. We need to record the card in the DCQS
289 // (G1CollectedHeap::into_cset_dirty_card_queue_set())
290 // that's used for that purpose.
291 //
292 // Enqueue the card
293 _into_cset_dcq->enqueue(card_ptr);
294 }
295 return true;
296 }
297 };
299 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
300 double start = os::elapsedTime();
301 // Apply the given closure to all remaining log entries.
302 RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
304 _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
306 // Now there should be no dirty cards.
307 if (G1RSLogCheckCardTable) {
308 CountNonCleanMemRegionClosure cl(_g1);
309 _ct_bs->mod_card_iterate(&cl);
310 // XXX This isn't true any more: keeping cards of young regions
311 // marked dirty broke it. Need some reasonable fix.
312 guarantee(cl.n() == 0, "Card table should be clean.");
313 }
315 _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
316 }
318 void G1RemSet::cleanupHRRS() {
319 HeapRegionRemSet::cleanup();
320 }
322 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
323 CodeBlobClosure* code_root_cl,
324 uint worker_i) {
325 #if CARD_REPEAT_HISTO
326 ct_freq_update_histo_and_reset();
327 #endif
329 // We cache the value of 'oc' closure into the appropriate slot in the
330 // _cset_rs_update_cl for this worker
331 assert(worker_i < n_workers(), "sanity");
332 _cset_rs_update_cl[worker_i] = oc;
334 // A DirtyCardQueue that is used to hold cards containing references
335 // that point into the collection set. This DCQ is associated with a
336 // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
337 // circumstances (i.e. the pause successfully completes), these cards
338 // are just discarded (there's no need to update the RSets of regions
339 // that were in the collection set - after the pause these regions
340 // are wholly 'free' of live objects). In the event of an evacuation
341 // failure the cards/buffers in this queue set are passed to the
342 // DirtyCardQueueSet that is used to manage RSet updates
343 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
345 assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
347 // The two flags below were introduced temporarily to serialize
348 // the updating and scanning of remembered sets. There are some
349 // race conditions when these two operations are done in parallel
350 // and they are causing failures. When we resolve said race
351 // conditions, we'll revert back to parallel remembered set
352 // updating and scanning. See CRs 6677707 and 6677708.
353 if (G1UseParallelRSetUpdating || (worker_i == 0)) {
354 updateRS(&into_cset_dcq, worker_i);
355 } else {
356 _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
357 _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
358 }
359 if (G1UseParallelRSetScanning || (worker_i == 0)) {
360 scanRS(oc, code_root_cl, worker_i);
361 } else {
362 _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
363 }
365 // We now clear the cached values of _cset_rs_update_cl for this worker
366 _cset_rs_update_cl[worker_i] = NULL;
367 }
369 void G1RemSet::prepare_for_oops_into_collection_set_do() {
370 cleanupHRRS();
371 _g1->set_refine_cte_cl_concurrency(false);
372 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
373 dcqs.concatenate_logs();
375 guarantee( _cards_scanned == NULL, "invariant" );
376 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
377 for (uint i = 0; i < n_workers(); ++i) {
378 _cards_scanned[i] = 0;
379 }
380 _total_cards_scanned = 0;
381 }
383 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
384 guarantee( _cards_scanned != NULL, "invariant" );
385 _total_cards_scanned = 0;
386 for (uint i = 0; i < n_workers(); ++i) {
387 _total_cards_scanned += _cards_scanned[i];
388 }
389 FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC);
390 _cards_scanned = NULL;
391 // Cleanup after copy
392 _g1->set_refine_cte_cl_concurrency(true);
393 // Set all cards back to clean.
394 _g1->cleanUpCardTable();
396 DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
397 int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
399 if (_g1->evacuation_failed()) {
400 double restore_remembered_set_start = os::elapsedTime();
402 // Restore remembered sets for the regions pointing into the collection set.
403 // We just need to transfer the completed buffers from the DirtyCardQueueSet
404 // used to hold cards that contain references that point into the collection set
405 // to the DCQS used to hold the deferred RS updates.
406 _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
407 _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
408 }
410 // Free any completed buffers in the DirtyCardQueueSet used to hold cards
411 // which contain references that point into the collection set.
412 _g1->into_cset_dirty_card_queue_set().clear();
413 assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
414 "all buffers should be freed");
415 _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
416 }
418 class ScrubRSClosure: public HeapRegionClosure {
419 G1CollectedHeap* _g1h;
420 BitMap* _region_bm;
421 BitMap* _card_bm;
422 CardTableModRefBS* _ctbs;
423 public:
424 ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
425 _g1h(G1CollectedHeap::heap()),
426 _region_bm(region_bm), _card_bm(card_bm),
427 _ctbs(_g1h->g1_barrier_set()) {}
429 bool doHeapRegion(HeapRegion* r) {
430 if (!r->continuesHumongous()) {
431 r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
432 }
433 return false;
434 }
435 };
437 void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
438 ScrubRSClosure scrub_cl(region_bm, card_bm);
439 _g1->heap_region_iterate(&scrub_cl);
440 }
442 void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
443 uint worker_num, int claim_val) {
444 ScrubRSClosure scrub_cl(region_bm, card_bm);
445 _g1->heap_region_par_iterate_chunked(&scrub_cl,
446 worker_num,
447 n_workers(),
448 claim_val);
449 }
451 G1TriggerClosure::G1TriggerClosure() :
452 _triggered(false) { }
454 G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
455 OopClosure* oop_cl) :
456 _trigger_cl(t_cl), _oop_cl(oop_cl) { }
458 G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
459 _c1(c1), _c2(c2) { }
461 G1UpdateRSOrPushRefOopClosure::
462 G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
463 G1RemSet* rs,
464 OopsInHeapRegionClosure* push_ref_cl,
465 bool record_refs_into_cset,
466 uint worker_i) :
467 _g1(g1h), _g1_rem_set(rs), _from(NULL),
468 _record_refs_into_cset(record_refs_into_cset),
469 _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
471 // Returns true if the given card contains references that point
472 // into the collection set, if we're checking for such references;
473 // false otherwise.
475 bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
476 bool check_for_refs_into_cset) {
477 assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
478 err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
479 p2i(card_ptr),
480 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
481 _ct_bs->addr_for(card_ptr),
482 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
484 // If the card is no longer dirty, nothing to do.
485 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
486 // No need to return that this card contains refs that point
487 // into the collection set.
488 return false;
489 }
491 // Construct the region representing the card.
492 HeapWord* start = _ct_bs->addr_for(card_ptr);
493 // And find the region containing it.
494 HeapRegion* r = _g1->heap_region_containing(start);
496 // Why do we have to check here whether a card is on a young region,
497 // given that we dirty young regions and, as a result, the
498 // post-barrier is supposed to filter them out and never to enqueue
499 // them? When we allocate a new region as the "allocation region" we
500 // actually dirty its cards after we release the lock, since card
501 // dirtying while holding the lock was a performance bottleneck. So,
502 // as a result, it is possible for other threads to actually
503 // allocate objects in the region (after they acquire the lock)
504 // before all the cards on the region are dirtied. This is unlikely,
505 // and it doesn't happen often, but it can happen. So, the extra
506 // check below filters out those cards.
507 if (r->is_young()) {
508 return false;
509 }
511 // While we are processing RSet buffers during the collection, we
512 // actually don't want to scan any cards in the collection set,
513 // since we don't want to update remembered sets with entries that
514 // point into the collection set, given that live objects from the
515 // collection set are about to move and such entries will be stale
516 // very soon. This change also deals with a reliability issue which
517 // involves scanning a card in the collection set and coming across
518 // an array that was being chunked and looking malformed. Note,
519 // however, that if evacuation fails, we have to scan any objects
520 // that were not moved and create any missing entries.
521 if (r->in_collection_set()) {
522 return false;
523 }
525 // The result from the hot card cache insert call is either:
526 // * pointer to the current card
527 // (implying that the current card is not 'hot'),
528 // * null
529 // (meaning we had inserted the card ptr into the "hot" card cache,
530 // which had some headroom),
531 // * a pointer to a "hot" card that was evicted from the "hot" cache.
532 //
534 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
535 if (hot_card_cache->use_cache()) {
536 assert(!check_for_refs_into_cset, "sanity");
537 assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
539 card_ptr = hot_card_cache->insert(card_ptr);
540 if (card_ptr == NULL) {
541 // There was no eviction. Nothing to do.
542 return false;
543 }
545 start = _ct_bs->addr_for(card_ptr);
546 r = _g1->heap_region_containing(start);
548 // Checking whether the region we got back from the cache
549 // is young here is inappropriate. The region could have been
550 // freed, reallocated and tagged as young while in the cache.
551 // Hence we could see its young type change at any time.
552 }
554 // Don't use addr_for(card_ptr + 1) which can ask for
555 // a card beyond the heap. This is not safe without a perm
556 // gen at the upper end of the heap.
557 HeapWord* end = start + CardTableModRefBS::card_size_in_words;
558 MemRegion dirtyRegion(start, end);
560 #if CARD_REPEAT_HISTO
561 init_ct_freq_table(_g1->max_capacity());
562 ct_freq_note_card(_ct_bs->index_for(start));
563 #endif
565 OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
566 if (check_for_refs_into_cset) {
567 // ConcurrentG1RefineThreads have worker numbers larger than what
568 // _cset_rs_update_cl[] is set up to handle. But those threads should
569 // only be active outside of a collection which means that when they
570 // reach here they should have check_for_refs_into_cset == false.
571 assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
572 oops_in_heap_closure = _cset_rs_update_cl[worker_i];
573 }
574 G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
575 _g1->g1_rem_set(),
576 oops_in_heap_closure,
577 check_for_refs_into_cset,
578 worker_i);
579 update_rs_oop_cl.set_from(r);
581 G1TriggerClosure trigger_cl;
582 FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
583 G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
584 G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
586 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
587 (check_for_refs_into_cset ?
588 (OopClosure*)&mux :
589 (OopClosure*)&update_rs_oop_cl));
591 // The region for the current card may be a young region. The
592 // current card may have been a card that was evicted from the
593 // card cache. When the card was inserted into the cache, we had
594 // determined that its region was non-young. While in the cache,
595 // the region may have been freed during a cleanup pause, reallocated
596 // and tagged as young.
597 //
598 // We wish to filter out cards for such a region but the current
599 // thread, if we're running concurrently, may "see" the young type
600 // change at any time (so an earlier "is_young" check may pass or
601 // fail arbitrarily). We tell the iteration code to perform this
602 // filtering when it has been determined that there has been an actual
603 // allocation in this region, making it safe to check the young type.
604 bool filter_young = true;
606 HeapWord* stop_point =
607 r->oops_on_card_seq_iterate_careful(dirtyRegion,
608 &filter_then_update_rs_oop_cl,
609 filter_young,
610 card_ptr);
612 // If stop_point is non-null, then we encountered an unallocated region
613 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
614 // card and re-enqueue: if we put off the card until a GC pause, then the
615 // unallocated portion will be filled in. Alternatively, we might try
616 // the full complexity of the technique used in "regular" precleaning.
617 if (stop_point != NULL) {
618 // The card might have gotten re-dirtied and re-enqueued while we
619 // worked. (In fact, it's pretty likely.)
620 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
621 *card_ptr = CardTableModRefBS::dirty_card_val();
622 MutexLockerEx x(Shared_DirtyCardQ_lock,
623 Mutex::_no_safepoint_check_flag);
624 DirtyCardQueue* sdcq =
625 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
626 sdcq->enqueue(card_ptr);
627 }
628 } else {
629 _conc_refine_cards++;
630 }
632 // This gets set to true if the card being refined has
633 // references that point into the collection set.
634 bool has_refs_into_cset = trigger_cl.triggered();
636 // We should only be detecting that the card contains references
637 // that point into the collection set if the current thread is
638 // a GC worker thread.
639 assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
640 "invalid result at non safepoint");
642 return has_refs_into_cset;
643 }
645 void G1RemSet::print_periodic_summary_info(const char* header) {
646 G1RemSetSummary current;
647 current.initialize(this);
649 _prev_period_summary.subtract_from(&current);
650 print_summary_info(&_prev_period_summary, header);
652 _prev_period_summary.set(&current);
653 }
655 void G1RemSet::print_summary_info() {
656 G1RemSetSummary current;
657 current.initialize(this);
659 print_summary_info(&current, " Cumulative RS summary");
660 }
662 void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) {
663 assert(summary != NULL, "just checking");
665 if (header != NULL) {
666 gclog_or_tty->print_cr("%s", header);
667 }
669 #if CARD_REPEAT_HISTO
670 gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
671 gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
672 card_repeat_count.print_on(gclog_or_tty);
673 #endif
675 summary->print_on(gclog_or_tty);
676 }
678 void G1RemSet::prepare_for_verify() {
679 if (G1HRRSFlushLogBuffersOnVerify &&
680 (VerifyBeforeGC || VerifyAfterGC)
681 && (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
682 cleanupHRRS();
683 _g1->set_refine_cte_cl_concurrency(false);
684 if (SafepointSynchronize::is_at_safepoint()) {
685 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
686 dcqs.concatenate_logs();
687 }
689 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
690 bool use_hot_card_cache = hot_card_cache->use_cache();
691 hot_card_cache->set_use_cache(false);
693 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
694 updateRS(&into_cset_dcq, 0);
695 _g1->into_cset_dirty_card_queue_set().clear();
697 hot_card_cache->set_use_cache(use_hot_card_cache);
698 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
699 }
700 }