Wed, 25 Apr 2012 10:23:12 -0700
7143490: G1: Remove HeapRegion::_top_at_conc_mark_count
Summary: Removed the HeapRegion::_top_at_conc_mark_count field. It is no longer needed as a result of the changes for 6888336 and 7127706. Refactored the closures that finalize and verify the liveness counting data so that common functionality was placed into a base class.
Reviewed-by: brutisso, tonyp
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "code/icBuffer.hpp"
27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
35 #include "gc_implementation/g1/g1EvacFailure.hpp"
36 #include "gc_implementation/g1/g1Log.hpp"
37 #include "gc_implementation/g1/g1MarkSweep.hpp"
38 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
39 #include "gc_implementation/g1/g1RemSet.inline.hpp"
40 #include "gc_implementation/g1/heapRegion.inline.hpp"
41 #include "gc_implementation/g1/heapRegionRemSet.hpp"
42 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
43 #include "gc_implementation/g1/vm_operations_g1.hpp"
44 #include "gc_implementation/shared/isGCActiveMark.hpp"
45 #include "memory/gcLocker.inline.hpp"
46 #include "memory/genOopClosures.inline.hpp"
47 #include "memory/generationSpec.hpp"
48 #include "memory/referenceProcessor.hpp"
49 #include "oops/oop.inline.hpp"
50 #include "oops/oop.pcgc.inline.hpp"
51 #include "runtime/aprofiler.hpp"
52 #include "runtime/vmThread.hpp"
54 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
56 // turn it on so that the contents of the young list (scan-only /
57 // to-be-collected) are printed at "strategic" points before / during
58 // / after the collection --- this is useful for debugging
59 #define YOUNG_LIST_VERBOSE 0
60 // CURRENT STATUS
61 // This file is under construction. Search for "FIXME".
63 // INVARIANTS/NOTES
64 //
65 // All allocation activity covered by the G1CollectedHeap interface is
66 // serialized by acquiring the HeapLock. This happens in mem_allocate
67 // and allocate_new_tlab, which are the "entry" points to the
68 // allocation code from the rest of the JVM. (Note that this does not
69 // apply to TLAB allocation, which is not part of this interface: it
70 // is done by clients of this interface.)
72 // Notes on implementation of parallelism in different tasks.
73 //
74 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
75 // The number of GC workers is passed to heap_region_par_iterate_chunked().
76 // It does use run_task() which sets _n_workers in the task.
77 // G1ParTask executes g1_process_strong_roots() ->
78 // SharedHeap::process_strong_roots() which eventually calls into
79 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
80 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also
81 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
82 //
84 // Local to this file.
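// Applied to cards in the dirty card queues by the concurrent refinement
// and mutator threads: refines each card via the remembered set code and
// returns false when the suspendible thread set asks the caller to yield.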
86 class RefineCardTableEntryClosure: public CardTableEntryClosure {
87 SuspendibleThreadSet* _sts;
88 G1RemSet* _g1rs;
89 ConcurrentG1Refine* _cg1r;
90 bool _concurrent;
91 public:
92 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
93 G1RemSet* g1rs,
94 ConcurrentG1Refine* cg1r) :
95 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
96 {}
97 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
98 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
99 // This path is executed by the concurrent refine or mutator threads,
100 // concurrently, and so we do not care if card_ptr contains references
101 // that point into the collection set.
102 assert(!oops_into_cset, "should be");
104 if (_concurrent && _sts->should_yield()) {
105 // Caller will actually yield.
106 return false;
107 }
108 // Otherwise, we finished successfully; return true.
109 return true;
110 }
111 void set_concurrent(bool b) { _concurrent = b; }
112 };
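// Debugging closure used by check_ct_logs_at_safepoint(): for every logged
// card that lies within the heap it counts the call, histograms the card's
// current value, and then overwrites the card with -1.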
115 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
116 int _calls;
117 G1CollectedHeap* _g1h;
118 CardTableModRefBS* _ctbs;
119 int _histo[256];
120 public:
121 ClearLoggedCardTableEntryClosure() :
122 _calls(0)
123 {
124 _g1h = G1CollectedHeap::heap();
125 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
126 for (int i = 0; i < 256; i++) _histo[i] = 0;
127 }
128 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
129 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
130 _calls++;
131 unsigned char* ujb = (unsigned char*)card_ptr;
132 int ind = (int)(*ujb);
133 _histo[ind]++;
134 *card_ptr = -1;
135 }
136 return true;
137 }
138 int calls() { return _calls; }
139 void print_histo() {
140 gclog_or_tty->print_cr("Card table value histogram:");
141 for (int i = 0; i < 256; i++) {
142 if (_histo[i] != 0) {
143 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
144 }
145 }
146 }
147 };
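// Debugging companion to the closure above: writes 0 back into each logged
// in-heap card and counts the cards it touches so that the count can be
// compared with the clearing pass.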
149 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
150 int _calls;
151 G1CollectedHeap* _g1h;
152 CardTableModRefBS* _ctbs;
153 public:
154 RedirtyLoggedCardTableEntryClosure() :
155 _calls(0)
156 {
157 _g1h = G1CollectedHeap::heap();
158 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
159 }
160 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
161 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
162 _calls++;
163 *card_ptr = 0;
164 }
165 return true;
166 }
167 int calls() { return _calls; }
168 };
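// Re-dirties every card it is applied to, with no counting or filtering.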
170 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
171 public:
172 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
173 *card_ptr = CardTableModRefBS::dirty_card_val();
174 return true;
175 }
176 };
178 YoungList::YoungList(G1CollectedHeap* g1h) :
179 _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
180 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
181 guarantee(check_list_empty(false), "just making sure...");
182 }
184 void YoungList::push_region(HeapRegion *hr) {
185 assert(!hr->is_young(), "should not already be young");
186 assert(hr->get_next_young_region() == NULL, "cause it should!");
188 hr->set_next_young_region(_head);
189 _head = hr;
191 _g1h->g1_policy()->set_region_eden(hr, (int) _length);
192 ++_length;
193 }
195 void YoungList::add_survivor_region(HeapRegion* hr) {
196 assert(hr->is_survivor(), "should be flagged as survivor region");
197 assert(hr->get_next_young_region() == NULL, "cause it should!");
199 hr->set_next_young_region(_survivor_head);
200 if (_survivor_head == NULL) {
201 _survivor_tail = hr;
202 }
203 _survivor_head = hr;
204 ++_survivor_length;
205 }
207 void YoungList::empty_list(HeapRegion* list) {
208 while (list != NULL) {
209 HeapRegion* next = list->get_next_young_region();
210 list->set_next_young_region(NULL);
211 list->uninstall_surv_rate_group();
212 list->set_not_young();
213 list = next;
214 }
215 }
217 void YoungList::empty_list() {
218 assert(check_list_well_formed(), "young list should be well formed");
220 empty_list(_head);
221 _head = NULL;
222 _length = 0;
224 empty_list(_survivor_head);
225 _survivor_head = NULL;
226 _survivor_tail = NULL;
227 _survivor_length = 0;
229 _last_sampled_rs_lengths = 0;
231 assert(check_list_empty(false), "just making sure...");
232 }
234 bool YoungList::check_list_well_formed() {
235 bool ret = true;
237 uint length = 0;
238 HeapRegion* curr = _head;
239 HeapRegion* last = NULL;
240 while (curr != NULL) {
241 if (!curr->is_young()) {
242 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
243 "incorrectly tagged (y: %d, surv: %d)",
244 curr->bottom(), curr->end(),
245 curr->is_young(), curr->is_survivor());
246 ret = false;
247 }
248 ++length;
249 last = curr;
250 curr = curr->get_next_young_region();
251 }
252 ret = ret && (length == _length);
254 if (!ret) {
255 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
256 gclog_or_tty->print_cr("### list has %u entries, _length is %u",
257 length, _length);
258 }
260 return ret;
261 }
263 bool YoungList::check_list_empty(bool check_sample) {
264 bool ret = true;
266 if (_length != 0) {
267 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
268 _length);
269 ret = false;
270 }
271 if (check_sample && _last_sampled_rs_lengths != 0) {
272 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
273 ret = false;
274 }
275 if (_head != NULL) {
276 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
277 ret = false;
278 }
279 if (!ret) {
280 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
281 }
283 return ret;
284 }
286 void
287 YoungList::rs_length_sampling_init() {
288 _sampled_rs_lengths = 0;
289 _curr = _head;
290 }
292 bool
293 YoungList::rs_length_sampling_more() {
294 return _curr != NULL;
295 }
297 void
298 YoungList::rs_length_sampling_next() {
299 assert( _curr != NULL, "invariant" );
300 size_t rs_length = _curr->rem_set()->occupied();
302 _sampled_rs_lengths += rs_length;
304 // The current region may not yet have been added to the
305 // incremental collection set (it gets added when it is
306 // retired as the current allocation region).
307 if (_curr->in_collection_set()) {
308 // Update the collection set policy information for this region
309 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
310 }
312 _curr = _curr->get_next_young_region();
313 if (_curr == NULL) {
314 _last_sampled_rs_lengths = _sampled_rs_lengths;
315 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
316 }
317 }
319 void
320 YoungList::reset_auxilary_lists() {
321 guarantee( is_empty(), "young list should be empty" );
322 assert(check_list_well_formed(), "young list should be well formed");
324 // Add survivor regions to SurvRateGroup.
325 _g1h->g1_policy()->note_start_adding_survivor_regions();
326 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
328 int young_index_in_cset = 0;
329 for (HeapRegion* curr = _survivor_head;
330 curr != NULL;
331 curr = curr->get_next_young_region()) {
332 _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
334 // The region is a non-empty survivor so let's add it to
335 // the incremental collection set for the next evacuation
336 // pause.
337 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
338 young_index_in_cset += 1;
339 }
340 assert((uint) young_index_in_cset == _survivor_length, "post-condition");
341 _g1h->g1_policy()->note_stop_adding_survivor_regions();
343 _head = _survivor_head;
344 _length = _survivor_length;
345 if (_survivor_head != NULL) {
346 assert(_survivor_tail != NULL, "cause it shouldn't be");
347 assert(_survivor_length > 0, "invariant");
348 _survivor_tail->set_next_young_region(NULL);
349 }
351 // Don't clear the survivor list handles until the start of
352 // the next evacuation pause - we need it in order to re-tag
353 // the survivor regions from this evacuation pause as 'young'
354 // at the start of the next.
356 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
358 assert(check_list_well_formed(), "young list should be well formed");
359 }
361 void YoungList::print() {
362 HeapRegion* lists[] = {_head, _survivor_head};
363 const char* names[] = {"YOUNG", "SURVIVOR"};
365 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
366 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
367 HeapRegion *curr = lists[list];
368 if (curr == NULL)
369 gclog_or_tty->print_cr(" empty");
370 while (curr != NULL) {
371 gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
372 HR_FORMAT_PARAMS(curr),
373 curr->prev_top_at_mark_start(),
374 curr->next_top_at_mark_start(),
375 curr->age_in_surv_rate_group_cond());
376 curr = curr->get_next_young_region();
377 }
378 }
380 gclog_or_tty->print_cr("");
381 }
383 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
384 {
385 // Claim the right to put the region on the dirty cards region list
386 // by installing a self pointer.
387 HeapRegion* next = hr->get_next_dirty_cards_region();
388 if (next == NULL) {
389 HeapRegion* res = (HeapRegion*)
390 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
391 NULL);
392 if (res == NULL) {
393 HeapRegion* head;
394 do {
395 // Put the region on the dirty cards region list.
396 head = _dirty_cards_region_list;
397 next = (HeapRegion*)
398 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
399 if (next == head) {
400 assert(hr->get_next_dirty_cards_region() == hr,
401 "hr->get_next_dirty_cards_region() != hr");
402 if (next == NULL) {
403 // The last region in the list points to itself.
404 hr->set_next_dirty_cards_region(hr);
405 } else {
406 hr->set_next_dirty_cards_region(next);
407 }
408 }
409 } while (next != head);
410 }
411 }
412 }
414 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
415 {
416 HeapRegion* head;
417 HeapRegion* hr;
418 do {
419 head = _dirty_cards_region_list;
420 if (head == NULL) {
421 return NULL;
422 }
423 HeapRegion* new_head = head->get_next_dirty_cards_region();
424 if (head == new_head) {
425 // The last region.
426 new_head = NULL;
427 }
428 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
429 head);
430 } while (hr != head);
431 assert(hr != NULL, "invariant");
432 hr->set_next_dirty_cards_region(NULL);
433 return hr;
434 }
436 void G1CollectedHeap::stop_conc_gc_threads() {
437 _cg1r->stop();
438 _cmThread->stop();
439 }
441 #ifdef ASSERT
442 // A region is added to the collection set as it is retired
443 // so an address p can point to a region which will be in the
444 // collection set but has not yet been retired. This method
445 // therefore is only accurate during a GC pause after all
446 // regions have been retired. It is used for debugging
447 // to check if an nmethod has references to objects that can
448 // be moved during a partial collection. Though it can be
449 // inaccurate, it is sufficient for G1 because the conservative
450 // implementation of is_scavengable() for G1 will indicate that
451 // all nmethods must be scanned during a partial collection.
452 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
453 HeapRegion* hr = heap_region_containing(p);
454 return hr != NULL && hr->in_collection_set();
455 }
456 #endif
458 // Returns true if the reference points to an object that
459 // can move in an incremental collection.
460 bool G1CollectedHeap::is_scavengable(const void* p) {
461 G1CollectedHeap* g1h = G1CollectedHeap::heap();
462 G1CollectorPolicy* g1p = g1h->g1_policy();
463 HeapRegion* hr = heap_region_containing(p);
464 if (hr == NULL) {
465 // perm gen (or null)
466 return false;
467 } else {
468 return !hr->isHumongous();
469 }
470 }
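// Consistency check for the card-table logging mechanism: clear all logged
// cards, verify that the card table is then completely clean, and finally
// re-dirty the logged cards, checking that the redirty and clear call counts
// match.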
472 void G1CollectedHeap::check_ct_logs_at_safepoint() {
473 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
474 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
476 // Count the dirty cards at the start.
477 CountNonCleanMemRegionClosure count1(this);
478 ct_bs->mod_card_iterate(&count1);
479 int orig_count = count1.n();
481 // First clear the logged cards.
482 ClearLoggedCardTableEntryClosure clear;
483 dcqs.set_closure(&clear);
484 dcqs.apply_closure_to_all_completed_buffers();
485 dcqs.iterate_closure_all_threads(false);
486 clear.print_histo();
488 // Now ensure that there are no dirty cards.
489 CountNonCleanMemRegionClosure count2(this);
490 ct_bs->mod_card_iterate(&count2);
491 if (count2.n() != 0) {
492 gclog_or_tty->print_cr("Card table has %d entries; %d originally",
493 count2.n(), orig_count);
494 }
495 guarantee(count2.n() == 0, "Card table should be clean.");
497 RedirtyLoggedCardTableEntryClosure redirty;
498 JavaThread::dirty_card_queue_set().set_closure(&redirty);
499 dcqs.apply_closure_to_all_completed_buffers();
500 dcqs.iterate_closure_all_threads(false);
501 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
502 clear.calls(), orig_count);
503 guarantee(redirty.calls() == clear.calls(),
504 "Or else mechanism is broken.");
506 CountNonCleanMemRegionClosure count3(this);
507 ct_bs->mod_card_iterate(&count3);
508 if (count3.n() != orig_count) {
509 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
510 orig_count, count3.n());
511 guarantee(count3.n() >= orig_count, "Should have restored them all.");
512 }
514 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
515 }
517 // Private class members.
519 G1CollectedHeap* G1CollectedHeap::_g1h;
521 // Private methods.
523 HeapRegion*
524 G1CollectedHeap::new_region_try_secondary_free_list() {
525 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
526 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
527 if (!_secondary_free_list.is_empty()) {
528 if (G1ConcRegionFreeingVerbose) {
529 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
530 "secondary_free_list has %u entries",
531 _secondary_free_list.length());
532 }
533 // It looks as if there are free regions available on the
534 // secondary_free_list. Let's move them to the free_list and try
535 // again to allocate from it.
536 append_secondary_free_list();
538 assert(!_free_list.is_empty(), "if the secondary_free_list was not "
539 "empty we should have moved at least one entry to the free_list");
540 HeapRegion* res = _free_list.remove_head();
541 if (G1ConcRegionFreeingVerbose) {
542 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
543 "allocated "HR_FORMAT" from secondary_free_list",
544 HR_FORMAT_PARAMS(res));
545 }
546 return res;
547 }
549 // Wait here until we get notified either when (a) there are no
550 // more free regions coming or (b) some regions have been moved onto
551 // the secondary_free_list.
552 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
553 }
555 if (G1ConcRegionFreeingVerbose) {
556 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
557 "could not allocate from secondary_free_list");
558 }
559 return NULL;
560 }
562 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
563 assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
564 "the only time we use this to allocate a humongous region is "
565 "when we are allocating a single humongous region");
567 HeapRegion* res;
568 if (G1StressConcRegionFreeing) {
569 if (!_secondary_free_list.is_empty()) {
570 if (G1ConcRegionFreeingVerbose) {
571 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
572 "forced to look at the secondary_free_list");
573 }
574 res = new_region_try_secondary_free_list();
575 if (res != NULL) {
576 return res;
577 }
578 }
579 }
580 res = _free_list.remove_head_or_null();
581 if (res == NULL) {
582 if (G1ConcRegionFreeingVerbose) {
583 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
584 "res == NULL, trying the secondary_free_list");
585 }
586 res = new_region_try_secondary_free_list();
587 }
588 if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
589 // Currently, only attempts to allocate GC alloc regions set
590 // do_expand to true. So, we should only reach here during a
591 // safepoint. If this assumption changes we might have to
592 // reconsider the use of _expand_heap_after_alloc_failure.
593 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
595 ergo_verbose1(ErgoHeapSizing,
596 "attempt heap expansion",
597 ergo_format_reason("region allocation request failed")
598 ergo_format_byte("allocation request"),
599 word_size * HeapWordSize);
600 if (expand(word_size * HeapWordSize)) {
601 // Given that expand() succeeded in expanding the heap, and we
602 // always expand the heap by an amount aligned to the heap
603 // region size, the free list should in theory not be empty. So
604 // it would probably be OK to use remove_head(). But the extra
605 // check for NULL is unlikely to be a performance issue here (we
606 // just expanded the heap!) so let's just be conservative and
607 // use remove_head_or_null().
608 res = _free_list.remove_head_or_null();
609 } else {
610 _expand_heap_after_alloc_failure = false;
611 }
612 }
613 return res;
614 }
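// Finds num_regions contiguous free regions for a humongous allocation and
// removes them from the master free list; returns the index of the first
// region, or G1_NULL_HRS_INDEX if no suitable run of regions is available.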
616 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
617 size_t word_size) {
618 assert(isHumongous(word_size), "word_size should be humongous");
619 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
621 uint first = G1_NULL_HRS_INDEX;
622 if (num_regions == 1) {
623 // Only one region to allocate, no need to go through the slower
624 // path. The caller will attempt the expansion if this fails, so
625 // let's not try to expand here too.
626 HeapRegion* hr = new_region(word_size, false /* do_expand */);
627 if (hr != NULL) {
628 first = hr->hrs_index();
629 } else {
630 first = G1_NULL_HRS_INDEX;
631 }
632 } else {
633 // We can't allocate humongous regions while cleanupComplete() is
634 // running, since some of the regions we find to be empty might not
635 // yet be added to the free list and it is not straightforward to
636 // know which list they are on so that we can remove them. Note
637 // that we only need to do this if we need to allocate more than
638 // one region to satisfy the current humongous allocation
639 // request. If we are only allocating one region we use the common
640 // region allocation code (see above).
641 wait_while_free_regions_coming();
642 append_secondary_free_list_if_not_empty_with_lock();
644 if (free_regions() >= num_regions) {
645 first = _hrs.find_contiguous(num_regions);
646 if (first != G1_NULL_HRS_INDEX) {
647 for (uint i = first; i < first + num_regions; ++i) {
648 HeapRegion* hr = region_at(i);
649 assert(hr->is_empty(), "sanity");
650 assert(is_on_master_free_list(hr), "sanity");
651 hr->set_pending_removal(true);
652 }
653 _free_list.remove_all_pending(num_regions);
654 }
655 }
656 }
657 return first;
658 }
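// Sets up the regions [first, first + num_regions) as a single humongous
// object: the first region becomes "starts humongous", the rest "continues
// humongous", and the object header, BOT and region tops are initialized in
// an order that is safe with respect to concurrent refinement (see the
// comments below). Returns the address of the new object.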
660 HeapWord*
661 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
662 uint num_regions,
663 size_t word_size) {
664 assert(first != G1_NULL_HRS_INDEX, "pre-condition");
665 assert(isHumongous(word_size), "word_size should be humongous");
666 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
668 // Index of last region in the series + 1.
669 uint last = first + num_regions;
671 // We need to initialize the region(s) we just discovered. This is
672 // a bit tricky given that it can happen concurrently with
673 // refinement threads refining cards on these regions and
674 // potentially wanting to refine the BOT as they are scanning
675 // those cards (this can happen shortly after a cleanup; see CR
676 // 6991377). So we have to set up the region(s) carefully and in
677 // a specific order.
679 // The word size sum of all the regions we will allocate.
680 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
681 assert(word_size <= word_size_sum, "sanity");
683 // This will be the "starts humongous" region.
684 HeapRegion* first_hr = region_at(first);
685 // The header of the new object will be placed at the bottom of
686 // the first region.
687 HeapWord* new_obj = first_hr->bottom();
688 // This will be the new end of the first region in the series, which
689 // should also match the end of the last region in the series.
690 HeapWord* new_end = new_obj + word_size_sum;
691 // This will be the new top of the first region that will reflect
692 // this allocation.
693 HeapWord* new_top = new_obj + word_size;
695 // First, we need to zero the header of the space that we will be
696 // allocating. When we update top further down, some refinement
697 // threads might try to scan the region. By zeroing the header we
698 // ensure that any thread that will try to scan the region will
699 // come across the zero klass word and bail out.
700 //
701 // NOTE: It would not have been correct to have used
702 // CollectedHeap::fill_with_object() and make the space look like
703 // an int array. The thread that is doing the allocation will
704 // later update the object header to a potentially different array
705 // type and, for a very short period of time, the klass and length
706 // fields will be inconsistent. This could cause a refinement
707 // thread to calculate the object size incorrectly.
708 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
710 // We will set up the first region as "starts humongous". This
711 // will also update the BOT covering all the regions to reflect
712 // that there is a single object that starts at the bottom of the
713 // first region.
714 first_hr->set_startsHumongous(new_top, new_end);
716 // Then, if there are any, we will set up the "continues
717 // humongous" regions.
718 HeapRegion* hr = NULL;
719 for (uint i = first + 1; i < last; ++i) {
720 hr = region_at(i);
721 hr->set_continuesHumongous(first_hr);
722 }
723 // If we have "continues humongous" regions (hr != NULL), then the
724 // end of the last one should match new_end.
725 assert(hr == NULL || hr->end() == new_end, "sanity");
727 // Up to this point no concurrent thread would have been able to
728 // do any scanning on any region in this series. All the top
729 // fields still point to bottom, so the intersection between
730 // [bottom,top] and [card_start,card_end] will be empty. Before we
731 // update the top fields, we'll do a storestore to make sure that
732 // no thread sees the update to top before the zeroing of the
733 // object header and the BOT initialization.
734 OrderAccess::storestore();
736 // Now that the BOT and the object header have been initialized,
737 // we can update top of the "starts humongous" region.
738 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
739 "new_top should be in this region");
740 first_hr->set_top(new_top);
741 if (_hr_printer.is_active()) {
742 HeapWord* bottom = first_hr->bottom();
743 HeapWord* end = first_hr->orig_end();
744 if ((first + 1) == last) {
745 // the series has a single humongous region
746 _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
747 } else {
748 // the series has more than one humongous region
749 _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
750 }
751 }
753 // Now, we will update the top fields of the "continues humongous"
754 // regions. The reason we need to do this is that, otherwise,
755 // these regions would look empty and this will confuse parts of
756 // G1. For example, the code that looks for a consecutive number
757 // of empty regions will consider them empty and try to
758 // re-allocate them. We can extend is_empty() to also include
759 // !continuesHumongous(), but it is easier to just update the top
760 // fields here. The way we set top for all regions (i.e., top ==
761 // end for all regions but the last one, top == new_top for the
762 // last one) is actually used when we will free up the humongous
763 // region in free_humongous_region().
764 hr = NULL;
765 for (uint i = first + 1; i < last; ++i) {
766 hr = region_at(i);
767 if ((i + 1) == last) {
768 // last continues humongous region
769 assert(hr->bottom() < new_top && new_top <= hr->end(),
770 "new_top should fall on this region");
771 hr->set_top(new_top);
772 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
773 } else {
774 // not last one
775 assert(new_top > hr->end(), "new_top should be above this region");
776 hr->set_top(hr->end());
777 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
778 }
779 }
780 // If we have continues humongous regions (hr != NULL), then the
781 // end of the last one should match new_end and its top should
782 // match new_top.
783 assert(hr == NULL ||
784 (hr->end() == new_end && hr->top() == new_top), "sanity");
786 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
787 _summary_bytes_used += first_hr->used();
788 _humongous_set.add(first_hr);
790 return new_obj;
791 }
793 // If the object could fit into free regions w/o expansion, try that.
794 // Otherwise, if we can expand, do so.
795 // Otherwise, if using ex regions might help, try with ex regions given back.
796 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
797 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
799 verify_region_sets_optional();
801 size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
802 uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
803 uint x_num = expansion_regions();
804 uint fs = _hrs.free_suffix();
805 uint first = humongous_obj_allocate_find_first(num_regions, word_size);
806 if (first == G1_NULL_HRS_INDEX) {
807 // The only thing we can do now is attempt expansion.
808 if (fs + x_num >= num_regions) {
809 // If the number of regions we're trying to allocate for this
810 // object is at most the number of regions in the free suffix,
811 // then the call to humongous_obj_allocate_find_first() above
812 // should have succeeded and we wouldn't be here.
813 //
814 // We should only be trying to expand when the free suffix is
815 // not sufficient for the object _and_ we have some expansion
816 // room available.
817 assert(num_regions > fs, "earlier allocation should have succeeded");
819 ergo_verbose1(ErgoHeapSizing,
820 "attempt heap expansion",
821 ergo_format_reason("humongous allocation request failed")
822 ergo_format_byte("allocation request"),
823 word_size * HeapWordSize);
824 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
825 // Even though the heap was expanded, it might not have
826 // reached the desired size. So, we cannot assume that the
827 // allocation will succeed.
828 first = humongous_obj_allocate_find_first(num_regions, word_size);
829 }
830 }
831 }
833 HeapWord* result = NULL;
834 if (first != G1_NULL_HRS_INDEX) {
835 result =
836 humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
837 assert(result != NULL, "it should always return a valid result");
839 // A successful humongous object allocation changes the used space
840 // information of the old generation so we need to recalculate the
841 // sizes and update the jstat counters here.
842 g1mm()->update_sizes();
843 }
845 verify_region_sets_optional();
847 return result;
848 }
850 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
851 assert_heap_not_locked_and_not_at_safepoint();
852 assert(!isHumongous(word_size), "we do not allow humongous TLABs");
854 unsigned int dummy_gc_count_before;
855 return attempt_allocation(word_size, &dummy_gc_count_before);
856 }
858 HeapWord*
859 G1CollectedHeap::mem_allocate(size_t word_size,
860 bool* gc_overhead_limit_was_exceeded) {
861 assert_heap_not_locked_and_not_at_safepoint();
863 // Loop until the allocation is satisfied, or unsatisfied after GC.
864 for (int try_count = 1; /* we'll return */; try_count += 1) {
865 unsigned int gc_count_before;
867 HeapWord* result = NULL;
868 if (!isHumongous(word_size)) {
869 result = attempt_allocation(word_size, &gc_count_before);
870 } else {
871 result = attempt_allocation_humongous(word_size, &gc_count_before);
872 }
873 if (result != NULL) {
874 return result;
875 }
877 // Create the garbage collection operation...
878 VM_G1CollectForAllocation op(gc_count_before, word_size);
879 // ...and get the VM thread to execute it.
880 VMThread::execute(&op);
882 if (op.prologue_succeeded() && op.pause_succeeded()) {
883 // If the operation was successful we'll return the result even
884 // if it is NULL. If the allocation attempt failed immediately
885 // after a Full GC, it's unlikely we'll be able to allocate now.
886 HeapWord* result = op.result();
887 if (result != NULL && !isHumongous(word_size)) {
888 // Allocations that take place on VM operations do not do any
889 // card dirtying and we have to do it here. We only have to do
890 // this for non-humongous allocations, though.
891 dirty_young_block(result, word_size);
892 }
893 return result;
894 } else {
895 assert(op.result() == NULL,
896 "the result should be NULL if the VM op did not succeed");
897 }
899 // Give a warning if we seem to be looping forever.
900 if ((QueuedAllocationWarningCount > 0) &&
901 (try_count % QueuedAllocationWarningCount == 0)) {
902 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
903 }
904 }
906 ShouldNotReachHere();
907 return NULL;
908 }
910 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
911 unsigned int *gc_count_before_ret) {
912 // Make sure you read the note in attempt_allocation_humongous().
914 assert_heap_not_locked_and_not_at_safepoint();
915 assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
916 "be called for humongous allocation requests");
918 // We should only get here after the first-level allocation attempt
919 // (attempt_allocation()) failed to allocate.
921 // We will loop until a) we manage to successfully perform the
922 // allocation or b) we successfully schedule a collection which
923 // fails to perform the allocation. b) is the only case when we'll
924 // return NULL.
925 HeapWord* result = NULL;
926 for (int try_count = 1; /* we'll return */; try_count += 1) {
927 bool should_try_gc;
928 unsigned int gc_count_before;
930 {
931 MutexLockerEx x(Heap_lock);
933 result = _mutator_alloc_region.attempt_allocation_locked(word_size,
934 false /* bot_updates */);
935 if (result != NULL) {
936 return result;
937 }
939 // If we reach here, attempt_allocation_locked() above failed to
940 // allocate a new region. So the mutator alloc region should be NULL.
941 assert(_mutator_alloc_region.get() == NULL, "only way to get here");
943 if (GC_locker::is_active_and_needs_gc()) {
944 if (g1_policy()->can_expand_young_list()) {
945 // No need for an ergo verbose message here,
946 // can_expand_young_list() does this when it returns true.
947 result = _mutator_alloc_region.attempt_allocation_force(word_size,
948 false /* bot_updates */);
949 if (result != NULL) {
950 return result;
951 }
952 }
953 should_try_gc = false;
954 } else {
955 // Read the GC count while still holding the Heap_lock.
956 gc_count_before = total_collections();
957 should_try_gc = true;
958 }
959 }
961 if (should_try_gc) {
962 bool succeeded;
963 result = do_collection_pause(word_size, gc_count_before, &succeeded);
964 if (result != NULL) {
965 assert(succeeded, "only way to get back a non-NULL result");
966 return result;
967 }
969 if (succeeded) {
970 // If we get here we successfully scheduled a collection which
971 // failed to allocate. No point in trying to allocate
972 // further. We'll just return NULL.
973 MutexLockerEx x(Heap_lock);
974 *gc_count_before_ret = total_collections();
975 return NULL;
976 }
977 } else {
978 GC_locker::stall_until_clear();
979 }
981 // We can reach here if we were unsuccessful in scheduling a
982 // collection (because another thread beat us to it) or if we were
983 // stalled due to the GC locker. In either case we should retry the
984 // allocation attempt in case another thread successfully
985 // performed a collection and reclaimed enough space. We do the
986 // first attempt (without holding the Heap_lock) here and the
987 // follow-on attempt will be at the start of the next loop
988 // iteration (after taking the Heap_lock).
989 result = _mutator_alloc_region.attempt_allocation(word_size,
990 false /* bot_updates */);
991 if (result != NULL) {
992 return result;
993 }
995 // Give a warning if we seem to be looping forever.
996 if ((QueuedAllocationWarningCount > 0) &&
997 (try_count % QueuedAllocationWarningCount == 0)) {
998 warning("G1CollectedHeap::attempt_allocation_slow() "
999 "retries %d times", try_count);
1000 }
1001 }
1003 ShouldNotReachHere();
1004 return NULL;
1005 }
1007 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1008 unsigned int * gc_count_before_ret) {
1009 // The structure of this method has a lot of similarities to
1010 // attempt_allocation_slow(). The reason these two were not merged
1011 // into a single one is that such a method would require several "if
1012 // allocation is not humongous do this, otherwise do that"
1013 // conditional paths which would obscure its flow. In fact, an early
1014 // version of this code did use a unified method which was harder to
1015 // follow and, as a result, it had subtle bugs that were hard to
1016 // track down. So keeping these two methods separate allows each to
1017 // be more readable. It will be good to keep these two in sync as
1018 // much as possible.
1020 assert_heap_not_locked_and_not_at_safepoint();
1021 assert(isHumongous(word_size), "attempt_allocation_humongous() "
1022 "should only be called for humongous allocations");
1024 // Humongous objects can exhaust the heap quickly, so we should check if we
1025 // need to start a marking cycle at each humongous object allocation. We do
1026 // the check before we do the actual allocation. The reason for doing it
1027 // before the allocation is that we avoid having to keep track of the newly
1028 // allocated memory while we do a GC.
1029 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
1030 word_size)) {
1031 collect(GCCause::_g1_humongous_allocation);
1032 }
1034 // We will loop until a) we manage to successfully perform the
1035 // allocation or b) we successfully schedule a collection which
1036 // fails to perform the allocation. b) is the only case when we'll
1037 // return NULL.
1038 HeapWord* result = NULL;
1039 for (int try_count = 1; /* we'll return */; try_count += 1) {
1040 bool should_try_gc;
1041 unsigned int gc_count_before;
1043 {
1044 MutexLockerEx x(Heap_lock);
1046 // Given that humongous objects are not allocated in young
1047 // regions, we'll first try to do the allocation without doing a
1048 // collection hoping that there's enough space in the heap.
1049 result = humongous_obj_allocate(word_size);
1050 if (result != NULL) {
1051 return result;
1052 }
1054 if (GC_locker::is_active_and_needs_gc()) {
1055 should_try_gc = false;
1056 } else {
1057 // Read the GC count while still holding the Heap_lock.
1058 gc_count_before = total_collections();
1059 should_try_gc = true;
1060 }
1061 }
1063 if (should_try_gc) {
1064 // If we failed to allocate the humongous object, we should try to
1065 // do a collection pause (if we're allowed) in case it reclaims
1066 // enough space for the allocation to succeed after the pause.
1068 bool succeeded;
1069 result = do_collection_pause(word_size, gc_count_before, &succeeded);
1070 if (result != NULL) {
1071 assert(succeeded, "only way to get back a non-NULL result");
1072 return result;
1073 }
1075 if (succeeded) {
1076 // If we get here we successfully scheduled a collection which
1077 // failed to allocate. No point in trying to allocate
1078 // further. We'll just return NULL.
1079 MutexLockerEx x(Heap_lock);
1080 *gc_count_before_ret = total_collections();
1081 return NULL;
1082 }
1083 } else {
1084 GC_locker::stall_until_clear();
1085 }
1087 // We can reach here if we were unsuccessful in scheduling a
1088 // collection (because another thread beat us to it) or if we were
1089 // stalled due to the GC locker. In either case we should retry the
1090 // allocation attempt in case another thread successfully
1091 // performed a collection and reclaimed enough space. Give a
1092 // warning if we seem to be looping forever.
1094 if ((QueuedAllocationWarningCount > 0) &&
1095 (try_count % QueuedAllocationWarningCount == 0)) {
1096 warning("G1CollectedHeap::attempt_allocation_humongous() "
1097 "retries %d times", try_count);
1098 }
1099 }
1101 ShouldNotReachHere();
1102 return NULL;
1103 }
1105 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1106 bool expect_null_mutator_alloc_region) {
1107 assert_at_safepoint(true /* should_be_vm_thread */);
1108 assert(_mutator_alloc_region.get() == NULL ||
1109 !expect_null_mutator_alloc_region,
1110 "the current alloc region was unexpectedly found to be non-NULL");
1112 if (!isHumongous(word_size)) {
1113 return _mutator_alloc_region.attempt_allocation_locked(word_size,
1114 false /* bot_updates */);
1115 } else {
1116 HeapWord* result = humongous_obj_allocate(word_size);
1117 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1118 g1_policy()->set_initiate_conc_mark_if_possible();
1119 }
1120 return result;
1121 }
1123 ShouldNotReachHere();
1124 }
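// Applied to every region after a full GC: resets the region's GC time
// stamp and, except for "continues humongous" regions, clears both its
// remembered set and the card-table range covering it.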
1126 class PostMCRemSetClearClosure: public HeapRegionClosure {
1127 ModRefBarrierSet* _mr_bs;
1128 public:
1129 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
1130 bool doHeapRegion(HeapRegion* r) {
1131 r->reset_gc_time_stamp();
1132 if (r->continuesHumongous())
1133 return false;
1134 HeapRegionRemSet* hrrs = r->rem_set();
1135 if (hrrs != NULL) hrrs->clear();
1136 // You might think here that we could clear just the cards
1137 // corresponding to the used region. But no: if we leave a dirty card
1138 // in a region we might allocate into, then it would prevent that card
1139 // from being enqueued, and cause it to be missed.
1140 // Re: the performance cost: we shouldn't be doing full GC anyway!
1141 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1142 return false;
1143 }
1144 };
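// Dirties (invalidates) the card-table entries covering the used portion
// of each non-"continues humongous" region.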
1147 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
1148 ModRefBarrierSet* _mr_bs;
1149 public:
1150 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
1151 bool doHeapRegion(HeapRegion* r) {
1152 if (r->continuesHumongous()) return false;
1153 if (r->used_region().word_size() != 0) {
1154 _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
1155 }
1156 return false;
1157 }
1158 };
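// Rebuilds remembered sets by applying an UpdateRSOopClosure to every oop
// in each non-"continues humongous" region.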
1160 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1161 G1CollectedHeap* _g1h;
1162 UpdateRSOopClosure _cl;
1163 int _worker_i;
1164 public:
1165 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1166 _cl(g1->g1_rem_set(), worker_i),
1167 _worker_i(worker_i),
1168 _g1h(g1)
1169 { }
1171 bool doHeapRegion(HeapRegion* r) {
1172 if (!r->continuesHumongous()) {
1173 _cl.set_from(r);
1174 r->oop_iterate(&_cl);
1175 }
1176 return false;
1177 }
1178 };
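// Parallel task that rebuilds remembered sets: each worker claims chunks of
// regions via heap_region_par_iterate_chunked() and applies
// RebuildRSOutOfRegionClosure to the regions it claims.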
1180 class ParRebuildRSTask: public AbstractGangTask {
1181 G1CollectedHeap* _g1;
1182 public:
1183 ParRebuildRSTask(G1CollectedHeap* g1)
1184 : AbstractGangTask("ParRebuildRSTask"),
1185 _g1(g1)
1186 { }
1188 void work(uint worker_id) {
1189 RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1190 _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1191 _g1->workers()->active_workers(),
1192 HeapRegion::RebuildRSClaimValue);
1193 }
1194 };
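// Reports the post-compaction state (old, or single / starts / continues
// humongous) of every non-empty region to the heap region printer.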
1196 class PostCompactionPrinterClosure: public HeapRegionClosure {
1197 private:
1198 G1HRPrinter* _hr_printer;
1199 public:
1200 bool doHeapRegion(HeapRegion* hr) {
1201 assert(!hr->is_young(), "not expecting to find young regions");
1202 // We only generate output for non-empty regions.
1203 if (!hr->is_empty()) {
1204 if (!hr->isHumongous()) {
1205 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1206 } else if (hr->startsHumongous()) {
1207 if (hr->capacity() == HeapRegion::GrainBytes) {
1208 // single humongous region
1209 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1210 } else {
1211 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1212 }
1213 } else {
1214 assert(hr->continuesHumongous(), "only way to get here");
1215 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1216 }
1217 }
1218 return false;
1219 }
1221 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1222 : _hr_printer(hr_printer) { }
1223 };
1225 bool G1CollectedHeap::do_collection(bool explicit_gc,
1226 bool clear_all_soft_refs,
1227 size_t word_size) {
1228 assert_at_safepoint(true /* should_be_vm_thread */);
1230 if (GC_locker::check_active_before_gc()) {
1231 return false;
1232 }
1234 SvcGCMarker sgcm(SvcGCMarker::FULL);
1235 ResourceMark rm;
1237 print_heap_before_gc();
1239 HRSPhaseSetter x(HRSPhaseFullGC);
1240 verify_region_sets_optional();
1242 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1243 collector_policy()->should_clear_all_soft_refs();
1245 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1247 {
1248 IsGCActiveMark x;
1250 // Timing
1251 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
1252 assert(!system_gc || explicit_gc, "invariant");
1253 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1254 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1255 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
1256 G1Log::fine(), true, gclog_or_tty);
1258 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1259 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1261 double start = os::elapsedTime();
1262 g1_policy()->record_full_collection_start();
1264 // Note: When we have a more flexible GC logging framework that
1265 // allows us to add optional attributes to a GC log record we
1266 // could consider timing and reporting how long we wait in the
1267 // following two methods.
1268 wait_while_free_regions_coming();
1269 // If we start the compaction before the CM threads finish
1270 // scanning the root regions we might trip them over as we'll
1271 // be moving objects / updating references. So let's wait until
1272 // they are done. By telling them to abort, they should complete
1273 // early.
1274 _cm->root_regions()->abort();
1275 _cm->root_regions()->wait_until_scan_finished();
1276 append_secondary_free_list_if_not_empty_with_lock();
1278 gc_prologue(true);
1279 increment_total_collections(true /* full gc */);
1281 size_t g1h_prev_used = used();
1282 assert(used() == recalculate_used(), "Should be equal");
1284 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
1285 HandleMark hm; // Discard invalid handles created during verification
1286 gclog_or_tty->print(" VerifyBeforeGC:");
1287 prepare_for_verify();
1288 Universe::verify(/* silent */ false,
1289 /* option */ VerifyOption_G1UsePrevMarking);
1291 }
1292 pre_full_gc_dump();
1294 COMPILER2_PRESENT(DerivedPointerTable::clear());
1296 // Disable discovery and empty the discovered lists
1297 // for the CM ref processor.
1298 ref_processor_cm()->disable_discovery();
1299 ref_processor_cm()->abandon_partial_discovery();
1300 ref_processor_cm()->verify_no_references_recorded();
1302 // Abandon current iterations of concurrent marking and concurrent
1303 // refinement, if any are in progress. We have to do this before
1304 // wait_until_scan_finished() below.
1305 concurrent_mark()->abort();
1307 // Make sure we'll choose a new allocation region afterwards.
1308 release_mutator_alloc_region();
1309 abandon_gc_alloc_regions();
1310 g1_rem_set()->cleanupHRRS();
1312 // We should call this after we retire any currently active alloc
1313 // regions so that all the ALLOC / RETIRE events are generated
1314 // before the start GC event.
1315 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1317 // We may have added regions to the current incremental collection
1318 // set between the last GC or pause and now. We need to clear the
1319 // incremental collection set and then start rebuilding it afresh
1320 // after this full GC.
1321 abandon_collection_set(g1_policy()->inc_cset_head());
1322 g1_policy()->clear_incremental_cset();
1323 g1_policy()->stop_incremental_cset_building();
1325 tear_down_region_sets(false /* free_list_only */);
1326 g1_policy()->set_gcs_are_young(true);
1328 // See the comments in g1CollectedHeap.hpp and
1329 // G1CollectedHeap::ref_processing_init() about
1330 // how reference processing currently works in G1.
1332 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1333 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1335 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1336 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1338 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1339 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1341 // Do collection work
1342 {
1343 HandleMark hm; // Discard invalid handles created during gc
1344 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1345 }
1347 assert(free_regions() == 0, "we should not have added any free regions");
1348 rebuild_region_sets(false /* free_list_only */);
1350 // Enqueue any discovered reference objects that have
1351 // not been removed from the discovered lists.
1352 ref_processor_stw()->enqueue_discovered_references();
1354 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1356 MemoryService::track_memory_usage();
1358 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
1359 HandleMark hm; // Discard invalid handles created during verification
1360 gclog_or_tty->print(" VerifyAfterGC:");
1361 prepare_for_verify();
1362 Universe::verify(/* silent */ false,
1363 /* option */ VerifyOption_G1UsePrevMarking);
1365 }
1367 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1368 ref_processor_stw()->verify_no_references_recorded();
1370 // Note: since we've just done a full GC, concurrent
1371 // marking is no longer active. Therefore we need not
1372 // re-enable reference discovery for the CM ref processor.
1373 // That will be done at the start of the next marking cycle.
1374 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1375 ref_processor_cm()->verify_no_references_recorded();
1377 reset_gc_time_stamp();
1378 // Since everything potentially moved, we will clear all remembered
1379 // sets, and clear all cards. Later we will rebuild remembered
1380 // sets. We will also reset the GC time stamps of the regions.
1381 PostMCRemSetClearClosure rs_clear(mr_bs());
1382 heap_region_iterate(&rs_clear);
1384 // Resize the heap if necessary.
1385 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1387 if (_hr_printer.is_active()) {
1388 // We should do this after we potentially resize the heap so
1389 // that all the COMMIT / UNCOMMIT events are generated before
1390 // the end GC event.
1392 PostCompactionPrinterClosure cl(hr_printer());
1393 heap_region_iterate(&cl);
1395 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1396 }
1398 if (_cg1r->use_cache()) {
1399 _cg1r->clear_and_record_card_counts();
1400 _cg1r->clear_hot_cache();
1401 }
1403 // Rebuild remembered sets of all regions.
1404 if (G1CollectedHeap::use_parallel_gc_threads()) {
1405 uint n_workers =
1406 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1407 workers()->active_workers(),
1408 Threads::number_of_non_daemon_threads());
1409 assert(UseDynamicNumberOfGCThreads ||
1410 n_workers == workers()->total_workers(),
1411 "If not dynamic should be using all the workers");
1412 workers()->set_active_workers(n_workers);
1413 // Set parallel threads in the heap (_n_par_threads) only
1414 // before a parallel phase and always reset it to 0 after
1415 // the phase so that the number of parallel threads does
1416 // not get carried forward to a serial phase where there
1417 // may be code that is "possibly_parallel".
1418 set_par_threads(n_workers);
1420 ParRebuildRSTask rebuild_rs_task(this);
1421 assert(check_heap_region_claim_values(
1422 HeapRegion::InitialClaimValue), "sanity check");
1423 assert(UseDynamicNumberOfGCThreads ||
1424 workers()->active_workers() == workers()->total_workers(),
1425 "Unless dynamic should use total workers");
1426 // Use the most recent number of active workers
1427 assert(workers()->active_workers() > 0,
1428 "Active workers not properly set");
1429 set_par_threads(workers()->active_workers());
1430 workers()->run_task(&rebuild_rs_task);
1431 set_par_threads(0);
1432 assert(check_heap_region_claim_values(
1433 HeapRegion::RebuildRSClaimValue), "sanity check");
1434 reset_heap_region_claim_values();
1435 } else {
1436 RebuildRSOutOfRegionClosure rebuild_rs(this);
1437 heap_region_iterate(&rebuild_rs);
1438 }
1440 if (G1Log::fine()) {
1441 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
1442 }
1444 if (true) { // FIXME
1445 // Ask the permanent generation to adjust size for full collections
1446 perm()->compute_new_size();
1447 }
1449 // Start a new incremental collection set for the next pause
1450 assert(g1_policy()->collection_set() == NULL, "must be");
1451 g1_policy()->start_incremental_cset_building();
1453 // Clear the _cset_fast_test bitmap in anticipation of adding
1454 // regions to the incremental collection set for the next
1455 // evacuation pause.
1456 clear_cset_fast_test();
1458 init_mutator_alloc_region();
1460 double end = os::elapsedTime();
1461 g1_policy()->record_full_collection_end();
1463 #ifdef TRACESPINNING
1464 ParallelTaskTerminator::print_termination_counts();
1465 #endif
1467 gc_epilogue(true);
1469 // Discard all rset updates
1470 JavaThread::dirty_card_queue_set().abandon_logs();
1471 assert(!G1DeferredRSUpdate
1472 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1473 }
1475 _young_list->reset_sampled_info();
1476 // At this point there should be no regions in the
1477 // entire heap tagged as young.
1478 assert( check_young_list_empty(true /* check_heap */),
1479 "young list should be empty at this point");
1481 // Update the number of full collections that have been completed.
1482 increment_full_collections_completed(false /* concurrent */);
1484 _hrs.verify_optional();
1485 verify_region_sets_optional();
1487 print_heap_after_gc();
1488 g1mm()->update_sizes();
1489 post_full_gc_dump();
1491 return true;
1492 }
1494 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1495 // do_collection() will return whether it succeeded in performing
1496 // the GC. Currently, there is no facility on the
1497 // do_full_collection() API to notify the caller that the collection
1498 // did not succeed (e.g., because it was locked out by the GC
1499 // locker). So, right now, we'll ignore the return value.
1500 bool dummy = do_collection(true, /* explicit_gc */
1501 clear_all_soft_refs,
1502 0 /* word_size */);
1503 }
1505 // This code is mostly copied from TenuredGeneration.
1506 void
1507 G1CollectedHeap::
1508 resize_if_necessary_after_full_collection(size_t word_size) {
1509 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1511 // Include the current allocation, if any, and bytes that will be
1512 // pre-allocated to support collections, as "used".
1513 const size_t used_after_gc = used();
1514 const size_t capacity_after_gc = capacity();
1515 const size_t free_after_gc = capacity_after_gc - used_after_gc;
1517 // This is enforced in arguments.cpp.
1518 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1519 "otherwise the code below doesn't make sense");
1521 // We don't have floating point command-line arguments
1522 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1523 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1524 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1525 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1527 const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1528 const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1530 // We have to be careful here as these two calculations can overflow
1531 // 32-bit size_t's.
1532 double used_after_gc_d = (double) used_after_gc;
1533 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1534 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
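  // Worked example (illustrative only): with MinHeapFreeRatio = 40,
  // MaxHeapFreeRatio = 70 and 600M used after the GC, the minimum
  // desired capacity is 600M / 0.60 = 1000M and the maximum desired
  // capacity is 600M / 0.30 = 2000M.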
1536 // Let's make sure that they are both under the max heap size, which
1537 // by default will make them fit into a size_t.
1538 double desired_capacity_upper_bound = (double) max_heap_size;
1539 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1540 desired_capacity_upper_bound);
1541 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1542 desired_capacity_upper_bound);
1544 // We can now safely turn them into size_t's.
1545 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1546 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1548 // This assert only makes sense here, before we adjust them
1549 // with respect to the min and max heap size.
1550 assert(minimum_desired_capacity <= maximum_desired_capacity,
1551 err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
1552 "maximum_desired_capacity = "SIZE_FORMAT,
1553 minimum_desired_capacity, maximum_desired_capacity));
1555 // Should not be greater than the heap max size. No need to adjust
1556 // it with respect to the heap min size as it's a lower bound (i.e.,
1557 // we'll try to make the capacity larger than it, not smaller).
1558 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1559 // Should not be less than the heap min size. No need to adjust it
1560 // with respect to the heap max size as it's an upper bound (i.e.,
1561 // we'll try to make the capacity smaller than it, not greater).
1562 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1564 if (capacity_after_gc < minimum_desired_capacity) {
1565 // Don't expand unless it's significant
1566 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1567 ergo_verbose4(ErgoHeapSizing,
1568 "attempt heap expansion",
1569 ergo_format_reason("capacity lower than "
1570 "min desired capacity after Full GC")
1571 ergo_format_byte("capacity")
1572 ergo_format_byte("occupancy")
1573 ergo_format_byte_perc("min desired capacity"),
1574 capacity_after_gc, used_after_gc,
1575 minimum_desired_capacity, (double) MinHeapFreeRatio);
1576 expand(expand_bytes);
1578 // No expansion, now see if we want to shrink
1579 } else if (capacity_after_gc > maximum_desired_capacity) {
1580 // Capacity too large, compute shrinking size
1581 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1582 ergo_verbose4(ErgoHeapSizing,
1583 "attempt heap shrinking",
1584 ergo_format_reason("capacity higher than "
1585 "max desired capacity after Full GC")
1586 ergo_format_byte("capacity")
1587 ergo_format_byte("occupancy")
1588 ergo_format_byte_perc("max desired capacity"),
1589 capacity_after_gc, used_after_gc,
1590 maximum_desired_capacity, (double) MaxHeapFreeRatio);
1591 shrink(shrink_bytes);
1592 }
1593 }
1596 HeapWord*
1597 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1598 bool* succeeded) {
1599 assert_at_safepoint(true /* should_be_vm_thread */);
1601 *succeeded = true;
1602 // Let's attempt the allocation first.
1603 HeapWord* result =
1604 attempt_allocation_at_safepoint(word_size,
1605 false /* expect_null_mutator_alloc_region */);
1606 if (result != NULL) {
1607 assert(*succeeded, "sanity");
1608 return result;
1609 }
1611 // In a G1 heap, we're supposed to keep allocation from failing by
1612 // incremental pauses. Therefore, at least for now, we'll favor
1613 // expansion over collection. (This might change in the future if we can
1614 // do something smarter than full collection to satisfy a failed alloc.)
1615 result = expand_and_allocate(word_size);
1616 if (result != NULL) {
1617 assert(*succeeded, "sanity");
1618 return result;
1619 }
1621 // Expansion didn't work, we'll try to do a Full GC.
1622 bool gc_succeeded = do_collection(false, /* explicit_gc */
1623 false, /* clear_all_soft_refs */
1624 word_size);
1625 if (!gc_succeeded) {
1626 *succeeded = false;
1627 return NULL;
1628 }
1630 // Retry the allocation
1631 result = attempt_allocation_at_safepoint(word_size,
1632 true /* expect_null_mutator_alloc_region */);
1633 if (result != NULL) {
1634 assert(*succeeded, "sanity");
1635 return result;
1636 }
1638 // Then, try a Full GC that will collect all soft references.
1639 gc_succeeded = do_collection(false, /* explicit_gc */
1640 true, /* clear_all_soft_refs */
1641 word_size);
1642 if (!gc_succeeded) {
1643 *succeeded = false;
1644 return NULL;
1645 }
1647 // Retry the allocation once more
1648 result = attempt_allocation_at_safepoint(word_size,
1649 true /* expect_null_mutator_alloc_region */);
1650 if (result != NULL) {
1651 assert(*succeeded, "sanity");
1652 return result;
1653 }
1655 assert(!collector_policy()->should_clear_all_soft_refs(),
1656 "Flag should have been handled and cleared prior to this point");
1658 // What else? We might try synchronous finalization later. If the total
1659 // space available is large enough for the allocation, then a more
1660 // complete compaction phase than we've tried so far might be
1661 // appropriate.
1662 assert(*succeeded, "sanity");
1663 return NULL;
1664 }
1666 // Attempts to expand the heap sufficiently to support an
1667 // allocation of the given "word_size". If successful, performs
1668 // the allocation and returns the address of the allocated block,
1669 // or else returns "NULL".
1671 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1672 assert_at_safepoint(true /* should_be_vm_thread */);
1674 verify_region_sets_optional();
1676 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1677 ergo_verbose1(ErgoHeapSizing,
1678 "attempt heap expansion",
1679 ergo_format_reason("allocation request failed")
1680 ergo_format_byte("allocation request"),
1681 word_size * HeapWordSize);
1682 if (expand(expand_bytes)) {
1683 _hrs.verify_optional();
1684 verify_region_sets_optional();
1685 return attempt_allocation_at_safepoint(word_size,
1686 false /* expect_null_mutator_alloc_region */);
1687 }
1688 return NULL;
1689 }
1691 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1692 HeapWord* new_end) {
1693 assert(old_end != new_end, "don't call this otherwise");
1694 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1696 // Update the committed mem region.
1697 _g1_committed.set_end(new_end);
1698 // Tell the card table about the update.
1699 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1700 // Tell the BOT about the update.
1701 _bot_shared->resize(_g1_committed.word_size());
1702 }
1704 bool G1CollectedHeap::expand(size_t expand_bytes) {
1705 size_t old_mem_size = _g1_storage.committed_size();
1706 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1707 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1708 HeapRegion::GrainBytes);
1709 ergo_verbose2(ErgoHeapSizing,
1710 "expand the heap",
1711 ergo_format_byte("requested expansion amount")
1712 ergo_format_byte("attempted expansion amount"),
1713 expand_bytes, aligned_expand_bytes);
1715 // First commit the memory.
1716 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1717 bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1718 if (successful) {
1719 // Then propagate this update to the necessary data structures.
1720 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1721 update_committed_space(old_end, new_end);
1723 FreeRegionList expansion_list("Local Expansion List");
1724 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1725 assert(mr.start() == old_end, "post-condition");
1726 // mr might be a smaller region than what was requested if
1727 // expand_by() was unable to allocate the HeapRegion instances
1728 assert(mr.end() <= new_end, "post-condition");
1730 size_t actual_expand_bytes = mr.byte_size();
1731 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1732 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
1733 "post-condition");
1734 if (actual_expand_bytes < aligned_expand_bytes) {
1735 // We could not expand _hrs to the desired size. In this case we
1736 // need to shrink the committed space accordingly.
1737 assert(mr.end() < new_end, "invariant");
1739 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1740 // First uncommit the memory.
1741 _g1_storage.shrink_by(diff_bytes);
1742 // Then propagate this update to the necessary data structures.
1743 update_committed_space(new_end, mr.end());
1744 }
1745 _free_list.add_as_tail(&expansion_list);
1747 if (_hr_printer.is_active()) {
1748 HeapWord* curr = mr.start();
1749 while (curr < mr.end()) {
1750 HeapWord* curr_end = curr + HeapRegion::GrainWords;
1751 _hr_printer.commit(curr, curr_end);
1752 curr = curr_end;
1753 }
1754 assert(curr == mr.end(), "post-condition");
1755 }
1756 g1_policy()->record_new_heap_size(n_regions());
1757 } else {
1758 ergo_verbose0(ErgoHeapSizing,
1759 "did not expand the heap",
1760 ergo_format_reason("heap expansion operation failed"));
1761 // The expansion of the virtual storage space was unsuccessful.
1762 // Let's see if it was because we ran out of swap.
1763 if (G1ExitOnExpansionFailure &&
1764 _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1765 // We had head room...
1766 vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
1767 }
1768 }
1769 return successful;
1770 }
1772 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1773 size_t old_mem_size = _g1_storage.committed_size();
1774 size_t aligned_shrink_bytes =
1775 ReservedSpace::page_align_size_down(shrink_bytes);
1776 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1777 HeapRegion::GrainBytes);
1778 uint num_regions_deleted = 0;
1779 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1780 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1781 assert(mr.end() == old_end, "post-condition");
1783 ergo_verbose3(ErgoHeapSizing,
1784 "shrink the heap",
1785 ergo_format_byte("requested shrinking amount")
1786 ergo_format_byte("aligned shrinking amount")
1787 ergo_format_byte("attempted shrinking amount"),
1788 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
1789 if (mr.byte_size() > 0) {
1790 if (_hr_printer.is_active()) {
1791 HeapWord* curr = mr.end();
1792 while (curr > mr.start()) {
1793 HeapWord* curr_end = curr;
1794 curr -= HeapRegion::GrainWords;
1795 _hr_printer.uncommit(curr, curr_end);
1796 }
1797 assert(curr == mr.start(), "post-condition");
1798 }
1800 _g1_storage.shrink_by(mr.byte_size());
1801 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1802 assert(mr.start() == new_end, "post-condition");
1804 _expansion_regions += num_regions_deleted;
1805 update_committed_space(old_end, new_end);
1806 HeapRegionRemSet::shrink_heap(n_regions());
1807 g1_policy()->record_new_heap_size(n_regions());
1808 } else {
1809 ergo_verbose0(ErgoHeapSizing,
1810 "did not shrink the heap",
1811 ergo_format_reason("heap shrinking operation failed"));
1812 }
1813 }
1815 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1816 verify_region_sets_optional();
1818 // We should only reach here at the end of a Full GC which means we
1819 // should not be holding on to any GC alloc regions. The method
1820 // below will make sure of that and do any remaining clean up.
1821 abandon_gc_alloc_regions();
1823 // Instead of tearing down / rebuilding the free lists here, we
1824 // could use the remove_all_pending() method on free_list to
1825 // remove only the regions that we need to remove.
1826 tear_down_region_sets(true /* free_list_only */);
1827 shrink_helper(shrink_bytes);
1828 rebuild_region_sets(true /* free_list_only */);
1830 _hrs.verify_optional();
1831 verify_region_sets_optional();
1832 }
1834 // Public methods.
1836 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1837 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1838 #endif // _MSC_VER
1841 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1842 SharedHeap(policy_),
1843 _g1_policy(policy_),
1844 _dirty_card_queue_set(false),
1845 _into_cset_dirty_card_queue_set(false),
1846 _is_alive_closure_cm(this),
1847 _is_alive_closure_stw(this),
1848 _ref_processor_cm(NULL),
1849 _ref_processor_stw(NULL),
1850 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1851 _bot_shared(NULL),
1852 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1853 _evac_failure_scan_stack(NULL) ,
1854 _mark_in_progress(false),
1855 _cg1r(NULL), _summary_bytes_used(0),
1856 _g1mm(NULL),
1857 _refine_cte_cl(NULL),
1858 _full_collection(false),
1859 _free_list("Master Free List"),
1860 _secondary_free_list("Secondary Free List"),
1861 _old_set("Old Set"),
1862 _humongous_set("Master Humongous Set"),
1863 _free_regions_coming(false),
1864 _young_list(new YoungList(this)),
1865 _gc_time_stamp(0),
1866 _retained_old_gc_alloc_region(NULL),
1867 _expand_heap_after_alloc_failure(true),
1868 _surviving_young_words(NULL),
1869 _full_collections_completed(0),
1870 _in_cset_fast_test(NULL),
1871 _in_cset_fast_test_base(NULL),
1872 _dirty_cards_region_list(NULL),
1873 _worker_cset_start_region(NULL),
1874 _worker_cset_start_region_time_stamp(NULL) {
1875 _g1h = this; // To catch bugs.
1876 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1877 vm_exit_during_initialization("Failed necessary allocation.");
1878 }
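  // An allocation larger than half a region (GrainWords / 2) is
  // treated as humongous and is placed in its own dedicated region(s).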
1880 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1882 int n_queues = MAX2((int)ParallelGCThreads, 1);
1883 _task_queues = new RefToScanQueueSet(n_queues);
1885 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1886 assert(n_rem_sets > 0, "Invariant.");
1888 HeapRegionRemSetIterator** iter_arr =
1889 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
1890 for (int i = 0; i < n_queues; i++) {
1891 iter_arr[i] = new HeapRegionRemSetIterator();
1892 }
1893 _rem_set_iterator = iter_arr;
1895 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues);
1896 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues);
1898 for (int i = 0; i < n_queues; i++) {
1899 RefToScanQueue* q = new RefToScanQueue();
1900 q->initialize();
1901 _task_queues->register_queue(i, q);
1902 }
1904 clear_cset_start_regions();
1906 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1907 }
1909 jint G1CollectedHeap::initialize() {
1910 CollectedHeap::pre_initialize();
1911 os::enable_vtime();
1913 G1Log::init();
1915 // Necessary to satisfy locking discipline assertions.
1917 MutexLocker x(Heap_lock);
1919 // We have to initialize the printer before committing the heap, as
1920 // it will be used then.
1921 _hr_printer.set_active(G1PrintHeapRegions);
1923 // While there are no constraints in the GC code that HeapWordSize
1924 // be any particular value, there are multiple other areas in the
1925 // system which believe this to be true (e.g. oop->object_size in some
1926 // cases incorrectly returns the size in wordSize units rather than
1927 // HeapWordSize).
1928 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1930 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1931 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1933 // Ensure that the sizes are properly aligned.
1934 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1935 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1937 _cg1r = new ConcurrentG1Refine();
1939 // Reserve the maximum.
1940 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
1941 // Includes the perm-gen.
1943 // When compressed oops are enabled, the preferred heap base
1944 // is calculated by subtracting the requested size from the
1945 // 32Gb boundary and using the result as the base address for
1946 // heap reservation. If the requested size is not aligned to
1947 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1948 // into the ReservedHeapSpace constructor) then the actual
1949 // base of the reserved heap may end up differing from the
1950 // address that was requested (i.e. the preferred heap base).
1951 // If this happens then we could end up using a non-optimal
1952 // compressed oops mode.
1954 // Since max_byte_size is aligned to the size of a heap region (checked
1955 // above), we also need to align the perm gen size as it might not be.
1956 const size_t total_reserved = max_byte_size +
1957 align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
1958 Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
1960 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
1962 ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
1963 UseLargePages, addr);
1965 if (UseCompressedOops) {
1966 if (addr != NULL && !heap_rs.is_reserved()) {
1967 // Failed to reserve at specified address - the requested memory
1968 // region is taken already, for example, by 'java' launcher.
1969 // Try again to reserve the heap higher.
1970 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
1972 ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
1973 UseLargePages, addr);
1975 if (addr != NULL && !heap_rs0.is_reserved()) {
1976 // Failed to reserve at specified address again - give up.
1977 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
1978 assert(addr == NULL, "");
1980 ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
1981 UseLargePages, addr);
1982 heap_rs = heap_rs1;
1983 } else {
1984 heap_rs = heap_rs0;
1985 }
1986 }
1987 }
1989 if (!heap_rs.is_reserved()) {
1990 vm_exit_during_initialization("Could not reserve enough space for object heap");
1991 return JNI_ENOMEM;
1992 }
1994 // It is important to do this in a way such that concurrent readers can't
1995 // temporarily think something is in the heap. (I've actually seen this
1996 // happen in asserts: DLD.)
1997 _reserved.set_word_size(0);
1998 _reserved.set_start((HeapWord*)heap_rs.base());
1999 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2001 _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2003 // Create the gen rem set (and barrier set) for the entire reserved region.
2004 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2005 set_barrier_set(rem_set()->bs());
2006 if (barrier_set()->is_a(BarrierSet::ModRef)) {
2007 _mr_bs = (ModRefBarrierSet*)_barrier_set;
2008 } else {
2009 vm_exit_during_initialization("G1 requires a mod ref bs.");
2010 return JNI_ENOMEM;
2011 }
2013 // Also create a G1 rem set.
2014 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2015 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2016 } else {
2017 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2018 return JNI_ENOMEM;
2019 }
2021 // Carve out the G1 part of the heap.
2023 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2024 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2025 g1_rs.size()/HeapWordSize);
2026 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
2028 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
2030 _g1_storage.initialize(g1_rs, 0);
2031 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2032 _hrs.initialize((HeapWord*) _g1_reserved.start(),
2033 (HeapWord*) _g1_reserved.end(),
2034 _expansion_regions);
2036 // 6843694 - ensure that the maximum region index can fit
2037 // in the remembered set structures.
2038 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2039 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
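  // For example, assuming RegionIdx_t is a 16-bit signed type,
  // max_region_idx is (1 << 15) - 1 = 32767 regions.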
2041 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2042 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2043 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2044 "too many cards per region");
2046 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2048 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2049 heap_word_size(init_byte_size));
2051 _g1h = this;
2053 _in_cset_fast_test_length = max_regions();
2054 _in_cset_fast_test_base =
2055 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
2057 // We're biasing _in_cset_fast_test to avoid subtracting the
2058 // beginning of the heap every time we want to index; basically
2059 // it's the same as what we do with the card table.
2060 _in_cset_fast_test = _in_cset_fast_test_base -
2061 ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
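  // With the bias applied, an address in the heap can be tested
  // directly as _in_cset_fast_test[(uintx) addr >> LogOfHRGrainBytes]
  // without first subtracting the start of the reserved space.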
2063 // Clear the _cset_fast_test bitmap in anticipation of adding
2064 // regions to the incremental collection set for the first
2065 // evacuation pause.
2066 clear_cset_fast_test();
2068 // Create the ConcurrentMark data structure and thread.
2069 // (Must do this late, so that "max_regions" is defined.)
2070 _cm = new ConcurrentMark(heap_rs, max_regions());
2071 _cmThread = _cm->cmThread();
2073 // Initialize the from_card cache structure of HeapRegionRemSet.
2074 HeapRegionRemSet::init_heap(max_regions());
2076 // Now expand into the initial heap size.
2077 if (!expand(init_byte_size)) {
2078 vm_exit_during_initialization("Failed to allocate initial heap.");
2079 return JNI_ENOMEM;
2080 }
2082 // Perform any initialization actions delegated to the policy.
2083 g1_policy()->init();
2085 _refine_cte_cl =
2086 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
2087 g1_rem_set(),
2088 concurrent_g1_refine());
2089 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2091 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2092 SATB_Q_FL_lock,
2093 G1SATBProcessCompletedThreshold,
2094 Shared_SATB_Q_lock);
2096 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2097 DirtyCardQ_FL_lock,
2098 concurrent_g1_refine()->yellow_zone(),
2099 concurrent_g1_refine()->red_zone(),
2100 Shared_DirtyCardQ_lock);
2102 if (G1DeferredRSUpdate) {
2103 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2104 DirtyCardQ_FL_lock,
2105 -1, // never trigger processing
2106 -1, // no limit on length
2107 Shared_DirtyCardQ_lock,
2108 &JavaThread::dirty_card_queue_set());
2109 }
2111 // Initialize the card queue set used to hold cards containing
2112 // references into the collection set.
2113 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2114 DirtyCardQ_FL_lock,
2115 -1, // never trigger processing
2116 -1, // no limit on length
2117 Shared_DirtyCardQ_lock,
2118 &JavaThread::dirty_card_queue_set());
2120 // In case we're keeping closure specialization stats, initialize those
2121 // counts and that mechanism.
2122 SpecializationStats::clear();
2124 // Do later initialization work for concurrent refinement.
2125 _cg1r->init();
2127 // Here we allocate the dummy full region that is required by the
2128 // G1AllocRegion class. If we don't pass an address in the reserved
2129 // space here, lots of asserts fire.
2131 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2132 _g1_reserved.start());
2133 // We'll re-use the same region whether the alloc region will
2134 // require BOT updates or not and, if it doesn't, then a non-young
2135 // region will complain that it cannot support allocations without
2136 // BOT updates. So we'll tag the dummy region as young to avoid that.
2137 dummy_region->set_young();
2138 // Make sure it's full.
2139 dummy_region->set_top(dummy_region->end());
2140 G1AllocRegion::setup(this, dummy_region);
2142 init_mutator_alloc_region();
2144 // Create the monitoring and management support now that
2145 // values in the heap have been properly initialized.
2146 _g1mm = new G1MonitoringSupport(this);
2148 return JNI_OK;
2149 }
2151 void G1CollectedHeap::ref_processing_init() {
2152 // Reference processing in G1 currently works as follows:
2153 //
2154 // * There are two reference processor instances. One is
2155 // used to record and process discovered references
2156 // during concurrent marking; the other is used to
2157 // record and process references during STW pauses
2158 // (both full and incremental).
2159 // * Both ref processors need to 'span' the entire heap as
2160 // the regions in the collection set may be dotted around.
2161 //
2162 // * For the concurrent marking ref processor:
2163 // * Reference discovery is enabled at initial marking.
2164 // * Reference discovery is disabled and the discovered
2165 // references processed etc during remarking.
2166 // * Reference discovery is MT (see below).
2167 // * Reference discovery requires a barrier (see below).
2168 // * Reference processing may or may not be MT
2169 // (depending on the value of ParallelRefProcEnabled
2170 // and ParallelGCThreads).
2171 // * A full GC disables reference discovery by the CM
2172 //     ref processor and abandons any entries on its
2173 // discovered lists.
2174 //
2175 // * For the STW processor:
2176 // * Non MT discovery is enabled at the start of a full GC.
2177 // * Processing and enqueueing during a full GC is non-MT.
2178 // * During a full GC, references are processed after marking.
2179 //
2180 // * Discovery (may or may not be MT) is enabled at the start
2181 // of an incremental evacuation pause.
2182 // * References are processed near the end of a STW evacuation pause.
2183 // * For both types of GC:
2184 // * Discovery is atomic - i.e. not concurrent.
2185 // * Reference discovery will not need a barrier.
2187 SharedHeap::ref_processing_init();
2188 MemRegion mr = reserved_region();
2190 // Concurrent Mark ref processor
2191 _ref_processor_cm =
2192 new ReferenceProcessor(mr, // span
2193 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2194 // mt processing
2195 (int) ParallelGCThreads,
2196 // degree of mt processing
2197 (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2198 // mt discovery
2199 (int) MAX2(ParallelGCThreads, ConcGCThreads),
2200 // degree of mt discovery
2201 false,
2202 // Reference discovery is not atomic
2203 &_is_alive_closure_cm,
2204 // is alive closure
2205 // (for efficiency/performance)
2206 true);
2207 // Setting next fields of discovered
2208 // lists requires a barrier.
2210 // STW ref processor
2211 _ref_processor_stw =
2212 new ReferenceProcessor(mr, // span
2213 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2214 // mt processing
2215 MAX2((int)ParallelGCThreads, 1),
2216 // degree of mt processing
2217 (ParallelGCThreads > 1),
2218 // mt discovery
2219 MAX2((int)ParallelGCThreads, 1),
2220 // degree of mt discovery
2221 true,
2222 // Reference discovery is atomic
2223 &_is_alive_closure_stw,
2224 // is alive closure
2225 // (for efficiency/performance)
2226 false);
2227 // Setting next fields of discovered
2228 // lists requires a barrier.
2229 }
2231 size_t G1CollectedHeap::capacity() const {
2232 return _g1_committed.byte_size();
2233 }
2235 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2236 DirtyCardQueue* into_cset_dcq,
2237 bool concurrent,
2238 int worker_i) {
2239 // Clean cards in the hot card cache
2240 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2242 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2243 int n_completed_buffers = 0;
2244 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2245 n_completed_buffers++;
2246 }
2247 g1_policy()->record_update_rs_processed_buffers(worker_i,
2248 (double) n_completed_buffers);
2249 dcqs.clear_n_completed_buffers();
2250 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2251 }
2254 // Computes the sum of the storage used by the various regions.
2256 size_t G1CollectedHeap::used() const {
2257 assert(Heap_lock->owner() != NULL,
2258 "Should be owned on this thread's behalf.");
2259 size_t result = _summary_bytes_used;
2260 // Read only once in case it is set to NULL concurrently
2261 HeapRegion* hr = _mutator_alloc_region.get();
2262 if (hr != NULL)
2263 result += hr->used();
2264 return result;
2265 }
2267 size_t G1CollectedHeap::used_unlocked() const {
2268 size_t result = _summary_bytes_used;
2269 return result;
2270 }
2272 class SumUsedClosure: public HeapRegionClosure {
2273 size_t _used;
2274 public:
2275 SumUsedClosure() : _used(0) {}
2276 bool doHeapRegion(HeapRegion* r) {
2277 if (!r->continuesHumongous()) {
2278 _used += r->used();
2279 }
2280 return false;
2281 }
2282 size_t result() { return _used; }
2283 };
2285 size_t G1CollectedHeap::recalculate_used() const {
2286 SumUsedClosure blk;
2287 heap_region_iterate(&blk);
2288 return blk.result();
2289 }
2291 size_t G1CollectedHeap::unsafe_max_alloc() {
2292 if (free_regions() > 0) return HeapRegion::GrainBytes;
2293 // otherwise, is there space in the current allocation region?
2295 // We need to store the current allocation region in a local variable
2296 // here. The problem is that this method doesn't take any locks and
2297 // there may be other threads which overwrite the current allocation
2298 // region field. attempt_allocation(), for example, sets it to NULL
2299 // and this can happen *after* the NULL check here but before the call
2300 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2301 // to be a problem in the optimized build, since the two loads of the
2302 // current allocation region field are optimized away.
2303 HeapRegion* hr = _mutator_alloc_region.get();
2304 if (hr == NULL) {
2305 return 0;
2306 }
2307 return hr->free();
2308 }
2310 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2311 switch (cause) {
2312 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
2313 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2314 case GCCause::_g1_humongous_allocation: return true;
2315 default: return false;
2316 }
2317 }
2319 #ifndef PRODUCT
2320 void G1CollectedHeap::allocate_dummy_regions() {
2321 // Let's fill up most of the region
2322 size_t word_size = HeapRegion::GrainWords - 1024;
2323 // And as a result the region we'll allocate will be humongous.
2324 guarantee(isHumongous(word_size), "sanity");
2326 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2327 // Let's use the existing mechanism for the allocation
2328 HeapWord* dummy_obj = humongous_obj_allocate(word_size);
2329 if (dummy_obj != NULL) {
2330 MemRegion mr(dummy_obj, word_size);
2331 CollectedHeap::fill_with_object(mr);
2332 } else {
2333 // If we can't allocate once, we probably cannot allocate
2334 // again. Let's get out of the loop.
2335 break;
2336 }
2337 }
2338 }
2339 #endif // !PRODUCT
2341 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
2342 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2344 // We assume that if concurrent == true, then the caller is a
2345 // concurrent thread that has joined the Suspendible Thread
2346 // Set. If there's ever a cheap way to check this, we should add an
2347 // assert here.
2349 // We have already incremented _total_full_collections at the start
2350 // of the GC, so total_full_collections() represents how many full
2351 // collections have been started.
2352 unsigned int full_collections_started = total_full_collections();
2354 // Given that this method is called at the end of a Full GC or of a
2355 // concurrent cycle, and those can be nested (i.e., a Full GC can
2356 // interrupt a concurrent cycle), the number of full collections
2357 // completed should be either one (in the case where there was no
2358 // nesting) or two (when a Full GC interrupted a concurrent cycle)
2359 // behind the number of full collections started.
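  // For example, if full_collections_started is 5 at this point then
  // _full_collections_completed should be 4 (no nesting) or 3 (a Full
  // GC interrupted a concurrent cycle).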
2361 // This is the case for the inner caller, i.e. a Full GC.
2362 assert(concurrent ||
2363 (full_collections_started == _full_collections_completed + 1) ||
2364 (full_collections_started == _full_collections_completed + 2),
2365 err_msg("for inner caller (Full GC): full_collections_started = %u "
2366 "is inconsistent with _full_collections_completed = %u",
2367 full_collections_started, _full_collections_completed));
2369 // This is the case for the outer caller, i.e. the concurrent cycle.
2370 assert(!concurrent ||
2371 (full_collections_started == _full_collections_completed + 1),
2372 err_msg("for outer caller (concurrent cycle): "
2373 "full_collections_started = %u "
2374 "is inconsistent with _full_collections_completed = %u",
2375 full_collections_started, _full_collections_completed));
2377 _full_collections_completed += 1;
2379 // We need to clear the "in_progress" flag in the CM thread before
2380 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2381 // is set) so that if a waiter requests another System.gc() it doesn't
2382 // incorrectly see that a marking cycle is still in progress.
2383 if (concurrent) {
2384 _cmThread->clear_in_progress();
2385 }
2387 // This notify_all() will ensure that a thread that called
2388 // System.gc() (with ExplicitGCInvokesConcurrent set or not) and is
2389 // waiting for a full GC to finish will be woken up. It is
2390 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2391 FullGCCount_lock->notify_all();
2392 }
2394 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
2395 assert_at_safepoint(true /* should_be_vm_thread */);
2396 GCCauseSetter gcs(this, cause);
2397 switch (cause) {
2398 case GCCause::_heap_inspection:
2399 case GCCause::_heap_dump: {
2400 HandleMark hm;
2401 do_full_collection(false); // don't clear all soft refs
2402 break;
2403 }
2404 default: // XXX FIX ME
2405 ShouldNotReachHere(); // Unexpected use of this function
2406 }
2407 }
2409 void G1CollectedHeap::collect(GCCause::Cause cause) {
2410 assert_heap_not_locked();
2412 unsigned int gc_count_before;
2413 unsigned int full_gc_count_before;
2414 bool retry_gc;
2416 do {
2417 retry_gc = false;
2419 {
2420 MutexLocker ml(Heap_lock);
2422 // Read the GC count while holding the Heap_lock
2423 gc_count_before = total_collections();
2424 full_gc_count_before = total_full_collections();
2425 }
2427 if (should_do_concurrent_full_gc(cause)) {
2428 // Schedule an initial-mark evacuation pause that will start a
2429 // concurrent cycle. We're setting word_size to 0 which means that
2430 // we are not requesting a post-GC allocation.
2431 VM_G1IncCollectionPause op(gc_count_before,
2432 0, /* word_size */
2433 true, /* should_initiate_conc_mark */
2434 g1_policy()->max_pause_time_ms(),
2435 cause);
2437 VMThread::execute(&op);
2438 if (!op.pause_succeeded()) {
2439 if (full_gc_count_before == total_full_collections()) {
2440 retry_gc = op.should_retry_gc();
2441 } else {
2442 // A Full GC happened while we were trying to schedule the
2443 // initial-mark GC. No point in starting a new cycle given
2444 // that the whole heap was collected anyway.
2445 }
2447 if (retry_gc) {
2448 if (GC_locker::is_active_and_needs_gc()) {
2449 GC_locker::stall_until_clear();
2450 }
2451 }
2452 }
2453 } else {
2454 if (cause == GCCause::_gc_locker
2455 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2457 // Schedule a standard evacuation pause. We're setting word_size
2458 // to 0 which means that we are not requesting a post-GC allocation.
2459 VM_G1IncCollectionPause op(gc_count_before,
2460 0, /* word_size */
2461 false, /* should_initiate_conc_mark */
2462 g1_policy()->max_pause_time_ms(),
2463 cause);
2464 VMThread::execute(&op);
2465 } else {
2466 // Schedule a Full GC.
2467 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2468 VMThread::execute(&op);
2469 }
2470 }
2471 } while (retry_gc);
2472 }
2474 bool G1CollectedHeap::is_in(const void* p) const {
2475 if (_g1_committed.contains(p)) {
2476 // Given that we know that p is in the committed space,
2477 // heap_region_containing_raw() should successfully
2478 // return the containing region.
2479 HeapRegion* hr = heap_region_containing_raw(p);
2480 return hr->is_in(p);
2481 } else {
2482 return _perm_gen->as_gen()->is_in(p);
2483 }
2484 }
2486 // Iteration functions.
2488 // Iterates an OopClosure over all ref-containing fields of objects
2489 // within a HeapRegion.
2491 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2492 MemRegion _mr;
2493 OopClosure* _cl;
2494 public:
2495 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
2496 : _mr(mr), _cl(cl) {}
2497 bool doHeapRegion(HeapRegion* r) {
2498 if (! r->continuesHumongous()) {
2499 r->oop_iterate(_cl);
2500 }
2501 return false;
2502 }
2503 };
2505 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
2506 IterateOopClosureRegionClosure blk(_g1_committed, cl);
2507 heap_region_iterate(&blk);
2508 if (do_perm) {
2509 perm_gen()->oop_iterate(cl);
2510 }
2511 }
2513 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
2514 IterateOopClosureRegionClosure blk(mr, cl);
2515 heap_region_iterate(&blk);
2516 if (do_perm) {
2517 perm_gen()->oop_iterate(cl);
2518 }
2519 }
2521 // Iterates an ObjectClosure over all objects within a HeapRegion.
2523 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2524 ObjectClosure* _cl;
2525 public:
2526 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2527 bool doHeapRegion(HeapRegion* r) {
2528 if (! r->continuesHumongous()) {
2529 r->object_iterate(_cl);
2530 }
2531 return false;
2532 }
2533 };
2535 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
2536 IterateObjectClosureRegionClosure blk(cl);
2537 heap_region_iterate(&blk);
2538 if (do_perm) {
2539 perm_gen()->object_iterate(cl);
2540 }
2541 }
2543 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2544 // FIXME: is this right?
2545 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
2546 }
2548 // Calls a SpaceClosure on a HeapRegion.
2550 class SpaceClosureRegionClosure: public HeapRegionClosure {
2551 SpaceClosure* _cl;
2552 public:
2553 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2554 bool doHeapRegion(HeapRegion* r) {
2555 _cl->do_space(r);
2556 return false;
2557 }
2558 };
2560 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2561 SpaceClosureRegionClosure blk(cl);
2562 heap_region_iterate(&blk);
2563 }
2565 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2566 _hrs.iterate(cl);
2567 }
2569 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
2570 HeapRegionClosure* cl) const {
2571 _hrs.iterate_from(r, cl);
2572 }
2574 void
2575 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2576 uint worker,
2577 uint no_of_par_workers,
2578 jint claim_value) {
2579 const uint regions = n_regions();
2580 const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2581 no_of_par_workers :
2582 1);
2583 assert(UseDynamicNumberOfGCThreads ||
2584 no_of_par_workers == workers()->total_workers(),
2585 "Non dynamic should use fixed number of workers");
2586 // try to spread out the starting points of the workers
2587 const uint start_index = regions / max_workers * worker;
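  // e.g., with 100 regions and 4 workers, the workers start their
  // iteration at regions 0, 25, 50 and 75 respectively.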
2589 // each worker will actually look at all regions
2590 for (uint count = 0; count < regions; ++count) {
2591 const uint index = (start_index + count) % regions;
2592 assert(0 <= index && index < regions, "sanity");
2593 HeapRegion* r = region_at(index);
2594 // we'll ignore "continues humongous" regions (we'll process them
2595 // when we come across their corresponding "start humongous"
2596 // region) and regions already claimed
2597 if (r->claim_value() == claim_value || r->continuesHumongous()) {
2598 continue;
2599 }
2600 // OK, try to claim it
2601 if (r->claimHeapRegion(claim_value)) {
2602 // success!
2603 assert(!r->continuesHumongous(), "sanity");
2604 if (r->startsHumongous()) {
2605 // If the region is "starts humongous" we'll iterate over its
2606 // "continues humongous" regions before the region itself.
2607 // The order is important. In one case, calling the
2608 // closure on the "starts humongous" region might de-allocate
2609 // and clear all its "continues humongous" regions and, as a
2610 // result, we might end up processing them twice. So, we'll do
2611 // them first (notice: most closures will ignore them anyway) and
2612 // then we'll do the "starts humongous" region.
2613 for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2614 HeapRegion* chr = region_at(ch_index);
2616 // if the region has already been claimed or it's not
2617 // "continues humongous" we're done
2618 if (chr->claim_value() == claim_value ||
2619 !chr->continuesHumongous()) {
2620 break;
2621 }
2623 // No one should have claimed it directly; we can assert this
2624 // given that we claimed its "starts humongous" region.
2625 assert(chr->claim_value() != claim_value, "sanity");
2626 assert(chr->humongous_start_region() == r, "sanity");
2628 if (chr->claimHeapRegion(claim_value)) {
2629 // we should always be able to claim it; no one else should
2630 // be trying to claim this region
2632 bool res2 = cl->doHeapRegion(chr);
2633 assert(!res2, "Should not abort");
2635 // Right now, this holds (i.e., no closure that actually
2636 // does something with "continues humongous" regions
2637 // clears them). We might have to weaken it in the future,
2638 // but let's leave these two asserts here for extra safety.
2639 assert(chr->continuesHumongous(), "should still be the case");
2640 assert(chr->humongous_start_region() == r, "sanity");
2641 } else {
2642 guarantee(false, "we should not reach here");
2643 }
2644 }
2645 }
2647 assert(!r->continuesHumongous(), "sanity");
2648 bool res = cl->doHeapRegion(r);
2649 assert(!res, "Should not abort");
2650 }
2651 }
2652 }
2654 class ResetClaimValuesClosure: public HeapRegionClosure {
2655 public:
2656 bool doHeapRegion(HeapRegion* r) {
2657 r->set_claim_value(HeapRegion::InitialClaimValue);
2658 return false;
2659 }
2660 };
2662 void G1CollectedHeap::reset_heap_region_claim_values() {
2663 ResetClaimValuesClosure blk;
2664 heap_region_iterate(&blk);
2665 }
2667 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2668 ResetClaimValuesClosure blk;
2669 collection_set_iterate(&blk);
2670 }
2672 #ifdef ASSERT
2673 // This checks whether all regions in the heap have the correct claim
2674 // value. It also piggy-backs a check to ensure that the
2675 // humongous_start_region() information on "continues humongous"
2676 // regions is correct.
2678 class CheckClaimValuesClosure : public HeapRegionClosure {
2679 private:
2680 jint _claim_value;
2681 uint _failures;
2682 HeapRegion* _sh_region;
2684 public:
2685 CheckClaimValuesClosure(jint claim_value) :
2686 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2687 bool doHeapRegion(HeapRegion* r) {
2688 if (r->claim_value() != _claim_value) {
2689 gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2690 "claim value = %d, should be %d",
2691 HR_FORMAT_PARAMS(r),
2692 r->claim_value(), _claim_value);
2693 ++_failures;
2694 }
2695 if (!r->isHumongous()) {
2696 _sh_region = NULL;
2697 } else if (r->startsHumongous()) {
2698 _sh_region = r;
2699 } else if (r->continuesHumongous()) {
2700 if (r->humongous_start_region() != _sh_region) {
2701 gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2702 "HS = "PTR_FORMAT", should be "PTR_FORMAT,
2703 HR_FORMAT_PARAMS(r),
2704 r->humongous_start_region(),
2705 _sh_region);
2706 ++_failures;
2707 }
2708 }
2709 return false;
2710 }
2711 uint failures() { return _failures; }
2712 };
2714 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2715 CheckClaimValuesClosure cl(claim_value);
2716 heap_region_iterate(&cl);
2717 return cl.failures() == 0;
2718 }
2720 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2721 private:
2722 jint _claim_value;
2723 uint _failures;
2725 public:
2726 CheckClaimValuesInCSetHRClosure(jint claim_value) :
2727 _claim_value(claim_value), _failures(0) { }
2729 uint failures() { return _failures; }
2731 bool doHeapRegion(HeapRegion* hr) {
2732 assert(hr->in_collection_set(), "how?");
2733 assert(!hr->isHumongous(), "H-region in CSet");
2734 if (hr->claim_value() != _claim_value) {
2735 gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2736 "claim value = %d, should be %d",
2737 HR_FORMAT_PARAMS(hr),
2738 hr->claim_value(), _claim_value);
2739 _failures += 1;
2740 }
2741 return false;
2742 }
2743 };
2745 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2746 CheckClaimValuesInCSetHRClosure cl(claim_value);
2747 collection_set_iterate(&cl);
2748 return cl.failures() == 0;
2749 }
2750 #endif // ASSERT
2752 // Clear the cached CSet starting regions and (more importantly)
2753 // the time stamps. Called when we reset the GC time stamp.
2754 void G1CollectedHeap::clear_cset_start_regions() {
2755 assert(_worker_cset_start_region != NULL, "sanity");
2756 assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2758 int n_queues = MAX2((int)ParallelGCThreads, 1);
2759 for (int i = 0; i < n_queues; i++) {
2760 _worker_cset_start_region[i] = NULL;
2761 _worker_cset_start_region_time_stamp[i] = 0;
2762 }
2763 }
2765 // Given the id of a worker, obtain or calculate a suitable
2766 // starting region for iterating over the current collection set.
2767 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2768 assert(get_gc_time_stamp() > 0, "should have been updated by now");
2770 HeapRegion* result = NULL;
2771 unsigned gc_time_stamp = get_gc_time_stamp();
2773 if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2774 // Cached starting region for current worker was set
2775 // during the current pause - so it's valid.
2776 // Note: the cached starting heap region may be NULL
2777 // (when the collection set is empty).
2778 result = _worker_cset_start_region[worker_i];
2779 assert(result == NULL || result->in_collection_set(), "sanity");
2780 return result;
2781 }
2783 // The cached entry was not valid so let's calculate
2784 // a suitable starting heap region for this worker.
2786 // We want the parallel threads to start their collection
2787 // set iteration at different collection set regions to
2788 // avoid contention.
2789 // If we have:
2790 // n collection set regions
2791 // p threads
2792 // Then thread t will start at region floor ((t * n) / p)
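  // e.g., with n = 10 regions and p = 4 threads, threads 0..3 start
  // at regions 0, 2, 5 and 7 respectively.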
2794 result = g1_policy()->collection_set();
2795 if (G1CollectedHeap::use_parallel_gc_threads()) {
2796 uint cs_size = g1_policy()->cset_region_length();
2797 uint active_workers = workers()->active_workers();
2798 assert(UseDynamicNumberOfGCThreads ||
2799 active_workers == workers()->total_workers(),
2800 "Unless dynamic should use total workers");
2802 uint end_ind = (cs_size * worker_i) / active_workers;
2803 uint start_ind = 0;
2805 if (worker_i > 0 &&
2806 _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2807 // The previous worker's starting region is valid
2808 // so let's iterate from there
2809 start_ind = (cs_size * (worker_i - 1)) / active_workers;
2810 result = _worker_cset_start_region[worker_i - 1];
2811 }
2813 for (uint i = start_ind; i < end_ind; i++) {
2814 result = result->next_in_collection_set();
2815 }
2816 }
2818 // Note: the calculated starting heap region may be NULL
2819 // (when the collection set is empty).
2820 assert(result == NULL || result->in_collection_set(), "sanity");
2821 assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2822 "should be updated only once per pause");
2823 _worker_cset_start_region[worker_i] = result;
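  // Publish the cached starting region before the time stamp below so
  // that a reader which observes the updated time stamp also observes
  // the region it guards.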
2824 OrderAccess::storestore();
2825 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2826 return result;
2827 }
2829 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2830 HeapRegion* r = g1_policy()->collection_set();
2831 while (r != NULL) {
2832 HeapRegion* next = r->next_in_collection_set();
2833 if (cl->doHeapRegion(r)) {
2834 cl->incomplete();
2835 return;
2836 }
2837 r = next;
2838 }
2839 }
2841 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2842 HeapRegionClosure *cl) {
2843 if (r == NULL) {
2844 // The CSet is empty so there's nothing to do.
2845 return;
2846 }
2848 assert(r->in_collection_set(),
2849 "Start region must be a member of the collection set.");
2850 HeapRegion* cur = r;
2851 while (cur != NULL) {
2852 HeapRegion* next = cur->next_in_collection_set();
2853 if (cl->doHeapRegion(cur) && false) {
2854 cl->incomplete();
2855 return;
2856 }
2857 cur = next;
2858 }
2859 cur = g1_policy()->collection_set();
2860 while (cur != r) {
2861 HeapRegion* next = cur->next_in_collection_set();
2862 if (cl->doHeapRegion(cur) && false) {
2863 cl->incomplete();
2864 return;
2865 }
2866 cur = next;
2867 }
2868 }
2870 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
2871 return n_regions() > 0 ? region_at(0) : NULL;
2872 }
2875 Space* G1CollectedHeap::space_containing(const void* addr) const {
2876 Space* res = heap_region_containing(addr);
2877 if (res == NULL)
2878 res = perm_gen()->space_containing(addr);
2879 return res;
2880 }
2882 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2883 Space* sp = space_containing(addr);
2884 if (sp != NULL) {
2885 return sp->block_start(addr);
2886 }
2887 return NULL;
2888 }
2890 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2891 Space* sp = space_containing(addr);
2892 assert(sp != NULL, "block_size of address outside of heap");
2893 return sp->block_size(addr);
2894 }
2896 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2897 Space* sp = space_containing(addr);
2898 return sp->block_is_obj(addr);
2899 }
2901 bool G1CollectedHeap::supports_tlab_allocation() const {
2902 return true;
2903 }
2905 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2906 return HeapRegion::GrainBytes;
2907 }
2909 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2910 // Return the remaining space in the cur alloc region, but not less than
2911 // the min TLAB size.
2913 // Also, this value can be at most the humongous object threshold,
2914 // since we can't allow TLABs to grow big enough to accommodate
2915 // humongous objects.
2917 HeapRegion* hr = _mutator_alloc_region.get();
2918 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
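  // i.e., a TLAB can never be larger than half a heap region
  // (GrainBytes / 2).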
2919 if (hr == NULL) {
2920 return max_tlab_size;
2921 } else {
2922 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
2923 }
2924 }
2926 size_t G1CollectedHeap::max_capacity() const {
2927 return _g1_reserved.byte_size();
2928 }
2930 jlong G1CollectedHeap::millis_since_last_gc() {
2931 // assert(false, "NYI");
2932 return 0;
2933 }
2935 void G1CollectedHeap::prepare_for_verify() {
2936 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2937 ensure_parsability(false);
2938 }
2939 g1_rem_set()->prepare_for_verify();
2940 }
2942 class VerifyLivenessOopClosure: public OopClosure {
2943 G1CollectedHeap* _g1h;
2944 VerifyOption _vo;
2945 public:
2946 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
2947 _g1h(g1h), _vo(vo)
2948 { }
2949 void do_oop(narrowOop *p) { do_oop_work(p); }
2950 void do_oop( oop *p) { do_oop_work(p); }
2952 template <class T> void do_oop_work(T *p) {
2953 oop obj = oopDesc::load_decode_heap_oop(p);
2954 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
2955 "Dead object referenced by a not dead object");
2956 }
2957 };
2959 class VerifyObjsInRegionClosure: public ObjectClosure {
2960 private:
2961 G1CollectedHeap* _g1h;
2962 size_t _live_bytes;
2963 HeapRegion *_hr;
2964 VerifyOption _vo;
2965 public:
2966 // _vo == UsePrevMarking -> use "prev" marking information,
2967 // _vo == UseNextMarking -> use "next" marking information,
2968 // _vo == UseMarkWord -> use mark word from object header.
2969 VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
2970 : _live_bytes(0), _hr(hr), _vo(vo) {
2971 _g1h = G1CollectedHeap::heap();
2972 }
2973 void do_object(oop o) {
2974 VerifyLivenessOopClosure isLive(_g1h, _vo);
2975 assert(o != NULL, "Huh?");
2976 if (!_g1h->is_obj_dead_cond(o, _vo)) {
2977 // If the object is alive according to the mark word,
2978 // then verify that the marking information agrees.
2979 // Note we can't verify the contra-positive of the
2980 // above: if the object is dead (according to the mark
2981 // word), it may not be marked, or may have been marked
2982 // word), it may not be marked, or may have been marked
2983 // since the last marking.
2984 if (_vo == VerifyOption_G1UseMarkWord) {
2985 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
2986 }
2988 o->oop_iterate(&isLive);
2989 if (!_hr->obj_allocated_since_prev_marking(o)) {
2990 size_t obj_size = o->size(); // Make sure we don't overflow
2991 _live_bytes += (obj_size * HeapWordSize);
2992 }
2993 }
2994 }
2995 size_t live_bytes() { return _live_bytes; }
2996 };
2998 class PrintObjsInRegionClosure : public ObjectClosure {
2999 HeapRegion *_hr;
3000 G1CollectedHeap *_g1;
3001 public:
3002 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
3003 _g1 = G1CollectedHeap::heap();
3004 };
3006 void do_object(oop o) {
3007 if (o != NULL) {
3008 HeapWord *start = (HeapWord *) o;
3009 size_t word_sz = o->size();
3010 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
3011 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3012 (void*) o, word_sz,
3013 _g1->isMarkedPrev(o),
3014 _g1->isMarkedNext(o),
3015 _hr->obj_allocated_since_prev_marking(o));
3016 HeapWord *end = start + word_sz;
3017 HeapWord *cur;
3018 int *val;
3019 for (cur = start; cur < end; cur++) {
3020 val = (int *) cur;
3021 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
3022 }
3023 }
3024 }
3025 };
3027 class VerifyRegionClosure: public HeapRegionClosure {
3028 private:
3029 bool _par;
3030 VerifyOption _vo;
3031 bool _failures;
3032 public:
3033 // _vo == UsePrevMarking -> use "prev" marking information,
3034 // _vo == UseNextMarking -> use "next" marking information,
3035 // _vo == UseMarkWord -> use mark word from object header.
3036 VerifyRegionClosure(bool par, VerifyOption vo)
3037 : _par(par),
3038 _vo(vo),
3039 _failures(false) {}
3041 bool failures() {
3042 return _failures;
3043 }
3045 bool doHeapRegion(HeapRegion* r) {
3046 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
3047 "Should be unclaimed at verify points.");
3048 if (!r->continuesHumongous()) {
3049 bool failures = false;
3050 r->verify(_vo, &failures);
3051 if (failures) {
3052 _failures = true;
3053 } else {
3054 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3055 r->object_iterate(¬_dead_yet_cl);
3056 if (_vo != VerifyOption_G1UseNextMarking) {
3057 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3058 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3059 "max_live_bytes "SIZE_FORMAT" "
3060 "< calculated "SIZE_FORMAT,
3061 r->bottom(), r->end(),
3062 r->max_live_bytes(),
3063 not_dead_yet_cl.live_bytes());
3064 _failures = true;
3065 }
3066 } else {
3067 // When vo == UseNextMarking we cannot currently do a sanity
3068 // check on the live bytes as the calculation has not been
3069 // finalized yet.
3070 }
3071 }
3072 }
3073 return false; // keep iterating over the remaining regions even after a failure
3074 }
3075 };
3077 class VerifyRootsClosure: public OopsInGenClosure {
3078 private:
3079 G1CollectedHeap* _g1h;
3080 VerifyOption _vo;
3081 bool _failures;
3082 public:
3083 // _vo == UsePrevMarking -> use "prev" marking information,
3084 // _vo == UseNextMarking -> use "next" marking information,
3085 // _vo == UseMarkWord -> use mark word from object header.
3086 VerifyRootsClosure(VerifyOption vo) :
3087 _g1h(G1CollectedHeap::heap()),
3088 _vo(vo),
3089 _failures(false) { }
3091 bool failures() { return _failures; }
3093 template <class T> void do_oop_nv(T* p) {
3094 T heap_oop = oopDesc::load_heap_oop(p);
3095 if (!oopDesc::is_null(heap_oop)) {
3096 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3097 if (_g1h->is_obj_dead_cond(obj, _vo)) {
3098 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3099 "points to dead obj "PTR_FORMAT, p, (void*) obj);
3100 if (_vo == VerifyOption_G1UseMarkWord) {
3101 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3102 }
3103 obj->print_on(gclog_or_tty);
3104 _failures = true;
3105 }
3106 }
3107 }
3109 void do_oop(oop* p) { do_oop_nv(p); }
3110 void do_oop(narrowOop* p) { do_oop_nv(p); }
3111 };
3113 // This is the task used for parallel heap verification.
3115 class G1ParVerifyTask: public AbstractGangTask {
3116 private:
3117 G1CollectedHeap* _g1h;
3118 VerifyOption _vo;
3119 bool _failures;
3121 public:
3122 // _vo == UsePrevMarking -> use "prev" marking information,
3123 // _vo == UseNextMarking -> use "next" marking information,
3124 // _vo == UseMarkWord -> use mark word from object header.
3125 G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3126 AbstractGangTask("Parallel verify task"),
3127 _g1h(g1h),
3128 _vo(vo),
3129 _failures(false) { }
3131 bool failures() {
3132 return _failures;
3133 }
3135 void work(uint worker_id) {
3136 HandleMark hm;
3137 VerifyRegionClosure blk(true, _vo);
3138 _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3139 _g1h->workers()->active_workers(),
3140 HeapRegion::ParVerifyClaimValue);
3141 if (blk.failures()) {
3142 _failures = true;
3143 }
3144 }
3145 };
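// Parallel verification, in outline: all regions start with the
// InitialClaimValue claim value; each worker running the task above claims
// regions with ParVerifyClaimValue via heap_region_par_iterate_chunked(),
// so every region is verified by exactly one worker; verify() below then
// sanity-checks the claim values and resets them to InitialClaimValue.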
3147 void G1CollectedHeap::verify(bool silent) {
3148 verify(silent, VerifyOption_G1UsePrevMarking);
3149 }
3151 void G1CollectedHeap::verify(bool silent,
3152 VerifyOption vo) {
3153 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
3154 if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
3155 VerifyRootsClosure rootsCl(vo);
3157 assert(Thread::current()->is_VM_thread(),
3158 "Expected to be executed serially by the VM thread at this point");
3160 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3162 // We apply the relevant closures to all the oops in the
3163 // system dictionary, the string table and the code cache.
3164 const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3166 process_strong_roots(true, // activate StrongRootsScope
3167 true, // we set "collecting perm gen" to true,
3168 // so we don't reset the dirty cards in the perm gen.
3169 ScanningOption(so), // roots scanning options
3170 &rootsCl,
3171 &blobsCl,
3172 &rootsCl);
3174 // If we're verifying after the marking phase of a Full GC then we can't
3175 // treat the perm gen as roots into the G1 heap. Some of the objects in
3176 // the perm gen may be dead and hence not marked. If one of these dead
3177 // objects is considered to be a root then we may end up with a false
3178 // "Root location <x> points to dead ob <y>" failure.
3179 if (vo != VerifyOption_G1UseMarkWord) {
3180 // Since we used "collecting_perm_gen" == true above, we will not have
3181 // checked the refs from perm into the G1-collected heap. We check those
3182 // references explicitly below. Whether the relevant cards are dirty
3183 // is checked further below in the rem set verification.
3184 if (!silent) { gclog_or_tty->print("Permgen roots "); }
3185 perm_gen()->oop_iterate(&rootsCl);
3186 }
3187 bool failures = rootsCl.failures();
3189 if (vo != VerifyOption_G1UseMarkWord) {
3190 // If we're verifying during a full GC then the region sets
3191 // will have been torn down at the start of the GC. Therefore
3192 // verifying the region sets will fail. So we only verify
3193 // the region sets when not in a full GC.
3194 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3195 verify_region_sets();
3196 }
3198 if (!silent) { gclog_or_tty->print("HeapRegions "); }
3199 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3200 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3201 "sanity check");
3203 G1ParVerifyTask task(this, vo);
3204 assert(UseDynamicNumberOfGCThreads ||
3205 workers()->active_workers() == workers()->total_workers(),
3206 "If not dynamic should be using all the workers");
3207 int n_workers = workers()->active_workers();
3208 set_par_threads(n_workers);
3209 workers()->run_task(&task);
3210 set_par_threads(0);
3211 if (task.failures()) {
3212 failures = true;
3213 }
3215 // Checks that the expected amount of parallel work was done.
3216 // The implication is that n_workers is > 0.
3217 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3218 "sanity check");
3220 reset_heap_region_claim_values();
3222 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3223 "sanity check");
3224 } else {
3225 VerifyRegionClosure blk(false, vo);
3226 heap_region_iterate(&blk);
3227 if (blk.failures()) {
3228 failures = true;
3229 }
3230 }
3231 if (!silent) gclog_or_tty->print("RemSet ");
3232 rem_set()->verify();
3234 if (failures) {
3235 gclog_or_tty->print_cr("Heap:");
3236 // It helps to have the per-region information in the output to
3237 // help us track down what went wrong. This is why we call
3238 // print_extended_on() instead of print_on().
3239 print_extended_on(gclog_or_tty);
3240 gclog_or_tty->print_cr("");
3241 #ifndef PRODUCT
3242 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3243 concurrent_mark()->print_reachable("at-verification-failure",
3244 vo, false /* all */);
3245 }
3246 #endif
3247 gclog_or_tty->flush();
3248 }
3249 guarantee(!failures, "there should not have been any failures");
3250 } else {
3251 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
3252 }
3253 }
3255 class PrintRegionClosure: public HeapRegionClosure {
3256 outputStream* _st;
3257 public:
3258 PrintRegionClosure(outputStream* st) : _st(st) {}
3259 bool doHeapRegion(HeapRegion* r) {
3260 r->print_on(_st);
3261 return false;
3262 }
3263 };
3265 void G1CollectedHeap::print_on(outputStream* st) const {
3266 st->print(" %-20s", "garbage-first heap");
3267 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3268 capacity()/K, used_unlocked()/K);
3269 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3270 _g1_storage.low_boundary(),
3271 _g1_storage.high(),
3272 _g1_storage.high_boundary());
3273 st->cr();
3274 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3275 uint young_regions = _young_list->length();
3276 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3277 (size_t) young_regions * HeapRegion::GrainBytes / K);
3278 uint survivor_regions = g1_policy()->recorded_survivor_regions();
3279 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3280 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3281 st->cr();
3282 perm()->as_gen()->print_on(st);
3283 }
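// For illustration only, print_on() above produces output shaped roughly
// like the following (all numbers and addresses here are made-up values):
//
//   garbage-first heap   total 524288K, used 121034K [0x..., 0x..., 0x...)
//    region size 1024K, 57 young (58368K), 6 survivors (6144K)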
3285 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3286 print_on(st);
3288 // Print the per-region information.
3289 st->cr();
3290 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3291 "HS=humongous(starts), HC=humongous(continues), "
3292 "CS=collection set, F=free, TS=gc time stamp, "
3293 "PTAMS=previous top-at-mark-start, "
3294 "NTAMS=next top-at-mark-start)");
3295 PrintRegionClosure blk(st);
3296 heap_region_iterate(&blk);
3297 }
3299 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3300 if (G1CollectedHeap::use_parallel_gc_threads()) {
3301 workers()->print_worker_threads_on(st);
3302 }
3303 _cmThread->print_on(st);
3304 st->cr();
3305 _cm->print_worker_threads_on(st);
3306 _cg1r->print_worker_threads_on(st);
3307 st->cr();
3308 }
3310 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3311 if (G1CollectedHeap::use_parallel_gc_threads()) {
3312 workers()->threads_do(tc);
3313 }
3314 tc->do_thread(_cmThread);
3315 _cg1r->threads_do(tc);
3316 }
3318 void G1CollectedHeap::print_tracing_info() const {
3319 // We'll overload this to mean "trace GC pause statistics."
3320 if (TraceGen0Time || TraceGen1Time) {
3321 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3322 // to that.
3323 g1_policy()->print_tracing_info();
3324 }
3325 if (G1SummarizeRSetStats) {
3326 g1_rem_set()->print_summary_info();
3327 }
3328 if (G1SummarizeConcMark) {
3329 concurrent_mark()->print_summary_info();
3330 }
3331 g1_policy()->print_yg_surv_rate_info();
3332 SpecializationStats::print();
3333 }
3335 #ifndef PRODUCT
3336 // Helpful for debugging RSet issues.
3338 class PrintRSetsClosure : public HeapRegionClosure {
3339 private:
3340 const char* _msg;
3341 size_t _occupied_sum;
3343 public:
3344 bool doHeapRegion(HeapRegion* r) {
3345 HeapRegionRemSet* hrrs = r->rem_set();
3346 size_t occupied = hrrs->occupied();
3347 _occupied_sum += occupied;
3349 gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
3350 HR_FORMAT_PARAMS(r));
3351 if (occupied == 0) {
3352 gclog_or_tty->print_cr(" RSet is empty");
3353 } else {
3354 hrrs->print();
3355 }
3356 gclog_or_tty->print_cr("----------");
3357 return false;
3358 }
3360 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3361 gclog_or_tty->cr();
3362 gclog_or_tty->print_cr("========================================");
3363 gclog_or_tty->print_cr(msg);
3364 gclog_or_tty->cr();
3365 }
3367 ~PrintRSetsClosure() {
3368 gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3369 gclog_or_tty->print_cr("========================================");
3370 gclog_or_tty->cr();
3371 }
3372 };
3374 void G1CollectedHeap::print_cset_rsets() {
3375 PrintRSetsClosure cl("Printing CSet RSets");
3376 collection_set_iterate(&cl);
3377 }
3379 void G1CollectedHeap::print_all_rsets() {
3380 PrintRSetsClosure cl("Printing All RSets");
3381 heap_region_iterate(&cl);
3382 }
3383 #endif // PRODUCT
3385 G1CollectedHeap* G1CollectedHeap::heap() {
3386 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3387 "not a garbage-first heap");
3388 return _g1h;
3389 }
3391 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3392 // always_do_update_barrier = false;
3393 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3394 // Call allocation profiler
3395 AllocationProfiler::iterate_since_last_gc();
3396 // Fill TLAB's and such
3397 ensure_parsability(true);
3398 }
3400 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3401 // FIXME: what is this about?
3402 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3403 // is set.
3404 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3405 "derived pointer present"));
3406 // always_do_update_barrier = true;
3408 // We have just completed a GC. Update the soft reference
3409 // policy with the new heap occupancy
3410 Universe::update_heap_info_at_gc();
3411 }
3413 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3414 unsigned int gc_count_before,
3415 bool* succeeded) {
3416 assert_heap_not_locked_and_not_at_safepoint();
3417 g1_policy()->record_stop_world_start();
3418 VM_G1IncCollectionPause op(gc_count_before,
3419 word_size,
3420 false, /* should_initiate_conc_mark */
3421 g1_policy()->max_pause_time_ms(),
3422 GCCause::_g1_inc_collection_pause);
3423 VMThread::execute(&op);
3425 HeapWord* result = op.result();
3426 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3427 assert(result == NULL || ret_succeeded,
3428 "the result should be NULL if the VM did not succeed");
3429 *succeeded = ret_succeeded;
3431 assert_heap_not_locked();
3432 return result;
3433 }
3435 void
3436 G1CollectedHeap::doConcurrentMark() {
3437 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3438 if (!_cmThread->in_progress()) {
3439 _cmThread->set_started();
3440 CGC_lock->notify();
3441 }
3442 }
3444 size_t G1CollectedHeap::pending_card_num() {
3445 size_t extra_cards = 0;
3446 JavaThread *curr = Threads::first();
3447 while (curr != NULL) {
3448 DirtyCardQueue& dcq = curr->dirty_card_queue();
3449 extra_cards += dcq.size();
3450 curr = curr->next();
3451 }
3452 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3453 size_t buffer_size = dcqs.buffer_size();
3454 size_t buffer_num = dcqs.completed_buffers_num();
3455 return buffer_size * buffer_num + extra_cards;
3456 }
3458 size_t G1CollectedHeap::max_pending_card_num() {
3459 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3460 size_t buffer_size = dcqs.buffer_size();
3461 size_t buffer_num = dcqs.completed_buffers_num();
3462 int thread_num = Threads::number_of_threads();
3463 return (buffer_num + thread_num) * buffer_size;
3464 }
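// In other words, the two estimates above amount to (sketch, illustrative
// notation only):
//
//   pending_card_num     ~= buffer_size * completed_buffers_num
//                             + sum of dcq.size() over all Java threads
//   max_pending_card_num ~= (completed_buffers_num + number_of_threads)
//                             * buffer_size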
3466 size_t G1CollectedHeap::cards_scanned() {
3467 return g1_rem_set()->cardsScanned();
3468 }
3470 void
3471 G1CollectedHeap::setup_surviving_young_words() {
3472 assert(_surviving_young_words == NULL, "pre-condition");
3473 uint array_length = g1_policy()->young_cset_region_length();
3474 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
3475 if (_surviving_young_words == NULL) {
3476 vm_exit_out_of_memory(sizeof(size_t) * array_length,
3477 "Not enough space for young surv words summary.");
3478 }
3479 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3480 #ifdef ASSERT
3481 for (uint i = 0; i < array_length; ++i) {
3482 assert( _surviving_young_words[i] == 0, "memset above" );
3483 }
3484 #endif // !ASSERT
3485 }
3487 void
3488 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3489 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3490 uint array_length = g1_policy()->young_cset_region_length();
3491 for (uint i = 0; i < array_length; ++i) {
3492 _surviving_young_words[i] += surv_young_words[i];
3493 }
3494 }
3496 void
3497 G1CollectedHeap::cleanup_surviving_young_words() {
3498 guarantee( _surviving_young_words != NULL, "pre-condition" );
3499 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
3500 _surviving_young_words = NULL;
3501 }
3503 #ifdef ASSERT
3504 class VerifyCSetClosure: public HeapRegionClosure {
3505 public:
3506 bool doHeapRegion(HeapRegion* hr) {
3507 // Here we check that the CSet region's RSet is ready for parallel
3508 // iteration. The fields that we'll verify are only manipulated
3509 // when the region is part of a CSet and is collected. Afterwards,
3510 // we reset these fields when we clear the region's RSet (when the
3511 // region is freed) so they are ready when the region is
3512 // re-allocated. The only exception to this is if there's an
3513 // evacuation failure and instead of freeing the region we leave
3514 // it in the heap. In that case, we reset these fields during
3515 // evacuation failure handling.
3516 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3518 // Here's a good place to add any other checks we'd like to
3519 // perform on CSet regions.
3520 return false;
3521 }
3522 };
3523 #endif // ASSERT
3525 #if TASKQUEUE_STATS
3526 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3527 st->print_raw_cr("GC Task Stats");
3528 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3529 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3530 }
3532 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3533 print_taskqueue_stats_hdr(st);
3535 TaskQueueStats totals;
3536 const int n = workers() != NULL ? workers()->total_workers() : 1;
3537 for (int i = 0; i < n; ++i) {
3538 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3539 totals += task_queue(i)->stats;
3540 }
3541 st->print_raw("tot "); totals.print(st); st->cr();
3543 DEBUG_ONLY(totals.verify());
3544 }
3546 void G1CollectedHeap::reset_taskqueue_stats() {
3547 const int n = workers() != NULL ? workers()->total_workers() : 1;
3548 for (int i = 0; i < n; ++i) {
3549 task_queue(i)->stats.reset();
3550 }
3551 }
3552 #endif // TASKQUEUE_STATS
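// In outline, do_collection_pause_at_safepoint() below: decides whether
// this pause should also be an initial-mark pause, waits for any
// in-progress concurrent root region scanning, finalizes the collection
// set, evacuates it, frees the CSet regions, starts building the next
// incremental CSet, possibly expands the heap and, once the logging scope
// has been closed, signals the concurrent mark thread if this was an
// initial-mark pause.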
3554 bool
3555 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3556 assert_at_safepoint(true /* should_be_vm_thread */);
3557 guarantee(!is_gc_active(), "collection is not reentrant");
3559 if (GC_locker::check_active_before_gc()) {
3560 return false;
3561 }
3563 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3564 ResourceMark rm;
3566 print_heap_before_gc();
3568 HRSPhaseSetter x(HRSPhaseEvacuation);
3569 verify_region_sets_optional();
3570 verify_dirty_young_regions();
3572 // This call will decide whether this pause is an initial-mark
3573 // pause. If it is, during_initial_mark_pause() will return true
3574 // for the duration of this pause.
3575 g1_policy()->decide_on_conc_mark_initiation();
3577 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3578 assert(!g1_policy()->during_initial_mark_pause() ||
3579 g1_policy()->gcs_are_young(), "sanity");
3581 // We also do not allow mixed GCs during marking.
3582 assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3584 // Record whether this pause is an initial mark. By the time the current
3585 // thread has completed its logging output and it is safe to signal
3586 // the CM thread, the flag's value in the policy will have been reset.
3587 bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3589 // Inner scope for scope based logging, timers, and stats collection
3590 {
3591 char verbose_str[128];
3592 sprintf(verbose_str, "GC pause ");
3593 if (g1_policy()->gcs_are_young()) {
3594 strcat(verbose_str, "(young)");
3595 } else {
3596 strcat(verbose_str, "(mixed)");
3597 }
3598 if (g1_policy()->during_initial_mark_pause()) {
3599 strcat(verbose_str, " (initial-mark)");
3600 // We are about to start a marking cycle, so we increment the
3601 // full collection counter.
3602 increment_total_full_collections();
3603 }
3605 // If the "finer" log level is on, we'll print long statistics information
3606 // in the collector policy code, so let's not print this as the output
3607 // is messy if we do.
3608 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
3609 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3610 TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
3612 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3613 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3615 // If the secondary_free_list is not empty, append it to the
3616 // free_list. No need to wait for the cleanup operation to finish;
3617 // the region allocation code will check the secondary_free_list
3618 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3619 // set, skip this step so that the region allocation code has to
3620 // get entries from the secondary_free_list.
3621 if (!G1StressConcRegionFreeing) {
3622 append_secondary_free_list_if_not_empty_with_lock();
3623 }
3625 assert(check_young_list_well_formed(),
3626 "young list should be well formed");
3628 // Don't dynamically change the number of GC threads this early. A value of
3629 // 0 is used to indicate serial work. When parallel work is done,
3630 // it will be set.
3632 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3633 IsGCActiveMark x;
3635 gc_prologue(false);
3636 increment_total_collections(false /* full gc */);
3637 increment_gc_time_stamp();
3639 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3640 HandleMark hm; // Discard invalid handles created during verification
3641 gclog_or_tty->print(" VerifyBeforeGC:");
3642 prepare_for_verify();
3643 Universe::verify(/* silent */ false,
3644 /* option */ VerifyOption_G1UsePrevMarking);
3645 }
3647 COMPILER2_PRESENT(DerivedPointerTable::clear());
3649 // Please see comment in g1CollectedHeap.hpp and
3650 // G1CollectedHeap::ref_processing_init() to see how
3651 // reference processing currently works in G1.
3653 // Enable discovery in the STW reference processor
3654 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3655 true /*verify_no_refs*/);
3657 {
3658 // We want to temporarily turn off discovery by the
3659 // CM ref processor, if necessary, and turn it back on
3660 // on again later if we do. Using a scoped
3661 // NoRefDiscovery object will do this.
3662 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3664 // Forget the current alloc region (we might even choose it to be part
3665 // of the collection set!).
3666 release_mutator_alloc_region();
3668 // We should call this after we retire the mutator alloc
3669 // region(s) so that all the ALLOC / RETIRE events are generated
3670 // before the start GC event.
3671 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3673 // The start time below is deliberately recorded after the (possible)
3674 // verification above, so that verification time is excluded from the pause time.
3675 double start_time_sec = os::elapsedTime();
3676 size_t start_used_bytes = used();
3678 #if YOUNG_LIST_VERBOSE
3679 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3680 _young_list->print();
3681 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3682 #endif // YOUNG_LIST_VERBOSE
3684 g1_policy()->record_collection_pause_start(start_time_sec,
3685 start_used_bytes);
3687 double scan_wait_start = os::elapsedTime();
3688 // We have to wait until the CM threads finish scanning the
3689 // root regions as it's the only way to ensure that all the
3690 // objects on them have been correctly scanned before we start
3691 // moving them during the GC.
3692 bool waited = _cm->root_regions()->wait_until_scan_finished();
3693 if (waited) {
3694 double scan_wait_end = os::elapsedTime();
3695 double wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3696 g1_policy()->record_root_region_scan_wait_time(wait_time_ms);
3697 }
3699 #if YOUNG_LIST_VERBOSE
3700 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3701 _young_list->print();
3702 #endif // YOUNG_LIST_VERBOSE
3704 if (g1_policy()->during_initial_mark_pause()) {
3705 concurrent_mark()->checkpointRootsInitialPre();
3706 }
3707 perm_gen()->save_marks();
3709 #if YOUNG_LIST_VERBOSE
3710 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3711 _young_list->print();
3712 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3713 #endif // YOUNG_LIST_VERBOSE
3715 g1_policy()->finalize_cset(target_pause_time_ms);
3717 _cm->note_start_of_gc();
3718 // We should not verify the per-thread SATB buffers given that
3719 // we have not filtered them yet (we'll do so during the
3720 // GC). We also call this after finalize_cset() to
3721 // ensure that the CSet has been finalized.
3722 _cm->verify_no_cset_oops(true /* verify_stacks */,
3723 true /* verify_enqueued_buffers */,
3724 false /* verify_thread_buffers */,
3725 true /* verify_fingers */);
3727 if (_hr_printer.is_active()) {
3728 HeapRegion* hr = g1_policy()->collection_set();
3729 while (hr != NULL) {
3730 G1HRPrinter::RegionType type;
3731 if (!hr->is_young()) {
3732 type = G1HRPrinter::Old;
3733 } else if (hr->is_survivor()) {
3734 type = G1HRPrinter::Survivor;
3735 } else {
3736 type = G1HRPrinter::Eden;
3737 }
3738 _hr_printer.cset(hr);
3739 hr = hr->next_in_collection_set();
3740 }
3741 }
3743 #ifdef ASSERT
3744 VerifyCSetClosure cl;
3745 collection_set_iterate(&cl);
3746 #endif // ASSERT
3748 setup_surviving_young_words();
3750 // Initialize the GC alloc regions.
3751 init_gc_alloc_regions();
3753 // Actually do the work...
3754 evacuate_collection_set();
3756 // We do this to mainly verify the per-thread SATB buffers
3757 // (which have been filtered by now) since we didn't verify
3758 // them earlier. No point in re-checking the stacks / enqueued
3759 // buffers given that the CSet has not changed since last time
3760 // we checked.
3761 _cm->verify_no_cset_oops(false /* verify_stacks */,
3762 false /* verify_enqueued_buffers */,
3763 true /* verify_thread_buffers */,
3764 true /* verify_fingers */);
3766 free_collection_set(g1_policy()->collection_set());
3767 g1_policy()->clear_collection_set();
3769 cleanup_surviving_young_words();
3771 // Start a new incremental collection set for the next pause.
3772 g1_policy()->start_incremental_cset_building();
3774 // Clear the _cset_fast_test bitmap in anticipation of adding
3775 // regions to the incremental collection set for the next
3776 // evacuation pause.
3777 clear_cset_fast_test();
3779 _young_list->reset_sampled_info();
3781 // Don't check the whole heap at this point as the
3782 // GC alloc regions from this pause have been tagged
3783 // as survivors and moved on to the survivor list.
3784 // Survivor regions will fail the !is_young() check.
3785 assert(check_young_list_empty(false /* check_heap */),
3786 "young list should be empty");
3788 #if YOUNG_LIST_VERBOSE
3789 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3790 _young_list->print();
3791 #endif // YOUNG_LIST_VERBOSE
3793 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3794 _young_list->first_survivor_region(),
3795 _young_list->last_survivor_region());
3797 _young_list->reset_auxilary_lists();
3799 if (evacuation_failed()) {
3800 _summary_bytes_used = recalculate_used();
3801 } else {
3802 // The "used" of the collection set regions has already been subtracted
3803 // when they were freed. Add in the bytes evacuated.
3804 _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3805 }
3807 if (g1_policy()->during_initial_mark_pause()) {
3808 // We have to do this before we notify the CM threads that
3809 // they can start working to make sure that all the
3810 // appropriate initialization is done on the CM object.
3811 concurrent_mark()->checkpointRootsInitialPost();
3812 set_marking_started();
3813 // Note that we don't actually trigger the CM thread at
3814 // this point. We do that later when we're sure that
3815 // the current thread has completed its logging output.
3816 }
3818 allocate_dummy_regions();
3820 #if YOUNG_LIST_VERBOSE
3821 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3822 _young_list->print();
3823 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3824 #endif // YOUNG_LIST_VERBOSE
3826 init_mutator_alloc_region();
3828 {
3829 size_t expand_bytes = g1_policy()->expansion_amount();
3830 if (expand_bytes > 0) {
3831 size_t bytes_before = capacity();
3832 // No need for an ergo verbose message here,
3833 // expansion_amount() does this when it returns a value > 0.
3834 if (!expand(expand_bytes)) {
3835 // We failed to expand the heap so let's verify that
3836 // committed/uncommitted amount match the backing store
3837 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3838 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3839 }
3840 }
3841 }
3843 // We redo the verification, but now with respect to the new CSet, which
3844 // has just got initialized after the previous CSet was freed.
3845 _cm->verify_no_cset_oops(true /* verify_stacks */,
3846 true /* verify_enqueued_buffers */,
3847 true /* verify_thread_buffers */,
3848 true /* verify_fingers */);
3849 _cm->note_end_of_gc();
3851 double end_time_sec = os::elapsedTime();
3852 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
3853 g1_policy()->record_pause_time_ms(pause_time_ms);
3854 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3855 workers()->active_workers() : 1);
3856 g1_policy()->record_collection_pause_end(active_workers);
3858 MemoryService::track_memory_usage();
3860 // In prepare_for_verify() below we'll need to scan the deferred
3861 // update buffers to bring the RSets up-to-date if
3862 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3863 // the update buffers we'll probably need to scan cards on the
3864 // regions we just allocated to (i.e., the GC alloc
3865 // regions). However, during the last GC we called
3866 // set_saved_mark() on all the GC alloc regions, so card
3867 // scanning might skip the [saved_mark_word()...top()] area of
3868 // those regions (i.e., the area we allocated objects into
3869 // during the last GC). But it shouldn't. Given that
3870 // saved_mark_word() is conditional on whether the GC time stamp
3871 // on the region is current or not, by incrementing the GC time
3872 // stamp here we invalidate all the GC time stamps on all the
3873 // regions and saved_mark_word() will simply return top() for
3874 // all the regions. This is a nicer way of ensuring this rather
3875 // than iterating over the regions and fixing them. In fact, the
3876 // GC time stamp increment here also ensures that
3877 // saved_mark_word() will return top() between pauses, i.e.,
3878 // during concurrent refinement. So we don't need the
3879 // is_gc_active() check to decide which top to use when
3880 // scanning cards (see CR 7039627).
3881 increment_gc_time_stamp();
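// For reference, the "conditional" behaviour of saved_mark_word()
// mentioned above is roughly the following (sketch only, not the actual
// implementation; names are illustrative):
//
//   HeapWord* saved_mark_word() {
//     return (_gc_time_stamp < g1h->get_gc_time_stamp()) ? top()
//                                                        : _saved_mark_word;
//   }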
3883 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
3884 HandleMark hm; // Discard invalid handles created during verification
3885 gclog_or_tty->print(" VerifyAfterGC:");
3886 prepare_for_verify();
3887 Universe::verify(/* silent */ false,
3888 /* option */ VerifyOption_G1UsePrevMarking);
3889 }
3891 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3892 ref_processor_stw()->verify_no_references_recorded();
3894 // CM reference discovery will be re-enabled if necessary.
3895 }
3897 // We should do this after we potentially expand the heap so
3898 // that all the COMMIT events are generated before the end GC
3899 // event, and after we retire the GC alloc regions so that all
3900 // RETIRE events are generated before the end GC event.
3901 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
3903 // We have to do this after we decide whether to expand the heap or not.
3904 g1_policy()->print_heap_transition();
3906 if (mark_in_progress()) {
3907 concurrent_mark()->update_g1_committed();
3908 }
3910 #ifdef TRACESPINNING
3911 ParallelTaskTerminator::print_termination_counts();
3912 #endif
3914 gc_epilogue(false);
3915 }
3917 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
3918 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
3919 print_tracing_info();
3920 vm_exit(-1);
3921 }
3922 }
3924 // The closing of the inner scope, immediately above, will complete
3925 // logging at the "fine" level. The record_collection_pause_end() call
3926 // above will complete logging at the "finer" level.
3927 //
3928 // It is not yet safe, however, to tell the concurrent mark to
3929 // start as we have some optional output below. We don't want the
3930 // output from the concurrent mark thread interfering with this
3931 // logging output either.
3933 _hrs.verify_optional();
3934 verify_region_sets_optional();
3936 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
3937 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3939 print_heap_after_gc();
3940 g1mm()->update_sizes();
3942 if (G1SummarizeRSetStats &&
3943 (G1SummarizeRSetStatsPeriod > 0) &&
3944 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3945 g1_rem_set()->print_summary_info();
3946 }
3948 // It should now be safe to tell the concurrent mark thread to start
3949 // without its logging output interfering with the logging output
3950 // that came from the pause.
3952 if (should_start_conc_mark) {
3953 // CAUTION: after the doConcurrentMark() call below,
3954 // the concurrent marking thread(s) could be running
3955 // concurrently with us. Make sure that anything after
3956 // this point does not assume that we are the only GC thread
3957 // running. Note: of course, the actual marking work will
3958 // not start until the safepoint itself is released in
3959 // ConcurrentGCThread::safepoint_desynchronize().
3960 doConcurrentMark();
3961 }
3963 return true;
3964 }
3966 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
3967 {
3968 size_t gclab_word_size;
3969 switch (purpose) {
3970 case GCAllocForSurvived:
3971 gclab_word_size = YoungPLABSize;
3972 break;
3973 case GCAllocForTenured:
3974 gclab_word_size = OldPLABSize;
3975 break;
3976 default:
3977 assert(false, "unknown GCAllocPurpose");
3978 gclab_word_size = OldPLABSize;
3979 break;
3980 }
3981 return gclab_word_size;
3982 }
3984 void G1CollectedHeap::init_mutator_alloc_region() {
3985 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
3986 _mutator_alloc_region.init();
3987 }
3989 void G1CollectedHeap::release_mutator_alloc_region() {
3990 _mutator_alloc_region.release();
3991 assert(_mutator_alloc_region.get() == NULL, "post-condition");
3992 }
3994 void G1CollectedHeap::init_gc_alloc_regions() {
3995 assert_at_safepoint(true /* should_be_vm_thread */);
3997 _survivor_gc_alloc_region.init();
3998 _old_gc_alloc_region.init();
3999 HeapRegion* retained_region = _retained_old_gc_alloc_region;
4000 _retained_old_gc_alloc_region = NULL;
4002 // We will discard the current GC alloc region if:
4003 // a) it's in the collection set (it can happen!),
4004 // b) it's already full (no point in using it),
4005 // c) it's empty (this means that it was emptied during
4006 // a cleanup and it should be on the free list now), or
4007 // d) it's humongous (this means that it was emptied
4008 // during a cleanup and was added to the free list, but
4009 // has been subsequently used to allocate a humongous
4010 // object that may be less than the region size).
4011 if (retained_region != NULL &&
4012 !retained_region->in_collection_set() &&
4013 !(retained_region->top() == retained_region->end()) &&
4014 !retained_region->is_empty() &&
4015 !retained_region->isHumongous()) {
4016 retained_region->set_saved_mark();
4017 // The retained region was added to the old region set when it was
4018 // retired. We have to remove it now, since we don't allow regions
4019 // we allocate to in the region sets. We'll re-add it later, when
4020 // it's retired again.
4021 _old_set.remove(retained_region);
4022 bool during_im = g1_policy()->during_initial_mark_pause();
4023 retained_region->note_start_of_copying(during_im);
4024 _old_gc_alloc_region.set(retained_region);
4025 _hr_printer.reuse(retained_region);
4026 }
4027 }
4029 void G1CollectedHeap::release_gc_alloc_regions() {
4030 _survivor_gc_alloc_region.release();
4031 // If we have an old GC alloc region to release, we'll save it in
4032 // _retained_old_gc_alloc_region. If we don't,
4033 // _retained_old_gc_alloc_region will become NULL. This is what we
4034 // want either way so no reason to check explicitly for either
4035 // condition.
4036 _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4037 }
4039 void G1CollectedHeap::abandon_gc_alloc_regions() {
4040 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4041 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4042 _retained_old_gc_alloc_region = NULL;
4043 }
4045 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4046 _drain_in_progress = false;
4047 set_evac_failure_closure(cl);
4048 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
4049 }
4051 void G1CollectedHeap::finalize_for_evac_failure() {
4052 assert(_evac_failure_scan_stack != NULL &&
4053 _evac_failure_scan_stack->length() == 0,
4054 "Postcondition");
4055 assert(!_drain_in_progress, "Postcondition");
4056 delete _evac_failure_scan_stack;
4057 _evac_failure_scan_stack = NULL;
4058 }
4060 void G1CollectedHeap::remove_self_forwarding_pointers() {
4061 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4063 G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4065 if (G1CollectedHeap::use_parallel_gc_threads()) {
4066 set_par_threads();
4067 workers()->run_task(&rsfp_task);
4068 set_par_threads(0);
4069 } else {
4070 rsfp_task.work(0);
4071 }
4073 assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4075 // Reset the claim values in the regions in the collection set.
4076 reset_cset_heap_region_claim_values();
4078 assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4080 // Now restore saved marks, if any.
4081 if (_objs_with_preserved_marks != NULL) {
4082 assert(_preserved_marks_of_objs != NULL, "Both or none.");
4083 guarantee(_objs_with_preserved_marks->length() ==
4084 _preserved_marks_of_objs->length(), "Both or none.");
4085 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
4086 oop obj = _objs_with_preserved_marks->at(i);
4087 markOop m = _preserved_marks_of_objs->at(i);
4088 obj->set_mark(m);
4089 }
4091 // Delete the preserved marks growable arrays (allocated on the C heap).
4092 delete _objs_with_preserved_marks;
4093 delete _preserved_marks_of_objs;
4094 _objs_with_preserved_marks = NULL;
4095 _preserved_marks_of_objs = NULL;
4096 }
4097 }
4099 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4100 _evac_failure_scan_stack->push(obj);
4101 }
4103 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4104 assert(_evac_failure_scan_stack != NULL, "precondition");
4106 while (_evac_failure_scan_stack->length() > 0) {
4107 oop obj = _evac_failure_scan_stack->pop();
4108 _evac_failure_closure->set_region(heap_region_containing(obj));
4109 obj->oop_iterate_backwards(_evac_failure_closure);
4110 }
4111 }
4113 oop
4114 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4115 oop old) {
4116 assert(obj_in_cs(old),
4117 err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4118 (HeapWord*) old));
4119 markOop m = old->mark();
4120 oop forward_ptr = old->forward_to_atomic(old);
4121 if (forward_ptr == NULL) {
4122 // Forward-to-self succeeded.
4124 if (_evac_failure_closure != cl) {
4125 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4126 assert(!_drain_in_progress,
4127 "Should only be true while someone holds the lock.");
4128 // Set the global evac-failure closure to the current thread's.
4129 assert(_evac_failure_closure == NULL, "Or locking has failed.");
4130 set_evac_failure_closure(cl);
4131 // Now do the common part.
4132 handle_evacuation_failure_common(old, m);
4133 // Reset to NULL.
4134 set_evac_failure_closure(NULL);
4135 } else {
4136 // The lock is already held, and this is recursive.
4137 assert(_drain_in_progress, "This should only be the recursive case.");
4138 handle_evacuation_failure_common(old, m);
4139 }
4140 return old;
4141 } else {
4142 // Forward-to-self failed. Either someone else managed to allocate
4143 // space for this object (old != forward_ptr) or they beat us in
4144 // self-forwarding it (old == forward_ptr).
4145 assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4146 err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4147 "should not be in the CSet",
4148 (HeapWord*) old, (HeapWord*) forward_ptr));
4149 return forward_ptr;
4150 }
4151 }
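// A note on forward_to_atomic(), as used above: roughly, it attempts to
// atomically install a forwarding pointer to its argument in old's mark
// word and returns NULL on success, or the forwardee that some other
// thread already installed on failure. That is why forward_ptr == NULL
// above means that forwarding the object to itself succeeded.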
4153 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4154 set_evacuation_failed(true);
4156 preserve_mark_if_necessary(old, m);
4158 HeapRegion* r = heap_region_containing(old);
4159 if (!r->evacuation_failed()) {
4160 r->set_evacuation_failed(true);
4161 _hr_printer.evac_failure(r);
4162 }
4164 push_on_evac_failure_scan_stack(old);
4166 if (!_drain_in_progress) {
4167 // prevent recursion in copy_to_survivor_space()
4168 _drain_in_progress = true;
4169 drain_evac_failure_scan_stack();
4170 _drain_in_progress = false;
4171 }
4172 }
4174 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4175 assert(evacuation_failed(), "Oversaving!");
4176 // We want to call the "for_promotion_failure" version only in the
4177 // case of a promotion failure.
4178 if (m->must_be_preserved_for_promotion_failure(obj)) {
4179 if (_objs_with_preserved_marks == NULL) {
4180 assert(_preserved_marks_of_objs == NULL, "Both or none.");
4181 _objs_with_preserved_marks =
4182 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
4183 _preserved_marks_of_objs =
4184 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
4185 }
4186 _objs_with_preserved_marks->push(obj);
4187 _preserved_marks_of_objs->push(m);
4188 }
4189 }
4191 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4192 size_t word_size) {
4193 if (purpose == GCAllocForSurvived) {
4194 HeapWord* result = survivor_attempt_allocation(word_size);
4195 if (result != NULL) {
4196 return result;
4197 } else {
4198 // Let's try to allocate in the old gen in case we can fit the
4199 // object there.
4200 return old_attempt_allocation(word_size);
4201 }
4202 } else {
4203 assert(purpose == GCAllocForTenured, "sanity");
4204 HeapWord* result = old_attempt_allocation(word_size);
4205 if (result != NULL) {
4206 return result;
4207 } else {
4208 // Let's try to allocate in the survivors in case we can fit the
4209 // object there.
4210 return survivor_attempt_allocation(word_size);
4211 }
4212 }
4214 ShouldNotReachHere();
4215 // Trying to keep some compilers happy.
4216 return NULL;
4217 }
4219 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4220 ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4222 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4223 : _g1h(g1h),
4224 _refs(g1h->task_queue(queue_num)),
4225 _dcq(&g1h->dirty_card_queue_set()),
4226 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
4227 _g1_rem(g1h->g1_rem_set()),
4228 _hash_seed(17), _queue_num(queue_num),
4229 _term_attempts(0),
4230 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4231 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4232 _age_table(false),
4233 _strong_roots_time(0), _term_time(0),
4234 _alloc_buffer_waste(0), _undo_waste(0) {
4235 // We allocate one entry more than the number of young CSet regions, since
4236 // we "sacrifice" entry 0 to keep track of surviving bytes for
4237 // non-young regions (where the age is -1)
4238 // We also add a few elements at the beginning and at the end in
4239 // an attempt to eliminate cache contention
4240 uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4241 uint array_length = PADDING_ELEM_NUM +
4242 real_length +
4243 PADDING_ELEM_NUM;
4244 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
4245 if (_surviving_young_words_base == NULL)
4246 vm_exit_out_of_memory(array_length * sizeof(size_t),
4247 "Not enough space for young surv histo.");
4248 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4249 memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4251 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4252 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
4254 _start = os::elapsedTime();
4255 }
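// Rough layout of the array allocated above (PAD denotes the
// cache-contention padding mentioned in the comment):
//
//   _surviving_young_words_base -> [ PAD..PAD | e0 | e1 .. eN | PAD..PAD ]
//   _surviving_young_words ---------------------^
//
// where e0 is the "sacrificed" entry for non-young regions (age -1) and
// e1..eN correspond to the N young CSet regions.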
4257 void
4258 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4259 {
4260 st->print_raw_cr("GC Termination Stats");
4261 st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
4262 " ------waste (KiB)------");
4263 st->print_raw_cr("thr ms ms % ms % attempts"
4264 " total alloc undo");
4265 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4266 " ------- ------- -------");
4267 }
4269 void
4270 G1ParScanThreadState::print_termination_stats(int i,
4271 outputStream* const st) const
4272 {
4273 const double elapsed_ms = elapsed_time() * 1000.0;
4274 const double s_roots_ms = strong_roots_time() * 1000.0;
4275 const double term_ms = term_time() * 1000.0;
4276 st->print_cr("%3d %9.2f %9.2f %6.2f "
4277 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4278 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4279 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4280 term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4281 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4282 alloc_buffer_waste() * HeapWordSize / K,
4283 undo_waste() * HeapWordSize / K);
4284 }
4286 #ifdef ASSERT
4287 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4288 assert(ref != NULL, "invariant");
4289 assert(UseCompressedOops, "sanity");
4290 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4291 oop p = oopDesc::load_decode_heap_oop(ref);
4292 assert(_g1h->is_in_g1_reserved(p),
4293 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4294 return true;
4295 }
4297 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4298 assert(ref != NULL, "invariant");
4299 if (has_partial_array_mask(ref)) {
4300 // Must be in the collection set--it's already been copied.
4301 oop p = clear_partial_array_mask(ref);
4302 assert(_g1h->obj_in_cs(p),
4303 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4304 } else {
4305 oop p = oopDesc::load_decode_heap_oop(ref);
4306 assert(_g1h->is_in_g1_reserved(p),
4307 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4308 }
4309 return true;
4310 }
4312 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4313 if (ref.is_narrow()) {
4314 return verify_ref((narrowOop*) ref);
4315 } else {
4316 return verify_ref((oop*) ref);
4317 }
4318 }
4319 #endif // ASSERT
4321 void G1ParScanThreadState::trim_queue() {
4322 assert(_evac_cl != NULL, "not set");
4323 assert(_evac_failure_cl != NULL, "not set");
4324 assert(_partial_scan_cl != NULL, "not set");
4326 StarTask ref;
4327 do {
4328 // Drain the overflow stack first, so other threads can steal.
4329 while (refs()->pop_overflow(ref)) {
4330 deal_with_reference(ref);
4331 }
4333 while (refs()->pop_local(ref)) {
4334 deal_with_reference(ref);
4335 }
4336 } while (!refs()->is_empty());
4337 }
4339 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4340 G1ParScanThreadState* par_scan_state) :
4341 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4342 _par_scan_state(par_scan_state),
4343 _worker_id(par_scan_state->queue_num()),
4344 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4345 _mark_in_progress(_g1->mark_in_progress()) { }
4347 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4348 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4349 #ifdef ASSERT
4350 HeapRegion* hr = _g1->heap_region_containing(obj);
4351 assert(hr != NULL, "sanity");
4352 assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4353 #endif // ASSERT
4355 // We know that the object is not moving so it's safe to read its size.
4356 _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4357 }
4359 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4360 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4361 ::mark_forwarded_object(oop from_obj, oop to_obj) {
4362 #ifdef ASSERT
4363 assert(from_obj->is_forwarded(), "from obj should be forwarded");
4364 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4365 assert(from_obj != to_obj, "should not be self-forwarded");
4367 HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4368 assert(from_hr != NULL, "sanity");
4369 assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4371 HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4372 assert(to_hr != NULL, "sanity");
4373 assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4374 #endif // ASSERT
4376 // The object might be in the process of being copied by another
4377 // worker so we cannot trust that its to-space image is
4378 // well-formed. So we have to read its size from its from-space
4379 // image which we know should not be changing.
4380 _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4381 }
4383 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4384 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4385 ::copy_to_survivor_space(oop old) {
4386 size_t word_sz = old->size();
4387 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4388 // +1 to make the -1 indexes valid...
4389 int young_index = from_region->young_index_in_cset()+1;
4390 assert( (from_region->is_young() && young_index > 0) ||
4391 (!from_region->is_young() && young_index == 0), "invariant" );
4392 G1CollectorPolicy* g1p = _g1->g1_policy();
4393 markOop m = old->mark();
4394 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4395 : m->age();
4396 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4397 word_sz);
4398 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4399 oop obj = oop(obj_ptr);
4401 if (obj_ptr == NULL) {
4402 // This will either forward-to-self, or detect that someone else has
4403 // installed a forwarding pointer.
4404 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4405 return _g1->handle_evacuation_failure_par(cl, old);
4406 }
4408 // We're going to allocate linearly, so might as well prefetch ahead.
4409 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4411 oop forward_ptr = old->forward_to_atomic(obj);
4412 if (forward_ptr == NULL) {
4413 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4414 if (g1p->track_object_age(alloc_purpose)) {
4415 // We could simply do obj->incr_age(). However, this causes a
4416 // performance issue. obj->incr_age() will first check whether
4417 // the object has a displaced mark by checking its mark word;
4418 // getting the mark word from the new location of the object
4419 // stalls. So, given that we already have the mark word and we
4420 // are about to install it anyway, it's better to increase the
4421 // age on the mark word, when the object does not have a
4422 // displaced mark word. We're not expecting many objects to have
4423 // a displaced mark word, so that case is not optimized
4424 // further (it could be...) and we simply call obj->incr_age().
4426 if (m->has_displaced_mark_helper()) {
4427 // in this case, we have to install the mark word first,
4428 // otherwise obj looks to be forwarded (the old mark word,
4429 // which contains the forward pointer, was copied)
4430 obj->set_mark(m);
4431 obj->incr_age();
4432 } else {
4433 m = m->incr_age();
4434 obj->set_mark(m);
4435 }
4436 _par_scan_state->age_table()->add(obj, word_sz);
4437 } else {
4438 obj->set_mark(m);
4439 }
4441 size_t* surv_young_words = _par_scan_state->surviving_young_words();
4442 surv_young_words[young_index] += word_sz;
4444 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4445 // We keep track of the next start index in the length field of
4446 // the to-space object. The actual length can be found in the
4447 // length field of the from-space object.
4448 arrayOop(obj)->set_length(0);
4449 oop* old_p = set_partial_array_mask(old);
4450 _par_scan_state->push_on_queue(old_p);
4451 } else {
4452 // No point in using the slower heap_region_containing() method,
4453 // given that we know obj is in the heap.
4454 _scanner.set_region(_g1->heap_region_containing_raw(obj));
4455 obj->oop_iterate_backwards(&_scanner);
4456 }
4457 } else {
4458 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4459 obj = forward_ptr;
4460 }
4461 return obj;
4462 }
4464 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4465 template <class T>
4466 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4467 ::do_oop_work(T* p) {
4468 oop obj = oopDesc::load_decode_heap_oop(p);
4469 assert(barrier != G1BarrierRS || obj != NULL,
4470 "Precondition: G1BarrierRS implies obj is non-NULL");
4472 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4474 // here the null check is implicit in the in_cset_fast_test() call
4475 if (_g1->in_cset_fast_test(obj)) {
4476 oop forwardee;
4477 if (obj->is_forwarded()) {
4478 forwardee = obj->forwardee();
4479 } else {
4480 forwardee = copy_to_survivor_space(obj);
4481 }
4482 assert(forwardee != NULL, "forwardee should not be NULL");
4483 oopDesc::encode_store_heap_oop(p, forwardee);
4484 if (do_mark_object && forwardee != obj) {
4485 // If the object is self-forwarded we don't need to explicitly
4486 // mark it; the evacuation failure protocol will do so.
4487 mark_forwarded_object(obj, forwardee);
4488 }
4490 // When scanning the RS, we only care about objs in CS.
4491 if (barrier == G1BarrierRS) {
4492 _par_scan_state->update_rs(_from, p, _worker_id);
4493 }
4494 } else {
4495 // The object is not in collection set. If we're a root scanning
4496 // closure during an initial mark pause (i.e. do_mark_object will
4497 // be true) then attempt to mark the object.
4498 if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4499 mark_object(obj);
4500 }
4501 }
4503 if (barrier == G1BarrierEvac && obj != NULL) {
4504 _par_scan_state->update_rs(_from, p, _worker_id);
4505 }
4507 if (do_gen_barrier && obj != NULL) {
4508 par_do_barrier(p);
4509 }
4510 }
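// Summary of the barrier handling above: with G1BarrierRS the remembered
// set is updated only for references that point into the collection set;
// with G1BarrierEvac it is updated for any non-NULL reference; and when
// do_gen_barrier is set, par_do_barrier() is additionally applied to
// non-NULL references.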
4512 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4513 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4515 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4516 assert(has_partial_array_mask(p), "invariant");
4517 oop from_obj = clear_partial_array_mask(p);
4519 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4520 assert(from_obj->is_objArray(), "must be obj array");
4521 objArrayOop from_obj_array = objArrayOop(from_obj);
4522 // The from-space object contains the real length.
4523 int length = from_obj_array->length();
4525 assert(from_obj->is_forwarded(), "must be forwarded");
4526 oop to_obj = from_obj->forwardee();
4527 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4528 objArrayOop to_obj_array = objArrayOop(to_obj);
4529 // We keep track of the next start index in the length field of the
4530 // to-space object.
4531 int next_index = to_obj_array->length();
4532 assert(0 <= next_index && next_index < length,
4533 err_msg("invariant, next index: %d, length: %d", next_index, length));
4535 int start = next_index;
4536 int end = length;
4537 int remainder = end - start;
4538 // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4539 if (remainder > 2 * ParGCArrayScanChunk) {
4540 end = start + ParGCArrayScanChunk;
4541 to_obj_array->set_length(end);
4542 // Push the remainder before we process the range in case another
4543 // worker has run out of things to do and can steal it.
4544 oop* from_obj_p = set_partial_array_mask(from_obj);
4545 _par_scan_state->push_on_queue(from_obj_p);
4546 } else {
4547 assert(length == end, "sanity");
4548 // We'll process the final range for this object. Restore the length
4549 // so that the heap remains parsable in case of evacuation failure.
4550 to_obj_array->set_length(end);
4551 }
4552 _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4553 // Process indexes [start,end). It will also process the header
4554 // along with the first chunk (i.e., the chunk with start == 0).
4555 // Note that at this point the length field of to_obj_array is not
4556 // correct given that we are using it to keep track of the next
4557 // start index. oop_iterate_range() (thankfully!) ignores the length
4558 // field and only relies on the start / end parameters. It does
4559 // however return the size of the object which will be incorrect. So
4560 // we have to ignore it even if we wanted to use it.
4561 to_obj_array->oop_iterate_range(&_scanner, start, end);
4562 }
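// Worked example of the chunking above, with illustrative numbers and
// assuming ParGCArrayScanChunk == 50: for a 1000-element object array,
// copy_to_survivor_space() sets the to-space length to 0 and pushes a
// masked reference; the first call here processes indexes [0,50) and
// leaves 50 in the to-space length field, the next call processes
// [50,100), and so on, until the remaining range is at most
// 2 * ParGCArrayScanChunk, at which point the final call processes the
// rest and restores the real length (1000).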
4564 class G1ParEvacuateFollowersClosure : public VoidClosure {
4565 protected:
4566 G1CollectedHeap* _g1h;
4567 G1ParScanThreadState* _par_scan_state;
4568 RefToScanQueueSet* _queues;
4569 ParallelTaskTerminator* _terminator;
4571 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4572 RefToScanQueueSet* queues() { return _queues; }
4573 ParallelTaskTerminator* terminator() { return _terminator; }
4575 public:
4576 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4577 G1ParScanThreadState* par_scan_state,
4578 RefToScanQueueSet* queues,
4579 ParallelTaskTerminator* terminator)
4580 : _g1h(g1h), _par_scan_state(par_scan_state),
4581 _queues(queues), _terminator(terminator) {}
4583 void do_void();
4585 private:
4586 inline bool offer_termination();
4587 };
4589 bool G1ParEvacuateFollowersClosure::offer_termination() {
4590 G1ParScanThreadState* const pss = par_scan_state();
4591 pss->start_term_time();
4592 const bool res = terminator()->offer_termination();
4593 pss->end_term_time();
4594 return res;
4595 }
4597 void G1ParEvacuateFollowersClosure::do_void() {
4598 StarTask stolen_task;
4599 G1ParScanThreadState* const pss = par_scan_state();
4600 pss->trim_queue();
4602 do {
4603 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4604 assert(pss->verify_task(stolen_task), "sanity");
4605 if (stolen_task.is_narrow()) {
4606 pss->deal_with_reference((narrowOop*) stolen_task);
4607 } else {
4608 pss->deal_with_reference((oop*) stolen_task);
4609 }
4611 // We've just processed a reference and we might have made
4612 // available new entries on the queues. So we have to make sure
4613 // we drain the queues as necessary.
4614 pss->trim_queue();
4615 }
4616 } while (!offer_termination());
4618 pss->retire_alloc_buffers();
4619 }
4621 class G1ParTask : public AbstractGangTask {
4622 protected:
4623 G1CollectedHeap* _g1h;
4624 RefToScanQueueSet *_queues;
4625 ParallelTaskTerminator _terminator;
4626 uint _n_workers;
4628 Mutex _stats_lock;
4629 Mutex* stats_lock() { return &_stats_lock; }
4631 size_t getNCards() {
4632 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4633 / G1BlockOffsetSharedArray::N_bytes;
4634 }
4636 public:
4637 G1ParTask(G1CollectedHeap* g1h,
4638 RefToScanQueueSet *task_queues)
4639 : AbstractGangTask("G1 collection"),
4640 _g1h(g1h),
4641 _queues(task_queues),
4642 _terminator(0, _queues),
4643 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4644 {}
4646 RefToScanQueueSet* queues() { return _queues; }
4648 RefToScanQueue *work_queue(int i) {
4649 return queues()->queue(i);
4650 }
4652 ParallelTaskTerminator* terminator() { return &_terminator; }
4654 virtual void set_for_termination(int active_workers) {
4655 // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4656 // in the young space (_par_seq_tasks) in the G1 heap
4657 // for SequentialSubTasksDone.
4658 // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4659 // both of which need setting by set_n_termination().
4660 _g1h->SharedHeap::set_n_termination(active_workers);
4661 _g1h->set_n_termination(active_workers);
4662 terminator()->reset_for_reuse(active_workers);
4663 _n_workers = active_workers;
4664 }
4666 void work(uint worker_id) {
4667 if (worker_id >= _n_workers) return; // no work needed this round
4669 double start_time_ms = os::elapsedTime() * 1000.0;
4670 _g1h->g1_policy()->record_gc_worker_start_time(worker_id, start_time_ms);
4672 {
4673 ResourceMark rm;
4674 HandleMark hm;
4676 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4678 G1ParScanThreadState pss(_g1h, worker_id);
4679 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
4680 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4681 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
4683 pss.set_evac_closure(&scan_evac_cl);
4684 pss.set_evac_failure_closure(&evac_failure_cl);
4685 pss.set_partial_scan_closure(&partial_scan_cl);
4687 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
4688 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss, rp);
4690 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4691 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss, rp);
4693 OopClosure* scan_root_cl = &only_scan_root_cl;
4694 OopsInHeapRegionClosure* scan_perm_cl = &only_scan_perm_cl;
4696 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4697 // We also need to mark copied objects.
4698 scan_root_cl = &scan_mark_root_cl;
4699 scan_perm_cl = &scan_mark_perm_cl;
4700 }
4702 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4704 pss.start_strong_roots();
4705 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4706 SharedHeap::SO_AllClasses,
4707 scan_root_cl,
4708 &push_heap_rs_cl,
4709 scan_perm_cl,
4710 worker_id);
4711 pss.end_strong_roots();
4713 {
4714 double start = os::elapsedTime();
4715 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4716 evac.do_void();
4717 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4718 double term_ms = pss.term_time()*1000.0;
4719 _g1h->g1_policy()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
4720 _g1h->g1_policy()->record_termination(worker_id, term_ms, pss.term_attempts());
4721 }
4722 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4723 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4725 // Clean up any par-expanded rem sets.
4726 HeapRegionRemSet::par_cleanup();
4728 if (ParallelGCVerbose) {
4729 MutexLocker x(stats_lock());
4730 pss.print_termination_stats(worker_id);
4731 }
4733 assert(pss.refs()->is_empty(), "should be empty");
4735 // Close the inner scope so that the ResourceMark and HandleMark
4736 // destructors are executed here and are included as part of the
4737 // "GC Worker Time".
4738 }
4740 double end_time_ms = os::elapsedTime() * 1000.0;
4741 _g1h->g1_policy()->record_gc_worker_end_time(worker_id, end_time_ms);
4742 }
4743 };
4745 // *** Common G1 Evacuation Stuff
4747 // Closures that support the filtering of CodeBlobs scanned during
4748 // external root scanning.
4750 // Closure applied to reference fields in code blobs (specifically nmethods)
4751 // to determine whether an nmethod contains references that point into
4752 // the collection set. Used as a predicate when walking code roots so
4753 // that only nmethods that point into the collection set are added to the
4754 // 'marked' list.
4756 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
4758 class G1PointsIntoCSOopClosure : public OopClosure {
4759 G1CollectedHeap* _g1;
4760 bool _points_into_cs;
4761 public:
4762 G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
4763 _g1(g1), _points_into_cs(false) { }
4765 bool points_into_cs() const { return _points_into_cs; }
4767 template <class T>
4768 void do_oop_nv(T* p) {
4769 if (!_points_into_cs) {
4770 T heap_oop = oopDesc::load_heap_oop(p);
4771 if (!oopDesc::is_null(heap_oop) &&
4772 _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
4773 _points_into_cs = true;
4774 }
4775 }
4776 }
4778 virtual void do_oop(oop* p) { do_oop_nv(p); }
4779 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
4780 };
4782 G1CollectedHeap* _g1;
4784 public:
4785 G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
4786 CodeBlobToOopClosure(cl, true), _g1(g1) { }
4788 virtual void do_code_blob(CodeBlob* cb) {
4789 nmethod* nm = cb->as_nmethod_or_null();
4790 if (nm != NULL && !(nm->test_oops_do_mark())) {
4791 G1PointsIntoCSOopClosure predicate_cl(_g1);
4792 nm->oops_do(&predicate_cl);
4794 if (predicate_cl.points_into_cs()) {
4795 // At least one of the reference fields or the oop relocations
4796 // in the nmethod points into the collection set. We have to
4797 // 'mark' this nmethod.
4798 // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
4799 // or MarkingCodeBlobClosure::do_code_blob() change.
4800 if (!nm->test_set_oops_do_mark()) {
4801 do_newly_marked_nmethod(nm);
4802 }
4803 }
4804 }
4805 }
4806 };
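// Note that G1PointsIntoCSOopClosure short-circuits: once _points_into_cs
// has been set, the remaining do_oop() calls made by nm->oops_do() above do
// no further work, so the predicate effectively stops examining an nmethod
// after the first collection-set reference has been found.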
4808 // This method is run in a GC worker.
4810 void
4811 G1CollectedHeap::
4812 g1_process_strong_roots(bool collecting_perm_gen,
4813 ScanningOption so,
4814 OopClosure* scan_non_heap_roots,
4815 OopsInHeapRegionClosure* scan_rs,
4816 OopsInGenClosure* scan_perm,
4817 int worker_i) {
4819 // First scan the strong roots, including the perm gen.
4820 double ext_roots_start = os::elapsedTime();
4821 double closure_app_time_sec = 0.0;
4823 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4824 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4825 buf_scan_perm.set_generation(perm_gen());
4827 // Walk the code cache w/o buffering, because StarTask cannot handle
4828 // unaligned oop locations.
4829 G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
4831 process_strong_roots(false, // no scoping; this is parallel code
4832 collecting_perm_gen, so,
4833 &buf_scan_non_heap_roots,
4834 &eager_scan_code_roots,
4835 &buf_scan_perm);
4837 // Now the CM ref_processor roots.
4838 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4839 // We need to treat the discovered reference lists of the
4840 // concurrent mark ref processor as roots and keep entries
4841 // (which are added by the marking threads) on them live
4842 // until they can be processed at the end of marking.
4843 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4844 }
4846 // Finish up any enqueued closure apps (attributed as object copy time).
4847 buf_scan_non_heap_roots.done();
4848 buf_scan_perm.done();
4850 double ext_roots_end = os::elapsedTime();
4852 g1_policy()->reset_obj_copy_time(worker_i);
4853 double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
4854 buf_scan_non_heap_roots.closure_app_seconds();
4855 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4857 double ext_root_time_ms =
4858 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4860 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4862 // During conc marking we have to filter the per-thread SATB buffers
4863 // to make sure we remove any oops into the CSet (which will show up
4864 // as implicitly live).
4865 if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4866 if (mark_in_progress()) {
4867 JavaThread::satb_mark_queue_set().filter_thread_buffers();
4868 }
4869 }
4870 double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4871 g1_policy()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4873 // Now scan the complement of the collection set.
4874 if (scan_rs != NULL) {
4875 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4876 }
4878 _process_strong_tasks->all_tasks_completed();
4879 }
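// Timing note: the time spent inside the buffered closures is charged to
// object copy time rather than to external root scanning; what remains of
// the wall-clock interval is reported as ext_root_scan time, and the SATB
// buffer filtering that follows is timed and reported separately.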
4881 void
4882 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4883 OopClosure* non_root_closure) {
4884 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
4885 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
4886 }
4888 // Weak Reference Processing support
4890 // An always "is_alive" closure that is used to preserve referents.
4891 // If the object is non-null then it's alive. Used in the preservation
4892 // of referent objects that are pointed to by reference objects
4893 // discovered by the CM ref processor.
4894 class G1AlwaysAliveClosure: public BoolObjectClosure {
4895 G1CollectedHeap* _g1;
4896 public:
4897 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4898 void do_object(oop p) { assert(false, "Do not call."); }
4899 bool do_object_b(oop p) {
4900 if (p != NULL) {
4901 return true;
4902 }
4903 return false;
4904 }
4905 };
4907 bool G1STWIsAliveClosure::do_object_b(oop p) {
4908 // An object is reachable if it is outside the collection set,
4909 // or is inside and copied.
4910 return !_g1->obj_in_cs(p) || p->is_forwarded();
4911 }
4913 // Non Copying Keep Alive closure
4914 class G1KeepAliveClosure: public OopClosure {
4915 G1CollectedHeap* _g1;
4916 public:
4917 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4918 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
4919 void do_oop( oop* p) {
4920 oop obj = *p;
4922 if (_g1->obj_in_cs(obj)) {
4923 assert( obj->is_forwarded(), "invariant" );
4924 *p = obj->forwardee();
4925 }
4926 }
4927 };
4929 // Copying Keep Alive closure - can be called from both
4930 // serial and parallel code as long as different worker
4931 // threads utilize different G1ParScanThreadState instances
4932 // and different queues.
4934 class G1CopyingKeepAliveClosure: public OopClosure {
4935 G1CollectedHeap* _g1h;
4936 OopClosure* _copy_non_heap_obj_cl;
4937 OopsInHeapRegionClosure* _copy_perm_obj_cl;
4938 G1ParScanThreadState* _par_scan_state;
4940 public:
4941 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
4942 OopClosure* non_heap_obj_cl,
4943 OopsInHeapRegionClosure* perm_obj_cl,
4944 G1ParScanThreadState* pss):
4945 _g1h(g1h),
4946 _copy_non_heap_obj_cl(non_heap_obj_cl),
4947 _copy_perm_obj_cl(perm_obj_cl),
4948 _par_scan_state(pss)
4949 {}
4951 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
4952 virtual void do_oop( oop* p) { do_oop_work(p); }
4954 template <class T> void do_oop_work(T* p) {
4955 oop obj = oopDesc::load_decode_heap_oop(p);
4957 if (_g1h->obj_in_cs(obj)) {
4958 // If the referent object has been forwarded (either copied
4959 // to a new location or to itself in the event of an
4960 // evacuation failure) then we need to update the reference
4961 // field and, if both reference and referent are in the G1
4962 // heap, update the RSet for the referent.
4963 //
4964 // If the referent has not been forwarded then we have to keep
4965 // it alive by policy. Therefore we have to copy the referent.
4966 //
4967 // If the reference field is in the G1 heap then we can push
4968 // on the PSS queue. When the queue is drained (after each
4969 // phase of reference processing) the object and its followers
4970 // will be copied, the reference field set to point to the
4971 // new location, and the RSet updated. Otherwise we need to
4972 // use the non-heap or perm closures directly to copy
4973 // the referent object and update the pointer, while avoiding
4974 // updating the RSet.
4976 if (_g1h->is_in_g1_reserved(p)) {
4977 _par_scan_state->push_on_queue(p);
4978 } else {
4979 // The reference field is not in the G1 heap.
4980 if (_g1h->perm_gen()->is_in(p)) {
4981 _copy_perm_obj_cl->do_oop(p);
4982 } else {
4983 _copy_non_heap_obj_cl->do_oop(p);
4984 }
4985 }
4986 }
4987 }
4988 };
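// To summarize the dispatch above, for a field p whose referent lies in the
// collection set:
//   - p inside the G1 reserved heap -> push p on the PSS queue; the referent
//     is copied and the RSet updated when the queue is drained
//   - p in the perm gen             -> apply _copy_perm_obj_cl directly
//   - p anywhere else (e.g. a root) -> apply _copy_non_heap_obj_cl directly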
4990 // Serial drain queue closure. Called as the 'complete_gc'
4991 // closure for each discovered list in some of the
4992 // reference processing phases.
4994 class G1STWDrainQueueClosure: public VoidClosure {
4995 protected:
4996 G1CollectedHeap* _g1h;
4997 G1ParScanThreadState* _par_scan_state;
4999 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
5001 public:
5002 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5003 _g1h(g1h),
5004 _par_scan_state(pss)
5005 { }
5007 void do_void() {
5008 G1ParScanThreadState* const pss = par_scan_state();
5009 pss->trim_queue();
5010 }
5011 };
5013 // Parallel Reference Processing closures
5015 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5016 // processing during G1 evacuation pauses.
5018 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5019 private:
5020 G1CollectedHeap* _g1h;
5021 RefToScanQueueSet* _queues;
5022 FlexibleWorkGang* _workers;
5023 int _active_workers;
5025 public:
5026 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5027 FlexibleWorkGang* workers,
5028 RefToScanQueueSet *task_queues,
5029 int n_workers) :
5030 _g1h(g1h),
5031 _queues(task_queues),
5032 _workers(workers),
5033 _active_workers(n_workers)
5034 {
5035 assert(n_workers > 0, "shouldn't call this otherwise");
5036 }
5038 // Executes the given task using concurrent marking worker threads.
5039 virtual void execute(ProcessTask& task);
5040 virtual void execute(EnqueueTask& task);
5041 };
5043 // Gang task for possibly parallel reference processing
5045 class G1STWRefProcTaskProxy: public AbstractGangTask {
5046 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5047 ProcessTask& _proc_task;
5048 G1CollectedHeap* _g1h;
5049 RefToScanQueueSet *_task_queues;
5050 ParallelTaskTerminator* _terminator;
5052 public:
5053 G1STWRefProcTaskProxy(ProcessTask& proc_task,
5054 G1CollectedHeap* g1h,
5055 RefToScanQueueSet *task_queues,
5056 ParallelTaskTerminator* terminator) :
5057 AbstractGangTask("Process reference objects in parallel"),
5058 _proc_task(proc_task),
5059 _g1h(g1h),
5060 _task_queues(task_queues),
5061 _terminator(terminator)
5062 {}
5064 virtual void work(uint worker_id) {
5065 // The reference processing task executed by a single worker.
5066 ResourceMark rm;
5067 HandleMark hm;
5069 G1STWIsAliveClosure is_alive(_g1h);
5071 G1ParScanThreadState pss(_g1h, worker_id);
5073 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5074 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5075 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5077 pss.set_evac_closure(&scan_evac_cl);
5078 pss.set_evac_failure_closure(&evac_failure_cl);
5079 pss.set_partial_scan_closure(&partial_scan_cl);
5081 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5082 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL);
5084 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5085 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL);
5087 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5088 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl;
5090 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5091 // We also need to mark copied objects.
5092 copy_non_heap_cl = &copy_mark_non_heap_cl;
5093 copy_perm_cl = &copy_mark_perm_cl;
5094 }
5096 // Keep alive closure.
5097 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
5099 // Complete GC closure
5100 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5102 // Call the reference processing task's work routine.
5103 _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5105 // Note we cannot assert that the refs array is empty here as not all
5106 // of the processing tasks (specifically phase2 - pp2_work) execute
5107 // the complete_gc closure (which ordinarily would drain the queue) so
5108 // the queue may not be empty.
5109 }
5110 };
5112 // Driver routine for parallel reference processing.
5113 // Creates an instance of the ref processing gang
5114 // task and has the worker threads execute it.
5115 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5116 assert(_workers != NULL, "Need parallel worker threads.");
5118 ParallelTaskTerminator terminator(_active_workers, _queues);
5119 G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5121 _g1h->set_par_threads(_active_workers);
5122 _workers->run_task(&proc_task_proxy);
5123 _g1h->set_par_threads(0);
5124 }
5126 // Gang task for parallel reference enqueueing.
5128 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5129 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5130 EnqueueTask& _enq_task;
5132 public:
5133 G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5134 AbstractGangTask("Enqueue reference objects in parallel"),
5135 _enq_task(enq_task)
5136 { }
5138 virtual void work(uint worker_id) {
5139 _enq_task.work(worker_id);
5140 }
5141 };
5143 // Driver routine for parallel reference enqueueing.
5144 // Creates an instance of the ref enqueueing gang
5145 // task and has the worker threads execute it.
5147 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5148 assert(_workers != NULL, "Need parallel worker threads.");
5150 G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5152 _g1h->set_par_threads(_active_workers);
5153 _workers->run_task(&enq_task_proxy);
5154 _g1h->set_par_threads(0);
5155 }
5157 // End of weak reference support closures
5159 // Abstract task used to preserve (i.e. copy) any referent objects
5160 // that are in the collection set and are pointed to by reference
5161 // objects discovered by the CM ref processor.
5163 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5164 protected:
5165 G1CollectedHeap* _g1h;
5166 RefToScanQueueSet *_queues;
5167 ParallelTaskTerminator _terminator;
5168 uint _n_workers;
5170 public:
5171 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5172 AbstractGangTask("ParPreserveCMReferents"),
5173 _g1h(g1h),
5174 _queues(task_queues),
5175 _terminator(workers, _queues),
5176 _n_workers(workers)
5177 { }
5179 void work(uint worker_id) {
5180 ResourceMark rm;
5181 HandleMark hm;
5183 G1ParScanThreadState pss(_g1h, worker_id);
5184 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
5185 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5186 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5188 pss.set_evac_closure(&scan_evac_cl);
5189 pss.set_evac_failure_closure(&evac_failure_cl);
5190 pss.set_partial_scan_closure(&partial_scan_cl);
5192 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5195 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5196 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL);
5198 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5199 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL);
5201 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5202 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl;
5204 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5205 // We also need to mark copied objects.
5206 copy_non_heap_cl = &copy_mark_non_heap_cl;
5207 copy_perm_cl = &copy_mark_perm_cl;
5208 }
5210 // Is alive closure
5211 G1AlwaysAliveClosure always_alive(_g1h);
5213 // Copying keep alive closure. Applied to referent objects that need
5214 // to be copied.
5215 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
5217 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5219 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5220 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5222 // limit is set using max_num_q() - which was set using ParallelGCThreads.
5223 // So this must be true - but assert just in case someone decides to
5224 // change the worker ids.
5225 assert(0 <= worker_id && worker_id < limit, "sanity");
5226 assert(!rp->discovery_is_atomic(), "check this code");
5228 // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5229 for (uint idx = worker_id; idx < limit; idx += stride) {
5230 DiscoveredList& ref_list = rp->discovered_refs()[idx];
5232 DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5233 while (iter.has_next()) {
5234 // Since discovery is not atomic for the CM ref processor, we
5235 // can see some null referent objects.
5236 iter.load_ptrs(DEBUG_ONLY(true));
5237 oop ref = iter.obj();
5239 // This will filter nulls.
5240 if (iter.is_referent_alive()) {
5241 iter.make_referent_alive();
5242 }
5243 iter.move_to_next();
5244 }
5245 }
5247 // Drain the queue - which may cause stealing
5248 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5249 drain_queue.do_void();
5250 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5251 assert(pss.refs()->is_empty(), "should be");
5252 }
5253 };
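// For illustration, assuming 4 workers and limit == 8: stride ==
// MIN2(MAX2(4, 1), 8) == 4, so worker 0 walks discovered lists 0 and 4,
// worker 1 walks lists 1 and 5, and so on - the lists are partitioned
// among the workers without any synchronization.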
5255 // Weak Reference processing during an evacuation pause (part 1).
5256 void G1CollectedHeap::process_discovered_references() {
5257 double ref_proc_start = os::elapsedTime();
5259 ReferenceProcessor* rp = _ref_processor_stw;
5260 assert(rp->discovery_enabled(), "should have been enabled");
5262 // Any reference objects, in the collection set, that were 'discovered'
5263 // by the CM ref processor should have already been copied (either by
5264 // applying the external root copy closure to the discovered lists, or
5265 // by following an RSet entry).
5266 //
5267 // But some of the referents, that are in the collection set, that these
5268 // reference objects point to may not have been copied: the STW ref
5269 // processor would have seen that the reference object had already
5270 // been 'discovered' and would have skipped discovering the reference,
5271 // but would not have treated the reference object as a regular oop.
5272 // As a result the copy closure would not have been applied to the
5273 // referent object.
5274 //
5275 // We need to explicitly copy these referent objects - the references
5276 // will be processed at the end of remarking.
5277 //
5278 // We also need to do this copying before we process the reference
5279 // objects discovered by the STW ref processor in case one of these
5280 // referents points to another object which is also referenced by an
5281 // object discovered by the STW ref processor.
5283 uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5284 workers()->active_workers() : 1);
5286 assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5287 active_workers == workers()->active_workers(),
5288 "Need to reset active_workers");
5290 set_par_threads(active_workers);
5291 G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
5293 if (G1CollectedHeap::use_parallel_gc_threads()) {
5294 workers()->run_task(&keep_cm_referents);
5295 } else {
5296 keep_cm_referents.work(0);
5297 }
5299 set_par_threads(0);
5301 // Closure to test whether a referent is alive.
5302 G1STWIsAliveClosure is_alive(this);
5304 // Even when parallel reference processing is enabled, the processing
5305 // of JNI refs is serial and performed by the current thread
5306 // rather than by a worker. The following PSS will be used for processing
5307 // JNI refs.
5309 // Use only a single queue for this PSS.
5310 G1ParScanThreadState pss(this, 0);
5312 // We do not embed a reference processor in the copying/scanning
5313 // closures while we're actually processing the discovered
5314 // reference objects.
5315 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);
5316 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5317 G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL);
5319 pss.set_evac_closure(&scan_evac_cl);
5320 pss.set_evac_failure_closure(&evac_failure_cl);
5321 pss.set_partial_scan_closure(&partial_scan_cl);
5323 assert(pss.refs()->is_empty(), "pre-condition");
5325 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5326 G1ParScanPermClosure only_copy_perm_cl(this, &pss, NULL);
5328 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5329 G1ParScanAndMarkPermClosure copy_mark_perm_cl(this, &pss, NULL);
5331 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5332 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl;
5334 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5335 // We also need to mark copied objects.
5336 copy_non_heap_cl = &copy_mark_non_heap_cl;
5337 copy_perm_cl = &copy_mark_perm_cl;
5338 }
5340 // Keep alive closure.
5341 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
5343 // Serial Complete GC closure
5344 G1STWDrainQueueClosure drain_queue(this, &pss);
5346 // Setup the soft refs policy...
5347 rp->setup_policy(false);
5349 if (!rp->processing_is_mt()) {
5350 // Serial reference processing...
5351 rp->process_discovered_references(&is_alive,
5352 &keep_alive,
5353 &drain_queue,
5354 NULL);
5355 } else {
5356 // Parallel reference processing
5357 assert(rp->num_q() == active_workers, "sanity");
5358 assert(active_workers <= rp->max_num_q(), "sanity");
5360 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5361 rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
5362 }
5364 // We have completed copying any necessary live referent objects
5365 // (that were not copied during the actual pause) so we can
5366 // retire any active alloc buffers
5367 pss.retire_alloc_buffers();
5368 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5370 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5371 g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
5372 }
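// The single-queue PSS created above (worker id 0) backs only the serial
// paths: the JNI reference processing and, when reference processing is not
// multi-threaded, the keep-alive / drain-queue closures passed to
// process_discovered_references(). In the multi-threaded case each worker
// creates its own G1ParScanThreadState inside G1STWRefProcTaskProxy::work().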
5374 // Weak Reference processing during an evacuation pause (part 2).
5375 void G1CollectedHeap::enqueue_discovered_references() {
5376 double ref_enq_start = os::elapsedTime();
5378 ReferenceProcessor* rp = _ref_processor_stw;
5379 assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5381 // Now enqueue any remaining on the discovered lists on to
5382 // the pending list.
5383 if (!rp->processing_is_mt()) {
5384 // Serial reference processing...
5385 rp->enqueue_discovered_references();
5386 } else {
5387 // Parallel reference enqueuing
5389 uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
5390 assert(active_workers == workers()->active_workers(),
5391 "Need to reset active_workers");
5392 assert(rp->num_q() == active_workers, "sanity");
5393 assert(active_workers <= rp->max_num_q(), "sanity");
5395 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5396 rp->enqueue_discovered_references(&par_task_executor);
5397 }
5399 rp->verify_no_references_recorded();
5400 assert(!rp->discovery_enabled(), "should have been disabled");
5402 // FIXME
5403 // CM's reference processing also cleans up the string and symbol tables.
5404 // Should we do that here also? We could, but it is a serial operation
5405 // and could significantly increase the pause time.
5407 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5408 g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
5409 }
5411 void G1CollectedHeap::evacuate_collection_set() {
5412 _expand_heap_after_alloc_failure = true;
5413 set_evacuation_failed(false);
5415 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5416 concurrent_g1_refine()->set_use_cache(false);
5417 concurrent_g1_refine()->clear_hot_cache_claimed_index();
5419 uint n_workers;
5420 if (G1CollectedHeap::use_parallel_gc_threads()) {
5421 n_workers =
5422 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5423 workers()->active_workers(),
5424 Threads::number_of_non_daemon_threads());
5425 assert(UseDynamicNumberOfGCThreads ||
5426 n_workers == workers()->total_workers(),
5427 "If not dynamic should be using all the workers");
5428 workers()->set_active_workers(n_workers);
5429 set_par_threads(n_workers);
5430 } else {
5431 assert(n_par_threads() == 0,
5432 "Should be the original non-parallel value");
5433 n_workers = 1;
5434 }
5436 G1ParTask g1_par_task(this, _task_queues);
5438 init_for_evac_failure(NULL);
5440 rem_set()->prepare_for_younger_refs_iterate(true);
5442 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5443 double start_par_time_sec = os::elapsedTime();
5444 double end_par_time_sec;
5446 {
5447 StrongRootsScope srs(this);
5449 if (G1CollectedHeap::use_parallel_gc_threads()) {
5450 // The individual threads will set their evac-failure closures.
5451 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5452 // These tasks use SharedHeap::_process_strong_tasks
5453 assert(UseDynamicNumberOfGCThreads ||
5454 workers()->active_workers() == workers()->total_workers(),
5455 "If not dynamic should be using all the workers");
5456 workers()->run_task(&g1_par_task);
5457 } else {
5458 g1_par_task.set_for_termination(n_workers);
5459 g1_par_task.work(0);
5460 }
5461 end_par_time_sec = os::elapsedTime();
5463 // Closing the inner scope will execute the destructor
5464 // for the StrongRootsScope object. We record the current
5465 // elapsed time before closing the scope so that time
5466 // taken for the SRS destructor is NOT included in the
5467 // reported parallel time.
5468 }
5470 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5471 g1_policy()->record_par_time(par_time_ms);
5473 double code_root_fixup_time_ms =
5474 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5475 g1_policy()->record_code_root_fixup_time(code_root_fixup_time_ms);
5477 set_par_threads(0);
5479 // Process any discovered reference objects - we have
5480 // to do this _before_ we retire the GC alloc regions
5481 // as we may have to copy some 'reachable' referent
5482 // objects (and their reachable sub-graphs) that were
5483 // not copied during the pause.
5484 process_discovered_references();
5486 // Weak root processing.
5487 // Note: when JSR 292 is enabled and code blobs can contain
5488 // non-perm oops then we will need to process the code blobs
5489 // here too.
5490 {
5491 G1STWIsAliveClosure is_alive(this);
5492 G1KeepAliveClosure keep_alive(this);
5493 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5494 }
5496 release_gc_alloc_regions();
5497 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5499 concurrent_g1_refine()->clear_hot_cache();
5500 concurrent_g1_refine()->set_use_cache(true);
5502 finalize_for_evac_failure();
5504 if (evacuation_failed()) {
5505 remove_self_forwarding_pointers();
5506 if (G1Log::finer()) {
5507 gclog_or_tty->print(" (to-space overflow)");
5508 } else if (G1Log::fine()) {
5509 gclog_or_tty->print("--");
5510 }
5511 }
5513 // Enqueue any references remaining on the STW
5514 // reference processor's discovered lists. We need to do
5515 // this after the card table is cleaned (and verified) as
5516 // the act of enqueuing entries on to the pending list
5517 // will log these updates (and dirty their associated
5518 // cards). We need these updates logged to update any
5519 // RSets.
5520 enqueue_discovered_references();
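// With G1DeferredRSUpdate, cards logged during the pause were diverted into
// the heap's own dirty card queue set rather than being processed
// immediately. The block below re-dirties the corresponding card table
// entries and merges those buffers back into the shared JavaThread queue
// set, so the deferred remembered set updates are performed later by the
// usual refinement path.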
5522 if (G1DeferredRSUpdate) {
5523 RedirtyLoggedCardTableEntryFastClosure redirty;
5524 dirty_card_queue_set().set_closure(&redirty);
5525 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5527 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5528 dcq.merge_bufferlists(&dirty_card_queue_set());
5529 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5530 }
5531 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5532 }
5534 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5535 size_t* pre_used,
5536 FreeRegionList* free_list,
5537 OldRegionSet* old_proxy_set,
5538 HumongousRegionSet* humongous_proxy_set,
5539 HRRSCleanupTask* hrrs_cleanup_task,
5540 bool par) {
5541 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5542 if (hr->isHumongous()) {
5543 assert(hr->startsHumongous(), "we should only see starts humongous");
5544 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5545 } else {
5546 _old_set.remove_with_proxy(hr, old_proxy_set);
5547 free_region(hr, pre_used, free_list, par);
5548 }
5549 } else {
5550 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5551 }
5552 }
5554 void G1CollectedHeap::free_region(HeapRegion* hr,
5555 size_t* pre_used,
5556 FreeRegionList* free_list,
5557 bool par) {
5558 assert(!hr->isHumongous(), "this is only for non-humongous regions");
5559 assert(!hr->is_empty(), "the region should not be empty");
5560 assert(free_list != NULL, "pre-condition");
5562 *pre_used += hr->used();
5563 hr->hr_clear(par, true /* clear_space */);
5564 free_list->add_as_head(hr);
5565 }
5567 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5568 size_t* pre_used,
5569 FreeRegionList* free_list,
5570 HumongousRegionSet* humongous_proxy_set,
5571 bool par) {
5572 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5573 assert(free_list != NULL, "pre-condition");
5574 assert(humongous_proxy_set != NULL, "pre-condition");
5576 size_t hr_used = hr->used();
5577 size_t hr_capacity = hr->capacity();
5578 size_t hr_pre_used = 0;
5579 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5580 hr->set_notHumongous();
5581 free_region(hr, &hr_pre_used, free_list, par);
5583 uint i = hr->hrs_index() + 1;
5584 uint num = 1;
5585 while (i < n_regions()) {
5586 HeapRegion* curr_hr = region_at(i);
5587 if (!curr_hr->continuesHumongous()) {
5588 break;
5589 }
5590 curr_hr->set_notHumongous();
5591 free_region(curr_hr, &hr_pre_used, free_list, par);
5592 num += 1;
5593 i += 1;
5594 }
5595 assert(hr_pre_used == hr_used,
5596 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
5597 "should be the same", hr_pre_used, hr_used));
5598 *pre_used += hr_pre_used;
5599 }
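// For illustration: freeing a humongous object that spans three regions
// clears the "starts humongous" region first and then walks forward over
// the two "continues humongous" regions, clearing each one and adding it
// to the free list. The bytes accumulated in hr_pre_used while doing so
// must match the used() value captured from the first region before it was
// cleared, which is exactly what the assert above checks.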
5601 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
5602 FreeRegionList* free_list,
5603 OldRegionSet* old_proxy_set,
5604 HumongousRegionSet* humongous_proxy_set,
5605 bool par) {
5606 if (pre_used > 0) {
5607 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
5608 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
5609 assert(_summary_bytes_used >= pre_used,
5610 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
5611 "should be >= pre_used: "SIZE_FORMAT,
5612 _summary_bytes_used, pre_used));
5613 _summary_bytes_used -= pre_used;
5614 }
5615 if (free_list != NULL && !free_list->is_empty()) {
5616 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5617 _free_list.add_as_head(free_list);
5618 }
5619 if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
5620 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5621 _old_set.update_from_proxy(old_proxy_set);
5622 }
5623 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
5624 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5625 _humongous_set.update_from_proxy(humongous_proxy_set);
5626 }
5627 }
5629 class G1ParCleanupCTTask : public AbstractGangTask {
5630 CardTableModRefBS* _ct_bs;
5631 G1CollectedHeap* _g1h;
5632 HeapRegion* volatile _su_head;
5633 public:
5634 G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
5635 G1CollectedHeap* g1h) :
5636 AbstractGangTask("G1 Par Cleanup CT Task"),
5637 _ct_bs(ct_bs), _g1h(g1h) { }
5639 void work(uint worker_id) {
5640 HeapRegion* r;
5641 while (r = _g1h->pop_dirty_cards_region()) {
5642 clear_cards(r);
5643 }
5644 }
5646 void clear_cards(HeapRegion* r) {
5647 // Cards of the survivors should have already been dirtied.
5648 if (!r->is_survivor()) {
5649 _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5650 }
5651 }
5652 };
5654 #ifndef PRODUCT
5655 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5656 G1CollectedHeap* _g1h;
5657 CardTableModRefBS* _ct_bs;
5658 public:
5659 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
5660 : _g1h(g1h), _ct_bs(ct_bs) { }
5661 virtual bool doHeapRegion(HeapRegion* r) {
5662 if (r->is_survivor()) {
5663 _g1h->verify_dirty_region(r);
5664 } else {
5665 _g1h->verify_not_dirty_region(r);
5666 }
5667 return false;
5668 }
5669 };
5671 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5672 // All of the region should be clean.
5673 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
5674 MemRegion mr(hr->bottom(), hr->end());
5675 ct_bs->verify_not_dirty_region(mr);
5676 }
5678 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5679 // We cannot guarantee that [bottom(),end()] is dirty. Threads
5680 // dirty allocated blocks as they allocate them. The thread that
5681 // retires each region and replaces it with a new one will do a
5682 // maximal allocation to fill in [pre_dummy_top(),end()] but will
5683 // not dirty that area (one less thing to have to do while holding
5684 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5685 // is dirty.
5686 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5687 MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5688 ct_bs->verify_dirty_region(mr);
5689 }
5691 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5692 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5693 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5694 verify_dirty_region(hr);
5695 }
5696 }
5698 void G1CollectedHeap::verify_dirty_young_regions() {
5699 verify_dirty_young_list(_young_list->first_region());
5700 verify_dirty_young_list(_young_list->first_survivor_region());
5701 }
5702 #endif
5704 void G1CollectedHeap::cleanUpCardTable() {
5705 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5706 double start = os::elapsedTime();
5708 {
5709 // Iterate over the dirty cards region list.
5710 G1ParCleanupCTTask cleanup_task(ct_bs, this);
5712 if (G1CollectedHeap::use_parallel_gc_threads()) {
5713 set_par_threads();
5714 workers()->run_task(&cleanup_task);
5715 set_par_threads(0);
5716 } else {
5717 while (_dirty_cards_region_list) {
5718 HeapRegion* r = _dirty_cards_region_list;
5719 cleanup_task.clear_cards(r);
5720 _dirty_cards_region_list = r->get_next_dirty_cards_region();
5721 if (_dirty_cards_region_list == r) {
5722 // The last region.
5723 _dirty_cards_region_list = NULL;
5724 }
5725 r->set_next_dirty_cards_region(NULL);
5726 }
5727 }
5728 #ifndef PRODUCT
5729 if (G1VerifyCTCleanup || VerifyAfterGC) {
5730 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5731 heap_region_iterate(&cleanup_verifier);
5732 }
5733 #endif
5734 }
5736 double elapsed = os::elapsedTime() - start;
5737 g1_policy()->record_clear_ct_time(elapsed * 1000.0);
5738 }
5740 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5741 size_t pre_used = 0;
5742 FreeRegionList local_free_list("Local List for CSet Freeing");
5744 double young_time_ms = 0.0;
5745 double non_young_time_ms = 0.0;
5747 // Since the collection set is a superset of the young list,
5748 // all we need to do to clear the young list is clear its
5749 // head and length, and unlink any young regions in the code below
5750 _young_list->clear();
5752 G1CollectorPolicy* policy = g1_policy();
5754 double start_sec = os::elapsedTime();
5755 bool non_young = true;
5757 HeapRegion* cur = cs_head;
5758 int age_bound = -1;
5759 size_t rs_lengths = 0;
5761 while (cur != NULL) {
5762 assert(!is_on_master_free_list(cur), "sanity");
5763 if (non_young) {
5764 if (cur->is_young()) {
5765 double end_sec = os::elapsedTime();
5766 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5767 non_young_time_ms += elapsed_ms;
5769 start_sec = os::elapsedTime();
5770 non_young = false;
5771 }
5772 } else {
5773 if (!cur->is_young()) {
5774 double end_sec = os::elapsedTime();
5775 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5776 young_time_ms += elapsed_ms;
5778 start_sec = os::elapsedTime();
5779 non_young = true;
5780 }
5781 }
5783 rs_lengths += cur->rem_set()->occupied();
5785 HeapRegion* next = cur->next_in_collection_set();
5786 assert(cur->in_collection_set(), "bad CS");
5787 cur->set_next_in_collection_set(NULL);
5788 cur->set_in_collection_set(false);
5790 if (cur->is_young()) {
5791 int index = cur->young_index_in_cset();
5792 assert(index != -1, "invariant");
5793 assert((uint) index < policy->young_cset_region_length(), "invariant");
5794 size_t words_survived = _surviving_young_words[index];
5795 cur->record_surv_words_in_group(words_survived);
5797 // At this point we have 'popped' cur from the collection set
5798 // (linked via next_in_collection_set()) but it is still in the
5799 // young list (linked via next_young_region()). Clear the
5800 // _next_young_region field.
5801 cur->set_next_young_region(NULL);
5802 } else {
5803 int index = cur->young_index_in_cset();
5804 assert(index == -1, "invariant");
5805 }
5807 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5808 (!cur->is_young() && cur->young_index_in_cset() == -1),
5809 "invariant" );
5811 if (!cur->evacuation_failed()) {
5812 MemRegion used_mr = cur->used_region();
5814 // The used region should not be empty for regions in the collection set.
5815 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5816 free_region(cur, &pre_used, &local_free_list, false /* par */);
5817 } else {
5818 cur->uninstall_surv_rate_group();
5819 if (cur->is_young()) {
5820 cur->set_young_index_in_cset(-1);
5821 }
5822 cur->set_not_young();
5823 cur->set_evacuation_failed(false);
5824 // The region is now considered to be old.
5825 _old_set.add(cur);
5826 }
5827 cur = next;
5828 }
5830 policy->record_max_rs_lengths(rs_lengths);
5831 policy->cset_regions_freed();
5833 double end_sec = os::elapsedTime();
5834 double elapsed_ms = (end_sec - start_sec) * 1000.0;
5836 if (non_young) {
5837 non_young_time_ms += elapsed_ms;
5838 } else {
5839 young_time_ms += elapsed_ms;
5840 }
5842 update_sets_after_freeing_regions(pre_used, &local_free_list,
5843 NULL /* old_proxy_set */,
5844 NULL /* humongous_proxy_set */,
5845 false /* par */);
5846 policy->record_young_free_cset_time_ms(young_time_ms);
5847 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
5848 }
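// Note on the timing above: the young / non-young free times are
// accumulated in runs - the clock is restarted whenever the region type
// flips between young and non-young, and whatever remains on the clock at
// the end of the loop is credited to whichever bucket the final run
// belonged to.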
5850 // This routine is similar to the above but does not record
5851 // any policy statistics or update free lists; we are abandoning
5852 // the current incremental collection set in preparation for a
5853 // full collection. After the full GC we will start to build up
5854 // the incremental collection set again.
5855 // This is only called when we're doing a full collection
5856 // and is immediately followed by the tearing down of the young list.
5858 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5859 HeapRegion* cur = cs_head;
5861 while (cur != NULL) {
5862 HeapRegion* next = cur->next_in_collection_set();
5863 assert(cur->in_collection_set(), "bad CS");
5864 cur->set_next_in_collection_set(NULL);
5865 cur->set_in_collection_set(false);
5866 cur->set_young_index_in_cset(-1);
5867 cur = next;
5868 }
5869 }
5871 void G1CollectedHeap::set_free_regions_coming() {
5872 if (G1ConcRegionFreeingVerbose) {
5873 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5874 "setting free regions coming");
5875 }
5877 assert(!free_regions_coming(), "pre-condition");
5878 _free_regions_coming = true;
5879 }
5881 void G1CollectedHeap::reset_free_regions_coming() {
5882 assert(free_regions_coming(), "pre-condition");
5884 {
5885 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5886 _free_regions_coming = false;
5887 SecondaryFreeList_lock->notify_all();
5888 }
5890 if (G1ConcRegionFreeingVerbose) {
5891 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5892 "reset free regions coming");
5893 }
5894 }
5896 void G1CollectedHeap::wait_while_free_regions_coming() {
5897 // Most of the time we won't have to wait, so let's do a quick test
5898 // first before we take the lock.
5899 if (!free_regions_coming()) {
5900 return;
5901 }
5903 if (G1ConcRegionFreeingVerbose) {
5904 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5905 "waiting for free regions");
5906 }
5908 {
5909 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5910 while (free_regions_coming()) {
5911 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5912 }
5913 }
5915 if (G1ConcRegionFreeingVerbose) {
5916 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5917 "done waiting for free regions");
5918 }
5919 }
5921 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5922 assert(heap_lock_held_for_gc(),
5923 "the heap lock should already be held by or for this thread");
5924 _young_list->push_region(hr);
5925 }
5927 class NoYoungRegionsClosure: public HeapRegionClosure {
5928 private:
5929 bool _success;
5930 public:
5931 NoYoungRegionsClosure() : _success(true) { }
5932 bool doHeapRegion(HeapRegion* r) {
5933 if (r->is_young()) {
5934 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
5935 r->bottom(), r->end());
5936 _success = false;
5937 }
5938 return false;
5939 }
5940 bool success() { return _success; }
5941 };
5943 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5944 bool ret = _young_list->check_list_empty(check_sample);
5946 if (check_heap) {
5947 NoYoungRegionsClosure closure;
5948 heap_region_iterate(&closure);
5949 ret = ret && closure.success();
5950 }
5952 return ret;
5953 }
5955 class TearDownRegionSetsClosure : public HeapRegionClosure {
5956 private:
5957 OldRegionSet *_old_set;
5959 public:
5960 TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
5962 bool doHeapRegion(HeapRegion* r) {
5963 if (r->is_empty()) {
5964 // We ignore empty regions, we'll empty the free list afterwards
5965 } else if (r->is_young()) {
5966 // We ignore young regions, we'll empty the young list afterwards
5967 } else if (r->isHumongous()) {
5968 // We ignore humongous regions, we're not tearing down the
5969 // humongous region set
5970 } else {
5971 // The rest should be old
5972 _old_set->remove(r);
5973 }
5974 return false;
5975 }
5977 ~TearDownRegionSetsClosure() {
5978 assert(_old_set->is_empty(), "post-condition");
5979 }
5980 };
5982 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
5983 assert_at_safepoint(true /* should_be_vm_thread */);
5985 if (!free_list_only) {
5986 TearDownRegionSetsClosure cl(&_old_set);
5987 heap_region_iterate(&cl);
5989 // Need to do this after the heap iteration to be able to
5990 // recognize the young regions and ignore them during the iteration.
5991 _young_list->empty_list();
5992 }
5993 _free_list.remove_all();
5994 }
5996 class RebuildRegionSetsClosure : public HeapRegionClosure {
5997 private:
5998 bool _free_list_only;
5999 OldRegionSet* _old_set;
6000 FreeRegionList* _free_list;
6001 size_t _total_used;
6003 public:
6004 RebuildRegionSetsClosure(bool free_list_only,
6005 OldRegionSet* old_set, FreeRegionList* free_list) :
6006 _free_list_only(free_list_only),
6007 _old_set(old_set), _free_list(free_list), _total_used(0) {
6008 assert(_free_list->is_empty(), "pre-condition");
6009 if (!free_list_only) {
6010 assert(_old_set->is_empty(), "pre-condition");
6011 }
6012 }
6014 bool doHeapRegion(HeapRegion* r) {
6015 if (r->continuesHumongous()) {
6016 return false;
6017 }
6019 if (r->is_empty()) {
6020 // Add free regions to the free list
6021 _free_list->add_as_tail(r);
6022 } else if (!_free_list_only) {
6023 assert(!r->is_young(), "we should not come across young regions");
6025 if (r->isHumongous()) {
6026 // We ignore humongous regions, we left the humongous set unchanged
6027 } else {
6028 // The rest should be old, add them to the old set
6029 _old_set->add(r);
6030 }
6031 _total_used += r->used();
6032 }
6034 return false;
6035 }
6037 size_t total_used() {
6038 return _total_used;
6039 }
6040 };
6042 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6043 assert_at_safepoint(true /* should_be_vm_thread */);
6045 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6046 heap_region_iterate(&cl);
6048 if (!free_list_only) {
6049 _summary_bytes_used = cl.total_used();
6050 }
6051 assert(_summary_bytes_used == recalculate_used(),
6052 err_msg("inconsistent _summary_bytes_used, "
6053 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6054 _summary_bytes_used, recalculate_used()));
6055 }
6057 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6058 _refine_cte_cl->set_concurrent(concurrent);
6059 }
6061 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6062 HeapRegion* hr = heap_region_containing(p);
6063 if (hr == NULL) {
6064 return is_in_permanent(p);
6065 } else {
6066 return hr->is_in(p);
6067 }
6068 }
6070 // Methods for the mutator alloc region
6072 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6073 bool force) {
6074 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6075 assert(!force || g1_policy()->can_expand_young_list(),
6076 "if force is true we should be able to expand the young list");
6077 bool young_list_full = g1_policy()->is_young_list_full();
6078 if (force || !young_list_full) {
6079 HeapRegion* new_alloc_region = new_region(word_size,
6080 false /* do_expand */);
6081 if (new_alloc_region != NULL) {
6082 set_region_short_lived_locked(new_alloc_region);
6083 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6084 return new_alloc_region;
6085 }
6086 }
6087 return NULL;
6088 }
6090 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6091 size_t allocated_bytes) {
6092 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6093 assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6095 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6096 _summary_bytes_used += allocated_bytes;
6097 _hr_printer.retire(alloc_region);
6098 // We update the eden sizes here, when the region is retired,
6099 // instead of when it's allocated, since this is the point that its
6100 // used space has been recorded in _summary_bytes_used.
6101 g1mm()->update_eden_size();
6102 }
6104 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6105 bool force) {
6106 return _g1h->new_mutator_alloc_region(word_size, force);
6107 }
6109 void G1CollectedHeap::set_par_threads() {
6110 // Don't change the number of workers. Use the value previously set
6111 // in the workgroup.
6112 assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6113 uint n_workers = workers()->active_workers();
6114 assert(UseDynamicNumberOfGCThreads ||
6115 n_workers == workers()->total_workers(),
6116 "Otherwise should be using the total number of workers");
6117 if (n_workers == 0) {
6118 assert(false, "Should have been set in prior evacuation pause.");
6119 n_workers = ParallelGCThreads;
6120 workers()->set_active_workers(n_workers);
6121 }
6122 set_par_threads(n_workers);
6123 }
6125 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
6126 size_t allocated_bytes) {
6127 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
6128 }
6130 // Methods for the GC alloc regions
6132 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6133 uint count,
6134 GCAllocPurpose ap) {
6135 assert(FreeList_lock->owned_by_self(), "pre-condition");
6137 if (count < g1_policy()->max_regions(ap)) {
6138 HeapRegion* new_alloc_region = new_region(word_size,
6139 true /* do_expand */);
6140 if (new_alloc_region != NULL) {
6141 // We really only need to do this for old regions given that we
6142 // should never scan survivors. But it doesn't hurt to do it
6143 // for survivors too.
6144 new_alloc_region->set_saved_mark();
6145 if (ap == GCAllocForSurvived) {
6146 new_alloc_region->set_survivor();
6147 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6148 } else {
6149 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6150 }
6151 bool during_im = g1_policy()->during_initial_mark_pause();
6152 new_alloc_region->note_start_of_copying(during_im);
6153 return new_alloc_region;
6154 } else {
6155 g1_policy()->note_alloc_region_limit_reached(ap);
6156 }
6157 }
6158 return NULL;
6159 }
6161 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6162 size_t allocated_bytes,
6163 GCAllocPurpose ap) {
6164 bool during_im = g1_policy()->during_initial_mark_pause();
6165 alloc_region->note_end_of_copying(during_im);
6166 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6167 if (ap == GCAllocForSurvived) {
6168 young_list()->add_survivor_region(alloc_region);
6169 } else {
6170 _old_set.add(alloc_region);
6171 }
6172 _hr_printer.retire(alloc_region);
6173 }
6175 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
6176 bool force) {
6177 assert(!force, "not supported for GC alloc regions");
6178 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
6179 }
6181 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
6182 size_t allocated_bytes) {
6183 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6184 GCAllocForSurvived);
6185 }
6187 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
6188 bool force) {
6189 assert(!force, "not supported for GC alloc regions");
6190 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
6191 }
6193 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
6194 size_t allocated_bytes) {
6195 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6196 GCAllocForTenured);
6197 }
6198 // Heap region set verification
6200 class VerifyRegionListsClosure : public HeapRegionClosure {
6201 private:
6202 FreeRegionList* _free_list;
6203 OldRegionSet* _old_set;
6204 HumongousRegionSet* _humongous_set;
6205 uint _region_count;
6207 public:
6208 VerifyRegionListsClosure(OldRegionSet* old_set,
6209 HumongousRegionSet* humongous_set,
6210 FreeRegionList* free_list) :
6211 _old_set(old_set), _humongous_set(humongous_set),
6212 _free_list(free_list), _region_count(0) { }
6214 uint region_count() { return _region_count; }
6216 bool doHeapRegion(HeapRegion* hr) {
6217 _region_count += 1;
6219 if (hr->continuesHumongous()) {
6220 return false;
6221 }
6223 if (hr->is_young()) {
6224 // TODO
6225 } else if (hr->startsHumongous()) {
6226 _humongous_set->verify_next_region(hr);
6227 } else if (hr->is_empty()) {
6228 _free_list->verify_next_region(hr);
6229 } else {
6230 _old_set->verify_next_region(hr);
6231 }
6232 return false;
6233 }
6234 };
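// Classification used above: "continues humongous" regions are skipped
// (they are covered by their "starts humongous" region), young regions are
// currently not verified against a list (see the TODO), "starts humongous"
// regions are checked against the humongous set, empty regions against the
// free list, and everything else against the old set.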
6236 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6237 HeapWord* bottom) {
6238 HeapWord* end = bottom + HeapRegion::GrainWords;
6239 MemRegion mr(bottom, end);
6240 assert(_g1_reserved.contains(mr), "invariant");
6241 // This might return NULL if the allocation fails
6242 return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
6243 }
6245 void G1CollectedHeap::verify_region_sets() {
6246 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6248 // First, check the explicit lists.
6249 _free_list.verify();
6250 {
6251 // Given that a concurrent operation might be adding regions to
6252 // the secondary free list we have to take the lock before
6253 // verifying it.
6254 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6255 _secondary_free_list.verify();
6256 }
6257 _old_set.verify();
6258 _humongous_set.verify();
6260 // If a concurrent region freeing operation is in progress it will
6261 // be difficult to correctly attribute any free regions we come
6262 // across to the correct free list given that they might belong to
6263 // one of several (free_list, secondary_free_list, any local lists,
6264 // etc.). So, if that's the case we will skip the rest of the
6265 // verification operation. Alternatively, waiting for the concurrent
6266 // operation to complete will have a non-trivial effect on the GC's
6267 // operation (no concurrent operation will last longer than the
6268 // interval between two calls to verification) and it might hide
6269 // any issues that we would like to catch during testing.
6270 if (free_regions_coming()) {
6271 return;
6272 }
6274 // Make sure we append the secondary_free_list on the free_list so
6275 // that all free regions we will come across can be safely
6276 // attributed to the free_list.
6277 append_secondary_free_list_if_not_empty_with_lock();
6279 // Finally, make sure that the region accounting in the lists is
6280 // consistent with what we see in the heap.
6281 _old_set.verify_start();
6282 _humongous_set.verify_start();
6283 _free_list.verify_start();
6285 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6286 heap_region_iterate(&cl);
6288 _old_set.verify_end();
6289 _humongous_set.verify_end();
6290 _free_list.verify_end();
6291 }