Mon, 11 May 2009 16:30:56 -0700
6484957: G1: parallel concurrent refinement
6826318: G1: remove traversal-based refinement code
Summary: Removed traversal-based refinement code as it's no longer used. Made the concurrent refinement (queue-based) parallel.
Reviewed-by: tonyp
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_g1CollectedHeap.cpp.incl"
28 // Turn this on so that the contents of the young list (scan-only /
29 // to-be-collected) are printed at "strategic" points before / during /
30 // after the collection --- this is useful for debugging
31 #define SCAN_ONLY_VERBOSE 0
32 // CURRENT STATUS
33 // This file is under construction. Search for "FIXME".
35 // INVARIANTS/NOTES
36 //
37 // All allocation activity covered by the G1CollectedHeap interface is
38 // serialized by acquiring the HeapLock. This happens in
39 // mem_allocate_work, which all such allocation functions call.
40 // (Note that this does not apply to TLAB allocation, which is not part
41 // of this interface: it is done by clients of this interface.)
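// Illustrative sketch (not part of the original source) of the locking
// protocol described above; the real code is in mem_allocate() further down.
// The caller shape shown here is hypothetical:
//
//   Heap_lock->lock();
//   HeapWord* result = attempt_allocation(word_size);
//   // On success attempt_allocation() releases the Heap_lock itself;
//   // on failure the caller still owns the lock and decides what to do next.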
43 // Local to this file.
45 class RefineCardTableEntryClosure: public CardTableEntryClosure {
46 SuspendibleThreadSet* _sts;
47 G1RemSet* _g1rs;
48 ConcurrentG1Refine* _cg1r;
49 bool _concurrent;
50 public:
51 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
52 G1RemSet* g1rs,
53 ConcurrentG1Refine* cg1r) :
54 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
55 {}
56 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
57 _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
58 if (_concurrent && _sts->should_yield()) {
59 // Caller will actually yield.
60 return false;
61 }
62 // Otherwise, we finished successfully; return true.
63 return true;
64 }
65 void set_concurrent(bool b) { _concurrent = b; }
66 };
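// Sketch only (not part of this change): one way a refinement worker could
// drive the closure above. The helper name refine_worker_loop_sketch is
// hypothetical; the real driver lives in the concurrent refinement threads.
static void refine_worker_loop_sketch(DirtyCardQueueSet& dcqs,
                                      RefineCardTableEntryClosure* cl,
                                      int worker_i) {
  dcqs.set_closure(cl);
  // Each parallel worker drains completed buffers using its own worker index;
  // do_card_ptr() returning false asks the caller to yield for a safepoint.
  while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
  }
}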
69 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
70 int _calls;
71 G1CollectedHeap* _g1h;
72 CardTableModRefBS* _ctbs;
73 int _histo[256];
74 public:
75 ClearLoggedCardTableEntryClosure() :
76 _calls(0)
77 {
78 _g1h = G1CollectedHeap::heap();
79 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
80 for (int i = 0; i < 256; i++) _histo[i] = 0;
81 }
82 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
83 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
84 _calls++;
85 unsigned char* ujb = (unsigned char*)card_ptr;
86 int ind = (int)(*ujb);
87 _histo[ind]++;
88 *card_ptr = -1;
89 }
90 return true;
91 }
92 int calls() { return _calls; }
93 void print_histo() {
94 gclog_or_tty->print_cr("Card table value histogram:");
95 for (int i = 0; i < 256; i++) {
96 if (_histo[i] != 0) {
97 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
98 }
99 }
100 }
101 };
103 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
104 int _calls;
105 G1CollectedHeap* _g1h;
106 CardTableModRefBS* _ctbs;
107 public:
108 RedirtyLoggedCardTableEntryClosure() :
109 _calls(0)
110 {
111 _g1h = G1CollectedHeap::heap();
112 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
113 }
114 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
115 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
116 _calls++;
117 *card_ptr = 0;
118 }
119 return true;
120 }
121 int calls() { return _calls; }
122 };
124 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
125 public:
126 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
127 *card_ptr = CardTableModRefBS::dirty_card_val();
128 return true;
129 }
130 };
132 YoungList::YoungList(G1CollectedHeap* g1h)
133 : _g1h(g1h), _head(NULL),
134 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
135 _length(0), _scan_only_length(0),
136 _last_sampled_rs_lengths(0),
137 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
138 {
139 guarantee( check_list_empty(false), "just making sure..." );
140 }
142 void YoungList::push_region(HeapRegion *hr) {
143 assert(!hr->is_young(), "should not already be young");
144 assert(hr->get_next_young_region() == NULL, "cause it should!");
146 hr->set_next_young_region(_head);
147 _head = hr;
149 hr->set_young();
150 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
151 ++_length;
152 }
154 void YoungList::add_survivor_region(HeapRegion* hr) {
155 assert(hr->is_survivor(), "should be flagged as survivor region");
156 assert(hr->get_next_young_region() == NULL, "cause it should!");
158 hr->set_next_young_region(_survivor_head);
159 if (_survivor_head == NULL) {
160 _survivor_tail = hr;
161 }
162 _survivor_head = hr;
164 ++_survivor_length;
165 }
167 HeapRegion* YoungList::pop_region() {
168 while (_head != NULL) {
169 assert( length() > 0, "list should not be empty" );
170 HeapRegion* ret = _head;
171 _head = ret->get_next_young_region();
172 ret->set_next_young_region(NULL);
173 --_length;
174 assert(ret->is_young(), "region should be very young");
176 // Replace 'Survivor' region type with 'Young'. So the region will
177 // be treated as a young region and will not be 'confused' with
178 // newly created survivor regions.
179 if (ret->is_survivor()) {
180 ret->set_young();
181 }
183 if (!ret->is_scan_only()) {
184 return ret;
185 }
187 // scan-only, we'll add it to the scan-only list
188 if (_scan_only_tail == NULL) {
189 guarantee( _scan_only_head == NULL, "invariant" );
191 _scan_only_head = ret;
192 _curr_scan_only = ret;
193 } else {
194 guarantee( _scan_only_head != NULL, "invariant" );
195 _scan_only_tail->set_next_young_region(ret);
196 }
197 guarantee( ret->get_next_young_region() == NULL, "invariant" );
198 _scan_only_tail = ret;
200 // no need to be tagged as scan-only any more
201 ret->set_young();
203 ++_scan_only_length;
204 }
205 assert( length() == 0, "list should be empty" );
206 return NULL;
207 }
209 void YoungList::empty_list(HeapRegion* list) {
210 while (list != NULL) {
211 HeapRegion* next = list->get_next_young_region();
212 list->set_next_young_region(NULL);
213 list->uninstall_surv_rate_group();
214 list->set_not_young();
215 list = next;
216 }
217 }
219 void YoungList::empty_list() {
220 assert(check_list_well_formed(), "young list should be well formed");
222 empty_list(_head);
223 _head = NULL;
224 _length = 0;
226 empty_list(_scan_only_head);
227 _scan_only_head = NULL;
228 _scan_only_tail = NULL;
229 _scan_only_length = 0;
230 _curr_scan_only = NULL;
232 empty_list(_survivor_head);
233 _survivor_head = NULL;
234 _survivor_tail = NULL;
235 _survivor_length = 0;
237 _last_sampled_rs_lengths = 0;
239 assert(check_list_empty(false), "just making sure...");
240 }
242 bool YoungList::check_list_well_formed() {
243 bool ret = true;
245 size_t length = 0;
246 HeapRegion* curr = _head;
247 HeapRegion* last = NULL;
248 while (curr != NULL) {
249 if (!curr->is_young() || curr->is_scan_only()) {
250 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
251 "incorrectly tagged (%d, %d)",
252 curr->bottom(), curr->end(),
253 curr->is_young(), curr->is_scan_only());
254 ret = false;
255 }
256 ++length;
257 last = curr;
258 curr = curr->get_next_young_region();
259 }
260 ret = ret && (length == _length);
262 if (!ret) {
263 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
264 gclog_or_tty->print_cr("### list has %d entries, _length is %d",
265 length, _length);
266 }
268 bool scan_only_ret = true;
269 length = 0;
270 curr = _scan_only_head;
271 last = NULL;
272 while (curr != NULL) {
273 if (!curr->is_young() || curr->is_scan_only()) {
274 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
275 "incorrectly tagged (%d, %d)",
276 curr->bottom(), curr->end(),
277 curr->is_young(), curr->is_scan_only());
278 scan_only_ret = false;
279 }
280 ++length;
281 last = curr;
282 curr = curr->get_next_young_region();
283 }
284 scan_only_ret = scan_only_ret && (length == _scan_only_length);
286 if ( (last != _scan_only_tail) ||
287 (_scan_only_head == NULL && _scan_only_tail != NULL) ||
288 (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
289 gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
290 scan_only_ret = false;
291 }
293 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
294 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
295 scan_only_ret = false;
296 }
298 if (!scan_only_ret) {
299 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
300 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
301 length, _scan_only_length);
302 }
304 return ret && scan_only_ret;
305 }
307 bool YoungList::check_list_empty(bool ignore_scan_only_list,
308 bool check_sample) {
309 bool ret = true;
311 if (_length != 0) {
312 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
313 _length);
314 ret = false;
315 }
316 if (check_sample && _last_sampled_rs_lengths != 0) {
317 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
318 ret = false;
319 }
320 if (_head != NULL) {
321 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
322 ret = false;
323 }
324 if (!ret) {
325 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
326 }
328 if (ignore_scan_only_list)
329 return ret;
331 bool scan_only_ret = true;
332 if (_scan_only_length != 0) {
333 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
334 _scan_only_length);
335 scan_only_ret = false;
336 }
337 if (_scan_only_head != NULL) {
338 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
339 scan_only_ret = false;
340 }
341 if (_scan_only_tail != NULL) {
342 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
343 scan_only_ret = false;
344 }
345 if (!scan_only_ret) {
346 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
347 }
349 return ret && scan_only_ret;
350 }
352 void
353 YoungList::rs_length_sampling_init() {
354 _sampled_rs_lengths = 0;
355 _curr = _head;
356 }
358 bool
359 YoungList::rs_length_sampling_more() {
360 return _curr != NULL;
361 }
363 void
364 YoungList::rs_length_sampling_next() {
365 assert( _curr != NULL, "invariant" );
366 _sampled_rs_lengths += _curr->rem_set()->occupied();
367 _curr = _curr->get_next_young_region();
368 if (_curr == NULL) {
369 _last_sampled_rs_lengths = _sampled_rs_lengths;
370 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
371 }
372 }
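// Usage sketch (hypothetical helper, not part of this file): the three
// methods above are intended to be driven as an init / more / next loop,
// e.g. by the concurrent remembered-set-length sampling code.
static void sample_rs_lengths_sketch(YoungList* young_list) {
  young_list->rs_length_sampling_init();
  while (young_list->rs_length_sampling_more()) {
    // Accumulates the remembered-set occupancy of each young region; the
    // final call records the total in _last_sampled_rs_lengths.
    young_list->rs_length_sampling_next();
  }
}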
374 void
375 YoungList::reset_auxilary_lists() {
376 // We could have just "moved" the scan-only list to the young list.
377 // However, the scan-only list is ordered according to the region
378 // age in descending order, so, by moving one entry at a time, we
379 // ensure that it is recreated in ascending order.
381 guarantee( is_empty(), "young list should be empty" );
382 assert(check_list_well_formed(), "young list should be well formed");
384 // Add survivor regions to SurvRateGroup.
385 _g1h->g1_policy()->note_start_adding_survivor_regions();
386 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
387 for (HeapRegion* curr = _survivor_head;
388 curr != NULL;
389 curr = curr->get_next_young_region()) {
390 _g1h->g1_policy()->set_region_survivors(curr);
391 }
392 _g1h->g1_policy()->note_stop_adding_survivor_regions();
394 if (_survivor_head != NULL) {
395 _head = _survivor_head;
396 _length = _survivor_length + _scan_only_length;
397 _survivor_tail->set_next_young_region(_scan_only_head);
398 } else {
399 _head = _scan_only_head;
400 _length = _scan_only_length;
401 }
403 for (HeapRegion* curr = _scan_only_head;
404 curr != NULL;
405 curr = curr->get_next_young_region()) {
406 curr->recalculate_age_in_surv_rate_group();
407 }
408 _scan_only_head = NULL;
409 _scan_only_tail = NULL;
410 _scan_only_length = 0;
411 _curr_scan_only = NULL;
413 _survivor_head = NULL;
414 _survivor_tail = NULL;
415 _survivor_length = 0;
416 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
418 assert(check_list_well_formed(), "young list should be well formed");
419 }
421 void YoungList::print() {
422 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head};
423 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"};
425 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
426 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
427 HeapRegion *curr = lists[list];
428 if (curr == NULL)
429 gclog_or_tty->print_cr(" empty");
430 while (curr != NULL) {
431 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
432 "age: %4d, y: %d, s-o: %d, surv: %d",
433 curr->bottom(), curr->end(),
434 curr->top(),
435 curr->prev_top_at_mark_start(),
436 curr->next_top_at_mark_start(),
437 curr->top_at_conc_mark_count(),
438 curr->age_in_surv_rate_group_cond(),
439 curr->is_young(),
440 curr->is_scan_only(),
441 curr->is_survivor());
442 curr = curr->get_next_young_region();
443 }
444 }
446 gclog_or_tty->print_cr("");
447 }
449 void G1CollectedHeap::stop_conc_gc_threads() {
450 _cg1r->stop();
451 _czft->stop();
452 _cmThread->stop();
453 }
456 void G1CollectedHeap::check_ct_logs_at_safepoint() {
457 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
458 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
460 // Count the dirty cards at the start.
461 CountNonCleanMemRegionClosure count1(this);
462 ct_bs->mod_card_iterate(&count1);
463 int orig_count = count1.n();
465 // First clear the logged cards.
466 ClearLoggedCardTableEntryClosure clear;
467 dcqs.set_closure(&clear);
468 dcqs.apply_closure_to_all_completed_buffers();
469 dcqs.iterate_closure_all_threads(false);
470 clear.print_histo();
472 // Now ensure that there are no dirty cards.
473 CountNonCleanMemRegionClosure count2(this);
474 ct_bs->mod_card_iterate(&count2);
475 if (count2.n() != 0) {
476 gclog_or_tty->print_cr("Card table has %d entries; %d originally",
477 count2.n(), orig_count);
478 }
479 guarantee(count2.n() == 0, "Card table should be clean.");
481 RedirtyLoggedCardTableEntryClosure redirty;
482 JavaThread::dirty_card_queue_set().set_closure(&redirty);
483 dcqs.apply_closure_to_all_completed_buffers();
484 dcqs.iterate_closure_all_threads(false);
485 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
486 clear.calls(), orig_count);
487 guarantee(redirty.calls() == clear.calls(),
488 "Or else mechanism is broken.");
490 CountNonCleanMemRegionClosure count3(this);
491 ct_bs->mod_card_iterate(&count3);
492 if (count3.n() != orig_count) {
493 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
494 orig_count, count3.n());
495 guarantee(count3.n() >= orig_count, "Should have restored them all.");
496 }
498 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
499 }
501 // Private class members.
503 G1CollectedHeap* G1CollectedHeap::_g1h;
505 // Private methods.
507 // Finds a HeapRegion that can be used to allocate a given size of block.
510 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
511 bool do_expand,
512 bool zero_filled) {
513 ConcurrentZFThread::note_region_alloc();
514 HeapRegion* res = alloc_free_region_from_lists(zero_filled);
515 if (res == NULL && do_expand) {
516 expand(word_size * HeapWordSize);
517 res = alloc_free_region_from_lists(zero_filled);
518 assert(res == NULL ||
519 (!res->isHumongous() &&
520 (!zero_filled ||
521 res->zero_fill_state() == HeapRegion::Allocated)),
522 "Alloc Regions must be zero filled (and non-H)");
523 }
524 if (res != NULL && res->is_empty()) _free_regions--;
525 assert(res == NULL ||
526 (!res->isHumongous() &&
527 (!zero_filled ||
528 res->zero_fill_state() == HeapRegion::Allocated)),
529 "Non-young alloc Regions must be zero filled (and non-H)");
531 if (G1PrintRegions) {
532 if (res != NULL) {
533 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
534 "top "PTR_FORMAT,
535 res->hrs_index(), res->bottom(), res->end(), res->top());
536 }
537 }
539 return res;
540 }
542 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
543 size_t word_size,
544 bool zero_filled) {
545 HeapRegion* alloc_region = NULL;
546 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
547 alloc_region = newAllocRegion_work(word_size, true, zero_filled);
548 if (purpose == GCAllocForSurvived && alloc_region != NULL) {
549 alloc_region->set_survivor();
550 }
551 ++_gc_alloc_region_counts[purpose];
552 } else {
553 g1_policy()->note_alloc_region_limit_reached(purpose);
554 }
555 return alloc_region;
556 }
558 // If the allocation could fit into the free regions without expansion, try that.
559 // Otherwise, if the heap can expand, do so.
560 // Otherwise, if giving back the reserved ("ex") regions might help, try with them given back.
561 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
562 assert(regions_accounted_for(), "Region leakage!");
564 // We can't allocate H regions while cleanupComplete is running, since
565 // some of the regions we find to be empty might not yet be added to the
566 // unclean list. (If we're already at a safepoint, this call is
567 // unnecessary, not to mention wrong.)
568 if (!SafepointSynchronize::is_at_safepoint())
569 wait_for_cleanup_complete();
571 size_t num_regions =
572 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
574 // Special case if < one region???
576 // Remember the ft size.
577 size_t x_size = expansion_regions();
579 HeapWord* res = NULL;
580 bool eliminated_allocated_from_lists = false;
582 // Can the allocation potentially fit in the free regions?
583 if (free_regions() >= num_regions) {
584 res = _hrs->obj_allocate(word_size);
585 }
586 if (res == NULL) {
587 // Try expansion.
588 size_t fs = _hrs->free_suffix();
589 if (fs + x_size >= num_regions) {
590 expand((num_regions - fs) * HeapRegion::GrainBytes);
591 res = _hrs->obj_allocate(word_size);
592 assert(res != NULL, "This should have worked.");
593 } else {
594 // Expansion won't help. Are there enough free regions if we get rid
595 // of reservations?
596 size_t avail = free_regions();
597 if (avail >= num_regions) {
598 res = _hrs->obj_allocate(word_size);
599 if (res != NULL) {
600 remove_allocated_regions_from_lists();
601 eliminated_allocated_from_lists = true;
602 }
603 }
604 }
605 }
606 if (res != NULL) {
607 // Increment by the number of regions allocated.
608 // FIXME: Assumes regions all of size GrainBytes.
609 #ifndef PRODUCT
610 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
611 HeapRegion::GrainWords));
612 #endif
613 if (!eliminated_allocated_from_lists)
614 remove_allocated_regions_from_lists();
615 _summary_bytes_used += word_size * HeapWordSize;
616 _free_regions -= num_regions;
617 _num_humongous_regions += (int) num_regions;
618 }
619 assert(regions_accounted_for(), "Region Leakage");
620 return res;
621 }
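// Illustrative arithmetic (sketch, not part of the original change): a
// humongous allocation of word_size words needs ceil(word_size / GrainWords)
// regions, which humongousObjAllocate() above computes as
// round_to(word_size, GrainWords) / GrainWords. The helper name is hypothetical.
static size_t humongous_region_count_sketch(size_t word_size) {
  return round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}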
623 HeapWord*
624 G1CollectedHeap::attempt_allocation_slow(size_t word_size,
625 bool permit_collection_pause) {
626 HeapWord* res = NULL;
627 HeapRegion* allocated_young_region = NULL;
629 assert( SafepointSynchronize::is_at_safepoint() ||
630 Heap_lock->owned_by_self(), "pre condition of the call" );
632 if (isHumongous(word_size)) {
633 // Allocation of a humongous object can, in a sense, complete a
634 // partial region, if the previous alloc was also humongous, and
635 // caused the test below to succeed.
636 if (permit_collection_pause)
637 do_collection_pause_if_appropriate(word_size);
638 res = humongousObjAllocate(word_size);
639 assert(_cur_alloc_region == NULL
640 || !_cur_alloc_region->isHumongous(),
641 "Prevent a regression of this bug.");
643 } else {
644 // We may have concurrent cleanup working at the time. Wait for it
645 // to complete. In the future we would probably want to make the
646 // concurrent cleanup truly concurrent by decoupling it from the
647 // allocation.
648 if (!SafepointSynchronize::is_at_safepoint())
649 wait_for_cleanup_complete();
650 // If we do a collection pause, this will be reset to a non-NULL
651 // value. If we don't, nulling here ensures that we allocate a new
652 // region below.
653 if (_cur_alloc_region != NULL) {
654 // We're finished with the _cur_alloc_region.
655 _summary_bytes_used += _cur_alloc_region->used();
656 _cur_alloc_region = NULL;
657 }
658 assert(_cur_alloc_region == NULL, "Invariant.");
659 // Completion of a heap region is perhaps a good point at which to do
660 // a collection pause.
661 if (permit_collection_pause)
662 do_collection_pause_if_appropriate(word_size);
663 // Make sure we have an allocation region available.
664 if (_cur_alloc_region == NULL) {
665 if (!SafepointSynchronize::is_at_safepoint())
666 wait_for_cleanup_complete();
667 bool next_is_young = should_set_young_locked();
668 // If the next region is not young, make sure it's zero-filled.
669 _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
670 if (_cur_alloc_region != NULL) {
671 _summary_bytes_used -= _cur_alloc_region->used();
672 if (next_is_young) {
673 set_region_short_lived_locked(_cur_alloc_region);
674 allocated_young_region = _cur_alloc_region;
675 }
676 }
677 }
678 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
679 "Prevent a regression of this bug.");
681 // Now retry the allocation.
682 if (_cur_alloc_region != NULL) {
683 res = _cur_alloc_region->allocate(word_size);
684 }
685 }
687 // NOTE: fails frequently in PRT
688 assert(regions_accounted_for(), "Region leakage!");
690 if (res != NULL) {
691 if (!SafepointSynchronize::is_at_safepoint()) {
692 assert( permit_collection_pause, "invariant" );
693 assert( Heap_lock->owned_by_self(), "invariant" );
694 Heap_lock->unlock();
695 }
697 if (allocated_young_region != NULL) {
698 HeapRegion* hr = allocated_young_region;
699 HeapWord* bottom = hr->bottom();
700 HeapWord* end = hr->end();
701 MemRegion mr(bottom, end);
702 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
703 }
704 }
706 assert( SafepointSynchronize::is_at_safepoint() ||
707 (res == NULL && Heap_lock->owned_by_self()) ||
708 (res != NULL && !Heap_lock->owned_by_self()),
709 "post condition of the call" );
711 return res;
712 }
714 HeapWord*
715 G1CollectedHeap::mem_allocate(size_t word_size,
716 bool is_noref,
717 bool is_tlab,
718 bool* gc_overhead_limit_was_exceeded) {
719 debug_only(check_for_valid_allocation_state());
720 assert(no_gc_in_progress(), "Allocation during gc not allowed");
721 HeapWord* result = NULL;
723 // Loop until the allocation is satisfied,
724 // or remains unsatisfied after GC.
725 for (int try_count = 1; /* return or throw */; try_count += 1) {
726 int gc_count_before;
727 {
728 Heap_lock->lock();
729 result = attempt_allocation(word_size);
730 if (result != NULL) {
731 // attempt_allocation should have unlocked the heap lock
732 assert(is_in(result), "result not in heap");
733 return result;
734 }
735 // Read the gc count while the heap lock is held.
736 gc_count_before = SharedHeap::heap()->total_collections();
737 Heap_lock->unlock();
738 }
740 // Create the garbage collection operation...
741 VM_G1CollectForAllocation op(word_size,
742 gc_count_before);
744 // ...and get the VM thread to execute it.
745 VMThread::execute(&op);
746 if (op.prologue_succeeded()) {
747 result = op.result();
748 assert(result == NULL || is_in(result), "result not in heap");
749 return result;
750 }
752 // Give a warning if we seem to be looping forever.
753 if ((QueuedAllocationWarningCount > 0) &&
754 (try_count % QueuedAllocationWarningCount == 0)) {
755 warning("G1CollectedHeap::mem_allocate_work retries %d times",
756 try_count);
757 }
758 }
759 }
761 void G1CollectedHeap::abandon_cur_alloc_region() {
762 if (_cur_alloc_region != NULL) {
763 // We're finished with the _cur_alloc_region.
764 if (_cur_alloc_region->is_empty()) {
765 _free_regions++;
766 free_region(_cur_alloc_region);
767 } else {
768 _summary_bytes_used += _cur_alloc_region->used();
769 }
770 _cur_alloc_region = NULL;
771 }
772 }
774 void G1CollectedHeap::abandon_gc_alloc_regions() {
775 // first, make sure that the GC alloc region list is empty (it should!)
776 assert(_gc_alloc_region_list == NULL, "invariant");
777 release_gc_alloc_regions(true /* totally */);
778 }
780 class PostMCRemSetClearClosure: public HeapRegionClosure {
781 ModRefBarrierSet* _mr_bs;
782 public:
783 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
784 bool doHeapRegion(HeapRegion* r) {
785 r->reset_gc_time_stamp();
786 if (r->continuesHumongous())
787 return false;
788 HeapRegionRemSet* hrrs = r->rem_set();
789 if (hrrs != NULL) hrrs->clear();
790 // You might think here that we could clear just the cards
791 // corresponding to the used region. But no: if we leave a dirty card
792 // in a region we might allocate into, then it would prevent that card
793 // from being enqueued, and cause it to be missed.
794 // Re: the performance cost: we shouldn't be doing full GC anyway!
795 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
796 return false;
797 }
798 };
801 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
802 ModRefBarrierSet* _mr_bs;
803 public:
804 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
805 bool doHeapRegion(HeapRegion* r) {
806 if (r->continuesHumongous()) return false;
807 if (r->used_region().word_size() != 0) {
808 _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
809 }
810 return false;
811 }
812 };
814 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
815 G1CollectedHeap* _g1h;
816 UpdateRSOopClosure _cl;
817 int _worker_i;
818 public:
819 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
820 _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
821 _worker_i(worker_i),
822 _g1h(g1)
823 { }
824 bool doHeapRegion(HeapRegion* r) {
825 if (!r->continuesHumongous()) {
826 _cl.set_from(r);
827 r->oop_iterate(&_cl);
828 }
829 return false;
830 }
831 };
833 class ParRebuildRSTask: public AbstractGangTask {
834 G1CollectedHeap* _g1;
835 public:
836 ParRebuildRSTask(G1CollectedHeap* g1)
837 : AbstractGangTask("ParRebuildRSTask"),
838 _g1(g1)
839 { }
841 void work(int i) {
842 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
843 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
844 HeapRegion::RebuildRSClaimValue);
845 }
846 };
848 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
849 size_t word_size) {
850 ResourceMark rm;
852 if (full && DisableExplicitGC) {
853 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
854 return;
855 }
857 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
858 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
860 if (GC_locker::is_active()) {
861 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
862 }
864 {
865 IsGCActiveMark x;
867 // Timing
868 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
869 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
870 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
872 double start = os::elapsedTime();
873 GCOverheadReporter::recordSTWStart(start);
874 g1_policy()->record_full_collection_start();
876 gc_prologue(true);
877 increment_total_collections();
879 size_t g1h_prev_used = used();
880 assert(used() == recalculate_used(), "Should be equal");
882 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
883 HandleMark hm; // Discard invalid handles created during verification
884 prepare_for_verify();
885 gclog_or_tty->print(" VerifyBeforeGC:");
886 Universe::verify(true);
887 }
888 assert(regions_accounted_for(), "Region leakage!");
890 COMPILER2_PRESENT(DerivedPointerTable::clear());
892 // We want to discover references, but not process them yet.
893 // This mode is disabled in
894 // instanceRefKlass::process_discovered_references if the
895 // generation does some collection work, or
896 // instanceRefKlass::enqueue_discovered_references if the
897 // generation returns without doing any work.
898 ref_processor()->disable_discovery();
899 ref_processor()->abandon_partial_discovery();
900 ref_processor()->verify_no_references_recorded();
902 // Abandon current iterations of concurrent marking and concurrent
903 // refinement, if any are in progress.
904 concurrent_mark()->abort();
906 // Make sure we'll choose a new allocation region afterwards.
907 abandon_cur_alloc_region();
908 abandon_gc_alloc_regions();
909 assert(_cur_alloc_region == NULL, "Invariant.");
910 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
911 tear_down_region_lists();
912 set_used_regions_to_need_zero_fill();
913 if (g1_policy()->in_young_gc_mode()) {
914 empty_young_list();
915 g1_policy()->set_full_young_gcs(true);
916 }
918 // Temporarily make reference _discovery_ single threaded (non-MT).
919 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
921 // Temporarily make refs discovery atomic
922 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
924 // Temporarily clear _is_alive_non_header
925 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
927 ref_processor()->enable_discovery();
928 ref_processor()->setup_policy(clear_all_soft_refs);
930 // Do collection work
931 {
932 HandleMark hm; // Discard invalid handles created during gc
933 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
934 }
935 // Because freeing humongous regions may have added some unclean
936 // regions, it is necessary to tear down again before rebuilding.
937 tear_down_region_lists();
938 rebuild_region_lists();
940 _summary_bytes_used = recalculate_used();
942 ref_processor()->enqueue_discovered_references();
944 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
946 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
947 HandleMark hm; // Discard invalid handles created during verification
948 gclog_or_tty->print(" VerifyAfterGC:");
949 prepare_for_verify();
950 Universe::verify(false);
951 }
952 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
954 reset_gc_time_stamp();
955 // Since everything potentially moved, we will clear all remembered
956 // sets, and clear all cards. Later we will rebuild remembered
957 // sets. We will also reset the GC time stamps of the regions.
958 PostMCRemSetClearClosure rs_clear(mr_bs());
959 heap_region_iterate(&rs_clear);
961 // Resize the heap if necessary.
962 resize_if_necessary_after_full_collection(full ? 0 : word_size);
964 if (_cg1r->use_cache()) {
965 _cg1r->clear_and_record_card_counts();
966 _cg1r->clear_hot_cache();
967 }
969 // Rebuild remembered sets of all regions.
970 if (ParallelGCThreads > 0) {
971 ParRebuildRSTask rebuild_rs_task(this);
972 assert(check_heap_region_claim_values(
973 HeapRegion::InitialClaimValue), "sanity check");
974 set_par_threads(workers()->total_workers());
975 workers()->run_task(&rebuild_rs_task);
976 set_par_threads(0);
977 assert(check_heap_region_claim_values(
978 HeapRegion::RebuildRSClaimValue), "sanity check");
979 reset_heap_region_claim_values();
980 } else {
981 RebuildRSOutOfRegionClosure rebuild_rs(this);
982 heap_region_iterate(&rebuild_rs);
983 }
985 if (PrintGC) {
986 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
987 }
989 if (true) { // FIXME
990 // Ask the permanent generation to adjust size for full collections
991 perm()->compute_new_size();
992 }
994 double end = os::elapsedTime();
995 GCOverheadReporter::recordSTWEnd(end);
996 g1_policy()->record_full_collection_end();
998 #ifdef TRACESPINNING
999 ParallelTaskTerminator::print_termination_counts();
1000 #endif
1002 gc_epilogue(true);
1004 // Discard all rset updates
1005 JavaThread::dirty_card_queue_set().abandon_logs();
1006 assert(!G1DeferredRSUpdate
1007 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1008 assert(regions_accounted_for(), "Region leakage!");
1009 }
1011 if (g1_policy()->in_young_gc_mode()) {
1012 _young_list->reset_sampled_info();
1013 assert( check_young_list_empty(false, false),
1014 "young list should be empty at this point");
1015 }
1016 }
1018 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1019 do_collection(true, clear_all_soft_refs, 0);
1020 }
1022 // This code is mostly copied from TenuredGeneration.
1023 void
1024 G1CollectedHeap::
1025 resize_if_necessary_after_full_collection(size_t word_size) {
1026 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1028 // Include the current allocation, if any, and bytes that will be
1029 // pre-allocated to support collections, as "used".
1030 const size_t used_after_gc = used();
1031 const size_t capacity_after_gc = capacity();
1032 const size_t free_after_gc = capacity_after_gc - used_after_gc;
1034 // We don't have floating point command-line arguments
1035 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
1036 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1037 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
1038 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1040 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
1041 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
1043 // Don't shrink less than the initial size.
1044 minimum_desired_capacity =
1045 MAX2(minimum_desired_capacity,
1046 collector_policy()->initial_heap_byte_size());
1047 maximum_desired_capacity =
1048 MAX2(maximum_desired_capacity,
1049 collector_policy()->initial_heap_byte_size());
1051 // This cannot fail: minimum_desired_capacity is at least used_after_gc / maximum_used_percentage, and that percentage is at most 1.0.
1052 assert(used_after_gc <= minimum_desired_capacity, "sanity check");
1053 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");
1055 if (PrintGC && Verbose) {
1056 const double free_percentage = ((double)free_after_gc) / capacity();
1057 gclog_or_tty->print_cr("Computing new size after full GC ");
1058 gclog_or_tty->print_cr(" "
1059 " minimum_free_percentage: %6.2f",
1060 minimum_free_percentage);
1061 gclog_or_tty->print_cr(" "
1062 " maximum_free_percentage: %6.2f",
1063 maximum_free_percentage);
1064 gclog_or_tty->print_cr(" "
1065 " capacity: %6.1fK"
1066 " minimum_desired_capacity: %6.1fK"
1067 " maximum_desired_capacity: %6.1fK",
1068 capacity() / (double) K,
1069 minimum_desired_capacity / (double) K,
1070 maximum_desired_capacity / (double) K);
1071 gclog_or_tty->print_cr(" "
1072 " free_after_gc : %6.1fK"
1073 " used_after_gc : %6.1fK",
1074 free_after_gc / (double) K,
1075 used_after_gc / (double) K);
1076 gclog_or_tty->print_cr(" "
1077 " free_percentage: %6.2f",
1078 free_percentage);
1079 }
1080 if (capacity() < minimum_desired_capacity) {
1081 // Don't expand unless it's significant
1082 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1083 expand(expand_bytes);
1084 if (PrintGC && Verbose) {
1085 gclog_or_tty->print_cr(" expanding:"
1086 " minimum_desired_capacity: %6.1fK"
1087 " expand_bytes: %6.1fK",
1088 minimum_desired_capacity / (double) K,
1089 expand_bytes / (double) K);
1090 }
1092 // No expansion, now see if we want to shrink
1093 } else if (capacity() > maximum_desired_capacity) {
1094 // Capacity too large, compute shrinking size
1095 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1096 shrink(shrink_bytes);
1097 if (PrintGC && Verbose) {
1098 gclog_or_tty->print_cr(" "
1099 " shrinking:"
1100 " initSize: %.1fK"
1101 " maximum_desired_capacity: %.1fK",
1102 collector_policy()->initial_heap_byte_size() / (double) K,
1103 maximum_desired_capacity / (double) K);
1104 gclog_or_tty->print_cr(" "
1105 " shrink_bytes: %.1fK",
1106 shrink_bytes / (double) K);
1107 }
1108 }
1109 }
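// Worked example (illustrative, not from the source): with MinHeapFreeRatio=40
// and MaxHeapFreeRatio=70, maximum_used_percentage is 0.60 and
// minimum_used_percentage is 0.30; a heap with 300M used after a full GC then
// gets minimum_desired_capacity = 300M / 0.60 = 500M and
// maximum_desired_capacity = 300M / 0.30 = 1000M (before clamping to the
// initial heap size), and the heap is expanded or shrunk into that band.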
1112 HeapWord*
1113 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
1114 HeapWord* result = NULL;
1116 // In a G1 heap, we're supposed to keep allocation from failing by
1117 // incremental pauses. Therefore, at least for now, we'll favor
1118 // expansion over collection. (This might change in the future if we can
1119 // do something smarter than full collection to satisfy a failed alloc.)
1121 result = expand_and_allocate(word_size);
1122 if (result != NULL) {
1123 assert(is_in(result), "result not in heap");
1124 return result;
1125 }
1127 // OK, I guess we have to try collection.
1129 do_collection(false, false, word_size);
1131 result = attempt_allocation(word_size, /*permit_collection_pause*/false);
1133 if (result != NULL) {
1134 assert(is_in(result), "result not in heap");
1135 return result;
1136 }
1138 // Try collecting soft references.
1139 do_collection(false, true, word_size);
1140 result = attempt_allocation(word_size, /*permit_collection_pause*/false);
1141 if (result != NULL) {
1142 assert(is_in(result), "result not in heap");
1143 return result;
1144 }
1146 // What else? We might try synchronous finalization later. If the total
1147 // space available is large enough for the allocation, then a more
1148 // complete compaction phase than we've tried so far might be
1149 // appropriate.
1150 return NULL;
1151 }
1153 // Attempts to expand the heap sufficiently to support an allocation of
1154 // the given "word_size". If successful, performs the allocation and
1155 // returns the address of the allocated block; otherwise returns
1156 // "NULL".
1158 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1159 size_t expand_bytes = word_size * HeapWordSize;
1160 if (expand_bytes < MinHeapDeltaBytes) {
1161 expand_bytes = MinHeapDeltaBytes;
1162 }
1163 expand(expand_bytes);
1164 assert(regions_accounted_for(), "Region leakage!");
1165 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
1166 return result;
1167 }
1169 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
1170 size_t pre_used = 0;
1171 size_t cleared_h_regions = 0;
1172 size_t freed_regions = 0;
1173 UncleanRegionList local_list;
1174 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
1175 freed_regions, &local_list);
1177 finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
1178 &local_list);
1179 return pre_used;
1180 }
1182 void
1183 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
1184 size_t& pre_used,
1185 size_t& cleared_h,
1186 size_t& freed_regions,
1187 UncleanRegionList* list,
1188 bool par) {
1189 assert(!hr->continuesHumongous(), "should have filtered these out");
1190 size_t res = 0;
1191 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
1192 !hr->is_young()) {
1193 if (G1PolicyVerbose > 0)
1194 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
1195 " during cleanup", hr, hr->used());
1196 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
1197 }
1198 }
1200 // FIXME: both this and shrink could probably be more efficient by
1201 // doing one "VirtualSpace::expand_by" call rather than several.
1202 void G1CollectedHeap::expand(size_t expand_bytes) {
1203 size_t old_mem_size = _g1_storage.committed_size();
1204 // We expand by a minimum of 1K.
1205 expand_bytes = MAX2(expand_bytes, (size_t)K);
1206 size_t aligned_expand_bytes =
1207 ReservedSpace::page_align_size_up(expand_bytes);
1208 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1209 HeapRegion::GrainBytes);
1210 expand_bytes = aligned_expand_bytes;
1211 while (expand_bytes > 0) {
1212 HeapWord* base = (HeapWord*)_g1_storage.high();
1213 // Commit more storage.
1214 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
1215 if (!successful) {
1216 expand_bytes = 0;
1217 } else {
1218 expand_bytes -= HeapRegion::GrainBytes;
1219 // Expand the committed region.
1220 HeapWord* high = (HeapWord*) _g1_storage.high();
1221 _g1_committed.set_end(high);
1222 // Create a new HeapRegion.
1223 MemRegion mr(base, high);
1224 bool is_zeroed = !_g1_max_committed.contains(base);
1225 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
1227 // Now update max_committed if necessary.
1228 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
1230 // Add it to the HeapRegionSeq.
1231 _hrs->insert(hr);
1232 // Set the zero-fill state, according to whether it's already
1233 // zeroed.
1234 {
1235 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
1236 if (is_zeroed) {
1237 hr->set_zero_fill_complete();
1238 put_free_region_on_list_locked(hr);
1239 } else {
1240 hr->set_zero_fill_needed();
1241 put_region_on_unclean_list_locked(hr);
1242 }
1243 }
1244 _free_regions++;
1245 // And we used up an expansion region to create it.
1246 _expansion_regions--;
1247 // Tell the cardtable about it.
1248 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1249 // And the offset table as well.
1250 _bot_shared->resize(_g1_committed.word_size());
1251 }
1252 }
1253 if (Verbose && PrintGC) {
1254 size_t new_mem_size = _g1_storage.committed_size();
1255 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
1256 old_mem_size/K, aligned_expand_bytes/K,
1257 new_mem_size/K);
1258 }
1259 }
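// Worked example (illustrative): a 1-byte expansion request is raised to 1K,
// page-aligned, and then rounded up to a whole region (GrainBytes); the loop
// above then commits one region at a time, creating a HeapRegion for each
// newly committed chunk and putting it on the free or unclean list depending
// on whether it still needs zero-filling.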
1261 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
1262 {
1263 size_t old_mem_size = _g1_storage.committed_size();
1264 size_t aligned_shrink_bytes =
1265 ReservedSpace::page_align_size_down(shrink_bytes);
1266 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1267 HeapRegion::GrainBytes);
1268 size_t num_regions_deleted = 0;
1269 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
1271 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
1272 if (mr.byte_size() > 0)
1273 _g1_storage.shrink_by(mr.byte_size());
1274 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
1276 _g1_committed.set_end(mr.start());
1277 _free_regions -= num_regions_deleted;
1278 _expansion_regions += num_regions_deleted;
1280 // Tell the cardtable about it.
1281 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1283 // And the offset table as well.
1284 _bot_shared->resize(_g1_committed.word_size());
1286 HeapRegionRemSet::shrink_heap(n_regions());
1288 if (Verbose && PrintGC) {
1289 size_t new_mem_size = _g1_storage.committed_size();
1290 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
1291 old_mem_size/K, aligned_shrink_bytes/K,
1292 new_mem_size/K);
1293 }
1294 }
1296 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1297 release_gc_alloc_regions(true /* totally */);
1298 tear_down_region_lists(); // We will rebuild them in a moment.
1299 shrink_helper(shrink_bytes);
1300 rebuild_region_lists();
1301 }
1303 // Public methods.
1305 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1306 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1307 #endif // _MSC_VER
1310 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1311 SharedHeap(policy_),
1312 _g1_policy(policy_),
1313 _ref_processor(NULL),
1314 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1315 _bot_shared(NULL),
1316 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
1317 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1318 _evac_failure_scan_stack(NULL) ,
1319 _mark_in_progress(false),
1320 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
1321 _cur_alloc_region(NULL),
1322 _refine_cte_cl(NULL),
1323 _free_region_list(NULL), _free_region_list_size(0),
1324 _free_regions(0),
1325 _full_collection(false),
1326 _unclean_region_list(),
1327 _unclean_regions_coming(false),
1328 _young_list(new YoungList(this)),
1329 _gc_time_stamp(0),
1330 _surviving_young_words(NULL),
1331 _in_cset_fast_test(NULL),
1332 _in_cset_fast_test_base(NULL) {
1333 _g1h = this; // To catch bugs.
1334 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1335 vm_exit_during_initialization("Failed necessary allocation.");
1336 }
1337 int n_queues = MAX2((int)ParallelGCThreads, 1);
1338 _task_queues = new RefToScanQueueSet(n_queues);
1340 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1341 assert(n_rem_sets > 0, "Invariant.");
1343 HeapRegionRemSetIterator** iter_arr =
1344 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
1345 for (int i = 0; i < n_queues; i++) {
1346 iter_arr[i] = new HeapRegionRemSetIterator();
1347 }
1348 _rem_set_iterator = iter_arr;
1350 for (int i = 0; i < n_queues; i++) {
1351 RefToScanQueue* q = new RefToScanQueue();
1352 q->initialize();
1353 _task_queues->register_queue(i, q);
1354 }
1356 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1357 _gc_alloc_regions[ap] = NULL;
1358 _gc_alloc_region_counts[ap] = 0;
1359 _retained_gc_alloc_regions[ap] = NULL;
1360 // by default, we do not retain a GC alloc region for each ap;
1361 // we'll override this, when appropriate, below
1362 _retain_gc_alloc_region[ap] = false;
1363 }
1365 // We will try to remember the last half-full tenured region we
1366 // allocated to at the end of a collection so that we can re-use it
1367 // during the next collection.
1368 _retain_gc_alloc_region[GCAllocForTenured] = true;
1370 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1371 }
1373 jint G1CollectedHeap::initialize() {
1374 os::enable_vtime();
1376 // Necessary to satisfy locking discipline assertions.
1378 MutexLocker x(Heap_lock);
1380 // While there are no constraints in the GC code that HeapWordSize
1381 // be any particular value, there are multiple other areas in the
1382 // system which believe this to be true (e.g. oop->object_size in some
1383 // cases incorrectly returns the size in wordSize units rather than
1384 // HeapWordSize).
1385 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1387 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1388 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1390 // Ensure that the sizes are properly aligned.
1391 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1392 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1394 // We allocate this in any case, but it does no work if the command-line
1395 // parameter is off.
1396 _cg1r = new ConcurrentG1Refine();
1398 // Reserve the maximum.
1399 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
1400 // Includes the perm-gen.
1402 const size_t total_reserved = max_byte_size + pgs->max_size();
1403 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
1405 ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
1406 HeapRegion::GrainBytes,
1407 false /*ism*/, addr);
1409 if (UseCompressedOops) {
1410 if (addr != NULL && !heap_rs.is_reserved()) {
1411 // Failed to reserve at specified address - the requested memory
1412 // region is taken already, for example, by 'java' launcher.
1413 // Try again to reserve the heap higher.
1414 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
1415 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
1416 false /*ism*/, addr);
1417 if (addr != NULL && !heap_rs0.is_reserved()) {
1418 // Failed to reserve at specified address again - give up.
1419 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
1420 assert(addr == NULL, "");
1421 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
1422 false /*ism*/, addr);
1423 heap_rs = heap_rs1;
1424 } else {
1425 heap_rs = heap_rs0;
1426 }
1427 }
1428 }
1430 if (!heap_rs.is_reserved()) {
1431 vm_exit_during_initialization("Could not reserve enough space for object heap");
1432 return JNI_ENOMEM;
1433 }
1435 // It is important to do this in a way such that concurrent readers can't
1436 // temporarily think something is in the heap. (I've actually seen this
1437 // happen in asserts: DLD.)
1438 _reserved.set_word_size(0);
1439 _reserved.set_start((HeapWord*)heap_rs.base());
1440 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
1442 _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
1444 _num_humongous_regions = 0;
1446 // Create the gen rem set (and barrier set) for the entire reserved region.
1447 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
1448 set_barrier_set(rem_set()->bs());
1449 if (barrier_set()->is_a(BarrierSet::ModRef)) {
1450 _mr_bs = (ModRefBarrierSet*)_barrier_set;
1451 } else {
1452 vm_exit_during_initialization("G1 requires a mod ref bs.");
1453 return JNI_ENOMEM;
1454 }
1456 // Also create a G1 rem set.
1457 if (G1UseHRIntoRS) {
1458 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
1459 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
1460 } else {
1461 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
1462 return JNI_ENOMEM;
1463 }
1464 } else {
1465 _g1_rem_set = new StupidG1RemSet(this);
1466 }
1468 // Carve out the G1 part of the heap.
1470 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1471 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
1472 g1_rs.size()/HeapWordSize);
1473 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
1475 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
1477 _g1_storage.initialize(g1_rs, 0);
1478 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
1479 _g1_max_committed = _g1_committed;
1480 _hrs = new HeapRegionSeq(_expansion_regions);
1481 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
1482 guarantee(_cur_alloc_region == NULL, "from constructor");
1484 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
1485 heap_word_size(init_byte_size));
1487 _g1h = this;
1489 // Create the ConcurrentMark data structure and thread.
1490 // (Must do this late, so that "max_regions" is defined.)
1491 _cm = new ConcurrentMark(heap_rs, (int) max_regions());
1492 _cmThread = _cm->cmThread();
1494 // ...and the concurrent zero-fill thread, if necessary.
1495 if (G1ConcZeroFill) {
1496 _czft = new ConcurrentZFThread();
1497 }
1499 // Initialize the from_card cache structure of HeapRegionRemSet.
1500 HeapRegionRemSet::init_heap(max_regions());
1502 // Now expand into the initial heap size.
1503 expand(init_byte_size);
1505 // Perform any initialization actions delegated to the policy.
1506 g1_policy()->init();
1508 g1_policy()->note_start_of_mark_thread();
1510 _refine_cte_cl =
1511 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
1512 g1_rem_set(),
1513 concurrent_g1_refine());
1514 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
1516 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1517 SATB_Q_FL_lock,
1518 0,
1519 Shared_SATB_Q_lock);
1521 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1522 DirtyCardQ_FL_lock,
1523 G1DirtyCardQueueMax,
1524 Shared_DirtyCardQ_lock);
1526 if (G1DeferredRSUpdate) {
1527 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1528 DirtyCardQ_FL_lock,
1529 0,
1530 Shared_DirtyCardQ_lock,
1531 &JavaThread::dirty_card_queue_set());
1532 }
1533 // In case we're keeping closure specialization stats, initialize those
1534 // counts and that mechanism.
1535 SpecializationStats::clear();
1537 _gc_alloc_region_list = NULL;
1539 // Do later initialization work for concurrent refinement.
1540 _cg1r->init();
1542 const char* group_names[] = { "CR", "ZF", "CM", "CL" };
1543 GCOverheadReporter::initGCOverheadReporter(4, group_names);
1545 return JNI_OK;
1546 }
1548 void G1CollectedHeap::ref_processing_init() {
1549 SharedHeap::ref_processing_init();
1550 MemRegion mr = reserved_region();
1551 _ref_processor = ReferenceProcessor::create_ref_processor(
1552 mr, // span
1553 false, // Reference discovery is not atomic
1554 // (though it shouldn't matter here.)
1555 true, // mt_discovery
1556 NULL, // is alive closure: need to fill this in for efficiency
1557 ParallelGCThreads,
1558 ParallelRefProcEnabled,
1559 true); // Setting next fields of discovered
1560 // lists requires a barrier.
1561 }
1563 size_t G1CollectedHeap::capacity() const {
1564 return _g1_committed.byte_size();
1565 }
1567 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
1568 int worker_i) {
1569 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1570 int n_completed_buffers = 0;
1571 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
1572 n_completed_buffers++;
1573 }
1574 g1_policy()->record_update_rs_processed_buffers(worker_i,
1575 (double) n_completed_buffers);
1576 dcqs.clear_n_completed_buffers();
1577 // Finish up the queue...
1578 if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
1579 g1_rem_set());
1580 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1581 }
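// Sketch (hypothetical worker body, not part of this file): with parallel
// refinement, each update-RS worker drains buffers with its own index, so the
// completed dirty-card buffers are processed by several workers at once.
//
//   void UpdateRSWorkerSketch::work(int worker_i) {
//     G1CollectedHeap::heap()->iterate_dirty_card_closure(false, worker_i);
//   }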
1584 // Computes the sum of the storage used by the various regions.
1586 size_t G1CollectedHeap::used() const {
1587 assert(Heap_lock->owner() != NULL,
1588 "Should be owned on this thread's behalf.");
1589 size_t result = _summary_bytes_used;
1590 if (_cur_alloc_region != NULL)
1591 result += _cur_alloc_region->used();
1592 return result;
1593 }
1595 class SumUsedClosure: public HeapRegionClosure {
1596 size_t _used;
1597 public:
1598 SumUsedClosure() : _used(0) {}
1599 bool doHeapRegion(HeapRegion* r) {
1600 if (!r->continuesHumongous()) {
1601 _used += r->used();
1602 }
1603 return false;
1604 }
1605 size_t result() { return _used; }
1606 };
1608 size_t G1CollectedHeap::recalculate_used() const {
1609 SumUsedClosure blk;
1610 _hrs->iterate(&blk);
1611 return blk.result();
1612 }
1614 #ifndef PRODUCT
1615 class SumUsedRegionsClosure: public HeapRegionClosure {
1616 size_t _num;
1617 public:
1618 SumUsedRegionsClosure() : _num(0) {}
1619 bool doHeapRegion(HeapRegion* r) {
1620 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
1621 _num += 1;
1622 }
1623 return false;
1624 }
1625 size_t result() { return _num; }
1626 };
1628 size_t G1CollectedHeap::recalculate_used_regions() const {
1629 SumUsedRegionsClosure blk;
1630 _hrs->iterate(&blk);
1631 return blk.result();
1632 }
1633 #endif // PRODUCT
1635 size_t G1CollectedHeap::unsafe_max_alloc() {
1636 if (_free_regions > 0) return HeapRegion::GrainBytes;
1637 // otherwise, is there space in the current allocation region?
1639 // We need to store the current allocation region in a local variable
1640 // here. The problem is that this method doesn't take any locks and
1641 // there may be other threads which overwrite the current allocation
1642 // region field. attempt_allocation(), for example, sets it to NULL
1643 // and this can happen *after* the NULL check here but before the call
1644 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
1645 // to be a problem in the optimized build, since the two loads of the
1646 // current allocation region field are optimized away.
1647 HeapRegion* car = _cur_alloc_region;
1649 // FIXME: should iterate over all regions?
1650 if (car == NULL) {
1651 return 0;
1652 }
1653 return car->free();
1654 }
1656 void G1CollectedHeap::collect(GCCause::Cause cause) {
1657 // The caller doesn't have the Heap_lock
1658 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
1659 MutexLocker ml(Heap_lock);
1660 collect_locked(cause);
1661 }
1663 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
1664 assert(Thread::current()->is_VM_thread(), "Precondition#1");
1665 assert(Heap_lock->is_locked(), "Precondition#2");
1666 GCCauseSetter gcs(this, cause);
1667 switch (cause) {
1668 case GCCause::_heap_inspection:
1669 case GCCause::_heap_dump: {
1670 HandleMark hm;
1671 do_full_collection(false); // don't clear all soft refs
1672 break;
1673 }
1674 default: // XXX FIX ME
1675 ShouldNotReachHere(); // Unexpected use of this function
1676 }
1677 }
1680 void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
1681 // Don't want to do a GC until cleanup is completed.
1682 wait_for_cleanup_complete();
1684 // Read the GC count while holding the Heap_lock
1685 int gc_count_before = SharedHeap::heap()->total_collections();
1686 {
1687 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
1688 VM_G1CollectFull op(gc_count_before, cause);
1689 VMThread::execute(&op);
1690 }
1691 }
1693 bool G1CollectedHeap::is_in(const void* p) const {
1694 if (_g1_committed.contains(p)) {
1695 HeapRegion* hr = _hrs->addr_to_region(p);
1696 return hr->is_in(p);
1697 } else {
1698 return _perm_gen->as_gen()->is_in(p);
1699 }
1700 }
1702 // Iteration functions.
1704 // Iterates an OopClosure over all ref-containing fields of objects
1705 // within a HeapRegion.
1707 class IterateOopClosureRegionClosure: public HeapRegionClosure {
1708 MemRegion _mr;
1709 OopClosure* _cl;
1710 public:
1711 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
1712 : _mr(mr), _cl(cl) {}
1713 bool doHeapRegion(HeapRegion* r) {
1714 if (! r->continuesHumongous()) {
1715 r->oop_iterate(_cl);
1716 }
1717 return false;
1718 }
1719 };
1721 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
1722 IterateOopClosureRegionClosure blk(_g1_committed, cl);
1723 _hrs->iterate(&blk);
1724 if (do_perm) {
1725 perm_gen()->oop_iterate(cl);
1726 }
1727 }
1729 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
1730 IterateOopClosureRegionClosure blk(mr, cl);
1731 _hrs->iterate(&blk);
1732 if (do_perm) {
1733 perm_gen()->oop_iterate(cl);
1734 }
1735 }
1737 // Iterates an ObjectClosure over all objects within a HeapRegion.
1739 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
1740 ObjectClosure* _cl;
1741 public:
1742 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1743 bool doHeapRegion(HeapRegion* r) {
1744 if (! r->continuesHumongous()) {
1745 r->object_iterate(_cl);
1746 }
1747 return false;
1748 }
1749 };
1751 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
1752 IterateObjectClosureRegionClosure blk(cl);
1753 _hrs->iterate(&blk);
1754 if (do_perm) {
1755 perm_gen()->object_iterate(cl);
1756 }
1757 }
1759 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
1760 // FIXME: is this right?
1761 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
1762 }
1764 // Calls a SpaceClosure on a HeapRegion.
1766 class SpaceClosureRegionClosure: public HeapRegionClosure {
1767 SpaceClosure* _cl;
1768 public:
1769 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
1770 bool doHeapRegion(HeapRegion* r) {
1771 _cl->do_space(r);
1772 return false;
1773 }
1774 };
1776 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
1777 SpaceClosureRegionClosure blk(cl);
1778 _hrs->iterate(&blk);
1779 }
1781 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
1782 _hrs->iterate(cl);
1783 }
1785 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
1786 HeapRegionClosure* cl) {
1787 _hrs->iterate_from(r, cl);
1788 }
1790 void
1791 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
1792 _hrs->iterate_from(idx, cl);
1793 }
1795 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
1797 void
1798 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
1799 int worker,
1800 jint claim_value) {
1801 const size_t regions = n_regions();
1802 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
1803 // try to spread out the starting points of the workers
1804 const size_t start_index = regions / worker_num * (size_t) worker;
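// Illustrative arithmetic (hypothetical numbers, not from the source): with
// regions == 100, worker_num == 4 and worker == 2, start_index == 50; the
// loop below wraps modulo `regions`, so the workers begin their claiming
// passes in different parts of the heap and contend less on the same regions.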
1806 // each worker will actually look at all regions
1807 for (size_t count = 0; count < regions; ++count) {
1808 const size_t index = (start_index + count) % regions;
1809 assert(0 <= index && index < regions, "sanity");
1810 HeapRegion* r = region_at(index);
1811 // we'll ignore "continues humongous" regions (we'll process them
1812 // when we come across their corresponding "starts humongous"
1813 // region) and regions already claimed
1814 if (r->claim_value() == claim_value || r->continuesHumongous()) {
1815 continue;
1816 }
1817 // OK, try to claim it
1818 if (r->claimHeapRegion(claim_value)) {
1819 // success!
1820 assert(!r->continuesHumongous(), "sanity");
1821 if (r->startsHumongous()) {
1822 // If the region is "starts humongous" we'll iterate over its
1823 // "continues humongous" first; in fact we'll do them
1824 // first. The order is important. In one case, calling the
1825 // closure on the "starts humongous" region might de-allocate
1826 // and clear all its "continues humongous" regions and, as a
1827 // result, we might end up processing them twice. So, we'll do
1828 // them first (notice: most closures will ignore them anyway) and
1829 // then we'll do the "starts humongous" region.
1830 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
1831 HeapRegion* chr = region_at(ch_index);
1833 // if the region has already been claimed or it's not
1834 // "continues humongous" we're done
1835 if (chr->claim_value() == claim_value ||
1836 !chr->continuesHumongous()) {
1837 break;
1838 }
1840 // No one should have claimed it directly. We know this, given
1841 // that we claimed its "starts humongous" region.
1842 assert(chr->claim_value() != claim_value, "sanity");
1843 assert(chr->humongous_start_region() == r, "sanity");
1845 if (chr->claimHeapRegion(claim_value)) {
1846 // we should always be able to claim it; no one else should
1847 // be trying to claim this region
1849 bool res2 = cl->doHeapRegion(chr);
1850 assert(!res2, "Should not abort");
1852 // Right now, this holds (i.e., no closure that actually
1853 // does something with "continues humongous" regions
1854 // clears them). We might have to weaken it in the future,
1855 // but let's leave these two asserts here for extra safety.
1856 assert(chr->continuesHumongous(), "should still be the case");
1857 assert(chr->humongous_start_region() == r, "sanity");
1858 } else {
1859 guarantee(false, "we should not reach here");
1860 }
1861 }
1862 }
1864 assert(!r->continuesHumongous(), "sanity");
1865 bool res = cl->doHeapRegion(r);
1866 assert(!res, "Should not abort");
1867 }
1868 }
1869 }
1871 class ResetClaimValuesClosure: public HeapRegionClosure {
1872 public:
1873 bool doHeapRegion(HeapRegion* r) {
1874 r->set_claim_value(HeapRegion::InitialClaimValue);
1875 return false;
1876 }
1877 };
1879 void
1880 G1CollectedHeap::reset_heap_region_claim_values() {
1881 ResetClaimValuesClosure blk;
1882 heap_region_iterate(&blk);
1883 }
1885 #ifdef ASSERT
1886 // This checks whether all regions in the heap have the correct claim
1887 // value. I also piggy-backed on this a check to ensure that the
1888 // humongous_start_region() information on "continues humongous"
1889 // regions is correct.
1891 class CheckClaimValuesClosure : public HeapRegionClosure {
1892 private:
1893 jint _claim_value;
1894 size_t _failures;
1895 HeapRegion* _sh_region;
1896 public:
1897 CheckClaimValuesClosure(jint claim_value) :
1898 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
1899 bool doHeapRegion(HeapRegion* r) {
1900 if (r->claim_value() != _claim_value) {
1901 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
1902 "claim value = %d, should be %d",
1903 r->bottom(), r->end(), r->claim_value(),
1904 _claim_value);
1905 ++_failures;
1906 }
1907 if (!r->isHumongous()) {
1908 _sh_region = NULL;
1909 } else if (r->startsHumongous()) {
1910 _sh_region = r;
1911 } else if (r->continuesHumongous()) {
1912 if (r->humongous_start_region() != _sh_region) {
1913 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
1914 "HS = "PTR_FORMAT", should be "PTR_FORMAT,
1915 r->bottom(), r->end(),
1916 r->humongous_start_region(),
1917 _sh_region);
1918 ++_failures;
1919 }
1920 }
1921 return false;
1922 }
1923 size_t failures() {
1924 return _failures;
1925 }
1926 };
1928 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
1929 CheckClaimValuesClosure cl(claim_value);
1930 heap_region_iterate(&cl);
1931 return cl.failures() == 0;
1932 }
1933 #endif // ASSERT
1935 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
1936 HeapRegion* r = g1_policy()->collection_set();
1937 while (r != NULL) {
1938 HeapRegion* next = r->next_in_collection_set();
1939 if (cl->doHeapRegion(r)) {
1940 cl->incomplete();
1941 return;
1942 }
1943 r = next;
1944 }
1945 }
1947 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
1948 HeapRegionClosure *cl) {
1949 assert(r->in_collection_set(),
1950 "Start region must be a member of the collection set.");
1951 HeapRegion* cur = r;
1952 while (cur != NULL) {
1953 HeapRegion* next = cur->next_in_collection_set();
1954 if (cl->doHeapRegion(cur) && false) {
1955 cl->incomplete();
1956 return;
1957 }
1958 cur = next;
1959 }
1960 cur = g1_policy()->collection_set();
1961 while (cur != r) {
1962 HeapRegion* next = cur->next_in_collection_set();
1963 if (cl->doHeapRegion(cur) && false) {
1964 cl->incomplete();
1965 return;
1966 }
1967 cur = next;
1968 }
1969 }
1971 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
1972 return _hrs->length() > 0 ? _hrs->at(0) : NULL;
1973 }
1976 Space* G1CollectedHeap::space_containing(const void* addr) const {
1977 Space* res = heap_region_containing(addr);
1978 if (res == NULL)
1979 res = perm_gen()->space_containing(addr);
1980 return res;
1981 }
1983 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
1984 Space* sp = space_containing(addr);
1985 if (sp != NULL) {
1986 return sp->block_start(addr);
1987 }
1988 return NULL;
1989 }
1991 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
1992 Space* sp = space_containing(addr);
1993 assert(sp != NULL, "block_size of address outside of heap");
1994 return sp->block_size(addr);
1995 }
1997 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
1998 Space* sp = space_containing(addr);
1999 return sp->block_is_obj(addr);
2000 }
2002 bool G1CollectedHeap::supports_tlab_allocation() const {
2003 return true;
2004 }
2006 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2007 return HeapRegion::GrainBytes;
2008 }
2010 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2011 // Return the remaining space in the cur alloc region, but not less than
2012 // the min TLAB size.
2013 // Also, no more than half the region size, since we can't allow tlabs to
2014 // grow big enough to accommodate humongous objects.
2016 // We need to store it locally, since it might change between when we
2017 // test for NULL and when we use it later.
2018 ContiguousSpace* cur_alloc_space = _cur_alloc_region;
2019 if (cur_alloc_space == NULL) {
2020 return HeapRegion::GrainBytes/2;
2021 } else {
2022 return MAX2(MIN2(cur_alloc_space->free(),
2023 (size_t)(HeapRegion::GrainBytes/2)),
2024 (size_t)MinTLABSize);
2025 }
2026 }
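// Illustrative values for unsafe_max_tlab_alloc() above (hypothetical, not
// from the source): with GrainBytes == 1M the TLAB cap is 512K; a current
// alloc region with 40K free yields 40K, while a nearly-full region is
// clamped up to MinTLABSize so callers never receive an unusably small answer.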
2028 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
2029 bool dummy;
2030 return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
2031 }
2033 bool G1CollectedHeap::allocs_are_zero_filled() {
2034 return false;
2035 }
2037 size_t G1CollectedHeap::large_typearray_limit() {
2038 // FIXME
2039 return HeapRegion::GrainBytes/HeapWordSize;
2040 }
2042 size_t G1CollectedHeap::max_capacity() const {
2043 return _g1_committed.byte_size();
2044 }
2046 jlong G1CollectedHeap::millis_since_last_gc() {
2047 // assert(false, "NYI");
2048 return 0;
2049 }
2052 void G1CollectedHeap::prepare_for_verify() {
2053 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2054 ensure_parsability(false);
2055 }
2056 g1_rem_set()->prepare_for_verify();
2057 }
2059 class VerifyLivenessOopClosure: public OopClosure {
2060 G1CollectedHeap* g1h;
2061 public:
2062 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
2063 g1h = _g1h;
2064 }
2065 void do_oop(narrowOop *p) {
2066 guarantee(false, "NYI");
2067 }
2068 void do_oop(oop *p) {
2069 oop obj = *p;
2070 assert(obj == NULL || !g1h->is_obj_dead(obj),
2071 "Dead object referenced by a not dead object");
2072 }
2073 };
2075 class VerifyObjsInRegionClosure: public ObjectClosure {
2076 G1CollectedHeap* _g1h;
2077 size_t _live_bytes;
2078 HeapRegion *_hr;
2079 public:
2080 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
2081 _g1h = G1CollectedHeap::heap();
2082 }
2083 void do_object(oop o) {
2084 VerifyLivenessOopClosure isLive(_g1h);
2085 assert(o != NULL, "Huh?");
2086 if (!_g1h->is_obj_dead(o)) {
2087 o->oop_iterate(&isLive);
2088 if (!_hr->obj_allocated_since_prev_marking(o))
2089 _live_bytes += (o->size() * HeapWordSize);
2090 }
2091 }
2092 size_t live_bytes() { return _live_bytes; }
2093 };
2095 class PrintObjsInRegionClosure : public ObjectClosure {
2096 HeapRegion *_hr;
2097 G1CollectedHeap *_g1;
2098 public:
2099 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
2100 _g1 = G1CollectedHeap::heap();
2101 };
2103 void do_object(oop o) {
2104 if (o != NULL) {
2105 HeapWord *start = (HeapWord *) o;
2106 size_t word_sz = o->size();
2107 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
2108 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
2109 (void*) o, word_sz,
2110 _g1->isMarkedPrev(o),
2111 _g1->isMarkedNext(o),
2112 _hr->obj_allocated_since_prev_marking(o));
2113 HeapWord *end = start + word_sz;
2114 HeapWord *cur;
2115 int *val;
2116 for (cur = start; cur < end; cur++) {
2117 val = (int *) cur;
2118 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
2119 }
2120 }
2121 }
2122 };
2124 class VerifyRegionClosure: public HeapRegionClosure {
2125 public:
2126 bool _allow_dirty;
2127 bool _par;
2128 VerifyRegionClosure(bool allow_dirty, bool par = false)
2129 : _allow_dirty(allow_dirty), _par(par) {}
2130 bool doHeapRegion(HeapRegion* r) {
2131 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
2132 "Should be unclaimed at verify points.");
2133 if (!r->continuesHumongous()) {
2134 VerifyObjsInRegionClosure not_dead_yet_cl(r);
2135 r->verify(_allow_dirty);
2136 r->object_iterate(&not_dead_yet_cl);
2137 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
2138 "More live objects than counted in last complete marking.");
2139 }
2140 return false;
2141 }
2142 };
2144 class VerifyRootsClosure: public OopsInGenClosure {
2145 private:
2146 G1CollectedHeap* _g1h;
2147 bool _failures;
2149 public:
2150 VerifyRootsClosure() :
2151 _g1h(G1CollectedHeap::heap()), _failures(false) { }
2153 bool failures() { return _failures; }
2155 void do_oop(narrowOop* p) {
2156 guarantee(false, "NYI");
2157 }
2159 void do_oop(oop* p) {
2160 oop obj = *p;
2161 if (obj != NULL) {
2162 if (_g1h->is_obj_dead(obj)) {
2163 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
2164 "points to dead obj "PTR_FORMAT, p, (void*) obj);
2165 obj->print_on(gclog_or_tty);
2166 _failures = true;
2167 }
2168 }
2169 }
2170 };
2172 // This is the task used for parallel heap verification.
2174 class G1ParVerifyTask: public AbstractGangTask {
2175 private:
2176 G1CollectedHeap* _g1h;
2177 bool _allow_dirty;
2179 public:
2180 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
2181 AbstractGangTask("Parallel verify task"),
2182 _g1h(g1h), _allow_dirty(allow_dirty) { }
2184 void work(int worker_i) {
2185 HandleMark hm;
2186 VerifyRegionClosure blk(_allow_dirty, true);
2187 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
2188 HeapRegion::ParVerifyClaimValue);
2189 }
2190 };
2192 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
2193 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2194 if (!silent) { gclog_or_tty->print("roots "); }
2195 VerifyRootsClosure rootsCl;
2196 process_strong_roots(false,
2197 SharedHeap::SO_AllClasses,
2198 &rootsCl,
2199 &rootsCl);
2200 rem_set()->invalidate(perm_gen()->used_region(), false);
2201 if (!silent) { gclog_or_tty->print("heapRegions "); }
2202 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
2203 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2204 "sanity check");
2206 G1ParVerifyTask task(this, allow_dirty);
2207 int n_workers = workers()->total_workers();
2208 set_par_threads(n_workers);
2209 workers()->run_task(&task);
2210 set_par_threads(0);
2212 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
2213 "sanity check");
2215 reset_heap_region_claim_values();
2217 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2218 "sanity check");
2219 } else {
2220 VerifyRegionClosure blk(allow_dirty);
2221 _hrs->iterate(&blk);
2222 }
2223 if (!silent) gclog_or_tty->print("remset ");
2224 rem_set()->verify();
2225 guarantee(!rootsCl.failures(), "should not have had failures");
2226 } else {
2227 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
2228 }
2229 }
2231 class PrintRegionClosure: public HeapRegionClosure {
2232 outputStream* _st;
2233 public:
2234 PrintRegionClosure(outputStream* st) : _st(st) {}
2235 bool doHeapRegion(HeapRegion* r) {
2236 r->print_on(_st);
2237 return false;
2238 }
2239 };
2241 void G1CollectedHeap::print() const { print_on(gclog_or_tty); }
2243 void G1CollectedHeap::print_on(outputStream* st) const {
2244 PrintRegionClosure blk(st);
2245 _hrs->iterate(&blk);
2246 }
2248 class PrintOnThreadsClosure : public ThreadClosure {
2249 outputStream* _st;
2250 public:
2251 PrintOnThreadsClosure(outputStream* st) : _st(st) { }
2252 virtual void do_thread(Thread *t) {
2253 t->print_on(_st);
2254 }
2255 };
2257 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2258 if (ParallelGCThreads > 0) {
2259 workers()->print_worker_threads();
2260 }
2261 st->print("\"G1 concurrent mark GC Thread\" ");
2262 _cmThread->print();
2263 st->cr();
2264 st->print("\"G1 concurrent refinement GC Threads\" ");
2265 PrintOnThreadsClosure p(st);
2266 _cg1r->threads_do(&p);
2267 st->cr();
2268 st->print("\"G1 zero-fill GC Thread\" ");
2269 _czft->print_on(st);
2270 st->cr();
2271 }
2273 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2274 if (ParallelGCThreads > 0) {
2275 workers()->threads_do(tc);
2276 }
2277 tc->do_thread(_cmThread);
2278 _cg1r->threads_do(tc);
2279 tc->do_thread(_czft);
2280 }
2282 void G1CollectedHeap::print_tracing_info() const {
2283 concurrent_g1_refine()->print_final_card_counts();
2285 // We'll overload this to mean "trace GC pause statistics."
2286 if (TraceGen0Time || TraceGen1Time) {
2287 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
2288 // to that.
2289 g1_policy()->print_tracing_info();
2290 }
2291 if (G1SummarizeRSetStats) {
2292 g1_rem_set()->print_summary_info();
2293 }
2294 if (G1SummarizeConcurrentMark) {
2295 concurrent_mark()->print_summary_info();
2296 }
2297 if (G1SummarizeZFStats) {
2298 ConcurrentZFThread::print_summary_info();
2299 }
2300 g1_policy()->print_yg_surv_rate_info();
2302 GCOverheadReporter::printGCOverhead();
2304 SpecializationStats::print();
2305 }
2308 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
2309 HeapRegion* hr = heap_region_containing(addr);
2310 if (hr == NULL) {
2311 return 0;
2312 } else {
2313 return 1;
2314 }
2315 }
2317 G1CollectedHeap* G1CollectedHeap::heap() {
2318 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
2319 "not a garbage-first heap");
2320 return _g1h;
2321 }
2323 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2324 if (PrintHeapAtGC){
2325 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections());
2326 Universe::print();
2327 }
2328 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2329 // Call allocation profiler
2330 AllocationProfiler::iterate_since_last_gc();
2331 // Fill TLAB's and such
2332 ensure_parsability(true);
2333 }
2335 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
2336 // FIXME: what is this about?
2337 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2338 // is set.
2339 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
2340 "derived pointer present"));
2342 if (PrintHeapAtGC){
2343 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections());
2344 Universe::print();
2345 gclog_or_tty->print("} ");
2346 }
2347 }
2349 void G1CollectedHeap::do_collection_pause() {
2350 // Read the GC count while holding the Heap_lock
2351 // we need to do this _before_ wait_for_cleanup_complete(), to
2352 // ensure that we do not give up the heap lock and potentially
2353 // pick up the wrong count
2354 int gc_count_before = SharedHeap::heap()->total_collections();
2356 // Don't want to do a GC pause while cleanup is being completed!
2357 wait_for_cleanup_complete();
2359 g1_policy()->record_stop_world_start();
2360 {
2361 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
2362 VM_G1IncCollectionPause op(gc_count_before);
2363 VMThread::execute(&op);
2364 }
2365 }
2367 void
2368 G1CollectedHeap::doConcurrentMark() {
2369 if (G1ConcMark) {
2370 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2371 if (!_cmThread->in_progress()) {
2372 _cmThread->set_started();
2373 CGC_lock->notify();
2374 }
2375 }
2376 }
2378 class VerifyMarkedObjsClosure: public ObjectClosure {
2379 G1CollectedHeap* _g1h;
2380 public:
2381 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
2382 void do_object(oop obj) {
2383 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
2384 "markandsweep mark should agree with concurrent deadness");
2385 }
2386 };
2388 void
2389 G1CollectedHeap::checkConcurrentMark() {
2390 VerifyMarkedObjsClosure verifycl(this);
2391 // MutexLockerEx x(getMarkBitMapLock(),
2392 // Mutex::_no_safepoint_check_flag);
2393 object_iterate(&verifycl, false);
2394 }
2396 void G1CollectedHeap::do_sync_mark() {
2397 _cm->checkpointRootsInitial();
2398 _cm->markFromRoots();
2399 _cm->checkpointRootsFinal(false);
2400 }
2402 // <NEW PREDICTION>
2404 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
2405 bool young) {
2406 return _g1_policy->predict_region_elapsed_time_ms(hr, young);
2407 }
2409 void G1CollectedHeap::check_if_region_is_too_expensive(double
2410 predicted_time_ms) {
2411 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
2412 }
2414 size_t G1CollectedHeap::pending_card_num() {
2415 size_t extra_cards = 0;
2416 JavaThread *curr = Threads::first();
2417 while (curr != NULL) {
2418 DirtyCardQueue& dcq = curr->dirty_card_queue();
2419 extra_cards += dcq.size();
2420 curr = curr->next();
2421 }
2422 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2423 size_t buffer_size = dcqs.buffer_size();
2424 size_t buffer_num = dcqs.completed_buffers_num();
2425 return buffer_size * buffer_num + extra_cards;
2426 }
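// A rough sketch of the two estimates here (illustrative numbers, assuming
// each thread has at most one in-progress buffer): with buffer_size == 256
// cards, 3 completed buffers, and 2 Java threads whose local queues currently
// hold 10 and 20 cards, pending_card_num() above returns 256 * 3 + 30 == 798.
// max_pending_card_num() below instead treats every completed buffer and
// every thread's in-progress buffer as full: (3 + 2) * 256 == 1280.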
2428 size_t G1CollectedHeap::max_pending_card_num() {
2429 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2430 size_t buffer_size = dcqs.buffer_size();
2431 size_t buffer_num = dcqs.completed_buffers_num();
2432 int thread_num = Threads::number_of_threads();
2433 return (buffer_num + thread_num) * buffer_size;
2434 }
2436 size_t G1CollectedHeap::cards_scanned() {
2437 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
2438 return g1_rset->cardsScanned();
2439 }
2441 void
2442 G1CollectedHeap::setup_surviving_young_words() {
2443 guarantee( _surviving_young_words == NULL, "pre-condition" );
2444 size_t array_length = g1_policy()->young_cset_length();
2445 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
2446 if (_surviving_young_words == NULL) {
2447 vm_exit_out_of_memory(sizeof(size_t) * array_length,
2448 "Not enough space for young surv words summary.");
2449 }
2450 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
2451 for (size_t i = 0; i < array_length; ++i) {
2452 guarantee( _surviving_young_words[i] == 0, "invariant" );
2453 }
2454 }
2456 void
2457 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
2458 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2459 size_t array_length = g1_policy()->young_cset_length();
2460 for (size_t i = 0; i < array_length; ++i)
2461 _surviving_young_words[i] += surv_young_words[i];
2462 }
2464 void
2465 G1CollectedHeap::cleanup_surviving_young_words() {
2466 guarantee( _surviving_young_words != NULL, "pre-condition" );
2467 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
2468 _surviving_young_words = NULL;
2469 }
2471 // </NEW PREDICTION>
2473 void
2474 G1CollectedHeap::do_collection_pause_at_safepoint() {
2475 char verbose_str[128];
2476 sprintf(verbose_str, "GC pause ");
2477 if (g1_policy()->in_young_gc_mode()) {
2478 if (g1_policy()->full_young_gcs())
2479 strcat(verbose_str, "(young)");
2480 else
2481 strcat(verbose_str, "(partial)");
2482 }
2483 if (g1_policy()->should_initiate_conc_mark())
2484 strcat(verbose_str, " (initial-mark)");
2486 GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
2488 // if PrintGCDetails is on, we'll print long statistics information
2489 // in the collector policy code, so let's not print this as the output
2490 // is messy if we do.
2491 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2492 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2493 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
2495 ResourceMark rm;
2496 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
2497 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
2498 guarantee(!is_gc_active(), "collection is not reentrant");
2499 assert(regions_accounted_for(), "Region leakage!");
2501 increment_gc_time_stamp();
2503 if (g1_policy()->in_young_gc_mode()) {
2504 assert(check_young_list_well_formed(),
2505 "young list should be well formed");
2506 }
2508 if (GC_locker::is_active()) {
2509 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
2510 }
2512 bool abandoned = false;
2513 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2514 IsGCActiveMark x;
2516 gc_prologue(false);
2517 increment_total_collections();
2519 #if G1_REM_SET_LOGGING
2520 gclog_or_tty->print_cr("\nJust chose CS, heap:");
2521 print();
2522 #endif
2524 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
2525 HandleMark hm; // Discard invalid handles created during verification
2526 prepare_for_verify();
2527 gclog_or_tty->print(" VerifyBeforeGC:");
2528 Universe::verify(false);
2529 }
2531 COMPILER2_PRESENT(DerivedPointerTable::clear());
2533 // We want to turn off ref discovery, if necessary, and turn it back
2534 // on again later if we do.
2535 bool was_enabled = ref_processor()->discovery_enabled();
2536 if (was_enabled) ref_processor()->disable_discovery();
2538 // Forget the current alloc region (we might even choose it to be part
2539 // of the collection set!).
2540 abandon_cur_alloc_region();
2542 // The start time recorded below deliberately excludes the time taken
2543 // by the possible verification above.
2544 double start_time_sec = os::elapsedTime();
2545 GCOverheadReporter::recordSTWStart(start_time_sec);
2546 size_t start_used_bytes = used();
2547 if (!G1ConcMark) {
2548 do_sync_mark();
2549 }
2551 g1_policy()->record_collection_pause_start(start_time_sec,
2552 start_used_bytes);
2554 guarantee(_in_cset_fast_test == NULL, "invariant");
2555 guarantee(_in_cset_fast_test_base == NULL, "invariant");
2556 _in_cset_fast_test_length = max_regions();
2557 _in_cset_fast_test_base =
2558 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
2559 memset(_in_cset_fast_test_base, false,
2560 _in_cset_fast_test_length * sizeof(bool));
2561 // We're biasing _in_cset_fast_test to avoid subtracting the
2562 // beginning of the heap every time we want to index; basically
2563 // it's the same with what we do with the card table.
2564 _in_cset_fast_test = _in_cset_fast_test_base -
2565 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
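// A sketch of the biased lookup this enables (assumed usage; the actual
// accessor is defined elsewhere in this class):
//   bool in_cset =
//     _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes];
// i.e. we can index with the shifted address directly instead of first
// subtracting the shifted heap base, just as the card table does.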
2567 #if SCAN_ONLY_VERBOSE
2568 _young_list->print();
2569 #endif // SCAN_ONLY_VERBOSE
2571 if (g1_policy()->should_initiate_conc_mark()) {
2572 concurrent_mark()->checkpointRootsInitialPre();
2573 }
2574 save_marks();
2576 // We must do this before any possible evacuation that should propagate
2577 // marks.
2578 if (mark_in_progress()) {
2579 double start_time_sec = os::elapsedTime();
2581 _cm->drainAllSATBBuffers();
2582 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
2583 g1_policy()->record_satb_drain_time(finish_mark_ms);
2585 }
2586 // Record the number of elements currently on the mark stack, so we
2587 // only iterate over these. (Since evacuation may add to the mark
2588 // stack, doing more exposes race conditions.) If no mark is in
2589 // progress, this will be zero.
2590 _cm->set_oops_do_bound();
2592 assert(regions_accounted_for(), "Region leakage.");
2594 if (mark_in_progress())
2595 concurrent_mark()->newCSet();
2597 // Now choose the CS.
2598 g1_policy()->choose_collection_set();
2600 // We may abandon a pause if we find no region that will fit in the MMU
2601 // pause.
2602 bool abandoned = (g1_policy()->collection_set() == NULL);
2604 // Nothing to do if we were unable to choose a collection set.
2605 if (!abandoned) {
2606 #if G1_REM_SET_LOGGING
2607 gclog_or_tty->print_cr("\nAfter pause, heap:");
2608 print();
2609 #endif
2611 setup_surviving_young_words();
2613 // Set up the gc allocation regions.
2614 get_gc_alloc_regions();
2616 // Actually do the work...
2617 evacuate_collection_set();
2618 free_collection_set(g1_policy()->collection_set());
2619 g1_policy()->clear_collection_set();
2621 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
2622 // this is more for peace of mind; we're nulling them here and
2623 // we're expecting them to be null at the beginning of the next GC
2624 _in_cset_fast_test = NULL;
2625 _in_cset_fast_test_base = NULL;
2627 release_gc_alloc_regions(false /* totally */);
2629 cleanup_surviving_young_words();
2631 if (g1_policy()->in_young_gc_mode()) {
2632 _young_list->reset_sampled_info();
2633 assert(check_young_list_empty(true),
2634 "young list should be empty");
2636 #if SCAN_ONLY_VERBOSE
2637 _young_list->print();
2638 #endif // SCAN_ONLY_VERBOSE
2640 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
2641 _young_list->first_survivor_region(),
2642 _young_list->last_survivor_region());
2643 _young_list->reset_auxilary_lists();
2644 }
2645 } else {
2646 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
2647 }
2649 if (evacuation_failed()) {
2650 _summary_bytes_used = recalculate_used();
2651 } else {
2652 // The "used" of the the collection set have already been subtracted
2653 // when they were freed. Add in the bytes evacuated.
2654 _summary_bytes_used += g1_policy()->bytes_in_to_space();
2655 }
2657 if (g1_policy()->in_young_gc_mode() &&
2658 g1_policy()->should_initiate_conc_mark()) {
2659 concurrent_mark()->checkpointRootsInitialPost();
2660 set_marking_started();
2661 doConcurrentMark();
2662 }
2664 #if SCAN_ONLY_VERBOSE
2665 _young_list->print();
2666 #endif // SCAN_ONLY_VERBOSE
2668 double end_time_sec = os::elapsedTime();
2669 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
2670 g1_policy()->record_pause_time_ms(pause_time_ms);
2671 GCOverheadReporter::recordSTWEnd(end_time_sec);
2672 g1_policy()->record_collection_pause_end(abandoned);
2674 assert(regions_accounted_for(), "Region leakage.");
2676 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
2677 HandleMark hm; // Discard invalid handles created during verification
2678 gclog_or_tty->print(" VerifyAfterGC:");
2679 prepare_for_verify();
2680 Universe::verify(false);
2681 }
2683 if (was_enabled) ref_processor()->enable_discovery();
2685 {
2686 size_t expand_bytes = g1_policy()->expansion_amount();
2687 if (expand_bytes > 0) {
2688 size_t bytes_before = capacity();
2689 expand(expand_bytes);
2690 }
2691 }
2693 if (mark_in_progress()) {
2694 concurrent_mark()->update_g1_committed();
2695 }
2697 #ifdef TRACESPINNING
2698 ParallelTaskTerminator::print_termination_counts();
2699 #endif
2701 gc_epilogue(false);
2702 }
2704 assert(verify_region_lists(), "Bad region lists.");
2706 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
2707 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
2708 print_tracing_info();
2709 vm_exit(-1);
2710 }
2711 }
2713 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
2714 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
2715 // make sure we don't call set_gc_alloc_region() multiple times on
2716 // the same region
2717 assert(r == NULL || !r->is_gc_alloc_region(),
2718 "shouldn't already be a GC alloc region");
2719 HeapWord* original_top = NULL;
2720 if (r != NULL)
2721 original_top = r->top();
2723 // We will want to record the used space in r as being there before gc.
2724 // Once we install it as a GC alloc region, it's eligible for allocation.
2725 // So record it now and use it later.
2726 size_t r_used = 0;
2727 if (r != NULL) {
2728 r_used = r->used();
2730 if (ParallelGCThreads > 0) {
2731 // need to take the lock to guard against two threads calling
2732 // get_gc_alloc_region concurrently (very unlikely but...)
2733 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2734 r->save_marks();
2735 }
2736 }
2737 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
2738 _gc_alloc_regions[purpose] = r;
2739 if (old_alloc_region != NULL) {
2740 // Replace aliases too.
2741 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2742 if (_gc_alloc_regions[ap] == old_alloc_region) {
2743 _gc_alloc_regions[ap] = r;
2744 }
2745 }
2746 }
2747 if (r != NULL) {
2748 push_gc_alloc_region(r);
2749 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
2750 // We are using a region as a GC alloc region after it has been used
2751 // as a mutator allocation region during the current marking cycle.
2752 // The mutator-allocated objects are currently implicitly marked, but
2753 // when we move hr->next_top_at_mark_start() forward at the end
2754 // of the GC pause, they won't be. We therefore mark all objects in
2755 // the "gap". We do this object-by-object, since marking densely
2756 // does not currently work right with marking bitmap iteration. This
2757 // means we rely on TLAB filling at the start of pauses, and no
2758 // "resuscitation" of filled TLAB's. If we want to do this, we need
2759 // to fix the marking bitmap iteration.
2760 HeapWord* curhw = r->next_top_at_mark_start();
2761 HeapWord* t = original_top;
2763 while (curhw < t) {
2764 oop cur = (oop)curhw;
2765 // We'll assume parallel for generality. This is rare code.
2766 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
2767 curhw = curhw + cur->size();
2768 }
2769 assert(curhw == t, "Should have parsed correctly.");
2770 }
2771 if (G1PolicyVerbose > 1) {
2772 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
2773 "for survivors:", r->bottom(), original_top, r->end());
2774 r->print();
2775 }
2776 g1_policy()->record_before_bytes(r_used);
2777 }
2778 }
2780 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
2781 assert(Thread::current()->is_VM_thread() ||
2782 par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
2783 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
2784 "Precondition.");
2785 hr->set_is_gc_alloc_region(true);
2786 hr->set_next_gc_alloc_region(_gc_alloc_region_list);
2787 _gc_alloc_region_list = hr;
2788 }
2790 #ifdef G1_DEBUG
2791 class FindGCAllocRegion: public HeapRegionClosure {
2792 public:
2793 bool doHeapRegion(HeapRegion* r) {
2794 if (r->is_gc_alloc_region()) {
2795 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
2796 r->hrs_index(), r->bottom());
2797 }
2798 return false;
2799 }
2800 };
2801 #endif // G1_DEBUG
2803 void G1CollectedHeap::forget_alloc_region_list() {
2804 assert(Thread::current()->is_VM_thread(), "Precondition");
2805 while (_gc_alloc_region_list != NULL) {
2806 HeapRegion* r = _gc_alloc_region_list;
2807 assert(r->is_gc_alloc_region(), "Invariant.");
2808 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
2809 // newly allocated data in order to be able to apply deferred updates
2810 // before the GC is done, for verification purposes (i.e. to allow
2811 // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
2812 // collection.
2813 r->ContiguousSpace::set_saved_mark();
2814 _gc_alloc_region_list = r->next_gc_alloc_region();
2815 r->set_next_gc_alloc_region(NULL);
2816 r->set_is_gc_alloc_region(false);
2817 if (r->is_survivor()) {
2818 if (r->is_empty()) {
2819 r->set_not_young();
2820 } else {
2821 _young_list->add_survivor_region(r);
2822 }
2823 }
2824 if (r->is_empty()) {
2825 ++_free_regions;
2826 }
2827 }
2828 #ifdef G1_DEBUG
2829 FindGCAllocRegion fa;
2830 heap_region_iterate(&fa);
2831 #endif // G1_DEBUG
2832 }
2835 bool G1CollectedHeap::check_gc_alloc_regions() {
2836 // TODO: allocation regions check
2837 return true;
2838 }
2840 void G1CollectedHeap::get_gc_alloc_regions() {
2841 // First, let's check that the GC alloc region list is empty (it should be)
2842 assert(_gc_alloc_region_list == NULL, "invariant");
2844 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2845 assert(_gc_alloc_regions[ap] == NULL, "invariant");
2847 // Create new GC alloc regions.
2848 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
2849 _retained_gc_alloc_regions[ap] = NULL;
2851 if (alloc_region != NULL) {
2852 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
2854 // let's make sure that the GC alloc region is not tagged as such
2855 // outside a GC operation
2856 assert(!alloc_region->is_gc_alloc_region(), "sanity");
2858 if (alloc_region->in_collection_set() ||
2859 alloc_region->top() == alloc_region->end() ||
2860 alloc_region->top() == alloc_region->bottom()) {
2861 // we will discard the current GC alloc region if it's in the
2862 // collection set (it can happen!), if it's already full (no
2863 // point in using it), or if it's empty (this means that it
2864 // was emptied during a cleanup and it should be on the free
2865 // list now).
2867 alloc_region = NULL;
2868 }
2869 }
2871 if (alloc_region == NULL) {
2872 // we will get a new GC alloc region
2873 alloc_region = newAllocRegionWithExpansion(ap, 0);
2874 }
2876 if (alloc_region != NULL) {
2877 assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
2878 set_gc_alloc_region(ap, alloc_region);
2879 }
2881 assert(_gc_alloc_regions[ap] == NULL ||
2882 _gc_alloc_regions[ap]->is_gc_alloc_region(),
2883 "the GC alloc region should be tagged as such");
2884 assert(_gc_alloc_regions[ap] == NULL ||
2885 _gc_alloc_regions[ap] == _gc_alloc_region_list,
2886 "the GC alloc region should be the same as the GC alloc list head");
2887 }
2888 // Set alternative regions for allocation purposes that have reached
2889 // their limit.
2890 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2891 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
2892 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
2893 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
2894 }
2895 }
2896 assert(check_gc_alloc_regions(), "alloc regions messed up");
2897 }
2899 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
2900 // We keep a separate list of all regions that have been alloc regions in
2901 // the current collection pause. Forget that now. This method will
2902 // untag the GC alloc regions and tear down the GC alloc region
2903 // list. It's desirable that no regions are tagged as GC alloc
2904 // outside GCs.
2905 forget_alloc_region_list();
2907 // The current alloc regions contain objs that have survived
2908 // collection. Make them no longer GC alloc regions.
2909 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2910 HeapRegion* r = _gc_alloc_regions[ap];
2911 _retained_gc_alloc_regions[ap] = NULL;
2913 if (r != NULL) {
2914 // we retain nothing on _gc_alloc_regions between GCs
2915 set_gc_alloc_region(ap, NULL);
2916 _gc_alloc_region_counts[ap] = 0;
2918 if (r->is_empty()) {
2919 // we didn't actually allocate anything in it; let's just put
2920 // it on the free list
2921 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
2922 r->set_zero_fill_complete();
2923 put_free_region_on_list_locked(r);
2924 } else if (_retain_gc_alloc_region[ap] && !totally) {
2925 // retain it so that we can use it at the beginning of the next GC
2926 _retained_gc_alloc_regions[ap] = r;
2927 }
2928 }
2929 }
2930 }
2932 #ifndef PRODUCT
2933 // Useful for debugging
2935 void G1CollectedHeap::print_gc_alloc_regions() {
2936 gclog_or_tty->print_cr("GC alloc regions");
2937 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2938 HeapRegion* r = _gc_alloc_regions[ap];
2939 if (r == NULL) {
2940 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL);
2941 } else {
2942 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT,
2943 ap, r->bottom(), r->used());
2944 }
2945 }
2946 }
2947 #endif // PRODUCT
2949 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
2950 _drain_in_progress = false;
2951 set_evac_failure_closure(cl);
2952 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
2953 }
2955 void G1CollectedHeap::finalize_for_evac_failure() {
2956 assert(_evac_failure_scan_stack != NULL &&
2957 _evac_failure_scan_stack->length() == 0,
2958 "Postcondition");
2959 assert(!_drain_in_progress, "Postcondition");
2960 // Don't have to delete, since the scan stack is a resource object.
2961 _evac_failure_scan_stack = NULL;
2962 }
2966 // *** Sequential G1 Evacuation
2968 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
2969 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
2970 // let the caller handle alloc failure
2971 if (alloc_region == NULL) return NULL;
2972 assert(isHumongous(word_size) || !alloc_region->isHumongous(),
2973 "Either the object is humongous or the region isn't");
2974 HeapWord* block = alloc_region->allocate(word_size);
2975 if (block == NULL) {
2976 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
2977 }
2978 return block;
2979 }
2981 class G1IsAliveClosure: public BoolObjectClosure {
2982 G1CollectedHeap* _g1;
2983 public:
2984 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
2985 void do_object(oop p) { assert(false, "Do not call."); }
2986 bool do_object_b(oop p) {
2987 // It is reachable if it is outside the collection set, or is inside
2988 // and forwarded.
2990 #ifdef G1_DEBUG
2991 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
2992 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
2993 !_g1->obj_in_cs(p) || p->is_forwarded());
2994 #endif // G1_DEBUG
2996 return !_g1->obj_in_cs(p) || p->is_forwarded();
2997 }
2998 };
3000 class G1KeepAliveClosure: public OopClosure {
3001 G1CollectedHeap* _g1;
3002 public:
3003 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3004 void do_oop(narrowOop* p) {
3005 guarantee(false, "NYI");
3006 }
3007 void do_oop(oop* p) {
3008 oop obj = *p;
3009 #ifdef G1_DEBUG
3010 if (PrintGC && Verbose) {
3011 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
3012 p, (void*) obj, (void*) *p);
3013 }
3014 #endif // G1_DEBUG
3016 if (_g1->obj_in_cs(obj)) {
3017 assert( obj->is_forwarded(), "invariant" );
3018 *p = obj->forwardee();
3020 #ifdef G1_DEBUG
3021 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
3022 (void*) obj, (void*) *p);
3023 #endif // G1_DEBUG
3024 }
3025 }
3026 };
3028 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
3029 private:
3030 G1CollectedHeap* _g1;
3031 G1RemSet* _g1_rem_set;
3032 public:
3033 UpdateRSetImmediate(G1CollectedHeap* g1) :
3034 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
3036 void do_oop(narrowOop* p) {
3037 guarantee(false, "NYI");
3038 }
3039 void do_oop(oop* p) {
3040 assert(_from->is_in_reserved(p), "paranoia");
3041 if (*p != NULL && !_from->is_survivor()) {
3042 _g1_rem_set->par_write_ref(_from, p, 0);
3043 }
3044 }
3045 };
3047 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
3048 private:
3049 G1CollectedHeap* _g1;
3050 DirtyCardQueue *_dcq;
3051 CardTableModRefBS* _ct_bs;
3053 public:
3054 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3055 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3057 void do_oop(narrowOop* p) {
3058 guarantee(false, "NYI");
3059 }
3060 void do_oop(oop* p) {
3061 assert(_from->is_in_reserved(p), "paranoia");
3062 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
3063 size_t card_index = _ct_bs->index_for(p);
3064 if (_ct_bs->mark_card_deferred(card_index)) {
3065 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3066 }
3067 }
3068 }
3069 };
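// Usage note (sketch, based on the closures above): during evacuation-failure
// handling, remove_self_forwarding_pointers() below picks one of these two
// closures depending on G1DeferredRSUpdate -- UpdateRSetImmediate pushes the
// reference straight into the remembered set via par_write_ref, while
// UpdateRSetDeferred only marks the card as deferred and enqueues it so the
// update is applied later by the refinement machinery.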
3073 class RemoveSelfPointerClosure: public ObjectClosure {
3074 private:
3075 G1CollectedHeap* _g1;
3076 ConcurrentMark* _cm;
3077 HeapRegion* _hr;
3078 size_t _prev_marked_bytes;
3079 size_t _next_marked_bytes;
3080 OopsInHeapRegionClosure *_cl;
3081 public:
3082 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
3083 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
3084 _next_marked_bytes(0), _cl(cl) {}
3086 size_t prev_marked_bytes() { return _prev_marked_bytes; }
3087 size_t next_marked_bytes() { return _next_marked_bytes; }
3089 // The original idea here was to coalesce evacuated and dead objects.
3090 // However that caused complications with the block offset table (BOT).
3091 // In particular if there were two TLABs, one of them partially refined.
3092 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
3093 // The BOT entries of the unrefined part of TLAB_2 point to the start
3094 // of TLAB_2. If the last object of the TLAB_1 and the first object
3095 // of TLAB_2 are coalesced, then the cards of the unrefined part
3096 // would point into the middle of the filler object.
3097 //
3098 // The current approach is to not coalesce and leave the BOT contents intact.
3099 void do_object(oop obj) {
3100 if (obj->is_forwarded() && obj->forwardee() == obj) {
3101 // The object failed to move.
3102 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
3103 _cm->markPrev(obj);
3104 assert(_cm->isPrevMarked(obj), "Should be marked!");
3105 _prev_marked_bytes += (obj->size() * HeapWordSize);
3106 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
3107 _cm->markAndGrayObjectIfNecessary(obj);
3108 }
3109 obj->set_mark(markOopDesc::prototype());
3110 // While we were processing RSet buffers during the
3111 // collection, we actually didn't scan any cards on the
3112 // collection set, since we didn't want to update remembered
3113 // sets with entries that point into the collection set, given
3114 // that live objects from the collection set are about to move
3115 // and such entries will be stale very soon. This change also
3116 // dealt with a reliability issue which involved scanning a
3117 // card in the collection set and coming across an array that
3118 // was being chunked and looking malformed. The problem is
3119 // that, if evacuation fails, we might have remembered set
3120 // entries missing given that we skipped cards on the
3121 // collection set. So, we'll recreate such entries now.
3122 obj->oop_iterate(_cl);
3123 assert(_cm->isPrevMarked(obj), "Should be marked!");
3124 } else {
3125 // The object has been either evacuated or is dead. Fill it with a
3126 // dummy object.
3127 MemRegion mr((HeapWord*)obj, obj->size());
3128 CollectedHeap::fill_with_object(mr);
3129 _cm->clearRangeBothMaps(mr);
3130 }
3131 }
3132 };
3134 void G1CollectedHeap::remove_self_forwarding_pointers() {
3135 UpdateRSetImmediate immediate_update(_g1h);
3136 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
3137 UpdateRSetDeferred deferred_update(_g1h, &dcq);
3138 OopsInHeapRegionClosure *cl;
3139 if (G1DeferredRSUpdate) {
3140 cl = &deferred_update;
3141 } else {
3142 cl = &immediate_update;
3143 }
3144 HeapRegion* cur = g1_policy()->collection_set();
3145 while (cur != NULL) {
3146 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3148 RemoveSelfPointerClosure rspc(_g1h, cl);
3149 if (cur->evacuation_failed()) {
3150 assert(cur->in_collection_set(), "bad CS");
3151 cl->set_region(cur);
3152 cur->object_iterate(&rspc);
3154 // A number of manipulations to make the TAMS be the current top,
3155 // and the marked bytes be the ones observed in the iteration.
3156 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
3157 // The comments below are the postconditions achieved by the
3158 // calls. Note especially the last such condition, which says that
3159 // the count of marked bytes has been properly restored.
3160 cur->note_start_of_marking(false);
3161 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3162 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
3163 // _next_marked_bytes == prev_marked_bytes.
3164 cur->note_end_of_marking();
3165 // _prev_top_at_mark_start == top(),
3166 // _prev_marked_bytes == prev_marked_bytes
3167 }
3168 // If there is no mark in progress, we modified the _next variables
3169 // above needlessly, but harmlessly.
3170 if (_g1h->mark_in_progress()) {
3171 cur->note_start_of_marking(false);
3172 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3173 // _next_marked_bytes == next_marked_bytes.
3174 }
3176 // Now make sure the region has the right index in the sorted array.
3177 g1_policy()->note_change_in_marked_bytes(cur);
3178 }
3179 cur = cur->next_in_collection_set();
3180 }
3181 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3183 // Now restore saved marks, if any.
3184 if (_objs_with_preserved_marks != NULL) {
3185 assert(_preserved_marks_of_objs != NULL, "Both or none.");
3186 assert(_objs_with_preserved_marks->length() ==
3187 _preserved_marks_of_objs->length(), "Both or none.");
3188 guarantee(_objs_with_preserved_marks->length() ==
3189 _preserved_marks_of_objs->length(), "Both or none.");
3190 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
3191 oop obj = _objs_with_preserved_marks->at(i);
3192 markOop m = _preserved_marks_of_objs->at(i);
3193 obj->set_mark(m);
3194 }
3195 // Delete the preserved marks growable arrays (allocated on the C heap).
3196 delete _objs_with_preserved_marks;
3197 delete _preserved_marks_of_objs;
3198 _objs_with_preserved_marks = NULL;
3199 _preserved_marks_of_objs = NULL;
3200 }
3201 }
3203 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
3204 _evac_failure_scan_stack->push(obj);
3205 }
3207 void G1CollectedHeap::drain_evac_failure_scan_stack() {
3208 assert(_evac_failure_scan_stack != NULL, "precondition");
3210 while (_evac_failure_scan_stack->length() > 0) {
3211 oop obj = _evac_failure_scan_stack->pop();
3212 _evac_failure_closure->set_region(heap_region_containing(obj));
3213 obj->oop_iterate_backwards(_evac_failure_closure);
3214 }
3215 }
3217 void G1CollectedHeap::handle_evacuation_failure(oop old) {
3218 markOop m = old->mark();
3219 // forward to self
3220 assert(!old->is_forwarded(), "precondition");
3222 old->forward_to(old);
3223 handle_evacuation_failure_common(old, m);
3224 }
3226 oop
3227 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
3228 oop old) {
3229 markOop m = old->mark();
3230 oop forward_ptr = old->forward_to_atomic(old);
3231 if (forward_ptr == NULL) {
3232 // Forward-to-self succeeded.
3233 if (_evac_failure_closure != cl) {
3234 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
3235 assert(!_drain_in_progress,
3236 "Should only be true while someone holds the lock.");
3237 // Set the global evac-failure closure to the current thread's.
3238 assert(_evac_failure_closure == NULL, "Or locking has failed.");
3239 set_evac_failure_closure(cl);
3240 // Now do the common part.
3241 handle_evacuation_failure_common(old, m);
3242 // Reset to NULL.
3243 set_evac_failure_closure(NULL);
3244 } else {
3245 // The lock is already held, and this is recursive.
3246 assert(_drain_in_progress, "This should only be the recursive case.");
3247 handle_evacuation_failure_common(old, m);
3248 }
3249 return old;
3250 } else {
3251 // Someone else had a place to copy it.
3252 return forward_ptr;
3253 }
3254 }
3256 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
3257 set_evacuation_failed(true);
3259 preserve_mark_if_necessary(old, m);
3261 HeapRegion* r = heap_region_containing(old);
3262 if (!r->evacuation_failed()) {
3263 r->set_evacuation_failed(true);
3264 if (G1PrintRegions) {
3265 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
3266 "["PTR_FORMAT","PTR_FORMAT")\n",
3267 r, r->bottom(), r->end());
3268 }
3269 }
3271 push_on_evac_failure_scan_stack(old);
3273 if (!_drain_in_progress) {
3274 // prevent recursion in copy_to_survivor_space()
3275 _drain_in_progress = true;
3276 drain_evac_failure_scan_stack();
3277 _drain_in_progress = false;
3278 }
3279 }
3281 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
3282 if (m != markOopDesc::prototype()) {
3283 if (_objs_with_preserved_marks == NULL) {
3284 assert(_preserved_marks_of_objs == NULL, "Both or none.");
3285 _objs_with_preserved_marks =
3286 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
3287 _preserved_marks_of_objs =
3288 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
3289 }
3290 _objs_with_preserved_marks->push(obj);
3291 _preserved_marks_of_objs->push(m);
3292 }
3293 }
3295 // *** Parallel G1 Evacuation
3297 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
3298 size_t word_size) {
3299 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
3300 // let the caller handle alloc failure
3301 if (alloc_region == NULL) return NULL;
3303 HeapWord* block = alloc_region->par_allocate(word_size);
3304 if (block == NULL) {
3305 MutexLockerEx x(par_alloc_during_gc_lock(),
3306 Mutex::_no_safepoint_check_flag);
3307 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
3308 }
3309 return block;
3310 }
3312 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
3313 bool par) {
3314 // Another thread might have obtained alloc_region for the given
3315 // purpose, and might be attempting to allocate in it, and might
3316 // succeed. Therefore, we can't do the "finalization" stuff on the
3317 // region below until we're sure the last allocation has happened.
3318 // We ensure this by allocating the remaining space with a garbage
3319 // object.
3320 if (par) par_allocate_remaining_space(alloc_region);
3321 // Now we can do the post-GC stuff on the region.
3322 alloc_region->note_end_of_copying();
3323 g1_policy()->record_after_bytes(alloc_region->used());
3324 }
3326 HeapWord*
3327 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
3328 HeapRegion* alloc_region,
3329 bool par,
3330 size_t word_size) {
3331 HeapWord* block = NULL;
3332 // In the parallel case, a thread that previously obtained the lock may have
3333 // already assigned a new gc_alloc_region.
3334 if (alloc_region != _gc_alloc_regions[purpose]) {
3335 assert(par, "But should only happen in parallel case.");
3336 alloc_region = _gc_alloc_regions[purpose];
3337 if (alloc_region == NULL) return NULL;
3338 block = alloc_region->par_allocate(word_size);
3339 if (block != NULL) return block;
3340 // Otherwise, continue; this new region is empty, too.
3341 }
3342 assert(alloc_region != NULL, "We better have an allocation region");
3343 retire_alloc_region(alloc_region, par);
3345 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
3346 // Cannot allocate more regions for the given purpose.
3347 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
3348 // Is there an alternative?
3349 if (purpose != alt_purpose) {
3350 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
3351 // Has the alternative region not been aliased?
3352 if (alloc_region != alt_region && alt_region != NULL) {
3353 // Try to allocate in the alternative region.
3354 if (par) {
3355 block = alt_region->par_allocate(word_size);
3356 } else {
3357 block = alt_region->allocate(word_size);
3358 }
3359 // Make an alias.
3360 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
3361 if (block != NULL) {
3362 return block;
3363 }
3364 retire_alloc_region(alt_region, par);
3365 }
3366 // Both the allocation region and the alternative one are full
3367 // and aliased; replace them with a new allocation region.
3368 purpose = alt_purpose;
3369 } else {
3370 set_gc_alloc_region(purpose, NULL);
3371 return NULL;
3372 }
3373 }
3375 // Now allocate a new region for allocation.
3376 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
3378 // let the caller handle alloc failure
3379 if (alloc_region != NULL) {
3381 assert(check_gc_alloc_regions(), "alloc regions messed up");
3382 assert(alloc_region->saved_mark_at_top(),
3383 "Mark should have been saved already.");
3384 // We used to assert that the region was zero-filled here, but no
3385 // longer.
3387 // This must be done last: once it's installed, other regions may
3388 // allocate in it (without holding the lock.)
3389 set_gc_alloc_region(purpose, alloc_region);
3391 if (par) {
3392 block = alloc_region->par_allocate(word_size);
3393 } else {
3394 block = alloc_region->allocate(word_size);
3395 }
3396 // Caller handles alloc failure.
3397 } else {
3398 // This also sets any other purposes aliased to the same old alloc region to NULL.
3399 set_gc_alloc_region(purpose, NULL);
3400 }
3401 return block; // May be NULL.
3402 }
3404 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
3405 HeapWord* block = NULL;
3406 size_t free_words;
3407 do {
3408 free_words = r->free()/HeapWordSize;
3409 // If there's too little space, no one can allocate, so we're done.
3410 if (free_words < (size_t)oopDesc::header_size()) return;
3411 // Otherwise, try to claim it.
3412 block = r->par_allocate(free_words);
3413 } while (block == NULL);
3414 fill_with_object(block, free_words);
3415 }
3417 #define use_local_bitmaps 1
3418 #define verify_local_bitmaps 0
3420 #ifndef PRODUCT
3422 class GCLabBitMap;
3423 class GCLabBitMapClosure: public BitMapClosure {
3424 private:
3425 ConcurrentMark* _cm;
3426 GCLabBitMap* _bitmap;
3428 public:
3429 GCLabBitMapClosure(ConcurrentMark* cm,
3430 GCLabBitMap* bitmap) {
3431 _cm = cm;
3432 _bitmap = bitmap;
3433 }
3435 virtual bool do_bit(size_t offset);
3436 };
3438 #endif // PRODUCT
3440 #define oop_buffer_length 256
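// GCLabBitMap (below) is a small, thread-local bitmap covering a single
// GCLab. set_buffer() rounds the covered range's start down so that the
// first bit of the local bitmap lines up with a word boundary of the
// global next-mark bitmap; retire() can then merge the local marks into
// the global bitmap with a word-wise mostly_disjoint_range_union rather
// than (presumably more expensive) per-object marking.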
3442 class GCLabBitMap: public BitMap {
3443 private:
3444 ConcurrentMark* _cm;
3446 int _shifter;
3447 size_t _bitmap_word_covers_words;
3449 // beginning of the heap
3450 HeapWord* _heap_start;
3452 // this is the actual start of the GCLab
3453 HeapWord* _real_start_word;
3455 // this is the actual end of the GCLab
3456 HeapWord* _real_end_word;
3458 // this is the first word, possibly located before the actual start
3459 // of the GCLab, that corresponds to the first bit of the bitmap
3460 HeapWord* _start_word;
3462 // size of a GCLab in words
3463 size_t _gclab_word_size;
3465 static int shifter() {
3466 return MinObjAlignment - 1;
3467 }
3469 // how many heap words does a single bitmap word correspond to?
3470 static size_t bitmap_word_covers_words() {
3471 return BitsPerWord << shifter();
3472 }
3474 static size_t gclab_word_size() {
3475 return G1ParallelGCAllocBufferSize / HeapWordSize;
3476 }
3478 static size_t bitmap_size_in_bits() {
3479 size_t bits_in_bitmap = gclab_word_size() >> shifter();
3480 // We are going to ensure that the beginning of a word in this
3481 // bitmap also corresponds to the beginning of a word in the
3482 // global marking bitmap. To handle the case where a GCLab
3483 // starts in the middle of a bitmap word, we need to add enough
3484 // space (i.e. up to a bitmap word) to ensure that we have
3485 // enough bits in the bitmap.
3486 return bits_in_bitmap + BitsPerWord - 1;
3487 }
3488 public:
3489 GCLabBitMap(HeapWord* heap_start)
3490 : BitMap(bitmap_size_in_bits()),
3491 _cm(G1CollectedHeap::heap()->concurrent_mark()),
3492 _shifter(shifter()),
3493 _bitmap_word_covers_words(bitmap_word_covers_words()),
3494 _heap_start(heap_start),
3495 _gclab_word_size(gclab_word_size()),
3496 _real_start_word(NULL),
3497 _real_end_word(NULL),
3498 _start_word(NULL)
3499 {
3500 guarantee( size_in_words() >= bitmap_size_in_words(),
3501 "just making sure");
3502 }
3504 inline unsigned heapWordToOffset(HeapWord* addr) {
3505 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
3506 assert(offset < size(), "offset should be within bounds");
3507 return offset;
3508 }
3510 inline HeapWord* offsetToHeapWord(size_t offset) {
3511 HeapWord* addr = _start_word + (offset << _shifter);
3512 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
3513 return addr;
3514 }
3516 bool fields_well_formed() {
3517 bool ret1 = (_real_start_word == NULL) &&
3518 (_real_end_word == NULL) &&
3519 (_start_word == NULL);
3520 if (ret1)
3521 return true;
3523 bool ret2 = _real_start_word >= _start_word &&
3524 _start_word < _real_end_word &&
3525 (_real_start_word + _gclab_word_size) == _real_end_word &&
3526 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
3527 > _real_end_word;
3528 return ret2;
3529 }
3531 inline bool mark(HeapWord* addr) {
3532 guarantee(use_local_bitmaps, "invariant");
3533 assert(fields_well_formed(), "invariant");
3535 if (addr >= _real_start_word && addr < _real_end_word) {
3536 assert(!isMarked(addr), "should not have already been marked");
3538 // first mark it on the bitmap
3539 at_put(heapWordToOffset(addr), true);
3541 return true;
3542 } else {
3543 return false;
3544 }
3545 }
3547 inline bool isMarked(HeapWord* addr) {
3548 guarantee(use_local_bitmaps, "invariant");
3549 assert(fields_well_formed(), "invariant");
3551 return at(heapWordToOffset(addr));
3552 }
3554 void set_buffer(HeapWord* start) {
3555 guarantee(use_local_bitmaps, "invariant");
3556 clear();
3558 assert(start != NULL, "invariant");
3559 _real_start_word = start;
3560 _real_end_word = start + _gclab_word_size;
3562 size_t diff =
3563 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
3564 _start_word = start - diff;
3566 assert(fields_well_formed(), "invariant");
3567 }
3569 #ifndef PRODUCT
3570 void verify() {
3571 // verify that the marks have been propagated
3572 GCLabBitMapClosure cl(_cm, this);
3573 iterate(&cl);
3574 }
3575 #endif // PRODUCT
3577 void retire() {
3578 guarantee(use_local_bitmaps, "invariant");
3579 assert(fields_well_formed(), "invariant");
3581 if (_start_word != NULL) {
3582 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
3584 // this means that the bitmap was set up for the GCLab
3585 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
3587 mark_bitmap->mostly_disjoint_range_union(this,
3588 0, // always start from the start of the bitmap
3589 _start_word,
3590 size_in_words());
3591 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
3593 #ifndef PRODUCT
3594 if (use_local_bitmaps && verify_local_bitmaps)
3595 verify();
3596 #endif // PRODUCT
3597 } else {
3598 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
3599 }
3600 }
3602 static size_t bitmap_size_in_words() {
3603 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
3604 }
3605 };
3607 #ifndef PRODUCT
3609 bool GCLabBitMapClosure::do_bit(size_t offset) {
3610 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
3611 guarantee(_cm->isMarked(oop(addr)), "it should be!");
3612 return true;
3613 }
3615 #endif // PRODUCT
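// G1ParGCAllocBuffer extends ParGCAllocBuffer with the local mark bitmap
// above: while concurrent marking is in progress, objects copied into the
// buffer are marked locally and the marks are flushed to the global
// next-mark bitmap when the buffer is retired.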
3617 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
3618 private:
3619 bool _retired;
3620 bool _during_marking;
3621 GCLabBitMap _bitmap;
3623 public:
3624 G1ParGCAllocBuffer() :
3625 ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
3626 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
3627 _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
3628 _retired(false)
3629 { }
3631 inline bool mark(HeapWord* addr) {
3632 guarantee(use_local_bitmaps, "invariant");
3633 assert(_during_marking, "invariant");
3634 return _bitmap.mark(addr);
3635 }
3637 inline void set_buf(HeapWord* buf) {
3638 if (use_local_bitmaps && _during_marking)
3639 _bitmap.set_buffer(buf);
3640 ParGCAllocBuffer::set_buf(buf);
3641 _retired = false;
3642 }
3644 inline void retire(bool end_of_gc, bool retain) {
3645 if (_retired)
3646 return;
3647 if (use_local_bitmaps && _during_marking) {
3648 _bitmap.retire();
3649 }
3650 ParGCAllocBuffer::retire(end_of_gc, retain);
3651 _retired = true;
3652 }
3653 };
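// Per-worker state for a parallel evacuation pause: the worker's task
// queue and overflow queue of references to scan, one allocation buffer
// per GCAllocPurpose, a dirty card queue for deferred remembered-set
// updates, the surviving-young-words histogram, and per-phase timing and
// (optional) detailed push/pop/steal statistics.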
3656 class G1ParScanThreadState : public StackObj {
3657 protected:
3658 G1CollectedHeap* _g1h;
3659 RefToScanQueue* _refs;
3660 DirtyCardQueue _dcq;
3661 CardTableModRefBS* _ct_bs;
3662 G1RemSet* _g1_rem;
3664 typedef GrowableArray<oop*> OverflowQueue;
3665 OverflowQueue* _overflowed_refs;
3667 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
3668 ageTable _age_table;
3670 size_t _alloc_buffer_waste;
3671 size_t _undo_waste;
3673 OopsInHeapRegionClosure* _evac_failure_cl;
3674 G1ParScanHeapEvacClosure* _evac_cl;
3675 G1ParScanPartialArrayClosure* _partial_scan_cl;
3677 int _hash_seed;
3678 int _queue_num;
3680 int _term_attempts;
3681 #if G1_DETAILED_STATS
3682 int _pushes, _pops, _steals, _steal_attempts;
3683 int _overflow_pushes;
3684 #endif
3686 double _start;
3687 double _start_strong_roots;
3688 double _strong_roots_time;
3689 double _start_term;
3690 double _term_time;
3692 // Map from young-age-index (0 == not young, 1 is youngest) to
3693 // surviving words; _base is what we get back from the malloc call.
3694 size_t* _surviving_young_words_base;
3695 // this points into the array, as we use the first few entries for padding
3696 size_t* _surviving_young_words;
3698 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
3700 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
3702 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
3704 DirtyCardQueue& dirty_card_queue() { return _dcq; }
3705 CardTableModRefBS* ctbs() { return _ct_bs; }
3707 void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
3708 if (!from->is_survivor()) {
3709 _g1_rem->par_write_ref(from, p, tid);
3710 }
3711 }
3713 void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
3714 // If the new value of the field points into the same region or
3715 // is in the to-space, we don't need to include it in the RSet updates.
3716 if (!from->is_in_reserved(*p) && !from->is_survivor()) {
3717 size_t card_index = ctbs()->index_for(p);
3718 // If the card hasn't been added to the buffer, do it.
3719 if (ctbs()->mark_card_deferred(card_index)) {
3720 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
3721 }
3722 }
3723 }
3725 public:
3726 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3727 : _g1h(g1h),
3728 _refs(g1h->task_queue(queue_num)),
3729 _dcq(&g1h->dirty_card_queue_set()),
3730 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3731 _g1_rem(g1h->g1_rem_set()),
3732 _hash_seed(17), _queue_num(queue_num),
3733 _term_attempts(0),
3734 _age_table(false),
3735 #if G1_DETAILED_STATS
3736 _pushes(0), _pops(0), _steals(0),
3737 _steal_attempts(0), _overflow_pushes(0),
3738 #endif
3739 _strong_roots_time(0), _term_time(0),
3740 _alloc_buffer_waste(0), _undo_waste(0)
3741 {
3742 // we allocate young_cset_length() + 1 entries, since we
3743 // "sacrifice" entry 0 to keep track of surviving bytes for
3744 // non-young regions (where the age is -1)
3745 // We also add a few elements at the beginning and at the end in
3746 // an attempt to eliminate cache contention
3747 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
3748 size_t array_length = PADDING_ELEM_NUM +
3749 real_length +
3750 PADDING_ELEM_NUM;
3751 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
3752 if (_surviving_young_words_base == NULL)
3753 vm_exit_out_of_memory(array_length * sizeof(size_t),
3754 "Not enough space for young surv histo.");
3755 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
3756 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
3758 _overflowed_refs = new OverflowQueue(10);
3760 _start = os::elapsedTime();
3761 }
3763 ~G1ParScanThreadState() {
3764 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
3765 }
3767 RefToScanQueue* refs() { return _refs; }
3768 OverflowQueue* overflowed_refs() { return _overflowed_refs; }
3769 ageTable* age_table() { return &_age_table; }
3771 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
3772 return &_alloc_buffers[purpose];
3773 }
3775 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
3776 size_t undo_waste() { return _undo_waste; }
3778 void push_on_queue(oop* ref) {
3779 assert(ref != NULL, "invariant");
3780 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
3782 if (!refs()->push(ref)) {
3783 overflowed_refs()->push(ref);
3784 IF_G1_DETAILED_STATS(note_overflow_push());
3785 } else {
3786 IF_G1_DETAILED_STATS(note_push());
3787 }
3788 }
3790 void pop_from_queue(oop*& ref) {
3791 if (!refs()->pop_local(ref)) {
3792 ref = NULL;
3793 } else {
3794 assert(ref != NULL, "invariant");
3795 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
3796 "invariant");
3798 IF_G1_DETAILED_STATS(note_pop());
3799 }
3800 }
3802 void pop_from_overflow_queue(oop*& ref) {
3803 ref = overflowed_refs()->pop();
3804 }
3806 int refs_to_scan() { return refs()->size(); }
3807 int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
3809 void update_rs(HeapRegion* from, oop* p, int tid) {
3810 if (G1DeferredRSUpdate) {
3811 deferred_rs_update(from, p, tid);
3812 } else {
3813 immediate_rs_update(from, p, tid);
3814 }
3815 }
3817 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
3819 HeapWord* obj = NULL;
3820 if (word_sz * 100 <
3821 (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
3822 ParallelGCBufferWastePct) {
3823 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
3824 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
3825 alloc_buf->retire(false, false);
3827 HeapWord* buf =
3828 _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
3829 if (buf == NULL) return NULL; // Let caller handle allocation failure.
3830 // Otherwise.
3831 alloc_buf->set_buf(buf);
3833 obj = alloc_buf->allocate(word_sz);
3834 assert(obj != NULL, "buffer was definitely big enough...");
3835 } else {
3836 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
3837 }
3838 return obj;
3839 }
3841 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
3842 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
3843 if (obj != NULL) return obj;
3844 return allocate_slow(purpose, word_sz);
3845 }
3847 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
3848 if (alloc_buffer(purpose)->contains(obj)) {
3849 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
3850 "should contain whole object");
3851 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
3852 } else {
3853 CollectedHeap::fill_with_object(obj, word_sz);
3854 add_to_undo_waste(word_sz);
3855 }
3856 }
3858 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
3859 _evac_failure_cl = evac_failure_cl;
3860 }
3861 OopsInHeapRegionClosure* evac_failure_closure() {
3862 return _evac_failure_cl;
3863 }
3865 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
3866 _evac_cl = evac_cl;
3867 }
3869 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
3870 _partial_scan_cl = partial_scan_cl;
3871 }
3873 int* hash_seed() { return &_hash_seed; }
3874 int queue_num() { return _queue_num; }
3876 int term_attempts() { return _term_attempts; }
3877 void note_term_attempt() { _term_attempts++; }
3879 #if G1_DETAILED_STATS
3880 int pushes() { return _pushes; }
3881 int pops() { return _pops; }
3882 int steals() { return _steals; }
3883 int steal_attempts() { return _steal_attempts; }
3884 int overflow_pushes() { return _overflow_pushes; }
3886 void note_push() { _pushes++; }
3887 void note_pop() { _pops++; }
3888 void note_steal() { _steals++; }
3889 void note_steal_attempt() { _steal_attempts++; }
3890 void note_overflow_push() { _overflow_pushes++; }
3891 #endif
3893 void start_strong_roots() {
3894 _start_strong_roots = os::elapsedTime();
3895 }
3896 void end_strong_roots() {
3897 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
3898 }
3899 double strong_roots_time() { return _strong_roots_time; }
3901 void start_term_time() {
3902 note_term_attempt();
3903 _start_term = os::elapsedTime();
3904 }
3905 void end_term_time() {
3906 _term_time += (os::elapsedTime() - _start_term);
3907 }
3908 double term_time() { return _term_time; }
3910 double elapsed() {
3911 return os::elapsedTime() - _start;
3912 }
3914 size_t* surviving_young_words() {
3915 // Callers add 1 to skip entry 0, which accumulates surviving words
3916 // for age -1 regions (i.e. non-young ones)
3917 return _surviving_young_words;
3918 }
3920 void retire_alloc_buffers() {
3921 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3922 size_t waste = _alloc_buffers[ap].words_remaining();
3923 add_to_alloc_buffer_waste(waste);
3924 _alloc_buffers[ap].retire(true, false);
3925 }
3926 }
3928 private:
3929 void deal_with_reference(oop* ref_to_scan) {
3930 if (has_partial_array_mask(ref_to_scan)) {
3931 _partial_scan_cl->do_oop_nv(ref_to_scan);
3932 } else {
3933 // Note: we can use "raw" versions of "region_containing" because
3934 // "obj_to_scan" is definitely in the heap, and is not in a
3935 // humongous region.
3936 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
3937 _evac_cl->set_region(r);
3938 _evac_cl->do_oop_nv(ref_to_scan);
3939 }
3940 }
3942 public:
3943 void trim_queue() {
3944 // The draining loop is duplicated: the first copy drains the
3945 // overflow queue, the second drains the task queue. This is better
3946 // than a single loop that checks both conditions and, inside it,
3947 // pops either the overflow queue or the task queue, as each loop is
3948 // tighter. Also, the decision to drain the overflow queue first is
3949 // not arbitrary: the overflow queue is not visible to the other
3950 // workers, whereas the task queue is. So we want to drain the
3951 // "invisible" entries first, while allowing the other workers to
3952 // potentially steal the "visible" entries.
3954 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
3955 while (overflowed_refs_to_scan() > 0) {
3956 oop *ref_to_scan = NULL;
3957 pop_from_overflow_queue(ref_to_scan);
3958 assert(ref_to_scan != NULL, "invariant");
3959 // We shouldn't have pushed it on the queue if it was not
3960 // pointing into the CSet.
3962 assert(has_partial_array_mask(ref_to_scan) ||
3963 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3965 deal_with_reference(ref_to_scan);
3966 }
3968 while (refs_to_scan() > 0) {
3969 oop *ref_to_scan = NULL;
3970 pop_from_queue(ref_to_scan);
3972 if (ref_to_scan != NULL) {
3973 // We shouldn't have pushed it on the queue if it was not
3974 // pointing into the CSet.
3975 assert(has_partial_array_mask(ref_to_scan) ||
3976 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3978 deal_with_reference(ref_to_scan);
3979 }
3980 }
3981 }
3982 }
3983 };
3985 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
3986 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
3987 _par_scan_state(par_scan_state) { }
3989 // This closure is applied to the fields of the objects that have just been copied.
3990 // Should probably be made inline and moved into g1OopClosures.inline.hpp.
3991 void G1ParScanClosure::do_oop_nv(oop* p) {
3992 oop obj = *p;
3994 if (obj != NULL) {
3995 if (_g1->in_cset_fast_test(obj)) {
3996 // We're not going to even bother checking whether the object is
3997 // already forwarded or not, as this usually causes an immediate
3998 // stall. We'll try to prefetch the object (for write, given that
3999 // we might need to install the forwarding reference) and we'll
4000 // get back to it when we pop it from the queue.
4001 Prefetch::write(obj->mark_addr(), 0);
4002 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
4004 // Slightly paranoid test: catch potential problems here, before we
4005 // go into push_on_queue, so that we know where any problem is
4006 // coming from.
4007 assert(obj == *p, "the value of *p should not have changed");
4008 _par_scan_state->push_on_queue(p);
4009 } else {
4010 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4011 }
4012 }
4013 }
4015 void G1ParCopyHelper::mark_forwardee(oop* p) {
4016 // This is called _after_ do_oop_work has been called, hence after
4017 // the object has been relocated to its new location and *p points
4018 // to its new location.
4020 oop thisOop = *p;
4021 if (thisOop != NULL) {
4022 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
4023 "shouldn't still be in the CSet if evacuation didn't fail.");
4024 HeapWord* addr = (HeapWord*)thisOop;
4025 if (_g1->is_in_g1_reserved(addr))
4026 _cm->grayRoot(oop(addr));
4027 }
4028 }
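// copy_to_survivor_space (below): allocate space for the object according
// to the policy's evacuation destination, try to install the forwarding
// pointer atomically and, if we won the race, copy the object, update its
// age where tracked, mark it on the next-mark bitmap if needed, and scan
// (or chunk, for large object arrays) its fields. If allocation fails we
// fall back to handle_evacuation_failure_par; if another thread forwarded
// the object first we undo our allocation and use its copy.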
4030 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
4031 size_t word_sz = old->size();
4032 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4033 // +1 to make the -1 indexes valid...
4034 int young_index = from_region->young_index_in_cset()+1;
4035 assert( (from_region->is_young() && young_index > 0) ||
4036 (!from_region->is_young() && young_index == 0), "invariant" );
4037 G1CollectorPolicy* g1p = _g1->g1_policy();
4038 markOop m = old->mark();
4039 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4040 : m->age();
4041 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4042 word_sz);
4043 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4044 oop obj = oop(obj_ptr);
4046 if (obj_ptr == NULL) {
4047 // This will either forward-to-self, or detect that someone else has
4048 // installed a forwarding pointer.
4049 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4050 return _g1->handle_evacuation_failure_par(cl, old);
4051 }
4053 // We're going to allocate linearly, so might as well prefetch ahead.
4054 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4056 oop forward_ptr = old->forward_to_atomic(obj);
4057 if (forward_ptr == NULL) {
4058 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4059 if (g1p->track_object_age(alloc_purpose)) {
4060 // We could simply do obj->incr_age(). However, this causes a
4061 // performance issue. obj->incr_age() will first check whether
4062 // the object has a displaced mark by checking its mark word;
4063 // getting the mark word from the new location of the object
4064 // stalls. So, given that we already have the mark word and we
4065 // are about to install it anyway, it's better to increase the
4066 // age on the mark word, when the object does not have a
4067 // displaced mark word. We're not expecting many objects to have
4068 // a displaced marked word, so that case is not optimized
4069 // further (it could be...) and we simply call obj->incr_age().
4071 if (m->has_displaced_mark_helper()) {
4072 // in this case, we have to install the mark word first,
4073 // otherwise obj looks to be forwarded (the old mark word,
4074 // which contains the forward pointer, was copied)
4075 obj->set_mark(m);
4076 obj->incr_age();
4077 } else {
4078 m = m->incr_age();
4079 obj->set_mark(m);
4080 }
4081 _par_scan_state->age_table()->add(obj, word_sz);
4082 } else {
4083 obj->set_mark(m);
4084 }
4086 // preserve "next" mark bit
4087 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
4088 if (!use_local_bitmaps ||
4089 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
4090 // if we couldn't mark it on the local bitmap (this happens when
4091 // the object was not allocated in the GCLab), we have to bite
4092 // the bullet and do the standard parallel mark
4093 _cm->markAndGrayObjectIfNecessary(obj);
4094 }
4095 #if 1
4096 if (_g1->isMarkedNext(old)) {
4097 _cm->nextMarkBitMap()->parClear((HeapWord*)old);
4098 }
4099 #endif
4100 }
4102 size_t* surv_young_words = _par_scan_state->surviving_young_words();
4103 surv_young_words[young_index] += word_sz;
4105 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4106 arrayOop(old)->set_length(0);
4107 _par_scan_state->push_on_queue(set_partial_array_mask(old));
4108 } else {
4109 // No point in using the slower heap_region_containing() method,
4110 // given that we know obj is in the heap.
4111 _scanner->set_region(_g1->heap_region_containing_raw(obj));
4112 obj->oop_iterate_backwards(_scanner);
4113 }
4114 } else {
4115 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4116 obj = forward_ptr;
4117 }
4118 return obj;
4119 }
4121 template<bool do_gen_barrier, G1Barrier barrier,
4122 bool do_mark_forwardee, bool skip_cset_test>
4123 void G1ParCopyClosure<do_gen_barrier, barrier,
4124 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
4125 oop obj = *p;
4126 assert(barrier != G1BarrierRS || obj != NULL,
4127 "Precondition: G1BarrierRS implies obj is nonNull");
4129 // The only time we skip the cset test is when we're scanning
4130 // references popped from the queue. And we only push on the queue
4131 // references that we know point into the cset, so no point in
4132 // checking again. But we'll leave an assert here for peace of mind.
4133 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
4135 // here the null check is implicit in the in_cset_fast_test() call
4136 if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
4137 #if G1_REM_SET_LOGGING
4138 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
4139 "into CS.", p, (void*) obj);
4140 #endif
4141 if (obj->is_forwarded()) {
4142 *p = obj->forwardee();
4143 } else {
4144 *p = copy_to_survivor_space(obj);
4145 }
4146 // When scanning the RS, we only care about objs in CS.
4147 if (barrier == G1BarrierRS) {
4148 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4149 }
4150 }
4152 // When scanning moved objs, must look at all oops.
4153 if (barrier == G1BarrierEvac && obj != NULL) {
4154 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4155 }
4157 if (do_gen_barrier && obj != NULL) {
4158 par_do_barrier(p);
4159 }
4160 }
4162 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
4164 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
4165 oop obj, int start, int end) {
4166 // process our set of indices (include header in first chunk)
4167 assert(start < end, "invariant");
4168 T* const base = (T*)objArrayOop(obj)->base();
4169 T* const start_addr = (start == 0) ? (T*) obj : base + start;
4170 T* const end_addr = base + end;
4171 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
4172 _scanner.set_region(_g1->heap_region_containing(obj));
4173 obj->oop_iterate(&_scanner, mr);
4174 }
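// Large object arrays are scanned in chunks of ParGCArrayScanChunk
// elements. The length field of the old (forwarded-from) array records
// how far scanning has progressed: if more than two chunks' worth of
// elements remain, one chunk is processed now and the rest is re-pushed
// with the partial-array mask set; otherwise the remaining elements are
// processed and the true length is restored so the heap stays parsable
// in case of evacuation failure.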
4176 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
4177 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
4178 assert(has_partial_array_mask(p), "invariant");
4179 oop old = clear_partial_array_mask(p);
4180 assert(old->is_objArray(), "must be obj array");
4181 assert(old->is_forwarded(), "must be forwarded");
4182 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
4184 objArrayOop obj = objArrayOop(old->forwardee());
4185 assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
4186 // Process ParGCArrayScanChunk elements now
4187 // and push the remainder back onto queue
4188 int start = arrayOop(old)->length();
4189 int end = obj->length();
4190 int remainder = end - start;
4191 assert(start <= end, "just checking");
4192 if (remainder > 2 * ParGCArrayScanChunk) {
4193 // Test above combines last partial chunk with a full chunk
4194 end = start + ParGCArrayScanChunk;
4195 arrayOop(old)->set_length(end);
4196 // Push remainder.
4197 _par_scan_state->push_on_queue(set_partial_array_mask(old));
4198 } else {
4199 // Restore length so that the heap remains parsable in
4200 // case of evacuation failure.
4201 arrayOop(old)->set_length(end);
4202 }
4204 // process our set of indices (include header in first chunk)
4205 process_array_chunk<oop>(obj, start, end);
4206 }
4208 int G1ScanAndBalanceClosure::_nq = 0;
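// Each worker repeatedly drains its own queues and then tries to steal
// from the other workers' task queues; when stealing fails it offers
// termination to the ParallelTaskTerminator and exits once all workers
// agree there is no more work.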
4210 class G1ParEvacuateFollowersClosure : public VoidClosure {
4211 protected:
4212 G1CollectedHeap* _g1h;
4213 G1ParScanThreadState* _par_scan_state;
4214 RefToScanQueueSet* _queues;
4215 ParallelTaskTerminator* _terminator;
4217 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4218 RefToScanQueueSet* queues() { return _queues; }
4219 ParallelTaskTerminator* terminator() { return _terminator; }
4221 public:
4222 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4223 G1ParScanThreadState* par_scan_state,
4224 RefToScanQueueSet* queues,
4225 ParallelTaskTerminator* terminator)
4226 : _g1h(g1h), _par_scan_state(par_scan_state),
4227 _queues(queues), _terminator(terminator) {}
4229 void do_void() {
4230 G1ParScanThreadState* pss = par_scan_state();
4231 while (true) {
4232 oop* ref_to_scan;
4233 pss->trim_queue();
4234 IF_G1_DETAILED_STATS(pss->note_steal_attempt());
4235 if (queues()->steal(pss->queue_num(),
4236 pss->hash_seed(),
4237 ref_to_scan)) {
4238 IF_G1_DETAILED_STATS(pss->note_steal());
4240 // Slightly paranoid tests: catch potential problems here, before
4241 // we go into push_on_queue, so that we know where any problem is
4242 // coming from.
4243 assert(ref_to_scan != NULL, "invariant");
4244 assert(has_partial_array_mask(ref_to_scan) ||
4245 _g1h->obj_in_cs(*ref_to_scan), "invariant");
4246 pss->push_on_queue(ref_to_scan);
4247 continue;
4248 }
4249 pss->start_term_time();
4250 if (terminator()->offer_termination()) break;
4251 pss->end_term_time();
4252 }
4253 pss->end_term_time();
4254 pss->retire_alloc_buffers();
4255 }
4256 };
4258 class G1ParTask : public AbstractGangTask {
4259 protected:
4260 G1CollectedHeap* _g1h;
4261 RefToScanQueueSet *_queues;
4262 ParallelTaskTerminator _terminator;
4264 Mutex _stats_lock;
4265 Mutex* stats_lock() { return &_stats_lock; }
4267 size_t getNCards() {
4268 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4269 / G1BlockOffsetSharedArray::N_bytes;
4270 }
4272 public:
4273 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
4274 : AbstractGangTask("G1 collection"),
4275 _g1h(g1h),
4276 _queues(task_queues),
4277 _terminator(workers, _queues),
4278 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4279 {}
4281 RefToScanQueueSet* queues() { return _queues; }
4283 RefToScanQueue *work_queue(int i) {
4284 return queues()->queue(i);
4285 }
4287 void work(int i) {
4288 ResourceMark rm;
4289 HandleMark hm;
4291 G1ParScanThreadState pss(_g1h, i);
4292 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
4293 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
4294 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
4296 pss.set_evac_closure(&scan_evac_cl);
4297 pss.set_evac_failure_closure(&evac_failure_cl);
4298 pss.set_partial_scan_closure(&partial_scan_cl);
4300 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
4301 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
4302 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
4304 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
4305 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
4306 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
4308 OopsInHeapRegionClosure *scan_root_cl;
4309 OopsInHeapRegionClosure *scan_perm_cl;
4310 OopsInHeapRegionClosure *scan_so_cl;
4312 if (_g1h->g1_policy()->should_initiate_conc_mark()) {
4313 scan_root_cl = &scan_mark_root_cl;
4314 scan_perm_cl = &scan_mark_perm_cl;
4315 scan_so_cl = &scan_mark_heap_rs_cl;
4316 } else {
4317 scan_root_cl = &only_scan_root_cl;
4318 scan_perm_cl = &only_scan_perm_cl;
4319 scan_so_cl = &only_scan_heap_rs_cl;
4320 }
4322 pss.start_strong_roots();
4323 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4324 SharedHeap::SO_AllClasses,
4325 scan_root_cl,
4326 &only_scan_heap_rs_cl,
4327 scan_so_cl,
4328 scan_perm_cl,
4329 i);
4330 pss.end_strong_roots();
4331 {
4332 double start = os::elapsedTime();
4333 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4334 evac.do_void();
4335 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4336 double term_ms = pss.term_time()*1000.0;
4337 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
4338 _g1h->g1_policy()->record_termination_time(i, term_ms);
4339 }
4340 if (G1UseSurvivorSpaces) {
4341 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4342 }
4343 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4345 // Clean up any par-expanded rem sets.
4346 HeapRegionRemSet::par_cleanup();
4348 MutexLocker x(stats_lock());
4349 if (ParallelGCVerbose) {
4350 gclog_or_tty->print("Thread %d complete:\n", i);
4351 #if G1_DETAILED_STATS
4352 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n",
4353 pss.pushes(),
4354 pss.pops(),
4355 pss.overflow_pushes(),
4356 pss.steals(),
4357 pss.steal_attempts());
4358 #endif
4359 double elapsed = pss.elapsed();
4360 double strong_roots = pss.strong_roots_time();
4361 double term = pss.term_time();
4362 gclog_or_tty->print(" Elapsed: %7.2f ms.\n"
4363 " Strong roots: %7.2f ms (%6.2f%%)\n"
4364 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n",
4365 elapsed * 1000.0,
4366 strong_roots * 1000.0, (strong_roots*100.0/elapsed),
4367 term * 1000.0, (term*100.0/elapsed),
4368 pss.term_attempts());
4369 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
4370 gclog_or_tty->print(" Waste: %8dK\n"
4371 " Alloc Buffer: %8dK\n"
4372 " Undo: %8dK\n",
4373 (total_waste * HeapWordSize) / K,
4374 (pss.alloc_buffer_waste() * HeapWordSize) / K,
4375 (pss.undo_waste() * HeapWordSize) / K);
4376 }
4378 assert(pss.refs_to_scan() == 0, "Task queue should be empty");
4379 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
4380 }
4381 };
4383 // *** Common G1 Evacuation Stuff
4385 class G1CountClosure: public OopsInHeapRegionClosure {
4386 public:
4387 int n;
4388 G1CountClosure() : n(0) {}
4389 void do_oop(narrowOop* p) {
4390 guarantee(false, "NYI");
4391 }
4392 void do_oop(oop* p) {
4393 oop obj = *p;
4394 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
4395 "Rem set closure called on non-rem-set pointer.");
4396 n++;
4397 }
4398 };
4400 static G1CountClosure count_closure;
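// Strong-root processing for a single worker (below): external roots and
// the perm gen are scanned through buffering closures (so that closure
// application time can be measured separately), then the concurrent-mark
// stack, the scan-only young regions, the remembered sets that point into
// the collection set, and finally the reference processor's roots.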
4402 void
4403 G1CollectedHeap::
4404 g1_process_strong_roots(bool collecting_perm_gen,
4405 SharedHeap::ScanningOption so,
4406 OopClosure* scan_non_heap_roots,
4407 OopsInHeapRegionClosure* scan_rs,
4408 OopsInHeapRegionClosure* scan_so,
4409 OopsInGenClosure* scan_perm,
4410 int worker_i) {
4411 // First scan the strong roots, including the perm gen.
4412 double ext_roots_start = os::elapsedTime();
4413 double closure_app_time_sec = 0.0;
4415 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4416 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4417 buf_scan_perm.set_generation(perm_gen());
4419 process_strong_roots(collecting_perm_gen, so,
4420 &buf_scan_non_heap_roots,
4421 &buf_scan_perm);
4422 // Finish up any enqueued closure apps.
4423 buf_scan_non_heap_roots.done();
4424 buf_scan_perm.done();
4425 double ext_roots_end = os::elapsedTime();
4426 g1_policy()->reset_obj_copy_time(worker_i);
4427 double obj_copy_time_sec =
4428 buf_scan_non_heap_roots.closure_app_seconds() +
4429 buf_scan_perm.closure_app_seconds();
4430 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4431 double ext_root_time_ms =
4432 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4433 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4435 // Scan strong roots in mark stack.
4436 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
4437 concurrent_mark()->oops_do(scan_non_heap_roots);
4438 }
4439 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4440 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
4442 // XXX What should this be doing in the parallel case?
4443 g1_policy()->record_collection_pause_end_CH_strong_roots();
4444 if (scan_so != NULL) {
4445 scan_scan_only_set(scan_so, worker_i);
4446 }
4447 // Now scan the complement of the collection set.
4448 if (scan_rs != NULL) {
4449 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4450 }
4451 // Finish with the ref_processor roots.
4452 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4453 ref_processor()->oops_do(scan_non_heap_roots);
4454 }
4455 g1_policy()->record_collection_pause_end_G1_strong_roots();
4456 _process_strong_tasks->all_tasks_completed();
4457 }
4459 void
4460 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
4461 OopsInHeapRegionClosure* oc,
4462 int worker_i) {
4466 oc->set_region(r);
4468 HeapWord* p = r->bottom();
4469 HeapWord* t = r->top();
4470 guarantee( p == r->next_top_at_mark_start(), "invariant" );
4471 while (p < t) {
4472 oop obj = oop(p);
4473 p += obj->oop_iterate(oc);
4474 }
4475 }
4477 void
4478 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
4479 int worker_i) {
4480 double start = os::elapsedTime();
4482 BufferingOopsInHeapRegionClosure boc(oc);
4484 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
4485 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
4487 OopsInHeapRegionClosure *foc;
4488 if (g1_policy()->should_initiate_conc_mark())
4489 foc = &scan_and_mark;
4490 else
4491 foc = &scan_only;
4493 HeapRegion* hr;
4494 int n = 0;
4495 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
4496 scan_scan_only_region(hr, foc, worker_i);
4497 ++n;
4498 }
4499 boc.done();
4501 double closure_app_s = boc.closure_app_seconds();
4502 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
4503 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
4504 g1_policy()->record_scan_only_time(worker_i, ms, n);
4505 }
4507 void
4508 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4509 OopClosure* non_root_closure) {
4510 SharedHeap::process_weak_roots(root_closure, non_root_closure);
4511 }
4514 class SaveMarksClosure: public HeapRegionClosure {
4515 public:
4516 bool doHeapRegion(HeapRegion* r) {
4517 r->save_marks();
4518 return false;
4519 }
4520 };
4522 void G1CollectedHeap::save_marks() {
4523 if (ParallelGCThreads == 0) {
4524 SaveMarksClosure sm;
4525 heap_region_iterate(&sm);
4526 }
4527 // We do this even in the parallel case
4528 perm_gen()->save_marks();
4529 }
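// The evacuation pause proper: remembered-set processing is prepared and
// the refinement cache disabled, a G1ParTask is run by the worker gang
// (or by this thread if ParallelGCThreads == 0), JNI weak oops are
// processed, and evacuation failures and deferred remembered-set updates
// are dealt with afterwards (with C2, the derived-pointer table is also
// updated).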
4531 void G1CollectedHeap::evacuate_collection_set() {
4532 set_evacuation_failed(false);
4534 g1_rem_set()->prepare_for_oops_into_collection_set_do();
4535 concurrent_g1_refine()->set_use_cache(false);
4536 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
4537 set_par_threads(n_workers);
4538 G1ParTask g1_par_task(this, n_workers, _task_queues);
4540 init_for_evac_failure(NULL);
4542 change_strong_roots_parity(); // In preparation for parallel strong roots.
4543 rem_set()->prepare_for_younger_refs_iterate(true);
4545 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4546 double start_par = os::elapsedTime();
4547 if (ParallelGCThreads > 0) {
4548 // The individual threads will set their evac-failure closures.
4549 workers()->run_task(&g1_par_task);
4550 } else {
4551 g1_par_task.work(0);
4552 }
4554 double par_time = (os::elapsedTime() - start_par) * 1000.0;
4555 g1_policy()->record_par_time(par_time);
4556 set_par_threads(0);
4557 // Is this the right thing to do here? We don't save marks
4558 // on individual heap regions when we allocate from
4559 // them in parallel, so this seems like the correct place for this.
4560 retire_all_alloc_regions();
4561 {
4562 G1IsAliveClosure is_alive(this);
4563 G1KeepAliveClosure keep_alive(this);
4564 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4565 }
4566 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4568 concurrent_g1_refine()->set_use_cache(true);
4570 finalize_for_evac_failure();
4572 // Must do this before removing self-forwarding pointers, which clears
4573 // the per-region evac-failure flags.
4574 concurrent_mark()->complete_marking_in_collection_set();
4576 if (evacuation_failed()) {
4577 remove_self_forwarding_pointers();
4578 if (PrintGCDetails) {
4579 gclog_or_tty->print(" (evacuation failed)");
4580 } else if (PrintGC) {
4581 gclog_or_tty->print("--");
4582 }
4583 }
4585 if (G1DeferredRSUpdate) {
4586 RedirtyLoggedCardTableEntryFastClosure redirty;
4587 dirty_card_queue_set().set_closure(&redirty);
4588 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
4589 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
4590 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
4591 }
4593 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
4594 }
4596 void G1CollectedHeap::free_region(HeapRegion* hr) {
4597 size_t pre_used = 0;
4598 size_t cleared_h_regions = 0;
4599 size_t freed_regions = 0;
4600 UncleanRegionList local_list;
4604 size_t used_bytes = hr->used();
4605 size_t live_bytes = hr->max_live_bytes();
4606 if (used_bytes > 0) {
4607 guarantee( live_bytes <= used_bytes, "invariant" );
4608 } else {
4609 guarantee( live_bytes == 0, "invariant" );
4610 }
4612 size_t garbage_bytes = used_bytes - live_bytes;
4613 if (garbage_bytes > 0)
4614 g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
4616 free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
4617 &local_list);
4618 finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
4619 &local_list);
4620 }
4622 void
4623 G1CollectedHeap::free_region_work(HeapRegion* hr,
4624 size_t& pre_used,
4625 size_t& cleared_h_regions,
4626 size_t& freed_regions,
4627 UncleanRegionList* list,
4628 bool par) {
4629 pre_used += hr->used();
4630 if (hr->isHumongous()) {
4631 assert(hr->startsHumongous(),
4632 "Only the start of a humongous region should be freed.");
4633 int ind = _hrs->find(hr);
4634 assert(ind != -1, "Should have an index.");
4635 // Clear the start region.
4636 hr->hr_clear(par, true /*clear_space*/);
4637 list->insert_before_head(hr);
4638 cleared_h_regions++;
4639 freed_regions++;
4640 // Clear any continued regions.
4641 ind++;
4642 while ((size_t)ind < n_regions()) {
4643 HeapRegion* hrc = _hrs->at(ind);
4644 if (!hrc->continuesHumongous()) break;
4645 // Otherwise, does continue the H region.
4646 assert(hrc->humongous_start_region() == hr, "Huh?");
4647 hrc->hr_clear(par, true /*clear_space*/);
4648 cleared_h_regions++;
4649 freed_regions++;
4650 list->insert_before_head(hrc);
4651 ind++;
4652 }
4653 } else {
4654 hr->hr_clear(par, true /*clear_space*/);
4655 list->insert_before_head(hr);
4656 freed_regions++;
4657 // If we're using clear2, this should not be enabled.
4658 // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
4659 }
4660 }
4662 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
4663 size_t cleared_h_regions,
4664 size_t freed_regions,
4665 UncleanRegionList* list) {
4666 if (list != NULL && list->sz() > 0) {
4667 prepend_region_list_on_unclean_list(list);
4668 }
4669 // Acquire a lock, if we're parallel, to update possibly-shared
4670 // variables.
4671 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
4672 {
4673 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
4674 _summary_bytes_used -= pre_used;
4675 _num_humongous_regions -= (int) cleared_h_regions;
4676 _free_regions += freed_regions;
4677 }
4678 }
4681 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
4682 while (list != NULL) {
4683 guarantee( list->is_young(), "invariant" );
4685 HeapWord* bottom = list->bottom();
4686 HeapWord* end = list->end();
4687 MemRegion mr(bottom, end);
4688 ct_bs->dirty(mr);
4690 list = list->get_next_young_region();
4691 }
4692 }
4694 void G1CollectedHeap::cleanUpCardTable() {
4695 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
4696 double start = os::elapsedTime();
4698 ct_bs->clear(_g1_committed);
4700 // now, redirty the cards of the scan-only and survivor regions
4701 // (it seemed faster to do it this way, instead of iterating over
4702 // all regions and then clearing / dirtying as appropriate)
4703 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
4704 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
4706 double elapsed = os::elapsedTime() - start;
4707 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4708 }
4711 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4712 if (g1_policy()->should_do_collection_pause(word_size)) {
4713 do_collection_pause();
4714 }
4715 }
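// Walk the collection set after evacuation: record surviving words for
// young regions, free regions that were fully evacuated, reset regions
// where evacuation failed, and split the elapsed time between young and
// non-young regions.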
4717 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4718 double young_time_ms = 0.0;
4719 double non_young_time_ms = 0.0;
4721 G1CollectorPolicy* policy = g1_policy();
4723 double start_sec = os::elapsedTime();
4724 bool non_young = true;
4726 HeapRegion* cur = cs_head;
4727 int age_bound = -1;
4728 size_t rs_lengths = 0;
4730 while (cur != NULL) {
4731 if (non_young) {
4732 if (cur->is_young()) {
4733 double end_sec = os::elapsedTime();
4734 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4735 non_young_time_ms += elapsed_ms;
4737 start_sec = os::elapsedTime();
4738 non_young = false;
4739 }
4740 } else {
4741 if (!cur->is_on_free_list()) {
4742 double end_sec = os::elapsedTime();
4743 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4744 young_time_ms += elapsed_ms;
4746 start_sec = os::elapsedTime();
4747 non_young = true;
4748 }
4749 }
4751 rs_lengths += cur->rem_set()->occupied();
4753 HeapRegion* next = cur->next_in_collection_set();
4754 assert(cur->in_collection_set(), "bad CS");
4755 cur->set_next_in_collection_set(NULL);
4756 cur->set_in_collection_set(false);
4758 if (cur->is_young()) {
4759 int index = cur->young_index_in_cset();
4760 guarantee( index != -1, "invariant" );
4761 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
4762 size_t words_survived = _surviving_young_words[index];
4763 cur->record_surv_words_in_group(words_survived);
4764 } else {
4765 int index = cur->young_index_in_cset();
4766 guarantee( index == -1, "invariant" );
4767 }
4769 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
4770 (!cur->is_young() && cur->young_index_in_cset() == -1),
4771 "invariant" );
4773 if (!cur->evacuation_failed()) {
4774 // The region should not be empty: it still holds the old copies of the objects we just evacuated.
4775 assert(!cur->is_empty(),
4776 "Should not have empty regions in a CS.");
4777 free_region(cur);
4778 } else {
4779 guarantee( !cur->is_scan_only(), "should not be scan only" );
4780 cur->uninstall_surv_rate_group();
4781 if (cur->is_young())
4782 cur->set_young_index_in_cset(-1);
4783 cur->set_not_young();
4784 cur->set_evacuation_failed(false);
4785 }
4786 cur = next;
4787 }
4789 policy->record_max_rs_lengths(rs_lengths);
4790 policy->cset_regions_freed();
4792 double end_sec = os::elapsedTime();
4793 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4794 if (non_young)
4795 non_young_time_ms += elapsed_ms;
4796 else
4797 young_time_ms += elapsed_ms;
4799 policy->record_young_free_cset_time_ms(young_time_ms);
4800 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
4801 }
4803 HeapRegion*
4804 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
4805 assert(ZF_mon->owned_by_self(), "Precondition");
4806 HeapRegion* res = pop_unclean_region_list_locked();
4807 if (res != NULL) {
4808 assert(!res->continuesHumongous() &&
4809 res->zero_fill_state() != HeapRegion::Allocated,
4810 "Only free regions on unclean list.");
4811 if (zero_filled) {
4812 res->ensure_zero_filled_locked();
4813 res->set_zero_fill_allocated();
4814 }
4815 }
4816 return res;
4817 }
4819 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
4820 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
4821 return alloc_region_from_unclean_list_locked(zero_filled);
4822 }
4824 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
4825 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4826 put_region_on_unclean_list_locked(r);
4827 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4828 }
4830 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
4831 MutexLockerEx x(Cleanup_mon);
4832 set_unclean_regions_coming_locked(b);
4833 }
4835 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
4836 assert(Cleanup_mon->owned_by_self(), "Precondition");
4837 _unclean_regions_coming = b;
4838 // Wake up mutator threads that might be waiting for completeCleanup to
4839 // finish.
4840 if (!b) Cleanup_mon->notify_all();
4841 }
4843 void G1CollectedHeap::wait_for_cleanup_complete() {
4844 MutexLockerEx x(Cleanup_mon);
4845 wait_for_cleanup_complete_locked();
4846 }
4848 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
4849 assert(Cleanup_mon->owned_by_self(), "precondition");
4850 while (_unclean_regions_coming) {
4851 Cleanup_mon->wait();
4852 }
4853 }
4855 void
4856 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
4857 assert(ZF_mon->owned_by_self(), "precondition.");
4858 _unclean_region_list.insert_before_head(r);
4859 }
4861 void
4862 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
4863 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4864 prepend_region_list_on_unclean_list_locked(list);
4865 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4866 }
4868 void
4869 G1CollectedHeap::
4870 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
4871 assert(ZF_mon->owned_by_self(), "precondition.");
4872 _unclean_region_list.prepend_list(list);
4873 }
4875 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
4876 assert(ZF_mon->owned_by_self(), "precondition.");
4877 HeapRegion* res = _unclean_region_list.pop();
4878 if (res != NULL) {
4879 // Inform ZF thread that there's a new unclean head.
4880 if (_unclean_region_list.hd() != NULL && should_zf())
4881 ZF_mon->notify_all();
4882 }
4883 return res;
4884 }
4886 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
4887 assert(ZF_mon->owned_by_self(), "precondition.");
4888 return _unclean_region_list.hd();
4889 }
4892 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
4893 assert(ZF_mon->owned_by_self(), "Precondition");
4894 HeapRegion* r = peek_unclean_region_list_locked();
4895 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
4896 // Result of below must be equal to "r", since we hold the lock.
4897 (void)pop_unclean_region_list_locked();
4898 put_free_region_on_list_locked(r);
4899 return true;
4900 } else {
4901 return false;
4902 }
4903 }
4905 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
4906 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4907 return move_cleaned_region_to_free_list_locked();
4908 }
4911 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
4912 assert(ZF_mon->owned_by_self(), "precondition.");
4913 assert(_free_region_list_size == free_region_list_length(), "Inv");
4914 assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
4915 "Regions on free list must be zero filled");
4916 assert(!r->isHumongous(), "Must not be humongous.");
4917 assert(r->is_empty(), "Better be empty");
4918 assert(!r->is_on_free_list(),
4919 "Better not already be on free list");
4920 assert(!r->is_on_unclean_list(),
4921 "Better not already be on unclean list");
4922 r->set_on_free_list(true);
4923 r->set_next_on_free_list(_free_region_list);
4924 _free_region_list = r;
4925 _free_region_list_size++;
4926 assert(_free_region_list_size == free_region_list_length(), "Inv");
4927 }
4929 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
4930 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4931 put_free_region_on_list_locked(r);
4932 }
4934 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
4935 assert(ZF_mon->owned_by_self(), "precondition.");
4936 assert(_free_region_list_size == free_region_list_length(), "Inv");
4937 HeapRegion* res = _free_region_list;
4938 if (res != NULL) {
4939 _free_region_list = res->next_from_free_list();
4940 _free_region_list_size--;
4941 res->set_on_free_list(false);
4942 res->set_next_on_free_list(NULL);
4943 assert(_free_region_list_size == free_region_list_length(), "Inv");
4944 }
4945 return res;
4946 }
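// When the caller needs a zero-filled region, the free list is tried
// first (its regions are already zero filled) and then the unclean list;
// otherwise the unclean list is tried first, with the free list used as a
// fallback on the second pass.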
4949 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
4950 // By self, or on behalf of self.
4951 assert(Heap_lock->is_locked(), "Precondition");
4952 HeapRegion* res = NULL;
4953 bool first = true;
4954 while (res == NULL) {
4955 if (zero_filled || !first) {
4956 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4957 res = pop_free_region_list_locked();
4958 if (res != NULL) {
4959 assert(!res->zero_fill_is_allocated(),
4960 "No allocated regions on free list.");
4961 res->set_zero_fill_allocated();
4962 } else if (!first) {
4963 break; // We tried both, time to return NULL.
4964 }
4965 }
4967 if (res == NULL) {
4968 res = alloc_region_from_unclean_list(zero_filled);
4969 }
4970 assert(res == NULL ||
4971 !zero_filled ||
4972 res->zero_fill_is_allocated(),
4973 "We must have allocated the region we're returning");
4974 first = false;
4975 }
4976 return res;
4977 }
4979 void G1CollectedHeap::remove_allocated_regions_from_lists() {
4980 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4981 {
4982 HeapRegion* prev = NULL;
4983 HeapRegion* cur = _unclean_region_list.hd();
4984 while (cur != NULL) {
4985 HeapRegion* next = cur->next_from_unclean_list();
4986 if (cur->zero_fill_is_allocated()) {
4987 // Remove from the list.
4988 if (prev == NULL) {
4989 (void)_unclean_region_list.pop();
4990 } else {
4991 _unclean_region_list.delete_after(prev);
4992 }
4993 cur->set_on_unclean_list(false);
4994 cur->set_next_on_unclean_list(NULL);
4995 } else {
4996 prev = cur;
4997 }
4998 cur = next;
4999 }
5000 assert(_unclean_region_list.sz() == unclean_region_list_length(),
5001 "Inv");
5002 }
5004 {
5005 HeapRegion* prev = NULL;
5006 HeapRegion* cur = _free_region_list;
5007 while (cur != NULL) {
5008 HeapRegion* next = cur->next_from_free_list();
5009 if (cur->zero_fill_is_allocated()) {
5010 // Remove from the list.
5011 if (prev == NULL) {
5012 _free_region_list = cur->next_from_free_list();
5013 } else {
5014 prev->set_next_on_free_list(cur->next_from_free_list());
5015 }
5016 cur->set_on_free_list(false);
5017 cur->set_next_on_free_list(NULL);
5018 _free_region_list_size--;
5019 } else {
5020 prev = cur;
5021 }
5022 cur = next;
5023 }
5024 assert(_free_region_list_size == free_region_list_length(), "Inv");
5025 }
5026 }
5028 bool G1CollectedHeap::verify_region_lists() {
5029 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5030 return verify_region_lists_locked();
5031 }
5033 bool G1CollectedHeap::verify_region_lists_locked() {
5034 HeapRegion* unclean = _unclean_region_list.hd();
5035 while (unclean != NULL) {
5036 guarantee(unclean->is_on_unclean_list(), "Well, it is!");
5037 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
5038 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
5039 "Everything else is possible.");
5040 unclean = unclean->next_from_unclean_list();
5041 }
5042 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
5044 HeapRegion* free_r = _free_region_list;
5045 while (free_r != NULL) {
5046 assert(free_r->is_on_free_list(), "Well, it is!");
5047 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
5048 switch (free_r->zero_fill_state()) {
5049 case HeapRegion::NotZeroFilled:
5050 case HeapRegion::ZeroFilling:
5051 guarantee(false, "Should not be on free list.");
5052 break;
5053 default:
5054 // Everything else is possible.
5055 break;
5056 }
5057 free_r = free_r->next_from_free_list();
5058 }
5059 guarantee(_free_region_list_size == free_region_list_length(), "Inv");
5060 // If no assertion above failed, the region lists are consistent.
5061 return true;
5062 }
5064 size_t G1CollectedHeap::free_region_list_length() {
5065 assert(ZF_mon->owned_by_self(), "precondition.");
5066 size_t len = 0;
5067 HeapRegion* cur = _free_region_list;
5068 while (cur != NULL) {
5069 len++;
5070 cur = cur->next_from_free_list();
5071 }
5072 return len;
5073 }
5075 size_t G1CollectedHeap::unclean_region_list_length() {
5076 assert(ZF_mon->owned_by_self(), "precondition.");
5077 return _unclean_region_list.length();
5078 }
5080 size_t G1CollectedHeap::n_regions() {
5081 return _hrs->length();
5082 }
5084 size_t G1CollectedHeap::max_regions() {
5085 return
5086 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
5087 HeapRegion::GrainBytes;
5088 }
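// Illustrative arithmetic (the actual GrainBytes value is platform- and
// flag-dependent; 1 MB is used here purely as an example): with 1000 MB
// of reserved object space and 1 MB regions, align_size_up() leaves
// 1000 MB and max_regions() returns 1000; a reservation of 1000.5 MB
// would round up and yield 1001.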
5090 size_t G1CollectedHeap::free_regions() {
5091 /* Possibly-expensive assert.
5092 assert(_free_regions == count_free_regions(),
5093 "_free_regions is off.");
5094 */
5095 return _free_regions;
5096 }
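// The concurrent zero-fill (ZF) thread keeps pre-zeroing regions as long
// as the free list holds fewer than G1ConcZFMaxRegions zero-filled
// regions; should_zf() is the predicate used both by that thread and by
// the code that notifies ZF_mon when more work may be available.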
5098 bool G1CollectedHeap::should_zf() {
5099 return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
5100 }
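// HeapRegionClosure protocol, as used by the closures below:
// doHeapRegion() is called once per region by heap_region_iterate();
// returning false asks for the iteration to continue, returning true is
// treated as a request to terminate it early.  Typical usage (this is
// the pattern count_free_regions() follows):
//
//   RegionCounter rc;
//   heap_region_iterate(&rc);
//   size_t empty_regions = rc.res();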
5102 class RegionCounter: public HeapRegionClosure {
5103 size_t _n;
5104 public:
5105 RegionCounter() : _n(0) {}
5106 bool doHeapRegion(HeapRegion* r) {
5107 if (r->is_empty()) {
5108 assert(!r->isHumongous(), "H regions should not be empty.");
5109 _n++;
5110 }
5111 return false;
5112 }
5113 int res() { return (int) _n; }
5114 };
5116 size_t G1CollectedHeap::count_free_regions() {
5117 RegionCounter rc;
5118 heap_region_iterate(&rc);
5119 size_t n = rc.res();
5120 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
5121 n--;
5122 return n;
5123 }
5125 size_t G1CollectedHeap::count_free_regions_list() {
5126 size_t n = 0;
5128 ZF_mon->lock_without_safepoint_check();
5129 HeapRegion* cur = _free_region_list;
5130 while (cur != NULL) {
5131 cur = cur->next_from_free_list();
5132 n++;
5133 }
5134 size_t m = unclean_region_list_length();
5135 ZF_mon->unlock();
5136 return n + m;
5137 }
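// count_free_regions() counts empty regions by walking the whole heap
// (excluding the current allocation region), whereas
// count_free_regions_list() counts membership of the free and unclean
// lists; print_region_accounting_info() below prints the two side by
// side as a consistency check.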
5139 bool G1CollectedHeap::should_set_young_locked() {
5140 assert(heap_lock_held_for_gc(),
5141 "the heap lock should already be held by or for this thread");
5142 return (g1_policy()->in_young_gc_mode() &&
5143 g1_policy()->should_add_next_region_to_young_list());
5144 }
5146 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5147 assert(heap_lock_held_for_gc(),
5148 "the heap lock should already be held by or for this thread");
5149 _young_list->push_region(hr);
5150 g1_policy()->set_region_short_lived(hr);
5151 }
5153 class NoYoungRegionsClosure: public HeapRegionClosure {
5154 private:
5155 bool _success;
5156 public:
5157 NoYoungRegionsClosure() : _success(true) { }
5158 bool doHeapRegion(HeapRegion* r) {
5159 if (r->is_young()) {
5160 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
5161 r->bottom(), r->end());
5162 _success = false;
5163 }
5164 return false;
5165 }
5166 bool success() { return _success; }
5167 };
5169 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
5170 bool check_sample) {
5171 bool ret = true;
5173 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
5174 if (!ignore_scan_only_list) {
5175 NoYoungRegionsClosure closure;
5176 heap_region_iterate(&closure);
5177 ret = ret && closure.success();
5178 }
5180 return ret;
5181 }
5183 void G1CollectedHeap::empty_young_list() {
5184 assert(heap_lock_held_for_gc(),
5185 "the heap lock should already be held by or for this thread");
5186 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
5188 _young_list->empty_list();
5189 }
5191 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
5192 bool no_allocs = true;
5193 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
5194 HeapRegion* r = _gc_alloc_regions[ap];
5195 no_allocs = r == NULL || r->saved_mark_at_top();
5196 }
5197 return no_allocs;
5198 }
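// Several GC allocation purposes may share the same region (see the
// alias check below), so each distinct region is retired exactly once.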
5200 void G1CollectedHeap::retire_all_alloc_regions() {
5201 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
5202 HeapRegion* r = _gc_alloc_regions[ap];
5203 if (r != NULL) {
5204 // Check for aliases.
5205 bool has_processed_alias = false;
5206 for (int i = 0; i < ap; ++i) {
5207 if (_gc_alloc_regions[i] == r) {
5208 has_processed_alias = true;
5209 break;
5210 }
5211 }
5212 if (!has_processed_alias) {
5213 retire_alloc_region(r, false /* par */);
5214 }
5215 }
5216 }
5217 }
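// Region-list handling around a full GC: tear_down_region_lists() and
// set_used_regions_to_need_zero_fill() run at the start (emptying the
// lists and flagging used regions for later zero filling), and
// rebuild_region_lists() runs at the end to repopulate the lists from
// the post-compaction state of the heap.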
5220 // Done at the start of full GC.
5221 void G1CollectedHeap::tear_down_region_lists() {
5222 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5223 while (pop_unclean_region_list_locked() != NULL) ;
5224 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
5225 "Postconditions of loop.")
5226 while (pop_free_region_list_locked() != NULL) ;
5227 assert(_free_region_list == NULL, "Postcondition of loop.");
5228 if (_free_region_list_size != 0) {
5229 gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
5230 print();
5231 }
5232 assert(_free_region_list_size == 0, "Postconditions of loop.");
5233 }
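// Used by rebuild_region_lists(): used regions get their unallocated
// tail re-zeroed and are marked zero-fill-allocated; empty regions go
// back on the unclean or free list depending on their zero-fill state.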
5236 class RegionResetter: public HeapRegionClosure {
5237 G1CollectedHeap* _g1;
5238 int _n;
5239 public:
5240 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
5241 bool doHeapRegion(HeapRegion* r) {
5242 if (r->continuesHumongous()) return false;
5243 if (r->top() > r->bottom()) {
5244 if (r->top() < r->end()) {
5245 Copy::fill_to_words(r->top(),
5246 pointer_delta(r->end(), r->top()));
5247 }
5248 r->set_zero_fill_allocated();
5249 } else {
5250 assert(r->is_empty(), "tautology");
5251 _n++;
5252 switch (r->zero_fill_state()) {
5253 case HeapRegion::NotZeroFilled:
5254 case HeapRegion::ZeroFilling:
5255 _g1->put_region_on_unclean_list_locked(r);
5256 break;
5257 case HeapRegion::Allocated:
5258 r->set_zero_fill_complete();
5259 // no break; go on to put on free list.
5260 case HeapRegion::ZeroFilled:
5261 _g1->put_free_region_on_list_locked(r);
5262 break;
5263 }
5264 }
5265 return false;
5266 }
5268 int getFreeRegionCount() {return _n;}
5269 };
5271 // Done at the end of full GC.
5272 void G1CollectedHeap::rebuild_region_lists() {
5273 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5274 // This needs to go at the end of the full GC.
5275 RegionResetter rs;
5276 heap_region_iterate(&rs);
5277 _free_regions = rs.getFreeRegionCount();
5278 // Tell the ZF thread it may have work to do.
5279 if (should_zf()) ZF_mon->notify_all();
5280 }
5282 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
5283 G1CollectedHeap* _g1;
5284 int _n;
5285 public:
5286 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
5287 bool doHeapRegion(HeapRegion* r) {
5288 if (r->continuesHumongous()) return false;
5289 if (r->top() > r->bottom()) {
5290 // There are assertions in "set_zero_fill_needed()" below that
5291 // require top() == bottom(), so this is technically illegal.
5292 // We'll skirt the law here, by making that true temporarily.
5293 DEBUG_ONLY(HeapWord* save_top = r->top();
5294 r->set_top(r->bottom()));
5295 r->set_zero_fill_needed();
5296 DEBUG_ONLY(r->set_top(save_top));
5297 }
5298 return false;
5299 }
5300 };
5302 // Done at the start of full GC.
5303 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
5304 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5305   // This needs to go at the start of the full GC.
5306 UsedRegionsNeedZeroFillSetter rs;
5307 heap_region_iterate(&rs);
5308 }
5310 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5311 _refine_cte_cl->set_concurrent(concurrent);
5312 }
5314 #ifndef PRODUCT
5316 class PrintHeapRegionClosure: public HeapRegionClosure {
5317 public:
5318 bool doHeapRegion(HeapRegion *r) {
5319 gclog_or_tty->print("Region: "PTR_FORMAT":", r);
5320 if (r != NULL) {
5321 if (r->is_on_free_list())
5322 gclog_or_tty->print("Free ");
5323 if (r->is_young())
5324 gclog_or_tty->print("Young ");
5325 if (r->isHumongous())
5326 gclog_or_tty->print("Is Humongous ");
5327 r->print();
5328 }
5329 return false;
5330 }
5331 };
5333 class SortHeapRegionClosure : public HeapRegionClosure {
5334   size_t young_regions, free_regions, unclean_regions;
5335 size_t hum_regions, count;
5336 size_t unaccounted, cur_unclean, cur_alloc;
5337 size_t total_free;
5338 HeapRegion* cur;
5339 public:
5340   SortHeapRegionClosure(HeapRegion *_cur) :
5341     young_regions(0), free_regions(0), unclean_regions(0),
5342     hum_regions(0), count(0),
5343     unaccounted(0), cur_unclean(0), cur_alloc(0),
5344     total_free(0), cur(_cur)
5345   {}
5346 bool doHeapRegion(HeapRegion *r) {
5347 count++;
5348 if (r->is_on_free_list()) free_regions++;
5349 else if (r->is_on_unclean_list()) unclean_regions++;
5350 else if (r->isHumongous()) hum_regions++;
5351 else if (r->is_young()) young_regions++;
5352 else if (r == cur) cur_alloc++;
5353 else unaccounted++;
5354 return false;
5355 }
5356 void print() {
5357 total_free = free_regions + unclean_regions;
5358 gclog_or_tty->print("%d regions\n", count);
5359 gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
5360 total_free, free_regions, unclean_regions);
5361 gclog_or_tty->print("%d humongous %d young\n",
5362 hum_regions, young_regions);
5363 gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
5364 gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
5365 }
5366 };
5368 void G1CollectedHeap::print_region_counts() {
5369 SortHeapRegionClosure sc(_cur_alloc_region);
5370 PrintHeapRegionClosure cl;
5371 heap_region_iterate(&cl);
5372 heap_region_iterate(&sc);
5373 sc.print();
5374 print_region_accounting_info();
5375 }
5377 bool G1CollectedHeap::regions_accounted_for() {
5378 // TODO: regions accounting for young/survivor/tenured
5379 return true;
5380 }
5382 bool G1CollectedHeap::print_region_accounting_info() {
5383   gclog_or_tty->print_cr("Free regions: "SIZE_FORMAT" (count: "SIZE_FORMAT" count list "SIZE_FORMAT") (clean: "SIZE_FORMAT" unclean: "SIZE_FORMAT").",
5384                          free_regions(),
5385                          count_free_regions(), count_free_regions_list(),
5386                          _free_region_list_size, _unclean_region_list.sz());
5387 gclog_or_tty->print_cr("cur_alloc: %d.",
5388 (_cur_alloc_region == NULL ? 0 : 1));
5389 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
5391 // TODO: check regions accounting for young/survivor/tenured
5392 return true;
5393 }
5395 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5396 HeapRegion* hr = heap_region_containing(p);
5397 if (hr == NULL) {
5398 return is_in_permanent(p);
5399 } else {
5400 return hr->is_in(p);
5401 }
5402 }
5403 #endif // PRODUCT
5405 void G1CollectedHeap::g1_unimplemented() {
5406 // Unimplemented();
5407 }
5410 // Local Variables: ***
5411 // c-indentation-style: gnu ***
5412 // End: ***