Fri, 06 Mar 2009 13:50:14 -0800
6720309: G1: don't synchronously update RSet during evacuation pauses
6720334: G1: don't update RSets of collection set regions during an evacuation pause
Summary: Introduced a deferred update mechanism for delaying the rset updates during the collection pause
Reviewed-by: apetrusenko, tonyp
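
The summary above describes deferring remembered-set (RSet) updates out of the evacuation pause. As a minimal illustrative sketch only (all names below are hypothetical, not the HotSpot implementation): during the pause, cards that would normally trigger a synchronous RSet update are merely logged; after the pause the log is drained and each card is processed then. In the file below, the real mechanism surfaces as the G1DeferredRSUpdate-guarded dirty_card_queue_set() initialization and the assertion that the deferred queue is empty after a full collection.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// A "card" stands in for a small window of the heap whose remembered-set
// contribution must eventually be recomputed. Hypothetical names throughout.
typedef uintptr_t Card;

class DeferredUpdateQueue {
  std::vector<Card> _deferred;   // cards logged during the evacuation pause
public:
  // During the pause: just record the card; do no RSet work yet.
  void defer(Card c) { _deferred.push_back(c); }

  // After the pause: replay every logged card through the supplied updater.
  void drain(const std::function<void(Card)>& update_rset_for) {
    for (Card c : _deferred) update_rset_for(c);
    _deferred.clear();
  }

  std::size_t pending() const { return _deferred.size(); }
};

int main() {
  DeferredUpdateQueue q;
  // Simulate copying three objects during a pause; each copy defers a card
  // instead of updating a remembered set synchronously.
  q.defer(0x1000); q.defer(0x2000); q.defer(0x3000);
  std::cout << "cards deferred during pause: " << q.pending() << "\n";
  // Once the pause is over, the deferred cards are processed.
  q.drain([](Card c) {
    std::cout << "updating RSet for card 0x" << std::hex << c << std::dec << "\n";
  });
  return 0;
}
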
1 /*
2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_g1CollectedHeap.cpp.incl"
28 // turn it on so that the contents of the young list (scan-only /
29 // to-be-collected) are printed at "strategic" points before / during
30 // / after the collection --- this is useful for debugging
31 #define SCAN_ONLY_VERBOSE 0
32 // CURRENT STATUS
33 // This file is under construction. Search for "FIXME".
35 // INVARIANTS/NOTES
36 //
37 // All allocation activity covered by the G1CollectedHeap interface is
38 // serialized by acquiring the HeapLock. This happens in
39 // mem_allocate_work, which all such allocation functions call.
40 // (Note that this does not apply to TLAB allocation, which is not part
41 // of this interface: it is done by clients of this interface.)
43 // Local to this file.
45 // Finds the first HeapRegion.
46 // No longer used, but might be handy someday.
48 class FindFirstRegionClosure: public HeapRegionClosure {
49 HeapRegion* _a_region;
50 public:
51 FindFirstRegionClosure() : _a_region(NULL) {}
52 bool doHeapRegion(HeapRegion* r) {
53 _a_region = r;
54 return true;
55 }
56 HeapRegion* result() { return _a_region; }
57 };
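// Applies remembered-set refinement to a single logged card, yielding to a
// pending safepoint request when running concurrently.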
60 class RefineCardTableEntryClosure: public CardTableEntryClosure {
61 SuspendibleThreadSet* _sts;
62 G1RemSet* _g1rs;
63 ConcurrentG1Refine* _cg1r;
64 bool _concurrent;
65 public:
66 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
67 G1RemSet* g1rs,
68 ConcurrentG1Refine* cg1r) :
69 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
70 {}
71 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
72 _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
73 if (_concurrent && _sts->should_yield()) {
74 // Caller will actually yield.
75 return false;
76 }
77 // Otherwise, we finished successfully; return true.
78 return true;
79 }
80 void set_concurrent(bool b) { _concurrent = b; }
81 };
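// Clears every logged card that falls within the heap, counting the calls and
// histogramming the card values seen; used by check_ct_logs_at_safepoint.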
84 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
85 int _calls;
86 G1CollectedHeap* _g1h;
87 CardTableModRefBS* _ctbs;
88 int _histo[256];
89 public:
90 ClearLoggedCardTableEntryClosure() :
91 _calls(0)
92 {
93 _g1h = G1CollectedHeap::heap();
94 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
95 for (int i = 0; i < 256; i++) _histo[i] = 0;
96 }
97 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
98 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
99 _calls++;
100 unsigned char* ujb = (unsigned char*)card_ptr;
101 int ind = (int)(*ujb);
102 _histo[ind]++;
103 *card_ptr = -1;
104 }
105 return true;
106 }
107 int calls() { return _calls; }
108 void print_histo() {
109 gclog_or_tty->print_cr("Card table value histogram:");
110 for (int i = 0; i < 256; i++) {
111 if (_histo[i] != 0) {
112 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
113 }
114 }
115 }
116 };
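// Re-dirties every logged card that falls within the heap, counting the calls
// so the redirty pass can be checked against the clearing pass.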
118 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
119 int _calls;
120 G1CollectedHeap* _g1h;
121 CardTableModRefBS* _ctbs;
122 public:
123 RedirtyLoggedCardTableEntryClosure() :
124 _calls(0)
125 {
126 _g1h = G1CollectedHeap::heap();
127 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
128 }
129 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
130 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
131 _calls++;
132 *card_ptr = 0;
133 }
134 return true;
135 }
136 int calls() { return _calls; }
137 };
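// As above, but unconditionally dirties each card, with no bounds check or
// call counting.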
139 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
140 public:
141 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
142 *card_ptr = CardTableModRefBS::dirty_card_val();
143 return true;
144 }
145 };
147 YoungList::YoungList(G1CollectedHeap* g1h)
148 : _g1h(g1h), _head(NULL),
149 _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
150 _length(0), _scan_only_length(0),
151 _last_sampled_rs_lengths(0),
152 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
153 {
154 guarantee( check_list_empty(false), "just making sure..." );
155 }
157 void YoungList::push_region(HeapRegion *hr) {
158 assert(!hr->is_young(), "should not already be young");
159 assert(hr->get_next_young_region() == NULL, "cause it should!");
161 hr->set_next_young_region(_head);
162 _head = hr;
164 hr->set_young();
165 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
166 ++_length;
167 }
169 void YoungList::add_survivor_region(HeapRegion* hr) {
170 assert(hr->is_survivor(), "should be flagged as survivor region");
171 assert(hr->get_next_young_region() == NULL, "cause it should!");
173 hr->set_next_young_region(_survivor_head);
174 if (_survivor_head == NULL) {
175 _survivor_tail = hr;
176 }
177 _survivor_head = hr;
179 ++_survivor_length;
180 }
182 HeapRegion* YoungList::pop_region() {
183 while (_head != NULL) {
184 assert( length() > 0, "list should not be empty" );
185 HeapRegion* ret = _head;
186 _head = ret->get_next_young_region();
187 ret->set_next_young_region(NULL);
188 --_length;
189 assert(ret->is_young(), "region should be very young");
191 // Replace 'Survivor' region type with 'Young'. So the region will
192 // be treated as a young region and will not be 'confused' with
193 // newly created survivor regions.
194 if (ret->is_survivor()) {
195 ret->set_young();
196 }
198 if (!ret->is_scan_only()) {
199 return ret;
200 }
202 // scan-only, we'll add it to the scan-only list
203 if (_scan_only_tail == NULL) {
204 guarantee( _scan_only_head == NULL, "invariant" );
206 _scan_only_head = ret;
207 _curr_scan_only = ret;
208 } else {
209 guarantee( _scan_only_head != NULL, "invariant" );
210 _scan_only_tail->set_next_young_region(ret);
211 }
212 guarantee( ret->get_next_young_region() == NULL, "invariant" );
213 _scan_only_tail = ret;
215 // no need to be tagged as scan-only any more
216 ret->set_young();
218 ++_scan_only_length;
219 }
220 assert( length() == 0, "list should be empty" );
221 return NULL;
222 }
224 void YoungList::empty_list(HeapRegion* list) {
225 while (list != NULL) {
226 HeapRegion* next = list->get_next_young_region();
227 list->set_next_young_region(NULL);
228 list->uninstall_surv_rate_group();
229 list->set_not_young();
230 list = next;
231 }
232 }
234 void YoungList::empty_list() {
235 assert(check_list_well_formed(), "young list should be well formed");
237 empty_list(_head);
238 _head = NULL;
239 _length = 0;
241 empty_list(_scan_only_head);
242 _scan_only_head = NULL;
243 _scan_only_tail = NULL;
244 _scan_only_length = 0;
245 _curr_scan_only = NULL;
247 empty_list(_survivor_head);
248 _survivor_head = NULL;
249 _survivor_tail = NULL;
250 _survivor_length = 0;
252 _last_sampled_rs_lengths = 0;
254 assert(check_list_empty(false), "just making sure...");
255 }
257 bool YoungList::check_list_well_formed() {
258 bool ret = true;
260 size_t length = 0;
261 HeapRegion* curr = _head;
262 HeapRegion* last = NULL;
263 while (curr != NULL) {
264 if (!curr->is_young() || curr->is_scan_only()) {
265 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
266 "incorrectly tagged (%d, %d)",
267 curr->bottom(), curr->end(),
268 curr->is_young(), curr->is_scan_only());
269 ret = false;
270 }
271 ++length;
272 last = curr;
273 curr = curr->get_next_young_region();
274 }
275 ret = ret && (length == _length);
277 if (!ret) {
278 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
279 gclog_or_tty->print_cr("### list has %d entries, _length is %d",
280 length, _length);
281 }
283 bool scan_only_ret = true;
284 length = 0;
285 curr = _scan_only_head;
286 last = NULL;
287 while (curr != NULL) {
288 if (!curr->is_young() || curr->is_scan_only()) {
289 gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
290 "incorrectly tagged (%d, %d)",
291 curr->bottom(), curr->end(),
292 curr->is_young(), curr->is_scan_only());
293 scan_only_ret = false;
294 }
295 ++length;
296 last = curr;
297 curr = curr->get_next_young_region();
298 }
299 scan_only_ret = scan_only_ret && (length == _scan_only_length);
301 if ( (last != _scan_only_tail) ||
302 (_scan_only_head == NULL && _scan_only_tail != NULL) ||
303 (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
304 gclog_or_tty->print_cr("### _scan_only_tail is set incorrectly");
305 scan_only_ret = false;
306 }
308 if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
309 gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
310 scan_only_ret = false;
311 }
313 if (!scan_only_ret) {
314 gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
315 gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
316 length, _scan_only_length);
317 }
319 return ret && scan_only_ret;
320 }
322 bool YoungList::check_list_empty(bool ignore_scan_only_list,
323 bool check_sample) {
324 bool ret = true;
326 if (_length != 0) {
327 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
328 _length);
329 ret = false;
330 }
331 if (check_sample && _last_sampled_rs_lengths != 0) {
332 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
333 ret = false;
334 }
335 if (_head != NULL) {
336 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
337 ret = false;
338 }
339 if (!ret) {
340 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
341 }
343 if (ignore_scan_only_list)
344 return ret;
346 bool scan_only_ret = true;
347 if (_scan_only_length != 0) {
348 gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
349 _scan_only_length);
350 scan_only_ret = false;
351 }
352 if (_scan_only_head != NULL) {
353 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
354 scan_only_ret = false;
355 }
356 if (_scan_only_tail != NULL) {
357 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
358 scan_only_ret = false;
359 }
360 if (!scan_only_ret) {
361 gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
362 }
364 return ret && scan_only_ret;
365 }
367 void
368 YoungList::rs_length_sampling_init() {
369 _sampled_rs_lengths = 0;
370 _curr = _head;
371 }
373 bool
374 YoungList::rs_length_sampling_more() {
375 return _curr != NULL;
376 }
378 void
379 YoungList::rs_length_sampling_next() {
380 assert( _curr != NULL, "invariant" );
381 _sampled_rs_lengths += _curr->rem_set()->occupied();
382 _curr = _curr->get_next_young_region();
383 if (_curr == NULL) {
384 _last_sampled_rs_lengths = _sampled_rs_lengths;
385 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
386 }
387 }
389 void
390 YoungList::reset_auxilary_lists() {
391 // We could have just "moved" the scan-only list to the young list.
392 // However, the scan-only list is ordered according to the region
393 // age in descending order, so, by moving one entry at a time, we
394 // ensure that it is recreated in ascending order.
396 guarantee( is_empty(), "young list should be empty" );
397 assert(check_list_well_formed(), "young list should be well formed");
399 // Add survivor regions to SurvRateGroup.
400 _g1h->g1_policy()->note_start_adding_survivor_regions();
401 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
402 for (HeapRegion* curr = _survivor_head;
403 curr != NULL;
404 curr = curr->get_next_young_region()) {
405 _g1h->g1_policy()->set_region_survivors(curr);
406 }
407 _g1h->g1_policy()->note_stop_adding_survivor_regions();
409 if (_survivor_head != NULL) {
410 _head = _survivor_head;
411 _length = _survivor_length + _scan_only_length;
412 _survivor_tail->set_next_young_region(_scan_only_head);
413 } else {
414 _head = _scan_only_head;
415 _length = _scan_only_length;
416 }
418 for (HeapRegion* curr = _scan_only_head;
419 curr != NULL;
420 curr = curr->get_next_young_region()) {
421 curr->recalculate_age_in_surv_rate_group();
422 }
423 _scan_only_head = NULL;
424 _scan_only_tail = NULL;
425 _scan_only_length = 0;
426 _curr_scan_only = NULL;
428 _survivor_head = NULL;
429 _survivor_tail = NULL;
430 _survivor_length = 0;
431 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
433 assert(check_list_well_formed(), "young list should be well formed");
434 }
436 void YoungList::print() {
437 HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head};
438 const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"};
440 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
441 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
442 HeapRegion *curr = lists[list];
443 if (curr == NULL)
444 gclog_or_tty->print_cr(" empty");
445 while (curr != NULL) {
446 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
447 "age: %4d, y: %d, s-o: %d, surv: %d",
448 curr->bottom(), curr->end(),
449 curr->top(),
450 curr->prev_top_at_mark_start(),
451 curr->next_top_at_mark_start(),
452 curr->top_at_conc_mark_count(),
453 curr->age_in_surv_rate_group_cond(),
454 curr->is_young(),
455 curr->is_scan_only(),
456 curr->is_survivor());
457 curr = curr->get_next_young_region();
458 }
459 }
461 gclog_or_tty->print_cr("");
462 }
464 void G1CollectedHeap::stop_conc_gc_threads() {
465 _cg1r->cg1rThread()->stop();
466 _czft->stop();
467 _cmThread->stop();
468 }
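// Sanity-checks the card-table logging machinery at a safepoint: clear all
// logged cards, verify the card table is then completely clean, re-dirty the
// same cards, and check that the clear and redirty passes processed the same
// number of entries.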
471 void G1CollectedHeap::check_ct_logs_at_safepoint() {
472 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
473 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
475 // Count the dirty cards at the start.
476 CountNonCleanMemRegionClosure count1(this);
477 ct_bs->mod_card_iterate(&count1);
478 int orig_count = count1.n();
480 // First clear the logged cards.
481 ClearLoggedCardTableEntryClosure clear;
482 dcqs.set_closure(&clear);
483 dcqs.apply_closure_to_all_completed_buffers();
484 dcqs.iterate_closure_all_threads(false);
485 clear.print_histo();
487 // Now ensure that there are no dirty cards.
488 CountNonCleanMemRegionClosure count2(this);
489 ct_bs->mod_card_iterate(&count2);
490 if (count2.n() != 0) {
491 gclog_or_tty->print_cr("Card table has %d entries; %d originally",
492 count2.n(), orig_count);
493 }
494 guarantee(count2.n() == 0, "Card table should be clean.");
496 RedirtyLoggedCardTableEntryClosure redirty;
497 JavaThread::dirty_card_queue_set().set_closure(&redirty);
498 dcqs.apply_closure_to_all_completed_buffers();
499 dcqs.iterate_closure_all_threads(false);
500 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
501 clear.calls(), orig_count);
502 guarantee(redirty.calls() == clear.calls(),
503 "Or else mechanism is broken.");
505 CountNonCleanMemRegionClosure count3(this);
506 ct_bs->mod_card_iterate(&count3);
507 if (count3.n() != orig_count) {
508 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
509 orig_count, count3.n());
510 guarantee(count3.n() >= orig_count, "Should have restored them all.");
511 }
513 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
514 }
516 // Private class members.
518 G1CollectedHeap* G1CollectedHeap::_g1h;
520 // Private methods.
522 // Finds a HeapRegion that can be used to allocate a given size of block.
525 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
526 bool do_expand,
527 bool zero_filled) {
528 ConcurrentZFThread::note_region_alloc();
529 HeapRegion* res = alloc_free_region_from_lists(zero_filled);
530 if (res == NULL && do_expand) {
531 expand(word_size * HeapWordSize);
532 res = alloc_free_region_from_lists(zero_filled);
533 assert(res == NULL ||
534 (!res->isHumongous() &&
535 (!zero_filled ||
536 res->zero_fill_state() == HeapRegion::Allocated)),
537 "Alloc Regions must be zero filled (and non-H)");
538 }
539 if (res != NULL && res->is_empty()) _free_regions--;
540 assert(res == NULL ||
541 (!res->isHumongous() &&
542 (!zero_filled ||
543 res->zero_fill_state() == HeapRegion::Allocated)),
544 "Non-young alloc Regions must be zero filled (and non-H)");
546 if (G1TraceRegions) {
547 if (res != NULL) {
548 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
549 "top "PTR_FORMAT,
550 res->hrs_index(), res->bottom(), res->end(), res->top());
551 }
552 }
554 return res;
555 }
557 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
558 size_t word_size,
559 bool zero_filled) {
560 HeapRegion* alloc_region = NULL;
561 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
562 alloc_region = newAllocRegion_work(word_size, true, zero_filled);
563 if (purpose == GCAllocForSurvived && alloc_region != NULL) {
564 alloc_region->set_survivor();
565 }
566 ++_gc_alloc_region_counts[purpose];
567 } else {
568 g1_policy()->note_alloc_region_limit_reached(purpose);
569 }
570 return alloc_region;
571 }
573 // If could fit into free regions w/o expansion, try.
574 // Otherwise, if can expand, do so.
575 // Otherwise, if using ex regions might help, try with ex given back.
576 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
577 assert(regions_accounted_for(), "Region leakage!");
579 // We can't allocate H regions while cleanupComplete is running, since
580 // some of the regions we find to be empty might not yet be added to the
581 // unclean list. (If we're already at a safepoint, this call is
582 // unnecessary, not to mention wrong.)
583 if (!SafepointSynchronize::is_at_safepoint())
584 wait_for_cleanup_complete();
586 size_t num_regions =
587 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
589 // Special case if < one region???
591 // Remember the ft size.
592 size_t x_size = expansion_regions();
594 HeapWord* res = NULL;
595 bool eliminated_allocated_from_lists = false;
597 // Can the allocation potentially fit in the free regions?
598 if (free_regions() >= num_regions) {
599 res = _hrs->obj_allocate(word_size);
600 }
601 if (res == NULL) {
602 // Try expansion.
603 size_t fs = _hrs->free_suffix();
604 if (fs + x_size >= num_regions) {
605 expand((num_regions - fs) * HeapRegion::GrainBytes);
606 res = _hrs->obj_allocate(word_size);
607 assert(res != NULL, "This should have worked.");
608 } else {
609 // Expansion won't help. Are there enough free regions if we get rid
610 // of reservations?
611 size_t avail = free_regions();
612 if (avail >= num_regions) {
613 res = _hrs->obj_allocate(word_size);
614 if (res != NULL) {
615 remove_allocated_regions_from_lists();
616 eliminated_allocated_from_lists = true;
617 }
618 }
619 }
620 }
621 if (res != NULL) {
622 // Increment by the number of regions allocated.
623 // FIXME: Assumes regions all of size GrainBytes.
624 #ifndef PRODUCT
625 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
626 HeapRegion::GrainWords));
627 #endif
628 if (!eliminated_allocated_from_lists)
629 remove_allocated_regions_from_lists();
630 _summary_bytes_used += word_size * HeapWordSize;
631 _free_regions -= num_regions;
632 _num_humongous_regions += (int) num_regions;
633 }
634 assert(regions_accounted_for(), "Region Leakage");
635 return res;
636 }
638 HeapWord*
639 G1CollectedHeap::attempt_allocation_slow(size_t word_size,
640 bool permit_collection_pause) {
641 HeapWord* res = NULL;
642 HeapRegion* allocated_young_region = NULL;
644 assert( SafepointSynchronize::is_at_safepoint() ||
645 Heap_lock->owned_by_self(), "pre condition of the call" );
647 if (isHumongous(word_size)) {
648 // Allocation of a humongous object can, in a sense, complete a
649 // partial region, if the previous alloc was also humongous, and
650 // caused the test below to succeed.
651 if (permit_collection_pause)
652 do_collection_pause_if_appropriate(word_size);
653 res = humongousObjAllocate(word_size);
654 assert(_cur_alloc_region == NULL
655 || !_cur_alloc_region->isHumongous(),
656 "Prevent a regression of this bug.");
658 } else {
659 // We may have concurrent cleanup working at the time. Wait for it
660 // to complete. In the future we would probably want to make the
661 // concurrent cleanup truly concurrent by decoupling it from the
662 // allocation.
663 if (!SafepointSynchronize::is_at_safepoint())
664 wait_for_cleanup_complete();
665 // If we do a collection pause, this will be reset to a non-NULL
666 // value. If we don't, nulling here ensures that we allocate a new
667 // region below.
668 if (_cur_alloc_region != NULL) {
669 // We're finished with the _cur_alloc_region.
670 _summary_bytes_used += _cur_alloc_region->used();
671 _cur_alloc_region = NULL;
672 }
673 assert(_cur_alloc_region == NULL, "Invariant.");
674 // Completion of a heap region is perhaps a good point at which to do
675 // a collection pause.
676 if (permit_collection_pause)
677 do_collection_pause_if_appropriate(word_size);
678 // Make sure we have an allocation region available.
679 if (_cur_alloc_region == NULL) {
680 if (!SafepointSynchronize::is_at_safepoint())
681 wait_for_cleanup_complete();
682 bool next_is_young = should_set_young_locked();
683 // If the next region is not young, make sure it's zero-filled.
684 _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
685 if (_cur_alloc_region != NULL) {
686 _summary_bytes_used -= _cur_alloc_region->used();
687 if (next_is_young) {
688 set_region_short_lived_locked(_cur_alloc_region);
689 allocated_young_region = _cur_alloc_region;
690 }
691 }
692 }
693 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
694 "Prevent a regression of this bug.");
696 // Now retry the allocation.
697 if (_cur_alloc_region != NULL) {
698 res = _cur_alloc_region->allocate(word_size);
699 }
700 }
702 // NOTE: fails frequently in PRT
703 assert(regions_accounted_for(), "Region leakage!");
705 if (res != NULL) {
706 if (!SafepointSynchronize::is_at_safepoint()) {
707 assert( permit_collection_pause, "invariant" );
708 assert( Heap_lock->owned_by_self(), "invariant" );
709 Heap_lock->unlock();
710 }
712 if (allocated_young_region != NULL) {
713 HeapRegion* hr = allocated_young_region;
714 HeapWord* bottom = hr->bottom();
715 HeapWord* end = hr->end();
716 MemRegion mr(bottom, end);
717 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
718 }
719 }
721 assert( SafepointSynchronize::is_at_safepoint() ||
722 (res == NULL && Heap_lock->owned_by_self()) ||
723 (res != NULL && !Heap_lock->owned_by_self()),
724 "post condition of the call" );
726 return res;
727 }
729 HeapWord*
730 G1CollectedHeap::mem_allocate(size_t word_size,
731 bool is_noref,
732 bool is_tlab,
733 bool* gc_overhead_limit_was_exceeded) {
734 debug_only(check_for_valid_allocation_state());
735 assert(no_gc_in_progress(), "Allocation during gc not allowed");
736 HeapWord* result = NULL;
738 // Loop until the allocation is satisfied,
739 // or unsatisfied after GC.
740 for (int try_count = 1; /* return or throw */; try_count += 1) {
741 int gc_count_before;
742 {
743 Heap_lock->lock();
744 result = attempt_allocation(word_size);
745 if (result != NULL) {
746 // attempt_allocation should have unlocked the heap lock
747 assert(is_in(result), "result not in heap");
748 return result;
749 }
750 // Read the gc count while the heap lock is held.
751 gc_count_before = SharedHeap::heap()->total_collections();
752 Heap_lock->unlock();
753 }
755 // Create the garbage collection operation...
756 VM_G1CollectForAllocation op(word_size,
757 gc_count_before);
759 // ...and get the VM thread to execute it.
760 VMThread::execute(&op);
761 if (op.prologue_succeeded()) {
762 result = op.result();
763 assert(result == NULL || is_in(result), "result not in heap");
764 return result;
765 }
767 // Give a warning if we seem to be looping forever.
768 if ((QueuedAllocationWarningCount > 0) &&
769 (try_count % QueuedAllocationWarningCount == 0)) {
770 warning("G1CollectedHeap::mem_allocate_work retries %d times",
771 try_count);
772 }
773 }
774 }
776 void G1CollectedHeap::abandon_cur_alloc_region() {
777 if (_cur_alloc_region != NULL) {
778 // We're finished with the _cur_alloc_region.
779 if (_cur_alloc_region->is_empty()) {
780 _free_regions++;
781 free_region(_cur_alloc_region);
782 } else {
783 _summary_bytes_used += _cur_alloc_region->used();
784 }
785 _cur_alloc_region = NULL;
786 }
787 }
789 class PostMCRemSetClearClosure: public HeapRegionClosure {
790 ModRefBarrierSet* _mr_bs;
791 public:
792 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
793 bool doHeapRegion(HeapRegion* r) {
794 r->reset_gc_time_stamp();
795 if (r->continuesHumongous())
796 return false;
797 HeapRegionRemSet* hrrs = r->rem_set();
798 if (hrrs != NULL) hrrs->clear();
799 // You might think here that we could clear just the cards
800 // corresponding to the used region. But no: if we leave a dirty card
801 // in a region we might allocate into, then it would prevent that card
802 // from being enqueued, and cause it to be missed.
803 // Re: the performance cost: we shouldn't be doing full GC anyway!
804 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
805 return false;
806 }
807 };
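// Dirties all cards covering the used portion of each region (humongous
// continuations excluded), so that the remembered sets can be rebuilt after a
// full collection.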
810 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
811 ModRefBarrierSet* _mr_bs;
812 public:
813 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
814 bool doHeapRegion(HeapRegion* r) {
815 if (r->continuesHumongous()) return false;
816 if (r->used_region().word_size() != 0) {
817 _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
818 }
819 return false;
820 }
821 };
823 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
824 size_t word_size) {
825 ResourceMark rm;
827 if (full && DisableExplicitGC) {
828 gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
829 return;
830 }
832 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
833 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
835 if (GC_locker::is_active()) {
836 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
837 }
839 {
840 IsGCActiveMark x;
842 // Timing
843 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
844 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
845 TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
847 double start = os::elapsedTime();
848 GCOverheadReporter::recordSTWStart(start);
849 g1_policy()->record_full_collection_start();
851 gc_prologue(true);
852 increment_total_collections();
854 size_t g1h_prev_used = used();
855 assert(used() == recalculate_used(), "Should be equal");
857 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
858 HandleMark hm; // Discard invalid handles created during verification
859 prepare_for_verify();
860 gclog_or_tty->print(" VerifyBeforeGC:");
861 Universe::verify(true);
862 }
863 assert(regions_accounted_for(), "Region leakage!");
865 COMPILER2_PRESENT(DerivedPointerTable::clear());
867 // We want to discover references, but not process them yet.
868 // This mode is disabled in
869 // instanceRefKlass::process_discovered_references if the
870 // generation does some collection work, or
871 // instanceRefKlass::enqueue_discovered_references if the
872 // generation returns without doing any work.
873 ref_processor()->disable_discovery();
874 ref_processor()->abandon_partial_discovery();
875 ref_processor()->verify_no_references_recorded();
877 // Abandon current iterations of concurrent marking and concurrent
878 // refinement, if any are in progress.
879 concurrent_mark()->abort();
881 // Make sure we'll choose a new allocation region afterwards.
882 abandon_cur_alloc_region();
883 assert(_cur_alloc_region == NULL, "Invariant.");
884 g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
885 tear_down_region_lists();
886 set_used_regions_to_need_zero_fill();
887 if (g1_policy()->in_young_gc_mode()) {
888 empty_young_list();
889 g1_policy()->set_full_young_gcs(true);
890 }
892 // Temporarily make reference _discovery_ single threaded (non-MT).
893 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
895 // Temporarily make refs discovery atomic
896 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
898 // Temporarily clear _is_alive_non_header
899 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
901 ref_processor()->enable_discovery();
902 ref_processor()->setup_policy(clear_all_soft_refs);
904 // Do collection work
905 {
906 HandleMark hm; // Discard invalid handles created during gc
907 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
908 }
909 // Because freeing humongous regions may have added some unclean
910 // regions, it is necessary to tear down again before rebuilding.
911 tear_down_region_lists();
912 rebuild_region_lists();
914 _summary_bytes_used = recalculate_used();
916 ref_processor()->enqueue_discovered_references();
918 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
920 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
921 HandleMark hm; // Discard invalid handles created during verification
922 gclog_or_tty->print(" VerifyAfterGC:");
923 Universe::verify(false);
924 }
925 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
927 reset_gc_time_stamp();
928 // Since everything potentially moved, we will clear all remembered
929 // sets, and clear all cards. Later we will also dirty cards in the used
930 // portion of the heap after the resizing (which could be a shrinking).
931 // We will also reset the GC time stamps of the regions.
932 PostMCRemSetClearClosure rs_clear(mr_bs());
933 heap_region_iterate(&rs_clear);
935 // Resize the heap if necessary.
936 resize_if_necessary_after_full_collection(full ? 0 : word_size);
938 // Since everything potentially moved, we will clear all remembered
939 // sets, but also dirty all cards corresponding to used regions.
940 PostMCRemSetInvalidateClosure rs_invalidate(mr_bs());
941 heap_region_iterate(&rs_invalidate);
942 if (_cg1r->use_cache()) {
943 _cg1r->clear_and_record_card_counts();
944 _cg1r->clear_hot_cache();
945 }
947 if (PrintGC) {
948 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
949 }
951 if (true) { // FIXME
952 // Ask the permanent generation to adjust size for full collections
953 perm()->compute_new_size();
954 }
956 double end = os::elapsedTime();
957 GCOverheadReporter::recordSTWEnd(end);
958 g1_policy()->record_full_collection_end();
960 #ifdef TRACESPINNING
961 ParallelTaskTerminator::print_termination_counts();
962 #endif
964 gc_epilogue(true);
966 // Abandon concurrent refinement. This must happen last: in the
967 // dirty-card logging system, some cards may be dirtied by weak-ref
968 // processing, and may be enqueued. But the whole card table is
969 // dirtied, so this should abandon those logs, and set "do_traversal"
970 // to true.
971 concurrent_g1_refine()->set_pya_restart();
972 assert(!G1DeferredRSUpdate
973 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
974 assert(regions_accounted_for(), "Region leakage!");
975 }
977 if (g1_policy()->in_young_gc_mode()) {
978 _young_list->reset_sampled_info();
979 assert( check_young_list_empty(false, false),
980 "young list should be empty at this point");
981 }
982 }
984 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
985 do_collection(true, clear_all_soft_refs, 0);
986 }
988 // This code is mostly copied from TenuredGeneration.
989 void
990 G1CollectedHeap::
991 resize_if_necessary_after_full_collection(size_t word_size) {
992 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
994 // Include the current allocation, if any, and bytes that will be
995 // pre-allocated to support collections, as "used".
996 const size_t used_after_gc = used();
997 const size_t capacity_after_gc = capacity();
998 const size_t free_after_gc = capacity_after_gc - used_after_gc;
1000 // We don't have floating point command-line arguments
1001 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
1002 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1003 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
1004 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1006 size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
1007 size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
1009 // Don't shrink less than the initial size.
1010 minimum_desired_capacity =
1011 MAX2(minimum_desired_capacity,
1012 collector_policy()->initial_heap_byte_size());
1013 maximum_desired_capacity =
1014 MAX2(maximum_desired_capacity,
1015 collector_policy()->initial_heap_byte_size());
1017 // By construction above, used_after_gc cannot exceed minimum_desired_capacity.
1018 assert(used_after_gc <= minimum_desired_capacity, "sanity check");
1019 assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");
1021 if (PrintGC && Verbose) {
1022 const double free_percentage = ((double)free_after_gc) / capacity();
1023 gclog_or_tty->print_cr("Computing new size after full GC ");
1024 gclog_or_tty->print_cr(" "
1025 " minimum_free_percentage: %6.2f",
1026 minimum_free_percentage);
1027 gclog_or_tty->print_cr(" "
1028 " maximum_free_percentage: %6.2f",
1029 maximum_free_percentage);
1030 gclog_or_tty->print_cr(" "
1031 " capacity: %6.1fK"
1032 " minimum_desired_capacity: %6.1fK"
1033 " maximum_desired_capacity: %6.1fK",
1034 capacity() / (double) K,
1035 minimum_desired_capacity / (double) K,
1036 maximum_desired_capacity / (double) K);
1037 gclog_or_tty->print_cr(" "
1038 " free_after_gc : %6.1fK"
1039 " used_after_gc : %6.1fK",
1040 free_after_gc / (double) K,
1041 used_after_gc / (double) K);
1042 gclog_or_tty->print_cr(" "
1043 " free_percentage: %6.2f",
1044 free_percentage);
1045 }
1046 if (capacity() < minimum_desired_capacity) {
1047 // Don't expand unless it's significant
1048 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1049 expand(expand_bytes);
1050 if (PrintGC && Verbose) {
1051 gclog_or_tty->print_cr(" expanding:"
1052 " minimum_desired_capacity: %6.1fK"
1053 " expand_bytes: %6.1fK",
1054 minimum_desired_capacity / (double) K,
1055 expand_bytes / (double) K);
1056 }
1058 // No expansion, now see if we want to shrink
1059 } else if (capacity() > maximum_desired_capacity) {
1060 // Capacity too large, compute shrinking size
1061 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1062 shrink(shrink_bytes);
1063 if (PrintGC && Verbose) {
1064 gclog_or_tty->print_cr(" "
1065 " shrinking:"
1066 " initSize: %.1fK"
1067 " maximum_desired_capacity: %.1fK",
1068 collector_policy()->initial_heap_byte_size() / (double) K,
1069 maximum_desired_capacity / (double) K);
1070 gclog_or_tty->print_cr(" "
1071 " shrink_bytes: %.1fK",
1072 shrink_bytes / (double) K);
1073 }
1074 }
1075 }
1078 HeapWord*
1079 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
1080 HeapWord* result = NULL;
1082 // In a G1 heap, we're supposed to keep allocation from failing by
1083 // incremental pauses. Therefore, at least for now, we'll favor
1084 // expansion over collection. (This might change in the future if we can
1085 // do something smarter than full collection to satisfy a failed alloc.)
1087 result = expand_and_allocate(word_size);
1088 if (result != NULL) {
1089 assert(is_in(result), "result not in heap");
1090 return result;
1091 }
1093 // OK, I guess we have to try collection.
1095 do_collection(false, false, word_size);
1097 result = attempt_allocation(word_size, /*permit_collection_pause*/false);
1099 if (result != NULL) {
1100 assert(is_in(result), "result not in heap");
1101 return result;
1102 }
1104 // Try collecting soft references.
1105 do_collection(false, true, word_size);
1106 result = attempt_allocation(word_size, /*permit_collection_pause*/false);
1107 if (result != NULL) {
1108 assert(is_in(result), "result not in heap");
1109 return result;
1110 }
1112 // What else? We might try synchronous finalization later. If the total
1113 // space available is large enough for the allocation, then a more
1114 // complete compaction phase than we've tried so far might be
1115 // appropriate.
1116 return NULL;
1117 }
1119 // Attempting to expand the heap sufficiently
1120 // to support an allocation of the given "word_size". If
1121 // successful, perform the allocation and return the address of the
1122 // allocated block, or else "NULL".
1124 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1125 size_t expand_bytes = word_size * HeapWordSize;
1126 if (expand_bytes < MinHeapDeltaBytes) {
1127 expand_bytes = MinHeapDeltaBytes;
1128 }
1129 expand(expand_bytes);
1130 assert(regions_accounted_for(), "Region leakage!");
1131 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
1132 return result;
1133 }
1135 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
1136 size_t pre_used = 0;
1137 size_t cleared_h_regions = 0;
1138 size_t freed_regions = 0;
1139 UncleanRegionList local_list;
1140 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
1141 freed_regions, &local_list);
1143 finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
1144 &local_list);
1145 return pre_used;
1146 }
1148 void
1149 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
1150 size_t& pre_used,
1151 size_t& cleared_h,
1152 size_t& freed_regions,
1153 UncleanRegionList* list,
1154 bool par) {
1155 assert(!hr->continuesHumongous(), "should have filtered these out");
1156 size_t res = 0;
1157 if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) {
1158 if (!hr->is_young()) {
1159 if (G1PolicyVerbose > 0)
1160 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
1161 " during cleanup", hr, hr->used());
1162 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
1163 }
1164 }
1165 }
1167 // FIXME: both this and shrink could probably be more efficient by
1168 // doing one "VirtualSpace::expand_by" call rather than several.
1169 void G1CollectedHeap::expand(size_t expand_bytes) {
1170 size_t old_mem_size = _g1_storage.committed_size();
1171 // We expand by a minimum of 1K.
1172 expand_bytes = MAX2(expand_bytes, (size_t)K);
1173 size_t aligned_expand_bytes =
1174 ReservedSpace::page_align_size_up(expand_bytes);
1175 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1176 HeapRegion::GrainBytes);
1177 expand_bytes = aligned_expand_bytes;
1178 while (expand_bytes > 0) {
1179 HeapWord* base = (HeapWord*)_g1_storage.high();
1180 // Commit more storage.
1181 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
1182 if (!successful) {
1183 expand_bytes = 0;
1184 } else {
1185 expand_bytes -= HeapRegion::GrainBytes;
1186 // Expand the committed region.
1187 HeapWord* high = (HeapWord*) _g1_storage.high();
1188 _g1_committed.set_end(high);
1189 // Create a new HeapRegion.
1190 MemRegion mr(base, high);
1191 bool is_zeroed = !_g1_max_committed.contains(base);
1192 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
1194 // Now update max_committed if necessary.
1195 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
1197 // Add it to the HeapRegionSeq.
1198 _hrs->insert(hr);
1199 // Set the zero-fill state, according to whether it's already
1200 // zeroed.
1201 {
1202 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
1203 if (is_zeroed) {
1204 hr->set_zero_fill_complete();
1205 put_free_region_on_list_locked(hr);
1206 } else {
1207 hr->set_zero_fill_needed();
1208 put_region_on_unclean_list_locked(hr);
1209 }
1210 }
1211 _free_regions++;
1212 // And we used up an expansion region to create it.
1213 _expansion_regions--;
1214 // Tell the cardtable about it.
1215 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1216 // And the offset table as well.
1217 _bot_shared->resize(_g1_committed.word_size());
1218 }
1219 }
1220 if (Verbose && PrintGC) {
1221 size_t new_mem_size = _g1_storage.committed_size();
1222 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
1223 old_mem_size/K, aligned_expand_bytes/K,
1224 new_mem_size/K);
1225 }
1226 }
1228 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
1229 {
1230 size_t old_mem_size = _g1_storage.committed_size();
1231 size_t aligned_shrink_bytes =
1232 ReservedSpace::page_align_size_down(shrink_bytes);
1233 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1234 HeapRegion::GrainBytes);
1235 size_t num_regions_deleted = 0;
1236 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
1238 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
1239 if (mr.byte_size() > 0)
1240 _g1_storage.shrink_by(mr.byte_size());
1241 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
1243 _g1_committed.set_end(mr.start());
1244 _free_regions -= num_regions_deleted;
1245 _expansion_regions += num_regions_deleted;
1247 // Tell the cardtable about it.
1248 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1250 // And the offset table as well.
1251 _bot_shared->resize(_g1_committed.word_size());
1253 HeapRegionRemSet::shrink_heap(n_regions());
1255 if (Verbose && PrintGC) {
1256 size_t new_mem_size = _g1_storage.committed_size();
1257 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
1258 old_mem_size/K, aligned_shrink_bytes/K,
1259 new_mem_size/K);
1260 }
1261 }
1263 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1264 release_gc_alloc_regions();
1265 tear_down_region_lists(); // We will rebuild them in a moment.
1266 shrink_helper(shrink_bytes);
1267 rebuild_region_lists();
1268 }
1270 // Public methods.
1272 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1273 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1274 #endif // _MSC_VER
1277 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1278 SharedHeap(policy_),
1279 _g1_policy(policy_),
1280 _ref_processor(NULL),
1281 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1282 _bot_shared(NULL),
1283 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
1284 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1285 _evac_failure_scan_stack(NULL) ,
1286 _mark_in_progress(false),
1287 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
1288 _cur_alloc_region(NULL),
1289 _refine_cte_cl(NULL),
1290 _free_region_list(NULL), _free_region_list_size(0),
1291 _free_regions(0),
1292 _popular_object_boundary(NULL),
1293 _cur_pop_hr_index(0),
1294 _popular_regions_to_be_evacuated(NULL),
1295 _pop_obj_rc_at_copy(),
1296 _full_collection(false),
1297 _unclean_region_list(),
1298 _unclean_regions_coming(false),
1299 _young_list(new YoungList(this)),
1300 _gc_time_stamp(0),
1301 _surviving_young_words(NULL),
1302 _in_cset_fast_test(NULL),
1303 _in_cset_fast_test_base(NULL)
1304 {
1305 _g1h = this; // To catch bugs.
1306 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1307 vm_exit_during_initialization("Failed necessary allocation.");
1308 }
1309 int n_queues = MAX2((int)ParallelGCThreads, 1);
1310 _task_queues = new RefToScanQueueSet(n_queues);
1312 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1313 assert(n_rem_sets > 0, "Invariant.");
1315 HeapRegionRemSetIterator** iter_arr =
1316 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
1317 for (int i = 0; i < n_queues; i++) {
1318 iter_arr[i] = new HeapRegionRemSetIterator();
1319 }
1320 _rem_set_iterator = iter_arr;
1322 for (int i = 0; i < n_queues; i++) {
1323 RefToScanQueue* q = new RefToScanQueue();
1324 q->initialize();
1325 _task_queues->register_queue(i, q);
1326 }
1328 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1329 _gc_alloc_regions[ap] = NULL;
1330 _gc_alloc_region_counts[ap] = 0;
1331 }
1332 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1333 }
1335 jint G1CollectedHeap::initialize() {
1336 os::enable_vtime();
1338 // Necessary to satisfy locking discipline assertions.
1340 MutexLocker x(Heap_lock);
1342 // While there are no constraints in the GC code that HeapWordSize
1343 // be any particular value, there are multiple other areas in the
1344 // system which believe this to be true (e.g. oop->object_size in some
1345 // cases incorrectly returns the size in wordSize units rather than
1346 // HeapWordSize).
1347 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1349 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1350 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1352 // Ensure that the sizes are properly aligned.
1353 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1354 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1356 // We allocate this in any case, but it does no work if the command line
1357 // param is off.
1358 _cg1r = new ConcurrentG1Refine();
1360 // Reserve the maximum.
1361 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
1362 // Includes the perm-gen.
1363 ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
1364 HeapRegion::GrainBytes,
1365 false /*ism*/);
1367 if (!heap_rs.is_reserved()) {
1368 vm_exit_during_initialization("Could not reserve enough space for object heap");
1369 return JNI_ENOMEM;
1370 }
1372 // It is important to do this in a way such that concurrent readers can't
1373 // temporarily think something is in the heap. (I've actually seen this
1374 // happen in asserts: DLD.)
1375 _reserved.set_word_size(0);
1376 _reserved.set_start((HeapWord*)heap_rs.base());
1377 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
1379 _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
1381 _num_humongous_regions = 0;
1383 // Create the gen rem set (and barrier set) for the entire reserved region.
1384 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
1385 set_barrier_set(rem_set()->bs());
1386 if (barrier_set()->is_a(BarrierSet::ModRef)) {
1387 _mr_bs = (ModRefBarrierSet*)_barrier_set;
1388 } else {
1389 vm_exit_during_initialization("G1 requires a mod ref bs.");
1390 return JNI_ENOMEM;
1391 }
1393 // Also create a G1 rem set.
1394 if (G1UseHRIntoRS) {
1395 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
1396 _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
1397 } else {
1398 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
1399 return JNI_ENOMEM;
1400 }
1401 } else {
1402 _g1_rem_set = new StupidG1RemSet(this);
1403 }
1405 // Carve out the G1 part of the heap.
1407 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1408 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
1409 g1_rs.size()/HeapWordSize);
1410 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
1412 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
1414 _g1_storage.initialize(g1_rs, 0);
1415 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
1416 _g1_max_committed = _g1_committed;
1417 _hrs = new HeapRegionSeq(_expansion_regions);
1418 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
1419 guarantee(_cur_alloc_region == NULL, "from constructor");
1421 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
1422 heap_word_size(init_byte_size));
1424 _g1h = this;
1426 // Create the ConcurrentMark data structure and thread.
1427 // (Must do this late, so that "max_regions" is defined.)
1428 _cm = new ConcurrentMark(heap_rs, (int) max_regions());
1429 _cmThread = _cm->cmThread();
1431 // ...and the concurrent zero-fill thread, if necessary.
1432 if (G1ConcZeroFill) {
1433 _czft = new ConcurrentZFThread();
1434 }
1438 // Allocate the popular regions; take them off free lists.
1439 size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes;
1440 expand(pop_byte_size);
1441 _popular_object_boundary =
1442 _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords);
1443 for (int i = 0; i < G1NumPopularRegions; i++) {
1444 HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords);
1445 // assert(hr != NULL && hr->bottom() < _popular_object_boundary,
1446 // "Should be enough, and all should be below boundary.");
1447 hr->set_popular(true);
1448 }
1449 assert(_cur_pop_hr_index == 0, "Start allocating at the first region.");
1451 // Initialize the from_card cache structure of HeapRegionRemSet.
1452 HeapRegionRemSet::init_heap(max_regions());
1454 // Now expand into the rest of the initial heap size.
1455 expand(init_byte_size - pop_byte_size);
1457 // Perform any initialization actions delegated to the policy.
1458 g1_policy()->init();
1460 g1_policy()->note_start_of_mark_thread();
1462 _refine_cte_cl =
1463 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
1464 g1_rem_set(),
1465 concurrent_g1_refine());
1466 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
1468 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1469 SATB_Q_FL_lock,
1470 0,
1471 Shared_SATB_Q_lock);
1472 if (G1RSBarrierUseQueue) {
1473 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1474 DirtyCardQ_FL_lock,
1475 G1DirtyCardQueueMax,
1476 Shared_DirtyCardQ_lock);
1477 }
1478 if (G1DeferredRSUpdate) {
1479 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1480 DirtyCardQ_FL_lock,
1481 0,
1482 Shared_DirtyCardQ_lock,
1483 &JavaThread::dirty_card_queue_set());
1484 }
1485 // In case we're keeping closure specialization stats, initialize those
1486 // counts and that mechanism.
1487 SpecializationStats::clear();
1489 _gc_alloc_region_list = NULL;
1491 // Do later initialization work for concurrent refinement.
1492 _cg1r->init();
1494 const char* group_names[] = { "CR", "ZF", "CM", "CL" };
1495 GCOverheadReporter::initGCOverheadReporter(4, group_names);
1497 return JNI_OK;
1498 }
1500 void G1CollectedHeap::ref_processing_init() {
1501 SharedHeap::ref_processing_init();
1502 MemRegion mr = reserved_region();
1503 _ref_processor = ReferenceProcessor::create_ref_processor(
1504 mr, // span
1505 false, // Reference discovery is not atomic
1506 // (though it shouldn't matter here.)
1507 true, // mt_discovery
1508 NULL, // is alive closure: need to fill this in for efficiency
1509 ParallelGCThreads,
1510 ParallelRefProcEnabled,
1511 true); // Setting next fields of discovered
1512 // lists requires a barrier.
1513 }
1515 size_t G1CollectedHeap::capacity() const {
1516 return _g1_committed.byte_size();
1517 }
1519 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
1520 int worker_i) {
1521 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1522 int n_completed_buffers = 0;
1523 while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
1524 n_completed_buffers++;
1525 }
1526 g1_policy()->record_update_rs_processed_buffers(worker_i,
1527 (double) n_completed_buffers);
1528 dcqs.clear_n_completed_buffers();
1529 // Finish up the queue...
1530 if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
1531 g1_rem_set());
1532 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1533 }
1536 // Computes the sum of the storage used by the various regions.
1538 size_t G1CollectedHeap::used() const {
1539 assert(Heap_lock->owner() != NULL,
1540 "Should be owned on this thread's behalf.");
1541 size_t result = _summary_bytes_used;
1542 if (_cur_alloc_region != NULL)
1543 result += _cur_alloc_region->used();
1544 return result;
1545 }
1547 class SumUsedClosure: public HeapRegionClosure {
1548 size_t _used;
1549 public:
1550 SumUsedClosure() : _used(0) {}
1551 bool doHeapRegion(HeapRegion* r) {
1552 if (!r->continuesHumongous()) {
1553 _used += r->used();
1554 }
1555 return false;
1556 }
1557 size_t result() { return _used; }
1558 };
1560 size_t G1CollectedHeap::recalculate_used() const {
1561 SumUsedClosure blk;
1562 _hrs->iterate(&blk);
1563 return blk.result();
1564 }
1566 #ifndef PRODUCT
1567 class SumUsedRegionsClosure: public HeapRegionClosure {
1568 size_t _num;
1569 public:
1570 // _num starts at G1NumPopularRegions to account for the popular regions
1571 SumUsedRegionsClosure() : _num(G1NumPopularRegions) {}
1572 bool doHeapRegion(HeapRegion* r) {
1573 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
1574 _num += 1;
1575 }
1576 return false;
1577 }
1578 size_t result() { return _num; }
1579 };
1581 size_t G1CollectedHeap::recalculate_used_regions() const {
1582 SumUsedRegionsClosure blk;
1583 _hrs->iterate(&blk);
1584 return blk.result();
1585 }
1586 #endif // PRODUCT
1588 size_t G1CollectedHeap::unsafe_max_alloc() {
1589 if (_free_regions > 0) return HeapRegion::GrainBytes;
1590 // otherwise, is there space in the current allocation region?
1592 // We need to store the current allocation region in a local variable
1593 // here. The problem is that this method doesn't take any locks and
1594 // there may be other threads which overwrite the current allocation
1595 // region field. attempt_allocation(), for example, sets it to NULL
1596 // and this can happen *after* the NULL check here but before the call
1597 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
1598 // to be a problem in the optimized build, since the two loads of the
1599 // current allocation region field are optimized away.
1600 HeapRegion* car = _cur_alloc_region;
1602 // FIXME: should iterate over all regions?
1603 if (car == NULL) {
1604 return 0;
1605 }
1606 return car->free();
1607 }
1609 void G1CollectedHeap::collect(GCCause::Cause cause) {
1610 // The caller doesn't have the Heap_lock
1611 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
1612 MutexLocker ml(Heap_lock);
1613 collect_locked(cause);
1614 }
1616 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
1617 assert(Thread::current()->is_VM_thread(), "Precondition#1");
1618 assert(Heap_lock->is_locked(), "Precondition#2");
1619 GCCauseSetter gcs(this, cause);
1620 switch (cause) {
1621 case GCCause::_heap_inspection:
1622 case GCCause::_heap_dump: {
1623 HandleMark hm;
1624 do_full_collection(false); // don't clear all soft refs
1625 break;
1626 }
1627 default: // XXX FIX ME
1628 ShouldNotReachHere(); // Unexpected use of this function
1629 }
1630 }
1633 void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
1634 // Don't want to do a GC until cleanup is completed.
1635 wait_for_cleanup_complete();
1637 // Read the GC count while holding the Heap_lock
1638 int gc_count_before = SharedHeap::heap()->total_collections();
1639 {
1640 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
1641 VM_G1CollectFull op(gc_count_before, cause);
1642 VMThread::execute(&op);
1643 }
1644 }
1646 bool G1CollectedHeap::is_in(const void* p) const {
1647 if (_g1_committed.contains(p)) {
1648 HeapRegion* hr = _hrs->addr_to_region(p);
1649 return hr->is_in(p);
1650 } else {
1651 return _perm_gen->as_gen()->is_in(p);
1652 }
1653 }
1655 // Iteration functions.
1657 // Iterates an OopClosure over all ref-containing fields of objects
1658 // within a HeapRegion.
1660 class IterateOopClosureRegionClosure: public HeapRegionClosure {
1661 MemRegion _mr;
1662 OopClosure* _cl;
1663 public:
1664 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
1665 : _mr(mr), _cl(cl) {}
1666 bool doHeapRegion(HeapRegion* r) {
1667 if (! r->continuesHumongous()) {
1668 r->oop_iterate(_cl);
1669 }
1670 return false;
1671 }
1672 };
1674 void G1CollectedHeap::oop_iterate(OopClosure* cl) {
1675 IterateOopClosureRegionClosure blk(_g1_committed, cl);
1676 _hrs->iterate(&blk);
1677 }
1679 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
1680 IterateOopClosureRegionClosure blk(mr, cl);
1681 _hrs->iterate(&blk);
1682 }
1684 // Iterates an ObjectClosure over all objects within a HeapRegion.
1686 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
1687 ObjectClosure* _cl;
1688 public:
1689 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1690 bool doHeapRegion(HeapRegion* r) {
1691 if (! r->continuesHumongous()) {
1692 r->object_iterate(_cl);
1693 }
1694 return false;
1695 }
1696 };
1698 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
1699 IterateObjectClosureRegionClosure blk(cl);
1700 _hrs->iterate(&blk);
1701 }
1703 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
1704 // FIXME: is this right?
1705 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
1706 }
1708 // Calls a SpaceClosure on a HeapRegion.
1710 class SpaceClosureRegionClosure: public HeapRegionClosure {
1711 SpaceClosure* _cl;
1712 public:
1713 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
1714 bool doHeapRegion(HeapRegion* r) {
1715 _cl->do_space(r);
1716 return false;
1717 }
1718 };
1720 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
1721 SpaceClosureRegionClosure blk(cl);
1722 _hrs->iterate(&blk);
1723 }
1725 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
1726 _hrs->iterate(cl);
1727 }
1729 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
1730 HeapRegionClosure* cl) {
1731 _hrs->iterate_from(r, cl);
1732 }
1734 void
1735 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
1736 _hrs->iterate_from(idx, cl);
1737 }
1739 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
1741 void
1742 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
1743 int worker,
1744 jint claim_value) {
1745 const size_t regions = n_regions();
1746 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
1747 // try to spread out the starting points of the workers
1748 const size_t start_index = regions / worker_num * (size_t) worker;
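// Illustrative arithmetic for the distribution above (numbers are
// hypothetical): with 1000 regions and 4 workers, worker 0 starts at
// region 0, worker 1 at 250, worker 2 at 500 and worker 3 at 750; the
// modulo in the loop below wraps each worker around, so every worker
// still considers every region while the initial claim attempts are
// spread across the heap.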
1750 // each worker will actually look at all regions
1751 for (size_t count = 0; count < regions; ++count) {
1752 const size_t index = (start_index + count) % regions;
1753 assert(0 <= index && index < regions, "sanity");
1754 HeapRegion* r = region_at(index);
1755 // we'll ignore "continues humongous" regions (we'll process them
1756 // when we come across their corresponding "starts humongous"
1757 // region) and regions already claimed
1758 if (r->claim_value() == claim_value || r->continuesHumongous()) {
1759 continue;
1760 }
1761 // OK, try to claim it
1762 if (r->claimHeapRegion(claim_value)) {
1763 // success!
1764 assert(!r->continuesHumongous(), "sanity");
1765 if (r->startsHumongous()) {
1766 // If the region is "starts humongous" we'll iterate over its
1767 // "continues humongous" first; in fact we'll do them
1768 // first. The order is important. In one case, calling the
1769 // closure on the "starts humongous" region might de-allocate
1770 // and clear all its "continues humongous" regions and, as a
1771 // result, we might end up processing them twice. So, we'll do
1772 // them first (notice: most closures will ignore them anyway) and
1773 // then we'll do the "starts humongous" region.
1774 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
1775 HeapRegion* chr = region_at(ch_index);
1777 // if the region has already been claimed or it's not
1778 // "continues humongous" we're done
1779 if (chr->claim_value() == claim_value ||
1780 !chr->continuesHumongous()) {
1781 break;
1782 }
1784 // No one should have claimed it directly. We can assert this,
1785 // given that we claimed its "starts humongous" region.
1786 assert(chr->claim_value() != claim_value, "sanity");
1787 assert(chr->humongous_start_region() == r, "sanity");
1789 if (chr->claimHeapRegion(claim_value)) {
1790 // we should always be able to claim it; no one else should
1791 // be trying to claim this region
1793 bool res2 = cl->doHeapRegion(chr);
1794 assert(!res2, "Should not abort");
1796 // Right now, this holds (i.e., no closure that actually
1797 // does something with "continues humongous" regions
1798 // clears them). We might have to weaken it in the future,
1799 // but let's leave these two asserts here for extra safety.
1800 assert(chr->continuesHumongous(), "should still be the case");
1801 assert(chr->humongous_start_region() == r, "sanity");
1802 } else {
1803 guarantee(false, "we should not reach here");
1804 }
1805 }
1806 }
1808 assert(!r->continuesHumongous(), "sanity");
1809 bool res = cl->doHeapRegion(r);
1810 assert(!res, "Should not abort");
1811 }
1812 }
1813 }
1815 class ResetClaimValuesClosure: public HeapRegionClosure {
1816 public:
1817 bool doHeapRegion(HeapRegion* r) {
1818 r->set_claim_value(HeapRegion::InitialClaimValue);
1819 return false;
1820 }
1821 };
1823 void
1824 G1CollectedHeap::reset_heap_region_claim_values() {
1825 ResetClaimValuesClosure blk;
1826 heap_region_iterate(&blk);
1827 }
1829 #ifdef ASSERT
1830 // This checks whether all regions in the heap have the correct claim
1831 // value. I also piggy-backed on this a check to ensure that the
1832 // humongous_start_region() information on "continues humongous"
1833 // regions is correct.
1835 class CheckClaimValuesClosure : public HeapRegionClosure {
1836 private:
1837 jint _claim_value;
1838 size_t _failures;
1839 HeapRegion* _sh_region;
1840 public:
1841 CheckClaimValuesClosure(jint claim_value) :
1842 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
1843 bool doHeapRegion(HeapRegion* r) {
1844 if (r->claim_value() != _claim_value) {
1845 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
1846 "claim value = %d, should be %d",
1847 r->bottom(), r->end(), r->claim_value(),
1848 _claim_value);
1849 ++_failures;
1850 }
1851 if (!r->isHumongous()) {
1852 _sh_region = NULL;
1853 } else if (r->startsHumongous()) {
1854 _sh_region = r;
1855 } else if (r->continuesHumongous()) {
1856 if (r->humongous_start_region() != _sh_region) {
1857 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
1858 "HS = "PTR_FORMAT", should be "PTR_FORMAT,
1859 r->bottom(), r->end(),
1860 r->humongous_start_region(),
1861 _sh_region);
1862 ++_failures;
1863 }
1864 }
1865 return false;
1866 }
1867 size_t failures() {
1868 return _failures;
1869 }
1870 };
1872 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
1873 CheckClaimValuesClosure cl(claim_value);
1874 heap_region_iterate(&cl);
1875 return cl.failures() == 0;
1876 }
1877 #endif // ASSERT
1879 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
1880 HeapRegion* r = g1_policy()->collection_set();
1881 while (r != NULL) {
1882 HeapRegion* next = r->next_in_collection_set();
1883 if (cl->doHeapRegion(r)) {
1884 cl->incomplete();
1885 return;
1886 }
1887 r = next;
1888 }
1889 }
1891 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
1892 HeapRegionClosure *cl) {
1893 assert(r->in_collection_set(),
1894 "Start region must be a member of the collection set.");
1895 HeapRegion* cur = r;
1896 while (cur != NULL) {
1897 HeapRegion* next = cur->next_in_collection_set();
1898 if (cl->doHeapRegion(cur) && false) {
1899 cl->incomplete();
1900 return;
1901 }
1902 cur = next;
1903 }
1904 cur = g1_policy()->collection_set();
1905 while (cur != r) {
1906 HeapRegion* next = cur->next_in_collection_set();
1907 if (cl->doHeapRegion(cur) && false) {
1908 cl->incomplete();
1909 return;
1910 }
1911 cur = next;
1912 }
1913 }
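// Note on collection_set_iterate_from() above: the collection set is kept
// as a singly-linked list (next_in_collection_set()), so to visit every
// member starting from r we first walk from r to the end of the list and
// then wrap around from the list head up to (but not including) r. The
// "&& false" in the two termination tests effectively disables the
// early-exit / incomplete() path in this variant.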
1915 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
1916 return _hrs->length() > 0 ? _hrs->at(0) : NULL;
1917 }
1920 Space* G1CollectedHeap::space_containing(const void* addr) const {
1921 Space* res = heap_region_containing(addr);
1922 if (res == NULL)
1923 res = perm_gen()->space_containing(addr);
1924 return res;
1925 }
1927 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
1928 Space* sp = space_containing(addr);
1929 if (sp != NULL) {
1930 return sp->block_start(addr);
1931 }
1932 return NULL;
1933 }
1935 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
1936 Space* sp = space_containing(addr);
1937 assert(sp != NULL, "block_size of address outside of heap");
1938 return sp->block_size(addr);
1939 }
1941 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
1942 Space* sp = space_containing(addr);
1943 return sp->block_is_obj(addr);
1944 }
1946 bool G1CollectedHeap::supports_tlab_allocation() const {
1947 return true;
1948 }
1950 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
1951 return HeapRegion::GrainBytes;
1952 }
1954 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
1955 // Return the remaining space in the cur alloc region, but not less than
1956 // the min TLAB size.
1957 // Also, no more than half the region size, since we can't allow tlabs to
1958 // grow big enough to accommodate humongous objects.
1960 // We need to store it locally, since it might change between when we
1961 // test for NULL and when we use it later.
1962 ContiguousSpace* cur_alloc_space = _cur_alloc_region;
1963 if (cur_alloc_space == NULL) {
1964 return HeapRegion::GrainBytes/2;
1965 } else {
1966 return MAX2(MIN2(cur_alloc_space->free(),
1967 (size_t)(HeapRegion::GrainBytes/2)),
1968 (size_t)MinTLABSize);
1969 }
1970 }
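// Illustrative example of the clamping above (sizes are hypothetical):
// with a 1M region and 400K free in the current alloc region the result is
// MAX2(MIN2(400K, 512K), MinTLABSize) = 400K; with 900K free it is capped
// at half a region (512K) so a TLAB can never grow towards humongous size,
// and with almost nothing free it is rounded up to MinTLABSize.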
1972 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
1973 bool dummy;
1974 return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
1975 }
1977 bool G1CollectedHeap::allocs_are_zero_filled() {
1978 return false;
1979 }
1981 size_t G1CollectedHeap::large_typearray_limit() {
1982 // FIXME
1983 return HeapRegion::GrainBytes/HeapWordSize;
1984 }
1986 size_t G1CollectedHeap::max_capacity() const {
1987 return _g1_committed.byte_size();
1988 }
1990 jlong G1CollectedHeap::millis_since_last_gc() {
1991 // assert(false, "NYI");
1992 return 0;
1993 }
1996 void G1CollectedHeap::prepare_for_verify() {
1997 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1998 ensure_parsability(false);
1999 }
2000 g1_rem_set()->prepare_for_verify();
2001 }
2003 class VerifyLivenessOopClosure: public OopClosure {
2004 G1CollectedHeap* g1h;
2005 public:
2006 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
2007 g1h = _g1h;
2008 }
2009 void do_oop(narrowOop *p) {
2010 guarantee(false, "NYI");
2011 }
2012 void do_oop(oop *p) {
2013 oop obj = *p;
2014 assert(obj == NULL || !g1h->is_obj_dead(obj),
2015 "Dead object referenced by a not dead object");
2016 }
2017 };
2019 class VerifyObjsInRegionClosure: public ObjectClosure {
2020 G1CollectedHeap* _g1h;
2021 size_t _live_bytes;
2022 HeapRegion *_hr;
2023 public:
2024 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
2025 _g1h = G1CollectedHeap::heap();
2026 }
2027 void do_object(oop o) {
2028 VerifyLivenessOopClosure isLive(_g1h);
2029 assert(o != NULL, "Huh?");
2030 if (!_g1h->is_obj_dead(o)) {
2031 o->oop_iterate(&isLive);
2032 if (!_hr->obj_allocated_since_prev_marking(o))
2033 _live_bytes += (o->size() * HeapWordSize);
2034 }
2035 }
2036 size_t live_bytes() { return _live_bytes; }
2037 };
2039 class PrintObjsInRegionClosure : public ObjectClosure {
2040 HeapRegion *_hr;
2041 G1CollectedHeap *_g1;
2042 public:
2043 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
2044 _g1 = G1CollectedHeap::heap();
2045 };
2047 void do_object(oop o) {
2048 if (o != NULL) {
2049 HeapWord *start = (HeapWord *) o;
2050 size_t word_sz = o->size();
2051 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
2052 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
2053 (void*) o, word_sz,
2054 _g1->isMarkedPrev(o),
2055 _g1->isMarkedNext(o),
2056 _hr->obj_allocated_since_prev_marking(o));
2057 HeapWord *end = start + word_sz;
2058 HeapWord *cur;
2059 int *val;
2060 for (cur = start; cur < end; cur++) {
2061 val = (int *) cur;
2062 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
2063 }
2064 }
2065 }
2066 };
2068 class VerifyRegionClosure: public HeapRegionClosure {
2069 public:
2070 bool _allow_dirty;
2071 bool _par;
2072 VerifyRegionClosure(bool allow_dirty, bool par = false)
2073 : _allow_dirty(allow_dirty), _par(par) {}
2074 bool doHeapRegion(HeapRegion* r) {
2075 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
2076 "Should be unclaimed at verify points.");
2077 if (r->isHumongous()) {
2078 if (r->startsHumongous()) {
2079 // Verify the single H object.
2080 oop(r->bottom())->verify();
2081 size_t word_sz = oop(r->bottom())->size();
2082 guarantee(r->top() == r->bottom() + word_sz,
2083 "Only one object in a humongous region");
2084 }
2085 } else {
2086 VerifyObjsInRegionClosure not_dead_yet_cl(r);
2087 r->verify(_allow_dirty);
2088 r->object_iterate(&not_dead_yet_cl);
2089 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
2090 "More live objects than counted in last complete marking.");
2091 }
2092 return false;
2093 }
2094 };
2096 class VerifyRootsClosure: public OopsInGenClosure {
2097 private:
2098 G1CollectedHeap* _g1h;
2099 bool _failures;
2101 public:
2102 VerifyRootsClosure() :
2103 _g1h(G1CollectedHeap::heap()), _failures(false) { }
2105 bool failures() { return _failures; }
2107 void do_oop(narrowOop* p) {
2108 guarantee(false, "NYI");
2109 }
2111 void do_oop(oop* p) {
2112 oop obj = *p;
2113 if (obj != NULL) {
2114 if (_g1h->is_obj_dead(obj)) {
2115 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
2116 "points to dead obj "PTR_FORMAT, p, (void*) obj);
2117 obj->print_on(gclog_or_tty);
2118 _failures = true;
2119 }
2120 }
2121 }
2122 };
2124 // This is the task used for parallel heap verification.
2126 class G1ParVerifyTask: public AbstractGangTask {
2127 private:
2128 G1CollectedHeap* _g1h;
2129 bool _allow_dirty;
2131 public:
2132 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
2133 AbstractGangTask("Parallel verify task"),
2134 _g1h(g1h), _allow_dirty(allow_dirty) { }
2136 void work(int worker_i) {
2137 VerifyRegionClosure blk(_allow_dirty, true);
2138 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
2139 HeapRegion::ParVerifyClaimValue);
2140 }
2141 };
2143 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
2144 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2145 if (!silent) { gclog_or_tty->print("roots "); }
2146 VerifyRootsClosure rootsCl;
2147 process_strong_roots(false,
2148 SharedHeap::SO_AllClasses,
2149 &rootsCl,
2150 &rootsCl);
2151 rem_set()->invalidate(perm_gen()->used_region(), false);
2152 if (!silent) { gclog_or_tty->print("heapRegions "); }
2153 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
2154 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2155 "sanity check");
2157 G1ParVerifyTask task(this, allow_dirty);
2158 int n_workers = workers()->total_workers();
2159 set_par_threads(n_workers);
2160 workers()->run_task(&task);
2161 set_par_threads(0);
2163 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
2164 "sanity check");
2166 reset_heap_region_claim_values();
2168 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2169 "sanity check");
2170 } else {
2171 VerifyRegionClosure blk(allow_dirty);
2172 _hrs->iterate(&blk);
2173 }
2174 if (!silent) gclog_or_tty->print("remset ");
2175 rem_set()->verify();
2176 guarantee(!rootsCl.failures(), "should not have had failures");
2177 } else {
2178 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
2179 }
2180 }
2182 class PrintRegionClosure: public HeapRegionClosure {
2183 outputStream* _st;
2184 public:
2185 PrintRegionClosure(outputStream* st) : _st(st) {}
2186 bool doHeapRegion(HeapRegion* r) {
2187 r->print_on(_st);
2188 return false;
2189 }
2190 };
2192 void G1CollectedHeap::print() const { print_on(gclog_or_tty); }
2194 void G1CollectedHeap::print_on(outputStream* st) const {
2195 PrintRegionClosure blk(st);
2196 _hrs->iterate(&blk);
2197 }
2199 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2200 if (ParallelGCThreads > 0) {
2201 workers()->print_worker_threads();
2202 }
2203 st->print("\"G1 concurrent mark GC Thread\" ");
2204 _cmThread->print();
2205 st->cr();
2206 st->print("\"G1 concurrent refinement GC Thread\" ");
2207 _cg1r->cg1rThread()->print_on(st);
2208 st->cr();
2209 st->print("\"G1 zero-fill GC Thread\" ");
2210 _czft->print_on(st);
2211 st->cr();
2212 }
2214 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2215 if (ParallelGCThreads > 0) {
2216 workers()->threads_do(tc);
2217 }
2218 tc->do_thread(_cmThread);
2219 tc->do_thread(_cg1r->cg1rThread());
2220 tc->do_thread(_czft);
2221 }
2223 void G1CollectedHeap::print_tracing_info() const {
2224 concurrent_g1_refine()->print_final_card_counts();
2226 // We'll overload this to mean "trace GC pause statistics."
2227 if (TraceGen0Time || TraceGen1Time) {
2228 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
2229 // to that.
2230 g1_policy()->print_tracing_info();
2231 }
2232 if (SummarizeG1RSStats) {
2233 g1_rem_set()->print_summary_info();
2234 }
2235 if (SummarizeG1ConcMark) {
2236 concurrent_mark()->print_summary_info();
2237 }
2238 if (SummarizeG1ZFStats) {
2239 ConcurrentZFThread::print_summary_info();
2240 }
2241 if (G1SummarizePopularity) {
2242 print_popularity_summary_info();
2243 }
2244 g1_policy()->print_yg_surv_rate_info();
2246 GCOverheadReporter::printGCOverhead();
2248 SpecializationStats::print();
2249 }
2252 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
2253 HeapRegion* hr = heap_region_containing(addr);
2254 if (hr == NULL) {
2255 return 0;
2256 } else {
2257 return 1;
2258 }
2259 }
2261 G1CollectedHeap* G1CollectedHeap::heap() {
2262 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
2263 "not a garbage-first heap");
2264 return _g1h;
2265 }
2267 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2268 if (PrintHeapAtGC){
2269 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections());
2270 Universe::print();
2271 }
2272 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2273 // Call allocation profiler
2274 AllocationProfiler::iterate_since_last_gc();
2275 // Fill TLAB's and such
2276 ensure_parsability(true);
2277 }
2279 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
2280 // FIXME: what is this about?
2281 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2282 // is set.
2283 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
2284 "derived pointer present"));
2286 if (PrintHeapAtGC){
2287 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections());
2288 Universe::print();
2289 gclog_or_tty->print("} ");
2290 }
2291 }
2293 void G1CollectedHeap::do_collection_pause() {
2294 // Read the GC count while holding the Heap_lock
2295 // we need to do this _before_ wait_for_cleanup_complete(), to
2296 // ensure that we do not give up the heap lock and potentially
2297 // pick up the wrong count
2298 int gc_count_before = SharedHeap::heap()->total_collections();
2300 // Don't want to do a GC pause while cleanup is being completed!
2301 wait_for_cleanup_complete();
2303 g1_policy()->record_stop_world_start();
2304 {
2305 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
2306 VM_G1IncCollectionPause op(gc_count_before);
2307 VMThread::execute(&op);
2308 }
2309 }
2311 void
2312 G1CollectedHeap::doConcurrentMark() {
2313 if (G1ConcMark) {
2314 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2315 if (!_cmThread->in_progress()) {
2316 _cmThread->set_started();
2317 CGC_lock->notify();
2318 }
2319 }
2320 }
2322 class VerifyMarkedObjsClosure: public ObjectClosure {
2323 G1CollectedHeap* _g1h;
2324 public:
2325 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
2326 void do_object(oop obj) {
2327 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
2328 "markandsweep mark should agree with concurrent deadness");
2329 }
2330 };
2332 void
2333 G1CollectedHeap::checkConcurrentMark() {
2334 VerifyMarkedObjsClosure verifycl(this);
2335 doConcurrentMark();
2336 // MutexLockerEx x(getMarkBitMapLock(),
2337 // Mutex::_no_safepoint_check_flag);
2338 object_iterate(&verifycl);
2339 }
2341 void G1CollectedHeap::do_sync_mark() {
2342 _cm->checkpointRootsInitial();
2343 _cm->markFromRoots();
2344 _cm->checkpointRootsFinal(false);
2345 }
2347 // <NEW PREDICTION>
2349 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
2350 bool young) {
2351 return _g1_policy->predict_region_elapsed_time_ms(hr, young);
2352 }
2354 void G1CollectedHeap::check_if_region_is_too_expensive(double
2355 predicted_time_ms) {
2356 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
2357 }
2359 size_t G1CollectedHeap::pending_card_num() {
2360 size_t extra_cards = 0;
2361 JavaThread *curr = Threads::first();
2362 while (curr != NULL) {
2363 DirtyCardQueue& dcq = curr->dirty_card_queue();
2364 extra_cards += dcq.size();
2365 curr = curr->next();
2366 }
2367 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2368 size_t buffer_size = dcqs.buffer_size();
2369 size_t buffer_num = dcqs.completed_buffers_num();
2370 return buffer_size * buffer_num + extra_cards;
2371 }
2373 size_t G1CollectedHeap::max_pending_card_num() {
2374 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2375 size_t buffer_size = dcqs.buffer_size();
2376 size_t buffer_num = dcqs.completed_buffers_num();
2377 int thread_num = Threads::number_of_threads();
2378 return (buffer_num + thread_num) * buffer_size;
2379 }
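// Rough accounting behind the two methods above: the global
// DirtyCardQueueSet holds buffer_num completed buffers of buffer_size card
// pointers each, so pending_card_num() adds to that product whatever is
// sitting in each Java thread's partially-filled local queue, while
// max_pending_card_num() assumes every thread's local buffer could also
// fill up, hence (buffer_num + thread_num) * buffer_size. As a worked
// example (assuming a buffer size of 256 entries), 10 completed buffers
// and 3 threads bound the pending count by (10 + 3) * 256 = 3328 cards.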
2381 size_t G1CollectedHeap::cards_scanned() {
2382 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
2383 return g1_rset->cardsScanned();
2384 }
2386 void
2387 G1CollectedHeap::setup_surviving_young_words() {
2388 guarantee( _surviving_young_words == NULL, "pre-condition" );
2389 size_t array_length = g1_policy()->young_cset_length();
2390 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
2391 if (_surviving_young_words == NULL) {
2392 vm_exit_out_of_memory(sizeof(size_t) * array_length,
2393 "Not enough space for young surv words summary.");
2394 }
2395 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
2396 for (size_t i = 0; i < array_length; ++i) {
2397 guarantee( _surviving_young_words[i] == 0, "invariant" );
2398 }
2399 }
2401 void
2402 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
2403 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2404 size_t array_length = g1_policy()->young_cset_length();
2405 for (size_t i = 0; i < array_length; ++i)
2406 _surviving_young_words[i] += surv_young_words[i];
2407 }
2409 void
2410 G1CollectedHeap::cleanup_surviving_young_words() {
2411 guarantee( _surviving_young_words != NULL, "pre-condition" );
2412 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
2413 _surviving_young_words = NULL;
2414 }
2416 // </NEW PREDICTION>
2418 void
2419 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
2420 char verbose_str[128];
2421 sprintf(verbose_str, "GC pause ");
2422 if (popular_region != NULL)
2423 strcat(verbose_str, "(popular)");
2424 else if (g1_policy()->in_young_gc_mode()) {
2425 if (g1_policy()->full_young_gcs())
2426 strcat(verbose_str, "(young)");
2427 else
2428 strcat(verbose_str, "(partial)");
2429 }
2430 bool reset_should_initiate_conc_mark = false;
2431 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) {
2432 // we currently do not allow an initial mark phase to be piggy-backed
2433 // on a popular pause
2434 reset_should_initiate_conc_mark = true;
2435 g1_policy()->unset_should_initiate_conc_mark();
2436 }
2437 if (g1_policy()->should_initiate_conc_mark())
2438 strcat(verbose_str, " (initial-mark)");
2440 GCCauseSetter x(this, (popular_region == NULL ?
2441 GCCause::_g1_inc_collection_pause :
2442 GCCause::_g1_pop_region_collection_pause));
2444 // if PrintGCDetails is on, we'll print long statistics information
2445 // in the collector policy code, so let's not print this as the output
2446 // is messy if we do.
2447 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2448 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2449 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
2451 ResourceMark rm;
2452 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
2453 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
2454 guarantee(!is_gc_active(), "collection is not reentrant");
2455 assert(regions_accounted_for(), "Region leakage!");
2457 increment_gc_time_stamp();
2459 if (g1_policy()->in_young_gc_mode()) {
2460 assert(check_young_list_well_formed(),
2461 "young list should be well formed");
2462 }
2464 if (GC_locker::is_active()) {
2465 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
2466 }
2468 bool abandoned = false;
2469 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2470 IsGCActiveMark x;
2472 gc_prologue(false);
2473 increment_total_collections();
2475 #if G1_REM_SET_LOGGING
2476 gclog_or_tty->print_cr("\nJust chose CS, heap:");
2477 print();
2478 #endif
2480 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
2481 HandleMark hm; // Discard invalid handles created during verification
2482 prepare_for_verify();
2483 gclog_or_tty->print(" VerifyBeforeGC:");
2484 Universe::verify(false);
2485 }
2487 COMPILER2_PRESENT(DerivedPointerTable::clear());
2489 // We want to turn off ref discovery, if necessary, and turn it back
2490 // on again later if we do.
2491 bool was_enabled = ref_processor()->discovery_enabled();
2492 if (was_enabled) ref_processor()->disable_discovery();
2494 // Forget the current alloc region (we might even choose it to be part
2495 // of the collection set!).
2496 abandon_cur_alloc_region();
2498 // The elapsed time measured from the start time below deliberately
2499 // excludes the possible verification above.
2500 double start_time_sec = os::elapsedTime();
2501 GCOverheadReporter::recordSTWStart(start_time_sec);
2502 size_t start_used_bytes = used();
2503 if (!G1ConcMark) {
2504 do_sync_mark();
2505 }
2507 g1_policy()->record_collection_pause_start(start_time_sec,
2508 start_used_bytes);
2510 guarantee(_in_cset_fast_test == NULL, "invariant");
2511 guarantee(_in_cset_fast_test_base == NULL, "invariant");
2512 _in_cset_fast_test_length = n_regions();
2513 _in_cset_fast_test_base =
2514 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
2515 memset(_in_cset_fast_test_base, false,
2516 _in_cset_fast_test_length * sizeof(bool));
2517 // We're biasing _in_cset_fast_test to avoid subtracting the
2518 // beginning of the heap every time we want to index; basically
2519 // it's the same as what we do with the card table.
2520 _in_cset_fast_test = _in_cset_fast_test_base -
2521 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
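// Illustrative sketch of the biasing above (the real accessor is defined
// elsewhere; this just shows the idea): a membership test becomes
//   _in_cset_fast_test[(uintptr_t)addr >> HeapRegion::LogOfHRGrainBytes]
// i.e. the address is shifted straight down to a region index and used to
// index the biased pointer, with no explicit subtraction of the heap
// start -- the same trick the card table plays with its byte map base.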
2523 #if SCAN_ONLY_VERBOSE
2524 _young_list->print();
2525 #endif // SCAN_ONLY_VERBOSE
2527 if (g1_policy()->should_initiate_conc_mark()) {
2528 concurrent_mark()->checkpointRootsInitialPre();
2529 }
2530 save_marks();
2532 // We must do this before any possible evacuation that should propagate
2533 // marks, including evacuation of popular objects in a popular pause.
2534 if (mark_in_progress()) {
2535 double start_time_sec = os::elapsedTime();
2537 _cm->drainAllSATBBuffers();
2538 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
2539 g1_policy()->record_satb_drain_time(finish_mark_ms);
2541 }
2542 // Record the number of elements currently on the mark stack, so we
2543 // only iterate over these. (Since evacuation may add to the mark
2544 // stack, doing more exposes race conditions.) If no mark is in
2545 // progress, this will be zero.
2546 _cm->set_oops_do_bound();
2548 assert(regions_accounted_for(), "Region leakage.");
2550 bool abandoned = false;
2552 if (mark_in_progress())
2553 concurrent_mark()->newCSet();
2555 // Now choose the CS.
2556 if (popular_region == NULL) {
2557 g1_policy()->choose_collection_set();
2558 } else {
2559 // We may be evacuating a single region (for popularity).
2560 g1_policy()->record_popular_pause_preamble_start();
2561 popularity_pause_preamble(popular_region);
2562 g1_policy()->record_popular_pause_preamble_end();
2563 abandoned = (g1_policy()->collection_set() == NULL);
2564 // Now we allow more regions to be added (we have to collect
2565 // all popular regions).
2566 if (!abandoned) {
2567 g1_policy()->choose_collection_set(popular_region);
2568 }
2569 }
2570 // We may abandon a pause if we find no region that will fit in the MMU
2571 // pause.
2572 abandoned = (g1_policy()->collection_set() == NULL);
2574 // Nothing to do if we were unable to choose a collection set.
2575 if (!abandoned) {
2576 #if G1_REM_SET_LOGGING
2577 gclog_or_tty->print_cr("\nAfter pause, heap:");
2578 print();
2579 #endif
2581 setup_surviving_young_words();
2583 // Set up the gc allocation regions.
2584 get_gc_alloc_regions();
2586 // Actually do the work...
2587 evacuate_collection_set();
2588 free_collection_set(g1_policy()->collection_set());
2589 g1_policy()->clear_collection_set();
2591 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
2592 // this is more for peace of mind; we're nulling them here and
2593 // we're expecting them to be null at the beginning of the next GC
2594 _in_cset_fast_test = NULL;
2595 _in_cset_fast_test_base = NULL;
2597 if (popular_region != NULL) {
2598 // We have to wait until now, because we don't want the region to
2599 // be rescheduled for pop-evac during RS update.
2600 popular_region->set_popular_pending(false);
2601 }
2603 release_gc_alloc_regions();
2605 cleanup_surviving_young_words();
2607 if (g1_policy()->in_young_gc_mode()) {
2608 _young_list->reset_sampled_info();
2609 assert(check_young_list_empty(true),
2610 "young list should be empty");
2612 #if SCAN_ONLY_VERBOSE
2613 _young_list->print();
2614 #endif // SCAN_ONLY_VERBOSE
2616 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
2617 _young_list->first_survivor_region(),
2618 _young_list->last_survivor_region());
2619 _young_list->reset_auxilary_lists();
2620 }
2621 } else {
2622 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
2623 }
2625 if (evacuation_failed()) {
2626 _summary_bytes_used = recalculate_used();
2627 } else {
2628 // The "used" of the the collection set have already been subtracted
2629 // when they were freed. Add in the bytes evacuated.
2630 _summary_bytes_used += g1_policy()->bytes_in_to_space();
2631 }
2633 if (g1_policy()->in_young_gc_mode() &&
2634 g1_policy()->should_initiate_conc_mark()) {
2635 concurrent_mark()->checkpointRootsInitialPost();
2636 set_marking_started();
2637 doConcurrentMark();
2638 }
2640 #if SCAN_ONLY_VERBOSE
2641 _young_list->print();
2642 #endif // SCAN_ONLY_VERBOSE
2644 double end_time_sec = os::elapsedTime();
2645 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
2646 g1_policy()->record_pause_time_ms(pause_time_ms);
2647 GCOverheadReporter::recordSTWEnd(end_time_sec);
2648 g1_policy()->record_collection_pause_end(popular_region != NULL,
2649 abandoned);
2651 assert(regions_accounted_for(), "Region leakage.");
2653 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
2654 HandleMark hm; // Discard invalid handles created during verification
2655 gclog_or_tty->print(" VerifyAfterGC:");
2656 Universe::verify(false);
2657 }
2659 if (was_enabled) ref_processor()->enable_discovery();
2661 {
2662 size_t expand_bytes = g1_policy()->expansion_amount();
2663 if (expand_bytes > 0) {
2664 size_t bytes_before = capacity();
2665 expand(expand_bytes);
2666 }
2667 }
2669 if (mark_in_progress()) {
2670 concurrent_mark()->update_g1_committed();
2671 }
2673 #ifdef TRACESPINNING
2674 ParallelTaskTerminator::print_termination_counts();
2675 #endif
2677 gc_epilogue(false);
2678 }
2680 assert(verify_region_lists(), "Bad region lists.");
2682 if (reset_should_initiate_conc_mark)
2683 g1_policy()->set_should_initiate_conc_mark();
2685 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
2686 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
2687 print_tracing_info();
2688 vm_exit(-1);
2689 }
2690 }
2692 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
2693 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
2694 HeapWord* original_top = NULL;
2695 if (r != NULL)
2696 original_top = r->top();
2698 // We will want to record the used space in r as being there before gc.
2699 // Once we install it as a GC alloc region it's eligible for allocation.
2700 // So record it now and use it later.
2701 size_t r_used = 0;
2702 if (r != NULL) {
2703 r_used = r->used();
2705 if (ParallelGCThreads > 0) {
2706 // need to take the lock to guard against two threads calling
2707 // get_gc_alloc_region concurrently (very unlikely but...)
2708 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2709 r->save_marks();
2710 }
2711 }
2712 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
2713 _gc_alloc_regions[purpose] = r;
2714 if (old_alloc_region != NULL) {
2715 // Replace aliases too.
2716 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2717 if (_gc_alloc_regions[ap] == old_alloc_region) {
2718 _gc_alloc_regions[ap] = r;
2719 }
2720 }
2721 }
2722 if (r != NULL) {
2723 push_gc_alloc_region(r);
2724 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
2725 // We are using a region as a GC alloc region after it has been used
2726 // as a mutator allocation region during the current marking cycle.
2727 // The mutator-allocated objects are currently implicitly marked, but
2728 // when we move hr->next_top_at_mark_start() forward at the end
2729 // of the GC pause, they won't be. We therefore mark all objects in
2730 // the "gap". We do this object-by-object, since marking densely
2731 // does not currently work right with marking bitmap iteration. This
2732 // means we rely on TLAB filling at the start of pauses, and no
2733 // "resuscitation" of filled TLAB's. If we want to do this, we need
2734 // to fix the marking bitmap iteration.
2735 HeapWord* curhw = r->next_top_at_mark_start();
2736 HeapWord* t = original_top;
2738 while (curhw < t) {
2739 oop cur = (oop)curhw;
2740 // We'll assume parallel for generality. This is rare code.
2741 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
2742 curhw = curhw + cur->size();
2743 }
2744 assert(curhw == t, "Should have parsed correctly.");
2745 }
2746 if (G1PolicyVerbose > 1) {
2747 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
2748 "for survivors:", r->bottom(), original_top, r->end());
2749 r->print();
2750 }
2751 g1_policy()->record_before_bytes(r_used);
2752 }
2753 }
2755 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
2756 assert(Thread::current()->is_VM_thread() ||
2757 par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
2758 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
2759 "Precondition.");
2760 hr->set_is_gc_alloc_region(true);
2761 hr->set_next_gc_alloc_region(_gc_alloc_region_list);
2762 _gc_alloc_region_list = hr;
2763 }
2765 #ifdef G1_DEBUG
2766 class FindGCAllocRegion: public HeapRegionClosure {
2767 public:
2768 bool doHeapRegion(HeapRegion* r) {
2769 if (r->is_gc_alloc_region()) {
2770 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
2771 r->hrs_index(), r->bottom());
2772 }
2773 return false;
2774 }
2775 };
2776 #endif // G1_DEBUG
2778 void G1CollectedHeap::forget_alloc_region_list() {
2779 assert(Thread::current()->is_VM_thread(), "Precondition");
2780 while (_gc_alloc_region_list != NULL) {
2781 HeapRegion* r = _gc_alloc_region_list;
2782 assert(r->is_gc_alloc_region(), "Invariant.");
2783 _gc_alloc_region_list = r->next_gc_alloc_region();
2784 r->set_next_gc_alloc_region(NULL);
2785 r->set_is_gc_alloc_region(false);
2786 if (r->is_survivor()) {
2787 if (r->is_empty()) {
2788 r->set_not_young();
2789 } else {
2790 _young_list->add_survivor_region(r);
2791 }
2792 }
2793 if (r->is_empty()) {
2794 ++_free_regions;
2795 }
2796 }
2797 #ifdef G1_DEBUG
2798 FindGCAllocRegion fa;
2799 heap_region_iterate(&fa);
2800 #endif // G1_DEBUG
2801 }
2804 bool G1CollectedHeap::check_gc_alloc_regions() {
2805 // TODO: allocation regions check
2806 return true;
2807 }
2809 void G1CollectedHeap::get_gc_alloc_regions() {
2810 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2811 // Create new GC alloc regions.
2812 HeapRegion* alloc_region = _gc_alloc_regions[ap];
2813 // Clear this alloc region, so that in case it turns out to be
2814 // unacceptable, we end up with no allocation region, rather than a bad
2815 // one.
2816 _gc_alloc_regions[ap] = NULL;
2817 if (alloc_region == NULL || alloc_region->in_collection_set()) {
2818 // Can't re-use old one. Allocate a new one.
2819 alloc_region = newAllocRegionWithExpansion(ap, 0);
2820 }
2821 if (alloc_region != NULL) {
2822 set_gc_alloc_region(ap, alloc_region);
2823 }
2824 }
2825 // Set alternative regions for allocation purposes that have reached
2826 // their limit.
2827 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2828 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
2829 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
2830 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
2831 }
2832 }
2833 assert(check_gc_alloc_regions(), "alloc regions messed up");
2834 }
2836 void G1CollectedHeap::release_gc_alloc_regions() {
2837 // We keep a separate list of all regions that have been alloc regions in
2838 // the current collection pause. Forget that now.
2839 forget_alloc_region_list();
2841 // The current alloc regions contain objs that have survived
2842 // collection. Make them no longer GC alloc regions.
2843 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2844 HeapRegion* r = _gc_alloc_regions[ap];
2845 if (r != NULL && r->is_empty()) {
2846 {
2847 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
2848 r->set_zero_fill_complete();
2849 put_free_region_on_list_locked(r);
2850 }
2851 }
2852 // set_gc_alloc_region will also NULLify all aliases to the region
2853 set_gc_alloc_region(ap, NULL);
2854 _gc_alloc_region_counts[ap] = 0;
2855 }
2856 }
2858 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
2859 _drain_in_progress = false;
2860 set_evac_failure_closure(cl);
2861 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
2862 }
2864 void G1CollectedHeap::finalize_for_evac_failure() {
2865 assert(_evac_failure_scan_stack != NULL &&
2866 _evac_failure_scan_stack->length() == 0,
2867 "Postcondition");
2868 assert(!_drain_in_progress, "Postcondition");
2869 // Don't have to delete, since the scan stack is a resource object.
2870 _evac_failure_scan_stack = NULL;
2871 }
2875 // *** Sequential G1 Evacuation
2877 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
2878 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
2879 // let the caller handle alloc failure
2880 if (alloc_region == NULL) return NULL;
2881 assert(isHumongous(word_size) || !alloc_region->isHumongous(),
2882 "Either the object is humongous or the region isn't");
2883 HeapWord* block = alloc_region->allocate(word_size);
2884 if (block == NULL) {
2885 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
2886 }
2887 return block;
2888 }
2890 class G1IsAliveClosure: public BoolObjectClosure {
2891 G1CollectedHeap* _g1;
2892 public:
2893 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
2894 void do_object(oop p) { assert(false, "Do not call."); }
2895 bool do_object_b(oop p) {
2896 // It is reachable if it is outside the collection set, or is inside
2897 // and forwarded.
2899 #ifdef G1_DEBUG
2900 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
2901 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
2902 !_g1->obj_in_cs(p) || p->is_forwarded());
2903 #endif // G1_DEBUG
2905 return !_g1->obj_in_cs(p) || p->is_forwarded();
2906 }
2907 };
2909 class G1KeepAliveClosure: public OopClosure {
2910 G1CollectedHeap* _g1;
2911 public:
2912 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
2913 void do_oop(narrowOop* p) {
2914 guarantee(false, "NYI");
2915 }
2916 void do_oop(oop* p) {
2917 oop obj = *p;
2918 #ifdef G1_DEBUG
2919 if (PrintGC && Verbose) {
2920 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
2921 p, (void*) obj, (void*) *p);
2922 }
2923 #endif // G1_DEBUG
2925 if (_g1->obj_in_cs(obj)) {
2926 assert( obj->is_forwarded(), "invariant" );
2927 *p = obj->forwardee();
2929 #ifdef G1_DEBUG
2930 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
2931 (void*) obj, (void*) *p);
2932 #endif // G1_DEBUG
2933 }
2934 }
2935 };
2937 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
2938 private:
2939 G1CollectedHeap* _g1;
2940 G1RemSet* _g1_rem_set;
2941 public:
2942 UpdateRSetImmediate(G1CollectedHeap* g1) :
2943 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
2945 void do_oop(narrowOop* p) {
2946 guarantee(false, "NYI");
2947 }
2948 void do_oop(oop* p) {
2949 assert(_from->is_in_reserved(p), "paranoia");
2950 if (*p != NULL && !_from->is_survivor()) {
2951 _g1_rem_set->par_write_ref(_from, p, 0);
2952 }
2953 }
2954 };
2956 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
2957 private:
2958 G1CollectedHeap* _g1;
2959 DirtyCardQueue *_dcq;
2960 CardTableModRefBS* _ct_bs;
2962 public:
2963 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
2964 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
2966 void do_oop(narrowOop* p) {
2967 guarantee(false, "NYI");
2968 }
2969 void do_oop(oop* p) {
2970 assert(_from->is_in_reserved(p), "paranoia");
2971 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
2972 size_t card_index = _ct_bs->index_for(p);
2973 if (_ct_bs->mark_card_deferred(card_index)) {
2974 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
2975 }
2976 }
2977 }
2978 };
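// The two closures above are the alternative strategies for re-creating
// remembered-set entries for regions whose objects failed to evacuate:
// UpdateRSetImmediate pushes each interesting reference into the
// remembered set right away via par_write_ref(), whereas UpdateRSetDeferred
// (used when G1DeferredRSUpdate is enabled, see
// remove_self_forwarding_pointers() below) only marks the covering card as
// deferred and enqueues it on a DirtyCardQueue, so the actual
// remembered-set work happens later, when the queued cards are processed.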
2982 class RemoveSelfPointerClosure: public ObjectClosure {
2983 private:
2984 G1CollectedHeap* _g1;
2985 ConcurrentMark* _cm;
2986 HeapRegion* _hr;
2987 size_t _prev_marked_bytes;
2988 size_t _next_marked_bytes;
2989 OopsInHeapRegionClosure *_cl;
2990 public:
2991 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
2992 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
2993 _next_marked_bytes(0), _cl(cl) {}
2995 size_t prev_marked_bytes() { return _prev_marked_bytes; }
2996 size_t next_marked_bytes() { return _next_marked_bytes; }
2998 // The original idea here was to coalesce evacuated and dead objects.
2999 // However that caused complications with the block offset table (BOT).
3000 // In particular if there were two TLABs, one of them partially refined.
3001 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
3002 // The BOT entries of the unrefined part of TLAB_2 point to the start
3003 // of TLAB_2. If the last object of the TLAB_1 and the first object
3004 // of TLAB_2 are coalesced, then the cards of the unrefined part
3005 // would point into the middle of the filler object.
3006 //
3007 // The current approach is to not coalesce and leave the BOT contents intact.
3008 void do_object(oop obj) {
3009 if (obj->is_forwarded() && obj->forwardee() == obj) {
3010 // The object failed to move.
3011 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
3012 _cm->markPrev(obj);
3013 assert(_cm->isPrevMarked(obj), "Should be marked!");
3014 _prev_marked_bytes += (obj->size() * HeapWordSize);
3015 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
3016 _cm->markAndGrayObjectIfNecessary(obj);
3017 }
3018 obj->set_mark(markOopDesc::prototype());
3019 // While we were processing RSet buffers during the
3020 // collection, we actually didn't scan any cards on the
3021 // collection set, since we didn't want to update remembered
3022 // sets with entries that point into the collection set, given
3023 // that live objects from the collection set are about to move
3024 // and such entries will be stale very soon. This change also
3025 // dealt with a reliability issue which involved scanning a
3026 // card in the collection set and coming across an array that
3027 // was being chunked and looking malformed. The problem is
3028 // that, if evacuation fails, we might have remembered set
3029 // entries missing given that we skipped cards on the
3030 // collection set. So, we'll recreate such entries now.
3031 obj->oop_iterate(_cl);
3032 assert(_cm->isPrevMarked(obj), "Should be marked!");
3033 } else {
3034 // The object has been either evacuated or is dead. Fill it with a
3035 // dummy object.
3036 MemRegion mr((HeapWord*)obj, obj->size());
3037 CollectedHeap::fill_with_object(mr);
3038 _cm->clearRangeBothMaps(mr);
3039 }
3040 }
3041 };
3043 void G1CollectedHeap::remove_self_forwarding_pointers() {
3044 UpdateRSetImmediate immediate_update(_g1h);
3045 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
3046 UpdateRSetDeferred deferred_update(_g1h, &dcq);
3047 OopsInHeapRegionClosure *cl;
3048 if (G1DeferredRSUpdate) {
3049 cl = &deferred_update;
3050 } else {
3051 cl = &immediate_update;
3052 }
3053 HeapRegion* cur = g1_policy()->collection_set();
3054 while (cur != NULL) {
3055 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3057 RemoveSelfPointerClosure rspc(_g1h, cl);
3058 if (cur->evacuation_failed()) {
3059 assert(cur->in_collection_set(), "bad CS");
3060 cl->set_region(cur);
3061 cur->object_iterate(&rspc);
3063 // A number of manipulations to make the TAMS be the current top,
3064 // and the marked bytes be the ones observed in the iteration.
3065 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
3066 // The comments below are the postconditions achieved by the
3067 // calls. Note especially the last such condition, which says that
3068 // the count of marked bytes has been properly restored.
3069 cur->note_start_of_marking(false);
3070 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3071 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
3072 // _next_marked_bytes == prev_marked_bytes.
3073 cur->note_end_of_marking();
3074 // _prev_top_at_mark_start == top(),
3075 // _prev_marked_bytes == prev_marked_bytes
3076 }
3077 // If there is no mark in progress, we modified the _next variables
3078 // above needlessly, but harmlessly.
3079 if (_g1h->mark_in_progress()) {
3080 cur->note_start_of_marking(false);
3081 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3082 // _next_marked_bytes == next_marked_bytes.
3083 }
3085 // Now make sure the region has the right index in the sorted array.
3086 g1_policy()->note_change_in_marked_bytes(cur);
3087 }
3088 cur = cur->next_in_collection_set();
3089 }
3090 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3092 // Now restore saved marks, if any.
3093 if (_objs_with_preserved_marks != NULL) {
3094 assert(_preserved_marks_of_objs != NULL, "Both or none.");
3095 assert(_objs_with_preserved_marks->length() ==
3096 _preserved_marks_of_objs->length(), "Both or none.");
3097 guarantee(_objs_with_preserved_marks->length() ==
3098 _preserved_marks_of_objs->length(), "Both or none.");
3099 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
3100 oop obj = _objs_with_preserved_marks->at(i);
3101 markOop m = _preserved_marks_of_objs->at(i);
3102 obj->set_mark(m);
3103 }
3104 // Delete the preserved marks growable arrays (allocated on the C heap).
3105 delete _objs_with_preserved_marks;
3106 delete _preserved_marks_of_objs;
3107 _objs_with_preserved_marks = NULL;
3108 _preserved_marks_of_objs = NULL;
3109 }
3110 }
3112 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
3113 _evac_failure_scan_stack->push(obj);
3114 }
3116 void G1CollectedHeap::drain_evac_failure_scan_stack() {
3117 assert(_evac_failure_scan_stack != NULL, "precondition");
3119 while (_evac_failure_scan_stack->length() > 0) {
3120 oop obj = _evac_failure_scan_stack->pop();
3121 _evac_failure_closure->set_region(heap_region_containing(obj));
3122 obj->oop_iterate_backwards(_evac_failure_closure);
3123 }
3124 }
3126 void G1CollectedHeap::handle_evacuation_failure(oop old) {
3127 markOop m = old->mark();
3128 // forward to self
3129 assert(!old->is_forwarded(), "precondition");
3131 old->forward_to(old);
3132 handle_evacuation_failure_common(old, m);
3133 }
3135 oop
3136 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
3137 oop old) {
3138 markOop m = old->mark();
3139 oop forward_ptr = old->forward_to_atomic(old);
3140 if (forward_ptr == NULL) {
3141 // Forward-to-self succeeded.
3142 if (_evac_failure_closure != cl) {
3143 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
3144 assert(!_drain_in_progress,
3145 "Should only be true while someone holds the lock.");
3146 // Set the global evac-failure closure to the current thread's.
3147 assert(_evac_failure_closure == NULL, "Or locking has failed.");
3148 set_evac_failure_closure(cl);
3149 // Now do the common part.
3150 handle_evacuation_failure_common(old, m);
3151 // Reset to NULL.
3152 set_evac_failure_closure(NULL);
3153 } else {
3154 // The lock is already held, and this is recursive.
3155 assert(_drain_in_progress, "This should only be the recursive case.");
3156 handle_evacuation_failure_common(old, m);
3157 }
3158 return old;
3159 } else {
3160 // Someone else had a place to copy it.
3161 return forward_ptr;
3162 }
3163 }
3165 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
3166 set_evacuation_failed(true);
3168 preserve_mark_if_necessary(old, m);
3170 HeapRegion* r = heap_region_containing(old);
3171 if (!r->evacuation_failed()) {
3172 r->set_evacuation_failed(true);
3173 if (G1TraceRegions) {
3174 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
3175 "["PTR_FORMAT","PTR_FORMAT")\n",
3176 r, r->bottom(), r->end());
3177 }
3178 }
3180 push_on_evac_failure_scan_stack(old);
3182 if (!_drain_in_progress) {
3183 // prevent recursion in copy_to_survivor_space()
3184 _drain_in_progress = true;
3185 drain_evac_failure_scan_stack();
3186 _drain_in_progress = false;
3187 }
3188 }
3190 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
3191 if (m != markOopDesc::prototype()) {
3192 if (_objs_with_preserved_marks == NULL) {
3193 assert(_preserved_marks_of_objs == NULL, "Both or none.");
3194 _objs_with_preserved_marks =
3195 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
3196 _preserved_marks_of_objs =
3197 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
3198 }
3199 _objs_with_preserved_marks->push(obj);
3200 _preserved_marks_of_objs->push(m);
3201 }
3202 }
3204 // *** Parallel G1 Evacuation
3206 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
3207 size_t word_size) {
3208 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
3209 // let the caller handle alloc failure
3210 if (alloc_region == NULL) return NULL;
3212 HeapWord* block = alloc_region->par_allocate(word_size);
3213 if (block == NULL) {
3214 MutexLockerEx x(par_alloc_during_gc_lock(),
3215 Mutex::_no_safepoint_check_flag);
3216 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
3217 }
3218 return block;
3219 }
3221 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
3222 bool par) {
3223 // Another thread might have obtained alloc_region for the given
3224 // purpose, and might be attempting to allocate in it, and might
3225 // succeed. Therefore, we can't do the "finalization" stuff on the
3226 // region below until we're sure the last allocation has happened.
3227 // We ensure this by allocating the remaining space with a garbage
3228 // object.
3229 if (par) par_allocate_remaining_space(alloc_region);
3230 // Now we can do the post-GC stuff on the region.
3231 alloc_region->note_end_of_copying();
3232 g1_policy()->record_after_bytes(alloc_region->used());
3233 }
3235 HeapWord*
3236 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
3237 HeapRegion* alloc_region,
3238 bool par,
3239 size_t word_size) {
3240 HeapWord* block = NULL;
3241 // In the parallel case, a previous thread that obtained the lock may have
3242 // already assigned a new gc_alloc_region.
3243 if (alloc_region != _gc_alloc_regions[purpose]) {
3244 assert(par, "But should only happen in parallel case.");
3245 alloc_region = _gc_alloc_regions[purpose];
3246 if (alloc_region == NULL) return NULL;
3247 block = alloc_region->par_allocate(word_size);
3248 if (block != NULL) return block;
3249 // Otherwise, continue; this new region is empty, too.
3250 }
3251 assert(alloc_region != NULL, "We better have an allocation region");
3252 retire_alloc_region(alloc_region, par);
3254 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
3255 // Cannot allocate more regions for the given purpose.
3256 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
3257 // Is there an alternative?
3258 if (purpose != alt_purpose) {
3259 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
3260 // Has the alternative region not been aliased?
3261 if (alloc_region != alt_region && alt_region != NULL) {
3262 // Try to allocate in the alternative region.
3263 if (par) {
3264 block = alt_region->par_allocate(word_size);
3265 } else {
3266 block = alt_region->allocate(word_size);
3267 }
3268 // Make an alias.
3269 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
3270 if (block != NULL) {
3271 return block;
3272 }
3273 retire_alloc_region(alt_region, par);
3274 }
3275 // Both the allocation region and the alternative one are full
3276 // and aliased, replace them with a new allocation region.
3277 purpose = alt_purpose;
3278 } else {
3279 set_gc_alloc_region(purpose, NULL);
3280 return NULL;
3281 }
3282 }
3284 // Now allocate a new region for allocation.
3285 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
3287 // let the caller handle alloc failure
3288 if (alloc_region != NULL) {
3290 assert(check_gc_alloc_regions(), "alloc regions messed up");
3291 assert(alloc_region->saved_mark_at_top(),
3292 "Mark should have been saved already.");
3293 // We used to assert that the region was zero-filled here, but no
3294 // longer.
3296 // This must be done last: once it's installed, other regions may
3297 // allocate in it (without holding the lock.)
3298 set_gc_alloc_region(purpose, alloc_region);
3300 if (par) {
3301 block = alloc_region->par_allocate(word_size);
3302 } else {
3303 block = alloc_region->allocate(word_size);
3304 }
3305 // Caller handles alloc failure.
3306 } else {
3307 // This sets other apis using the same old alloc region to NULL, also.
3308 set_gc_alloc_region(purpose, NULL);
3309 }
3310 return block; // May be NULL.
3311 }
3313 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
3314 HeapWord* block = NULL;
3315 size_t free_words;
3316 do {
3317 free_words = r->free()/HeapWordSize;
3318 // If there's too little space, no one can allocate, so we're done.
3319 if (free_words < (size_t)oopDesc::header_size()) return;
3320 // Otherwise, try to claim it.
3321 block = r->par_allocate(free_words);
3322 } while (block == NULL);
3323 fill_with_object(block, free_words);
3324 }
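// A note on the retry loop above (behaviour inferred from the code, not a
// new API): par_allocate() claims space with an atomic bump of top, so if
// another thread races in and takes part of the remaining space the claim
// for free_words fails and returns NULL; free() is then re-read and the
// attempt repeated with the smaller amount, until either the claim
// succeeds or too little space is left to hold even an object header.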
3326 #define use_local_bitmaps 1
3327 #define verify_local_bitmaps 0
3329 #ifndef PRODUCT
3331 class GCLabBitMap;
3332 class GCLabBitMapClosure: public BitMapClosure {
3333 private:
3334 ConcurrentMark* _cm;
3335 GCLabBitMap* _bitmap;
3337 public:
3338 GCLabBitMapClosure(ConcurrentMark* cm,
3339 GCLabBitMap* bitmap) {
3340 _cm = cm;
3341 _bitmap = bitmap;
3342 }
3344 virtual bool do_bit(size_t offset);
3345 };
3347 #endif // PRODUCT
3349 #define oop_buffer_length 256
3351 class GCLabBitMap: public BitMap {
3352 private:
3353 ConcurrentMark* _cm;
3355 int _shifter;
3356 size_t _bitmap_word_covers_words;
3358 // beginning of the heap
3359 HeapWord* _heap_start;
3361 // this is the actual start of the GCLab
3362 HeapWord* _real_start_word;
3364 // this is the actual end of the GCLab
3365 HeapWord* _real_end_word;
3367 // this is the first word, possibly located before the actual start
3368 // of the GCLab, that corresponds to the first bit of the bitmap
3369 HeapWord* _start_word;
3371 // size of a GCLab in words
3372 size_t _gclab_word_size;
3374 static int shifter() {
3375 return MinObjAlignment - 1;
3376 }
3378 // how many heap words does a single bitmap word correspond to?
3379 static size_t bitmap_word_covers_words() {
3380 return BitsPerWord << shifter();
3381 }
3383 static size_t gclab_word_size() {
3384 return ParallelGCG1AllocBufferSize / HeapWordSize;
3385 }
3387 static size_t bitmap_size_in_bits() {
3388 size_t bits_in_bitmap = gclab_word_size() >> shifter();
3389 // We are going to ensure that the beginning of a word in this
3390 // bitmap also corresponds to the beginning of a word in the
3391 // global marking bitmap. To handle the case where a GCLab
3392 // starts from the middle of the bitmap, we need to add enough
3393 // space (i.e. up to a bitmap word) to ensure that we have
3394 // enough bits in the bitmap.
3395 return bits_in_bitmap + BitsPerWord - 1;
3396 }
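  // Illustrative sizing of the above, assuming for the sake of example a
  // 64-bit VM (BitsPerWord == 64, HeapWordSize == 8), MinObjAlignment == 1
  // and ParallelGCG1AllocBufferSize == 16K (these values are assumptions,
  // not requirements):
  //   shifter()             == MinObjAlignment - 1 == 0
  //   gclab_word_size()     == 16384 / 8           == 2048 words
  //   bits_in_bitmap        == 2048 >> 0           == 2048 bits
  //   bitmap_size_in_bits() == 2048 + 64 - 1       == 2111 bits
  // i.e. one bit per (aligned) heap word in the GCLab, plus up to a bitmap
  // word of slack so that set_buffer() can align the start word down
  // without running out of bits.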
3397 public:
3398 GCLabBitMap(HeapWord* heap_start)
3399 : BitMap(bitmap_size_in_bits()),
3400 _cm(G1CollectedHeap::heap()->concurrent_mark()),
3401 _shifter(shifter()),
3402 _bitmap_word_covers_words(bitmap_word_covers_words()),
3403 _heap_start(heap_start),
3404 _gclab_word_size(gclab_word_size()),
3405 _real_start_word(NULL),
3406 _real_end_word(NULL),
3407 _start_word(NULL)
3408 {
3409 guarantee( size_in_words() >= bitmap_size_in_words(),
3410 "just making sure");
3411 }
3413 inline unsigned heapWordToOffset(HeapWord* addr) {
3414 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
3415 assert(offset < size(), "offset should be within bounds");
3416 return offset;
3417 }
3419 inline HeapWord* offsetToHeapWord(size_t offset) {
3420 HeapWord* addr = _start_word + (offset << _shifter);
3421 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
3422 return addr;
3423 }
3425 bool fields_well_formed() {
3426 bool ret1 = (_real_start_word == NULL) &&
3427 (_real_end_word == NULL) &&
3428 (_start_word == NULL);
3429 if (ret1)
3430 return true;
3432 bool ret2 = _real_start_word >= _start_word &&
3433 _start_word < _real_end_word &&
3434 (_real_start_word + _gclab_word_size) == _real_end_word &&
3435 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
3436 > _real_end_word;
3437 return ret2;
3438 }
3440 inline bool mark(HeapWord* addr) {
3441 guarantee(use_local_bitmaps, "invariant");
3442 assert(fields_well_formed(), "invariant");
3444 if (addr >= _real_start_word && addr < _real_end_word) {
3445 assert(!isMarked(addr), "should not have already been marked");
3447 // first mark it on the bitmap
3448 at_put(heapWordToOffset(addr), true);
3450 return true;
3451 } else {
3452 return false;
3453 }
3454 }
3456 inline bool isMarked(HeapWord* addr) {
3457 guarantee(use_local_bitmaps, "invariant");
3458 assert(fields_well_formed(), "invariant");
3460 return at(heapWordToOffset(addr));
3461 }
3463 void set_buffer(HeapWord* start) {
3464 guarantee(use_local_bitmaps, "invariant");
3465 clear();
3467 assert(start != NULL, "invariant");
3468 _real_start_word = start;
3469 _real_end_word = start + _gclab_word_size;
3471 size_t diff =
3472 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
3473 _start_word = start - diff;
3475 assert(fields_well_formed(), "invariant");
3476 }
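  // A sketch of the alignment above, using the same illustrative values as
  // before (BitsPerWord == 64, shifter == 0, so one bitmap word covers 64
  // heap words). Suppose _heap_start is word index 0x1000 and the GCLab
  // starts at word index 0x1023:
  //   diff        == (0x1023 - 0x1000) % 64 == 0x23
  //   _start_word == 0x1023 - 0x23          == 0x1000
  // so bit 0 of this bitmap lines up with a bitmap-word boundary relative
  // to the heap start, and heapWordToOffset(0x1023) == 0x23. The numbers
  // are purely illustrative.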
3478 #ifndef PRODUCT
3479 void verify() {
3480 // verify that the marks have been propagated
3481 GCLabBitMapClosure cl(_cm, this);
3482 iterate(&cl);
3483 }
3484 #endif // PRODUCT
3486 void retire() {
3487 guarantee(use_local_bitmaps, "invariant");
3488 assert(fields_well_formed(), "invariant");
3490 if (_start_word != NULL) {
3491 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
3493 // this means that the bitmap was set up for the GCLab
3494 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
3496 mark_bitmap->mostly_disjoint_range_union(this,
3497 0, // always start from the start of the bitmap
3498 _start_word,
3499 size_in_words());
3500 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
3502 #ifndef PRODUCT
3503 if (use_local_bitmaps && verify_local_bitmaps)
3504 verify();
3505 #endif // PRODUCT
3506 } else {
3507 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
3508 }
3509 }
3511 static size_t bitmap_size_in_words() {
3512 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
3513 }
3514 };
3516 #ifndef PRODUCT
3518 bool GCLabBitMapClosure::do_bit(size_t offset) {
3519 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
3520 guarantee(_cm->isMarked(oop(addr)), "it should be!");
3521 return true;
3522 }
3524 #endif // PRODUCT
3526 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
3527 private:
3528 bool _retired;
3529 bool _during_marking;
3530 GCLabBitMap _bitmap;
3532 public:
3533 G1ParGCAllocBuffer() :
3534 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize),
3535 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
3536 _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
3537 _retired(false)
3538 { }
3540 inline bool mark(HeapWord* addr) {
3541 guarantee(use_local_bitmaps, "invariant");
3542 assert(_during_marking, "invariant");
3543 return _bitmap.mark(addr);
3544 }
3546 inline void set_buf(HeapWord* buf) {
3547 if (use_local_bitmaps && _during_marking)
3548 _bitmap.set_buffer(buf);
3549 ParGCAllocBuffer::set_buf(buf);
3550 _retired = false;
3551 }
3553 inline void retire(bool end_of_gc, bool retain) {
3554 if (_retired)
3555 return;
3556 if (use_local_bitmaps && _during_marking) {
3557 _bitmap.retire();
3558 }
3559 ParGCAllocBuffer::retire(end_of_gc, retain);
3560 _retired = true;
3561 }
3562 };
3565 class G1ParScanThreadState : public StackObj {
3566 protected:
3567 G1CollectedHeap* _g1h;
3568 RefToScanQueue* _refs;
3569 DirtyCardQueue _dcq;
3570 CardTableModRefBS* _ct_bs;
3571 G1RemSet* _g1_rem;
3573 typedef GrowableArray<oop*> OverflowQueue;
3574 OverflowQueue* _overflowed_refs;
3576 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
3577 ageTable _age_table;
3579 size_t _alloc_buffer_waste;
3580 size_t _undo_waste;
3582 OopsInHeapRegionClosure* _evac_failure_cl;
3583 G1ParScanHeapEvacClosure* _evac_cl;
3584 G1ParScanPartialArrayClosure* _partial_scan_cl;
3586 int _hash_seed;
3587 int _queue_num;
3589 int _term_attempts;
3590 #if G1_DETAILED_STATS
3591 int _pushes, _pops, _steals, _steal_attempts;
3592 int _overflow_pushes;
3593 #endif
3595 double _start;
3596 double _start_strong_roots;
3597 double _strong_roots_time;
3598 double _start_term;
3599 double _term_time;
3601   // Map from young-age-index (0 == not young, 1 is youngest) to
3602   // surviving words. The base pointer is what we get back from the malloc call
3603 size_t* _surviving_young_words_base;
3604 // this points into the array, as we use the first few entries for padding
3605 size_t* _surviving_young_words;
3607 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
3609 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
3611 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
3613 DirtyCardQueue& dirty_card_queue() { return _dcq; }
3614 CardTableModRefBS* ctbs() { return _ct_bs; }
3616 void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
3617 _g1_rem->par_write_ref(from, p, tid);
3618 }
3620 void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
3621     // If the new value of the field points into the same region, or the
3622     // field is in the to-space (a survivor region), no RSet update is needed.
3623 if (!from->is_in_reserved(*p) && !from->is_survivor()) {
3624 size_t card_index = ctbs()->index_for(p);
3625 // If the card hasn't been added to the buffer, do it.
3626 if (ctbs()->mark_card_deferred(card_index)) {
3627 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
3628 }
3629 }
3630 }
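  // A hedged sketch of a deferred update's life-cycle (the calls below are
  // the ones visible in this file; the exact consumers live elsewhere):
  //
  //   update_rs(from, p, tid)
  //     -> deferred_rs_update(from, p, tid)        // when G1DeferredRSUpdate
  //          ctbs()->mark_card_deferred(card_index);
  //          dirty_card_queue().enqueue(card);     // per-worker DCQ
  //
  //   ... later, in evacuate_collection_set():
  //     the completed buffers are redirtied and merged into
  //     JavaThread::dirty_card_queue_set(), so the corresponding cards are
  //     rescanned (and the RSets updated) after the pause rather than
  //     synchronously during copying.
  //
  // Only a field whose new value points outside its own region, and whose
  // region is not a survivor, gets its card enqueued.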
3632 public:
3633 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3634 : _g1h(g1h),
3635 _refs(g1h->task_queue(queue_num)),
3636 _dcq(&g1h->dirty_card_queue_set()),
3637 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3638 _g1_rem(g1h->g1_rem_set()),
3639 _hash_seed(17), _queue_num(queue_num),
3640 _term_attempts(0),
3641 _age_table(false),
3642 #if G1_DETAILED_STATS
3643 _pushes(0), _pops(0), _steals(0),
3644 _steal_attempts(0), _overflow_pushes(0),
3645 #endif
3646 _strong_roots_time(0), _term_time(0),
3647 _alloc_buffer_waste(0), _undo_waste(0)
3648 {
3649     // we allocate one entry per young region in the cset, plus one, since
3650     // we "sacrifice" entry 0 to keep track of surviving bytes for
3651     // non-young regions (where the age is -1)
3652     // We also add a few padding elements at the beginning and at the end
3653     // in an attempt to eliminate cache contention
3654 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
3655 size_t array_length = PADDING_ELEM_NUM +
3656 real_length +
3657 PADDING_ELEM_NUM;
3658 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
3659 if (_surviving_young_words_base == NULL)
3660 vm_exit_out_of_memory(array_length * sizeof(size_t),
3661 "Not enough space for young surv histo.");
3662 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
3663 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
3665 _overflowed_refs = new OverflowQueue(10);
3667 _start = os::elapsedTime();
3668 }
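  // Layout of the surviving-young-words array allocated above, shown for
  // illustration with young_cset_length() == 4 and PADDING_ELEM_NUM == 8
  // (8 holds when sizeof(size_t) == 8; both numbers are illustrative):
  //
  //   _surviving_young_words_base
  //   |
  //   [ 8 pad ][ idx 0: non-young ][ idx 1..4: young regions ][ 8 pad ]
  //             ^
  //             _surviving_young_words
  //
  // The padding on both sides is only there to keep different workers'
  // arrays on different cache lines.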
3670 ~G1ParScanThreadState() {
3671 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
3672 }
3674 RefToScanQueue* refs() { return _refs; }
3675 OverflowQueue* overflowed_refs() { return _overflowed_refs; }
3676 ageTable* age_table() { return &_age_table; }
3678 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
3679 return &_alloc_buffers[purpose];
3680 }
3682 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
3683 size_t undo_waste() { return _undo_waste; }
3685 void push_on_queue(oop* ref) {
3686 assert(ref != NULL, "invariant");
3687 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
3689 if (!refs()->push(ref)) {
3690 overflowed_refs()->push(ref);
3691 IF_G1_DETAILED_STATS(note_overflow_push());
3692 } else {
3693 IF_G1_DETAILED_STATS(note_push());
3694 }
3695 }
3697 void pop_from_queue(oop*& ref) {
3698 if (!refs()->pop_local(ref)) {
3699 ref = NULL;
3700 } else {
3701 assert(ref != NULL, "invariant");
3702 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
3703 "invariant");
3705 IF_G1_DETAILED_STATS(note_pop());
3706 }
3707 }
3709 void pop_from_overflow_queue(oop*& ref) {
3710 ref = overflowed_refs()->pop();
3711 }
3713 int refs_to_scan() { return refs()->size(); }
3714 int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
3716 void update_rs(HeapRegion* from, oop* p, int tid) {
3717 if (G1DeferredRSUpdate) {
3718 deferred_rs_update(from, p, tid);
3719 } else {
3720 immediate_rs_update(from, p, tid);
3721 }
3722 }
3724 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
3726 HeapWord* obj = NULL;
3727 if (word_sz * 100 <
3728 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) *
3729 ParallelGCBufferWastePct) {
3730 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
3731 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
3732 alloc_buf->retire(false, false);
3734 HeapWord* buf =
3735 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize);
3736 if (buf == NULL) return NULL; // Let caller handle allocation failure.
3737 // Otherwise.
3738 alloc_buf->set_buf(buf);
3740 obj = alloc_buf->allocate(word_sz);
3741 assert(obj != NULL, "buffer was definitely big enough...");
3742 } else {
3743 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
3744 }
3745 return obj;
3746 }
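  // The size test above is the usual PLAB "waste" heuristic. As a worked
  // example (the numbers are illustrative assumptions, not defaults): with
  // ParallelGCG1AllocBufferSize == 16K, HeapWordSize == 8 and
  // ParallelGCBufferWastePct == 10, the buffer holds 2048 words, so
  //   word_sz * 100 < 2048 * 10   <=>   word_sz < ~205 words.
  // Smaller requests retire the current buffer (counting whatever is left
  // in it as waste) and refill it; larger ones go straight to
  // par_allocate_during_gc() so a single big object cannot waste most of a
  // fresh buffer.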
3748 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
3749 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
3750 if (obj != NULL) return obj;
3751 return allocate_slow(purpose, word_sz);
3752 }
3754 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
3755 if (alloc_buffer(purpose)->contains(obj)) {
3756 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
3757 "should contain whole object");
3758 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
3759 } else {
3760 CollectedHeap::fill_with_object(obj, word_sz);
3761 add_to_undo_waste(word_sz);
3762 }
3763 }
3765 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
3766 _evac_failure_cl = evac_failure_cl;
3767 }
3768 OopsInHeapRegionClosure* evac_failure_closure() {
3769 return _evac_failure_cl;
3770 }
3772 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
3773 _evac_cl = evac_cl;
3774 }
3776 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
3777 _partial_scan_cl = partial_scan_cl;
3778 }
3780 int* hash_seed() { return &_hash_seed; }
3781 int queue_num() { return _queue_num; }
3783 int term_attempts() { return _term_attempts; }
3784 void note_term_attempt() { _term_attempts++; }
3786 #if G1_DETAILED_STATS
3787 int pushes() { return _pushes; }
3788 int pops() { return _pops; }
3789 int steals() { return _steals; }
3790 int steal_attempts() { return _steal_attempts; }
3791 int overflow_pushes() { return _overflow_pushes; }
3793 void note_push() { _pushes++; }
3794 void note_pop() { _pops++; }
3795 void note_steal() { _steals++; }
3796 void note_steal_attempt() { _steal_attempts++; }
3797 void note_overflow_push() { _overflow_pushes++; }
3798 #endif
3800 void start_strong_roots() {
3801 _start_strong_roots = os::elapsedTime();
3802 }
3803 void end_strong_roots() {
3804 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
3805 }
3806 double strong_roots_time() { return _strong_roots_time; }
3808 void start_term_time() {
3809 note_term_attempt();
3810 _start_term = os::elapsedTime();
3811 }
3812 void end_term_time() {
3813 _term_time += (os::elapsedTime() - _start_term);
3814 }
3815 double term_time() { return _term_time; }
3817 double elapsed() {
3818 return os::elapsedTime() - _start;
3819 }
3821 size_t* surviving_young_words() {
3822     // Callers add one to skip entry 0, which accumulates surviving words
3823     // for age -1 regions (i.e. non-young ones)
3824 return _surviving_young_words;
3825 }
3827 void retire_alloc_buffers() {
3828 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3829 size_t waste = _alloc_buffers[ap].words_remaining();
3830 add_to_alloc_buffer_waste(waste);
3831 _alloc_buffers[ap].retire(true, false);
3832 }
3833 }
3835 private:
3836 void deal_with_reference(oop* ref_to_scan) {
3837 if (has_partial_array_mask(ref_to_scan)) {
3838 _partial_scan_cl->do_oop_nv(ref_to_scan);
3839 } else {
3840       // Note: we can use the "raw" version of "region_containing" because
3841       // "ref_to_scan" is definitely in the heap, and is not in a
3842       // humongous region.
3843 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
3844 _evac_cl->set_region(r);
3845 _evac_cl->do_oop_nv(ref_to_scan);
3846 }
3847 }
3849 public:
3850 void trim_queue() {
3851     // The loop is written out twice: first to drain the overflow
3852     // queue, second to drain the task queue. This is better than
3853 // having a single loop, which checks both conditions and, inside
3854 // it, either pops the overflow queue or the task queue, as each
3855 // loop is tighter. Also, the decision to drain the overflow queue
3856 // first is not arbitrary, as the overflow queue is not visible
3857 // to the other workers, whereas the task queue is. So, we want to
3858 // drain the "invisible" entries first, while allowing the other
3859 // workers to potentially steal the "visible" entries.
3861 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
3862 while (overflowed_refs_to_scan() > 0) {
3863 oop *ref_to_scan = NULL;
3864 pop_from_overflow_queue(ref_to_scan);
3865 assert(ref_to_scan != NULL, "invariant");
3866 // We shouldn't have pushed it on the queue if it was not
3867 // pointing into the CSet.
3868 assert(ref_to_scan != NULL, "sanity");
3869 assert(has_partial_array_mask(ref_to_scan) ||
3870 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3872 deal_with_reference(ref_to_scan);
3873 }
3875 while (refs_to_scan() > 0) {
3876 oop *ref_to_scan = NULL;
3877 pop_from_queue(ref_to_scan);
3879 if (ref_to_scan != NULL) {
3880 // We shouldn't have pushed it on the queue if it was not
3881 // pointing into the CSet.
3882 assert(has_partial_array_mask(ref_to_scan) ||
3883 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3885 deal_with_reference(ref_to_scan);
3886 }
3887 }
3888 }
3889 }
3890 };
3892 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
3893 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
3894 _par_scan_state(par_scan_state) { }
3896 // This closure is applied to the fields of the objects that have just been copied.
3897 // Should probably be made inline and moved into g1OopClosures.inline.hpp.
3898 void G1ParScanClosure::do_oop_nv(oop* p) {
3899 oop obj = *p;
3901 if (obj != NULL) {
3902 if (_g1->in_cset_fast_test(obj)) {
3903 // We're not going to even bother checking whether the object is
3904 // already forwarded or not, as this usually causes an immediate
3905 // stall. We'll try to prefetch the object (for write, given that
3906 // we might need to install the forwarding reference) and we'll
3907       // get back to it when we pop it from the queue
3908 Prefetch::write(obj->mark_addr(), 0);
3909 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
3911 // slightly paranoid test; I'm trying to catch potential
3912 // problems before we go into push_on_queue to know where the
3913 // problem is coming from
3914 assert(obj == *p, "the value of *p should not have changed");
3915 _par_scan_state->push_on_queue(p);
3916 } else {
3917 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
3918 }
3919 }
3920 }
3922 void G1ParCopyHelper::mark_forwardee(oop* p) {
3923 // This is called _after_ do_oop_work has been called, hence after
3924 // the object has been relocated to its new location and *p points
3925 // to its new location.
3927 oop thisOop = *p;
3928 if (thisOop != NULL) {
3929 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
3930 "shouldn't still be in the CSet if evacuation didn't fail.");
3931 HeapWord* addr = (HeapWord*)thisOop;
3932 if (_g1->is_in_g1_reserved(addr))
3933 _cm->grayRoot(oop(addr));
3934 }
3935 }
3937 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
3938 size_t word_sz = old->size();
3939 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
3940 // +1 to make the -1 indexes valid...
3941 int young_index = from_region->young_index_in_cset()+1;
3942 assert( (from_region->is_young() && young_index > 0) ||
3943 (!from_region->is_young() && young_index == 0), "invariant" );
3944 G1CollectorPolicy* g1p = _g1->g1_policy();
3945 markOop m = old->mark();
3946 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
3947 : m->age();
3948 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
3949 word_sz);
3950 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
3951 oop obj = oop(obj_ptr);
3953 if (obj_ptr == NULL) {
3954 // This will either forward-to-self, or detect that someone else has
3955 // installed a forwarding pointer.
3956 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
3957 return _g1->handle_evacuation_failure_par(cl, old);
3958 }
3960 // We're going to allocate linearly, so might as well prefetch ahead.
3961 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
3963 oop forward_ptr = old->forward_to_atomic(obj);
3964 if (forward_ptr == NULL) {
3965 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
3966 if (g1p->track_object_age(alloc_purpose)) {
3967 // We could simply do obj->incr_age(). However, this causes a
3968 // performance issue. obj->incr_age() will first check whether
3969 // the object has a displaced mark by checking its mark word;
3970 // getting the mark word from the new location of the object
3971 // stalls. So, given that we already have the mark word and we
3972 // are about to install it anyway, it's better to increase the
3973 // age on the mark word, when the object does not have a
3974 // displaced mark word. We're not expecting many objects to have
3975       // a displaced mark word, so that case is not optimized
3976 // further (it could be...) and we simply call obj->incr_age().
3978 if (m->has_displaced_mark_helper()) {
3979 // in this case, we have to install the mark word first,
3980 // otherwise obj looks to be forwarded (the old mark word,
3981 // which contains the forward pointer, was copied)
3982 obj->set_mark(m);
3983 obj->incr_age();
3984 } else {
3985 m = m->incr_age();
3986 obj->set_mark(m);
3987 }
3988 _par_scan_state->age_table()->add(obj, word_sz);
3989 } else {
3990 obj->set_mark(m);
3991 }
3993 // preserve "next" mark bit
3994 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
3995 if (!use_local_bitmaps ||
3996 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
3997 // if we couldn't mark it on the local bitmap (this happens when
3998 // the object was not allocated in the GCLab), we have to bite
3999 // the bullet and do the standard parallel mark
4000 _cm->markAndGrayObjectIfNecessary(obj);
4001 }
4002 #if 1
4003 if (_g1->isMarkedNext(old)) {
4004 _cm->nextMarkBitMap()->parClear((HeapWord*)old);
4005 }
4006 #endif
4007 }
4009 size_t* surv_young_words = _par_scan_state->surviving_young_words();
4010 surv_young_words[young_index] += word_sz;
4012 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4013 arrayOop(old)->set_length(0);
4014 _par_scan_state->push_on_queue(set_partial_array_mask(old));
4015 } else {
4016 // No point in using the slower heap_region_containing() method,
4017 // given that we know obj is in the heap.
4018 _scanner->set_region(_g1->heap_region_containing_raw(obj));
4019 obj->oop_iterate_backwards(_scanner);
4020 }
4021 } else {
4022 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4023 obj = forward_ptr;
4024 }
4025 return obj;
4026 }
4028 template<bool do_gen_barrier, G1Barrier barrier,
4029 bool do_mark_forwardee, bool skip_cset_test>
4030 void G1ParCopyClosure<do_gen_barrier, barrier,
4031 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
4032 oop obj = *p;
4033 assert(barrier != G1BarrierRS || obj != NULL,
4034 "Precondition: G1BarrierRS implies obj is nonNull");
4036 // The only time we skip the cset test is when we're scanning
4037 // references popped from the queue. And we only push on the queue
4038 // references that we know point into the cset, so no point in
4039 // checking again. But we'll leave an assert here for peace of mind.
4040 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
4042   // here the null check is implicit in the in_cset_fast_test() call
4043 if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
4044 #if G1_REM_SET_LOGGING
4045 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
4046 "into CS.", p, (void*) obj);
4047 #endif
4048 if (obj->is_forwarded()) {
4049 *p = obj->forwardee();
4050 } else {
4051 *p = copy_to_survivor_space(obj);
4052 }
4053 // When scanning the RS, we only care about objs in CS.
4054 if (barrier == G1BarrierRS) {
4055 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4056 }
4057 }
4059 // When scanning moved objs, must look at all oops.
4060 if (barrier == G1BarrierEvac && obj != NULL) {
4061 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4062 }
4064 if (do_gen_barrier && obj != NULL) {
4065 par_do_barrier(p);
4066 }
4067 }
4069 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
4071 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
4072 oop obj, int start, int end) {
4073 // process our set of indices (include header in first chunk)
4074 assert(start < end, "invariant");
4075 T* const base = (T*)objArrayOop(obj)->base();
4076 T* const start_addr = (start == 0) ? (T*) obj : base + start;
4077 T* const end_addr = base + end;
4078 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
4079 _scanner.set_region(_g1->heap_region_containing(obj));
4080 obj->oop_iterate(&_scanner, mr);
4081 }
4083 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
4084 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
4085 assert(has_partial_array_mask(p), "invariant");
4086 oop old = clear_partial_array_mask(p);
4087 assert(old->is_objArray(), "must be obj array");
4088 assert(old->is_forwarded(), "must be forwarded");
4089 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
4091 objArrayOop obj = objArrayOop(old->forwardee());
4092 assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
4093 // Process ParGCArrayScanChunk elements now
4094 // and push the remainder back onto queue
4095 int start = arrayOop(old)->length();
4096 int end = obj->length();
4097 int remainder = end - start;
4098 assert(start <= end, "just checking");
4099 if (remainder > 2 * ParGCArrayScanChunk) {
4100 // Test above combines last partial chunk with a full chunk
4101 end = start + ParGCArrayScanChunk;
4102 arrayOop(old)->set_length(end);
4103 // Push remainder.
4104 _par_scan_state->push_on_queue(set_partial_array_mask(old));
4105 } else {
4106 // Restore length so that the heap remains parsable in
4107 // case of evacuation failure.
4108 arrayOop(old)->set_length(end);
4109 }
4111 // process our set of indices (include header in first chunk)
4112 process_array_chunk<oop>(obj, start, end);
4113 }
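// A hedged sketch of how the chunking above walks a large array. Assume,
// purely for illustration, ParGCArrayScanChunk == 50 and an objArray of
// length 1000 (already forwarded, so "old" is the from-space copy and
// "obj" the to-space copy):
//
//   push: old->set_length(0);  queue <- set_partial_array_mask(old)
//   pop : start = 0,   end = 50,  old->set_length(50),  repush, scan [0,50)
//   pop : start = 50,  end = 100, old->set_length(100), repush, scan [50,100)
//   ...
//   pop : start = 900, end = 1000 (remainder 100 <= 2*50), restore length,
//         scan [900,1000) -- the last partial chunk is merged with the
//         final full one.
//
// The from-space copy's length field is reused as the progress cursor,
// which is why it is restored before the final chunk is processed.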
4115 int G1ScanAndBalanceClosure::_nq = 0;
4117 class G1ParEvacuateFollowersClosure : public VoidClosure {
4118 protected:
4119 G1CollectedHeap* _g1h;
4120 G1ParScanThreadState* _par_scan_state;
4121 RefToScanQueueSet* _queues;
4122 ParallelTaskTerminator* _terminator;
4124 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4125 RefToScanQueueSet* queues() { return _queues; }
4126 ParallelTaskTerminator* terminator() { return _terminator; }
4128 public:
4129 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4130 G1ParScanThreadState* par_scan_state,
4131 RefToScanQueueSet* queues,
4132 ParallelTaskTerminator* terminator)
4133 : _g1h(g1h), _par_scan_state(par_scan_state),
4134 _queues(queues), _terminator(terminator) {}
4136 void do_void() {
4137 G1ParScanThreadState* pss = par_scan_state();
4138 while (true) {
4139 oop* ref_to_scan;
4140 pss->trim_queue();
4141 IF_G1_DETAILED_STATS(pss->note_steal_attempt());
4142 if (queues()->steal(pss->queue_num(),
4143 pss->hash_seed(),
4144 ref_to_scan)) {
4145 IF_G1_DETAILED_STATS(pss->note_steal());
4147 // slightly paranoid tests; I'm trying to catch potential
4148 // problems before we go into push_on_queue to know where the
4149 // problem is coming from
4150 assert(ref_to_scan != NULL, "invariant");
4151 assert(has_partial_array_mask(ref_to_scan) ||
4152 _g1h->obj_in_cs(*ref_to_scan), "invariant");
4153 pss->push_on_queue(ref_to_scan);
4154 continue;
4155 }
4156 pss->start_term_time();
4157 if (terminator()->offer_termination()) break;
4158 pss->end_term_time();
4159 }
4160 pss->end_term_time();
4161 pss->retire_alloc_buffers();
4162 }
4163 };
4165 class G1ParTask : public AbstractGangTask {
4166 protected:
4167 G1CollectedHeap* _g1h;
4168 RefToScanQueueSet *_queues;
4169 ParallelTaskTerminator _terminator;
4171 Mutex _stats_lock;
4172 Mutex* stats_lock() { return &_stats_lock; }
4174 size_t getNCards() {
4175 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4176 / G1BlockOffsetSharedArray::N_bytes;
4177 }
4179 public:
4180 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
4181 : AbstractGangTask("G1 collection"),
4182 _g1h(g1h),
4183 _queues(task_queues),
4184 _terminator(workers, _queues),
4185 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4186 {}
4188 RefToScanQueueSet* queues() { return _queues; }
4190 RefToScanQueue *work_queue(int i) {
4191 return queues()->queue(i);
4192 }
4194 void work(int i) {
4195 ResourceMark rm;
4196 HandleMark hm;
4198 G1ParScanThreadState pss(_g1h, i);
4199 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
4200 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
4201 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
4203 pss.set_evac_closure(&scan_evac_cl);
4204 pss.set_evac_failure_closure(&evac_failure_cl);
4205 pss.set_partial_scan_closure(&partial_scan_cl);
4207 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
4208 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
4209 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
4211 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
4212 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
4213 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
4215 OopsInHeapRegionClosure *scan_root_cl;
4216 OopsInHeapRegionClosure *scan_perm_cl;
4217 OopsInHeapRegionClosure *scan_so_cl;
4219 if (_g1h->g1_policy()->should_initiate_conc_mark()) {
4220 scan_root_cl = &scan_mark_root_cl;
4221 scan_perm_cl = &scan_mark_perm_cl;
4222 scan_so_cl = &scan_mark_heap_rs_cl;
4223 } else {
4224 scan_root_cl = &only_scan_root_cl;
4225 scan_perm_cl = &only_scan_perm_cl;
4226 scan_so_cl = &only_scan_heap_rs_cl;
4227 }
4229 pss.start_strong_roots();
4230 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4231 SharedHeap::SO_AllClasses,
4232 scan_root_cl,
4233 &only_scan_heap_rs_cl,
4234 scan_so_cl,
4235 scan_perm_cl,
4236 i);
4237 pss.end_strong_roots();
4238 {
4239 double start = os::elapsedTime();
4240 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4241 evac.do_void();
4242 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4243 double term_ms = pss.term_time()*1000.0;
4244 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
4245 _g1h->g1_policy()->record_termination_time(i, term_ms);
4246 }
4247 if (G1UseSurvivorSpace) {
4248 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4249 }
4250 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4252 // Clean up any par-expanded rem sets.
4253 HeapRegionRemSet::par_cleanup();
4255 MutexLocker x(stats_lock());
4256 if (ParallelGCVerbose) {
4257 gclog_or_tty->print("Thread %d complete:\n", i);
4258 #if G1_DETAILED_STATS
4259 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n",
4260 pss.pushes(),
4261 pss.pops(),
4262 pss.overflow_pushes(),
4263 pss.steals(),
4264 pss.steal_attempts());
4265 #endif
4266 double elapsed = pss.elapsed();
4267 double strong_roots = pss.strong_roots_time();
4268 double term = pss.term_time();
4269 gclog_or_tty->print(" Elapsed: %7.2f ms.\n"
4270 " Strong roots: %7.2f ms (%6.2f%%)\n"
4271 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n",
4272 elapsed * 1000.0,
4273 strong_roots * 1000.0, (strong_roots*100.0/elapsed),
4274 term * 1000.0, (term*100.0/elapsed),
4275 pss.term_attempts());
4276 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
4277 gclog_or_tty->print(" Waste: %8dK\n"
4278 " Alloc Buffer: %8dK\n"
4279 " Undo: %8dK\n",
4280 (total_waste * HeapWordSize) / K,
4281 (pss.alloc_buffer_waste() * HeapWordSize) / K,
4282 (pss.undo_waste() * HeapWordSize) / K);
4283 }
4285 assert(pss.refs_to_scan() == 0, "Task queue should be empty");
4286 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
4287 }
4288 };
4290 // *** Common G1 Evacuation Stuff
4292 class G1CountClosure: public OopsInHeapRegionClosure {
4293 public:
4294 int n;
4295 G1CountClosure() : n(0) {}
4296 void do_oop(narrowOop* p) {
4297 guarantee(false, "NYI");
4298 }
4299 void do_oop(oop* p) {
4300 oop obj = *p;
4301 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
4302 "Rem set closure called on non-rem-set pointer.");
4303 n++;
4304 }
4305 };
4307 static G1CountClosure count_closure;
4309 void
4310 G1CollectedHeap::
4311 g1_process_strong_roots(bool collecting_perm_gen,
4312 SharedHeap::ScanningOption so,
4313 OopClosure* scan_non_heap_roots,
4314 OopsInHeapRegionClosure* scan_rs,
4315 OopsInHeapRegionClosure* scan_so,
4316 OopsInGenClosure* scan_perm,
4317 int worker_i) {
4318 // First scan the strong roots, including the perm gen.
4319 double ext_roots_start = os::elapsedTime();
4320 double closure_app_time_sec = 0.0;
4322 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4323 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4324 buf_scan_perm.set_generation(perm_gen());
4326 process_strong_roots(collecting_perm_gen, so,
4327 &buf_scan_non_heap_roots,
4328 &buf_scan_perm);
4329 // Finish up any enqueued closure apps.
4330 buf_scan_non_heap_roots.done();
4331 buf_scan_perm.done();
4332 double ext_roots_end = os::elapsedTime();
4333 g1_policy()->reset_obj_copy_time(worker_i);
4334 double obj_copy_time_sec =
4335 buf_scan_non_heap_roots.closure_app_seconds() +
4336 buf_scan_perm.closure_app_seconds();
4337 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4338 double ext_root_time_ms =
4339 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4340 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4342 // Scan strong roots in mark stack.
4343 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
4344 concurrent_mark()->oops_do(scan_non_heap_roots);
4345 }
4346 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4347 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
4349 // XXX What should this be doing in the parallel case?
4350 g1_policy()->record_collection_pause_end_CH_strong_roots();
4351 if (G1VerifyRemSet) {
4352 // :::: FIXME ::::
4353     // The stupid remembered set doesn't know how to filter out dead
4354     // objects (the smart one does), so when the two are built and
4355     // compared, the number of entries in each differs and the
4356     // verification code fails.
4357 guarantee(false, "verification code is broken, see note");
4359 // Let's make sure that the current rem set agrees with the stupidest
4360 // one possible!
4361 bool refs_enabled = ref_processor()->discovery_enabled();
4362 if (refs_enabled) ref_processor()->disable_discovery();
4363 StupidG1RemSet stupid(this);
4364 count_closure.n = 0;
4365 stupid.oops_into_collection_set_do(&count_closure, worker_i);
4366 int stupid_n = count_closure.n;
4367 count_closure.n = 0;
4368 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i);
4369 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ.");
4370 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n);
4371 if (refs_enabled) ref_processor()->enable_discovery();
4372 }
4373 if (scan_so != NULL) {
4374 scan_scan_only_set(scan_so, worker_i);
4375 }
4376 // Now scan the complement of the collection set.
4377 if (scan_rs != NULL) {
4378 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4379 }
4380 // Finish with the ref_processor roots.
4381 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4382 ref_processor()->oops_do(scan_non_heap_roots);
4383 }
4384 g1_policy()->record_collection_pause_end_G1_strong_roots();
4385 _process_strong_tasks->all_tasks_completed();
4386 }
4388 void
4389 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
4390 OopsInHeapRegionClosure* oc,
4391 int worker_i) {
4392 HeapWord* startAddr = r->bottom();
4393 HeapWord* endAddr = r->used_region().end();
4395 oc->set_region(r);
4397 HeapWord* p = r->bottom();
4398 HeapWord* t = r->top();
4399 guarantee( p == r->next_top_at_mark_start(), "invariant" );
4400 while (p < t) {
4401 oop obj = oop(p);
4402 p += obj->oop_iterate(oc);
4403 }
4404 }
4406 void
4407 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
4408 int worker_i) {
4409 double start = os::elapsedTime();
4411 BufferingOopsInHeapRegionClosure boc(oc);
4413 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
4414 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
4416 OopsInHeapRegionClosure *foc;
4417 if (g1_policy()->should_initiate_conc_mark())
4418 foc = &scan_and_mark;
4419 else
4420 foc = &scan_only;
4422 HeapRegion* hr;
4423 int n = 0;
4424 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
4425 scan_scan_only_region(hr, foc, worker_i);
4426 ++n;
4427 }
4428 boc.done();
4430 double closure_app_s = boc.closure_app_seconds();
4431 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
4432 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
4433 g1_policy()->record_scan_only_time(worker_i, ms, n);
4434 }
4436 void
4437 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4438 OopClosure* non_root_closure) {
4439 SharedHeap::process_weak_roots(root_closure, non_root_closure);
4440 }
4443 class SaveMarksClosure: public HeapRegionClosure {
4444 public:
4445 bool doHeapRegion(HeapRegion* r) {
4446 r->save_marks();
4447 return false;
4448 }
4449 };
4451 void G1CollectedHeap::save_marks() {
4452 if (ParallelGCThreads == 0) {
4453 SaveMarksClosure sm;
4454 heap_region_iterate(&sm);
4455 }
4456 // We do this even in the parallel case
4457 perm_gen()->save_marks();
4458 }
4460 void G1CollectedHeap::evacuate_collection_set() {
4461 set_evacuation_failed(false);
4463 g1_rem_set()->prepare_for_oops_into_collection_set_do();
4464 concurrent_g1_refine()->set_use_cache(false);
4465 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
4466 set_par_threads(n_workers);
4467 G1ParTask g1_par_task(this, n_workers, _task_queues);
4469 init_for_evac_failure(NULL);
4471 change_strong_roots_parity(); // In preparation for parallel strong roots.
4472 rem_set()->prepare_for_younger_refs_iterate(true);
4474 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4475 double start_par = os::elapsedTime();
4476 if (ParallelGCThreads > 0) {
4477 // The individual threads will set their evac-failure closures.
4478 workers()->run_task(&g1_par_task);
4479 } else {
4480 g1_par_task.work(0);
4481 }
4483 double par_time = (os::elapsedTime() - start_par) * 1000.0;
4484 g1_policy()->record_par_time(par_time);
4485 set_par_threads(0);
4486 // Is this the right thing to do here? We don't save marks
4487 // on individual heap regions when we allocate from
4488 // them in parallel, so this seems like the correct place for this.
4489 retire_all_alloc_regions();
4490 {
4491 G1IsAliveClosure is_alive(this);
4492 G1KeepAliveClosure keep_alive(this);
4493 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4494 }
4495 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4497 concurrent_g1_refine()->set_use_cache(true);
4499 finalize_for_evac_failure();
4501 // Must do this before removing self-forwarding pointers, which clears
4502 // the per-region evac-failure flags.
4503 concurrent_mark()->complete_marking_in_collection_set();
4505 if (evacuation_failed()) {
4506 remove_self_forwarding_pointers();
4507 if (PrintGCDetails) {
4508 gclog_or_tty->print(" (evacuation failed)");
4509 } else if (PrintGC) {
4510 gclog_or_tty->print("--");
4511 }
4512 }
4514 if (G1DeferredRSUpdate) {
4515 RedirtyLoggedCardTableEntryFastClosure redirty;
4516 dirty_card_queue_set().set_closure(&redirty);
4517 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
4518 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
4519 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
4520 }
4522 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
4523 }
4525 void G1CollectedHeap::free_region(HeapRegion* hr) {
4526 size_t pre_used = 0;
4527 size_t cleared_h_regions = 0;
4528 size_t freed_regions = 0;
4529 UncleanRegionList local_list;
4531 HeapWord* start = hr->bottom();
4532 HeapWord* end = hr->prev_top_at_mark_start();
4533 size_t used_bytes = hr->used();
4534 size_t live_bytes = hr->max_live_bytes();
4535 if (used_bytes > 0) {
4536 guarantee( live_bytes <= used_bytes, "invariant" );
4537 } else {
4538 guarantee( live_bytes == 0, "invariant" );
4539 }
4541 size_t garbage_bytes = used_bytes - live_bytes;
4542 if (garbage_bytes > 0)
4543 g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
4545 free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
4546 &local_list);
4547 finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
4548 &local_list);
4549 }
4551 void
4552 G1CollectedHeap::free_region_work(HeapRegion* hr,
4553 size_t& pre_used,
4554 size_t& cleared_h_regions,
4555 size_t& freed_regions,
4556 UncleanRegionList* list,
4557 bool par) {
4558 assert(!hr->popular(), "should not free popular regions");
4559 pre_used += hr->used();
4560 if (hr->isHumongous()) {
4561 assert(hr->startsHumongous(),
4562 "Only the start of a humongous region should be freed.");
4563 int ind = _hrs->find(hr);
4564 assert(ind != -1, "Should have an index.");
4565 // Clear the start region.
4566 hr->hr_clear(par, true /*clear_space*/);
4567 list->insert_before_head(hr);
4568 cleared_h_regions++;
4569 freed_regions++;
4570 // Clear any continued regions.
4571 ind++;
4572 while ((size_t)ind < n_regions()) {
4573 HeapRegion* hrc = _hrs->at(ind);
4574 if (!hrc->continuesHumongous()) break;
4575       // Otherwise, it continues the humongous region.
4576 assert(hrc->humongous_start_region() == hr, "Huh?");
4577 hrc->hr_clear(par, true /*clear_space*/);
4578 cleared_h_regions++;
4579 freed_regions++;
4580 list->insert_before_head(hrc);
4581 ind++;
4582 }
4583 } else {
4584 hr->hr_clear(par, true /*clear_space*/);
4585 list->insert_before_head(hr);
4586 freed_regions++;
4587 // If we're using clear2, this should not be enabled.
4588 // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
4589 }
4590 }
4592 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
4593 size_t cleared_h_regions,
4594 size_t freed_regions,
4595 UncleanRegionList* list) {
4596 if (list != NULL && list->sz() > 0) {
4597 prepend_region_list_on_unclean_list(list);
4598 }
4599 // Acquire a lock, if we're parallel, to update possibly-shared
4600 // variables.
4601 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
4602 {
4603 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
4604 _summary_bytes_used -= pre_used;
4605 _num_humongous_regions -= (int) cleared_h_regions;
4606 _free_regions += freed_regions;
4607 }
4608 }
4611 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
4612 while (list != NULL) {
4613 guarantee( list->is_young(), "invariant" );
4615 HeapWord* bottom = list->bottom();
4616 HeapWord* end = list->end();
4617 MemRegion mr(bottom, end);
4618 ct_bs->dirty(mr);
4620 list = list->get_next_young_region();
4621 }
4622 }
4624 void G1CollectedHeap::cleanUpCardTable() {
4625 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
4626 double start = os::elapsedTime();
4628 ct_bs->clear(_g1_committed);
4630 // now, redirty the cards of the scan-only and survivor regions
4631 // (it seemed faster to do it this way, instead of iterating over
4632   // all regions and then clearing / dirtying as appropriate)
4633 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
4634 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
4636 double elapsed = os::elapsedTime() - start;
4637 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4638 }
4641 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4642 // First do any popular regions.
4643 HeapRegion* hr;
4644 while ((hr = popular_region_to_evac()) != NULL) {
4645 evac_popular_region(hr);
4646 }
4647 // Now do heuristic pauses.
4648 if (g1_policy()->should_do_collection_pause(word_size)) {
4649 do_collection_pause();
4650 }
4651 }
4653 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4654 double young_time_ms = 0.0;
4655 double non_young_time_ms = 0.0;
4657 G1CollectorPolicy* policy = g1_policy();
4659 double start_sec = os::elapsedTime();
4660 bool non_young = true;
4662 HeapRegion* cur = cs_head;
4663 int age_bound = -1;
4664 size_t rs_lengths = 0;
4666 while (cur != NULL) {
4667 if (non_young) {
4668 if (cur->is_young()) {
4669 double end_sec = os::elapsedTime();
4670 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4671 non_young_time_ms += elapsed_ms;
4673 start_sec = os::elapsedTime();
4674 non_young = false;
4675 }
4676 } else {
4677 if (!cur->is_on_free_list()) {
4678 double end_sec = os::elapsedTime();
4679 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4680 young_time_ms += elapsed_ms;
4682 start_sec = os::elapsedTime();
4683 non_young = true;
4684 }
4685 }
4687 rs_lengths += cur->rem_set()->occupied();
4689 HeapRegion* next = cur->next_in_collection_set();
4690 assert(cur->in_collection_set(), "bad CS");
4691 cur->set_next_in_collection_set(NULL);
4692 cur->set_in_collection_set(false);
4694 if (cur->is_young()) {
4695 int index = cur->young_index_in_cset();
4696 guarantee( index != -1, "invariant" );
4697 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
4698 size_t words_survived = _surviving_young_words[index];
4699 cur->record_surv_words_in_group(words_survived);
4700 } else {
4701 int index = cur->young_index_in_cset();
4702 guarantee( index == -1, "invariant" );
4703 }
4705 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
4706 (!cur->is_young() && cur->young_index_in_cset() == -1),
4707 "invariant" );
4709 if (!cur->evacuation_failed()) {
4710       // The region was evacuated successfully; it must not be empty.
4711 assert(!cur->is_empty(),
4712 "Should not have empty regions in a CS.");
4713 free_region(cur);
4714 } else {
4715 guarantee( !cur->is_scan_only(), "should not be scan only" );
4716 cur->uninstall_surv_rate_group();
4717 if (cur->is_young())
4718 cur->set_young_index_in_cset(-1);
4719 cur->set_not_young();
4720 cur->set_evacuation_failed(false);
4721 }
4722 cur = next;
4723 }
4725 policy->record_max_rs_lengths(rs_lengths);
4726 policy->cset_regions_freed();
4728 double end_sec = os::elapsedTime();
4729 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4730 if (non_young)
4731 non_young_time_ms += elapsed_ms;
4732 else
4733 young_time_ms += elapsed_ms;
4735 policy->record_young_free_cset_time_ms(young_time_ms);
4736 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
4737 }
4739 HeapRegion*
4740 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
4741 assert(ZF_mon->owned_by_self(), "Precondition");
4742 HeapRegion* res = pop_unclean_region_list_locked();
4743 if (res != NULL) {
4744 assert(!res->continuesHumongous() &&
4745 res->zero_fill_state() != HeapRegion::Allocated,
4746 "Only free regions on unclean list.");
4747 if (zero_filled) {
4748 res->ensure_zero_filled_locked();
4749 res->set_zero_fill_allocated();
4750 }
4751 }
4752 return res;
4753 }
4755 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
4756 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
4757 return alloc_region_from_unclean_list_locked(zero_filled);
4758 }
4760 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
4761 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4762 put_region_on_unclean_list_locked(r);
4763 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4764 }
4766 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
4767 MutexLockerEx x(Cleanup_mon);
4768 set_unclean_regions_coming_locked(b);
4769 }
4771 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
4772 assert(Cleanup_mon->owned_by_self(), "Precondition");
4773 _unclean_regions_coming = b;
4774 // Wake up mutator threads that might be waiting for completeCleanup to
4775 // finish.
4776 if (!b) Cleanup_mon->notify_all();
4777 }
4779 void G1CollectedHeap::wait_for_cleanup_complete() {
4780 MutexLockerEx x(Cleanup_mon);
4781 wait_for_cleanup_complete_locked();
4782 }
4784 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
4785 assert(Cleanup_mon->owned_by_self(), "precondition");
4786 while (_unclean_regions_coming) {
4787 Cleanup_mon->wait();
4788 }
4789 }
4791 void
4792 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
4793 assert(ZF_mon->owned_by_self(), "precondition.");
4794 _unclean_region_list.insert_before_head(r);
4795 }
4797 void
4798 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
4799 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4800 prepend_region_list_on_unclean_list_locked(list);
4801 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4802 }
4804 void
4805 G1CollectedHeap::
4806 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
4807 assert(ZF_mon->owned_by_self(), "precondition.");
4808 _unclean_region_list.prepend_list(list);
4809 }
4811 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
4812 assert(ZF_mon->owned_by_self(), "precondition.");
4813 HeapRegion* res = _unclean_region_list.pop();
4814 if (res != NULL) {
4815 // Inform ZF thread that there's a new unclean head.
4816 if (_unclean_region_list.hd() != NULL && should_zf())
4817 ZF_mon->notify_all();
4818 }
4819 return res;
4820 }
4822 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
4823 assert(ZF_mon->owned_by_self(), "precondition.");
4824 return _unclean_region_list.hd();
4825 }
4828 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
4829 assert(ZF_mon->owned_by_self(), "Precondition");
4830 HeapRegion* r = peek_unclean_region_list_locked();
4831 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
4832 // Result of below must be equal to "r", since we hold the lock.
4833 (void)pop_unclean_region_list_locked();
4834 put_free_region_on_list_locked(r);
4835 return true;
4836 } else {
4837 return false;
4838 }
4839 }
4841 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
4842 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4843 return move_cleaned_region_to_free_list_locked();
4844 }
4847 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
4848 assert(ZF_mon->owned_by_self(), "precondition.");
4849 assert(_free_region_list_size == free_region_list_length(), "Inv");
4850 assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
4851 "Regions on free list must be zero filled");
4852 assert(!r->isHumongous(), "Must not be humongous.");
4853 assert(r->is_empty(), "Better be empty");
4854 assert(!r->is_on_free_list(),
4855 "Better not already be on free list");
4856 assert(!r->is_on_unclean_list(),
4857 "Better not already be on unclean list");
4858 r->set_on_free_list(true);
4859 r->set_next_on_free_list(_free_region_list);
4860 _free_region_list = r;
4861 _free_region_list_size++;
4862 assert(_free_region_list_size == free_region_list_length(), "Inv");
4863 }
4865 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
4866 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4867 put_free_region_on_list_locked(r);
4868 }
4870 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
4871 assert(ZF_mon->owned_by_self(), "precondition.");
4872 assert(_free_region_list_size == free_region_list_length(), "Inv");
4873 HeapRegion* res = _free_region_list;
4874 if (res != NULL) {
4875 _free_region_list = res->next_from_free_list();
4876 _free_region_list_size--;
4877 res->set_on_free_list(false);
4878 res->set_next_on_free_list(NULL);
4879 assert(_free_region_list_size == free_region_list_length(), "Inv");
4880 }
4881 return res;
4882 }
4885 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
4886 // By self, or on behalf of self.
4887 assert(Heap_lock->is_locked(), "Precondition");
4888 HeapRegion* res = NULL;
4889 bool first = true;
4890 while (res == NULL) {
4891 if (zero_filled || !first) {
4892 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4893 res = pop_free_region_list_locked();
4894 if (res != NULL) {
4895 assert(!res->zero_fill_is_allocated(),
4896 "No allocated regions on free list.");
4897 res->set_zero_fill_allocated();
4898 } else if (!first) {
4899 break; // We tried both, time to return NULL.
4900 }
4901 }
4903 if (res == NULL) {
4904 res = alloc_region_from_unclean_list(zero_filled);
4905 }
4906 assert(res == NULL ||
4907 !zero_filled ||
4908 res->zero_fill_is_allocated(),
4909 "We must have allocated the region we're returning");
4910 first = false;
4911 }
4912 return res;
4913 }
4915 void G1CollectedHeap::remove_allocated_regions_from_lists() {
4916 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4917 {
4918 HeapRegion* prev = NULL;
4919 HeapRegion* cur = _unclean_region_list.hd();
4920 while (cur != NULL) {
4921 HeapRegion* next = cur->next_from_unclean_list();
4922 if (cur->zero_fill_is_allocated()) {
4923 // Remove from the list.
4924 if (prev == NULL) {
4925 (void)_unclean_region_list.pop();
4926 } else {
4927 _unclean_region_list.delete_after(prev);
4928 }
4929 cur->set_on_unclean_list(false);
4930 cur->set_next_on_unclean_list(NULL);
4931 } else {
4932 prev = cur;
4933 }
4934 cur = next;
4935 }
4936 assert(_unclean_region_list.sz() == unclean_region_list_length(),
4937 "Inv");
4938 }
4940 {
4941 HeapRegion* prev = NULL;
4942 HeapRegion* cur = _free_region_list;
4943 while (cur != NULL) {
4944 HeapRegion* next = cur->next_from_free_list();
4945 if (cur->zero_fill_is_allocated()) {
4946 // Remove from the list.
4947 if (prev == NULL) {
4948 _free_region_list = cur->next_from_free_list();
4949 } else {
4950 prev->set_next_on_free_list(cur->next_from_free_list());
4951 }
4952 cur->set_on_free_list(false);
4953 cur->set_next_on_free_list(NULL);
4954 _free_region_list_size--;
4955 } else {
4956 prev = cur;
4957 }
4958 cur = next;
4959 }
4960 assert(_free_region_list_size == free_region_list_length(), "Inv");
4961 }
4962 }
4964 bool G1CollectedHeap::verify_region_lists() {
4965 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4966 return verify_region_lists_locked();
4967 }
4969 bool G1CollectedHeap::verify_region_lists_locked() {
4970 HeapRegion* unclean = _unclean_region_list.hd();
4971 while (unclean != NULL) {
4972 guarantee(unclean->is_on_unclean_list(), "Well, it is!");
4973 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
4974 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
4975 "Everything else is possible.");
4976 unclean = unclean->next_from_unclean_list();
4977 }
4978 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
4980 HeapRegion* free_r = _free_region_list;
4981 while (free_r != NULL) {
4982 assert(free_r->is_on_free_list(), "Well, it is!");
4983 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
4984 switch (free_r->zero_fill_state()) {
4985 case HeapRegion::NotZeroFilled:
4986 case HeapRegion::ZeroFilling:
4987 guarantee(false, "Should not be on free list.");
4988 break;
4989 default:
4990 // Everything else is possible.
4991 break;
4992 }
4993 free_r = free_r->next_from_free_list();
4994 }
4995 guarantee(_free_region_list_size == free_region_list_length(), "Inv");
4996   // If we didn't trip an assertion, the lists are consistent.
4997 return true;
4998 }
5000 size_t G1CollectedHeap::free_region_list_length() {
5001 assert(ZF_mon->owned_by_self(), "precondition.");
5002 size_t len = 0;
5003 HeapRegion* cur = _free_region_list;
5004 while (cur != NULL) {
5005 len++;
5006 cur = cur->next_from_free_list();
5007 }
5008 return len;
5009 }
5011 size_t G1CollectedHeap::unclean_region_list_length() {
5012 assert(ZF_mon->owned_by_self(), "precondition.");
5013 return _unclean_region_list.length();
5014 }
5016 size_t G1CollectedHeap::n_regions() {
5017 return _hrs->length();
5018 }
5020 size_t G1CollectedHeap::max_regions() {
5021 return
5022 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
5023 HeapRegion::GrainBytes;
5024 }
5026 size_t G1CollectedHeap::free_regions() {
5027 /* Possibly-expensive assert.
5028 assert(_free_regions == count_free_regions(),
5029 "_free_regions is off.");
5030 */
5031 return _free_regions;
5032 }
5034 bool G1CollectedHeap::should_zf() {
5035 return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
5036 }
5038 class RegionCounter: public HeapRegionClosure {
5039 size_t _n;
5040 public:
5041 RegionCounter() : _n(0) {}
5042 bool doHeapRegion(HeapRegion* r) {
5043 if (r->is_empty() && !r->popular()) {
5044 assert(!r->isHumongous(), "H regions should not be empty.");
5045 _n++;
5046 }
5047 return false;
5048 }
5049 int res() { return (int) _n; }
5050 };
5052 size_t G1CollectedHeap::count_free_regions() {
5053 RegionCounter rc;
5054 heap_region_iterate(&rc);
5055 size_t n = rc.res();
5056 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
5057 n--;
5058 return n;
5059 }
5061 size_t G1CollectedHeap::count_free_regions_list() {
5062 size_t n = 0;
5063 size_t o = 0;
5064 ZF_mon->lock_without_safepoint_check();
5065 HeapRegion* cur = _free_region_list;
5066 while (cur != NULL) {
5067 cur = cur->next_from_free_list();
5068 n++;
5069 }
5070 size_t m = unclean_region_list_length();
5071 ZF_mon->unlock();
5072 return n + m;
5073 }
5075 bool G1CollectedHeap::should_set_young_locked() {
5076 assert(heap_lock_held_for_gc(),
5077 "the heap lock should already be held by or for this thread");
5078 return (g1_policy()->in_young_gc_mode() &&
5079 g1_policy()->should_add_next_region_to_young_list());
5080 }
5082 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5083 assert(heap_lock_held_for_gc(),
5084 "the heap lock should already be held by or for this thread");
5085 _young_list->push_region(hr);
5086 g1_policy()->set_region_short_lived(hr);
5087 }
5089 class NoYoungRegionsClosure: public HeapRegionClosure {
5090 private:
5091 bool _success;
5092 public:
5093 NoYoungRegionsClosure() : _success(true) { }
5094 bool doHeapRegion(HeapRegion* r) {
5095 if (r->is_young()) {
5096 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
5097 r->bottom(), r->end());
5098 _success = false;
5099 }
5100 return false;
5101 }
5102 bool success() { return _success; }
5103 };
5105 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
5106 bool check_sample) {
5107 bool ret = true;
5109 ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
5110 if (!ignore_scan_only_list) {
5111 NoYoungRegionsClosure closure;
5112 heap_region_iterate(&closure);
5113 ret = ret && closure.success();
5114 }
5116 return ret;
5117 }
5119 void G1CollectedHeap::empty_young_list() {
5120 assert(heap_lock_held_for_gc(),
5121 "the heap lock should already be held by or for this thread");
5122 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
5124 _young_list->empty_list();
5125 }
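// Returns true iff none of the GC allocation regions has had an allocation
// since the last save_marks() on it (a region that is NULL trivially
// qualifies).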
5127 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
5128 bool no_allocs = true;
5129 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
5130 HeapRegion* r = _gc_alloc_regions[ap];
5131 no_allocs = r == NULL || r->saved_mark_at_top();
5132 }
5133 return no_allocs;
5134 }
5136 void G1CollectedHeap::retire_all_alloc_regions() {
5137 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
5138 HeapRegion* r = _gc_alloc_regions[ap];
5139 if (r != NULL) {
5140 // Check for aliases.
5141 bool has_processed_alias = false;
5142 for (int i = 0; i < ap; ++i) {
5143 if (_gc_alloc_regions[i] == r) {
5144 has_processed_alias = true;
5145 break;
5146 }
5147 }
5148 if (!has_processed_alias) {
5149 retire_alloc_region(r, false /* par */);
5150 }
5151 }
5152 }
5153 }
5156 // Done at the start of full GC.
5157 void G1CollectedHeap::tear_down_region_lists() {
5158 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5159 while (pop_unclean_region_list_locked() != NULL) ;
5160 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
5161 "Postconditions of loop.")
5162 while (pop_free_region_list_locked() != NULL) ;
5163 assert(_free_region_list == NULL, "Postcondition of loop.");
5164 if (_free_region_list_size != 0) {
5165 gclog_or_tty->print_cr("Size is " SIZE_FORMAT ".", _free_region_list_size);
5166 print();
5167 }
5168 assert(_free_region_list_size == 0, "Postconditions of loop.");
5169 }
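// Used by rebuild_region_lists() below: re-derives the region lists from
// per-region state after a full GC. Non-empty regions get their unused tails
// re-zeroed and are marked zero-fill-allocated; empty, non-popular regions go
// back on the unclean or free list according to their zero-fill state, and
// are counted so that _free_regions can be reset.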
5172 class RegionResetter: public HeapRegionClosure {
5173 G1CollectedHeap* _g1;
5174 int _n;
5175 public:
5176 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
5177 bool doHeapRegion(HeapRegion* r) {
5178 if (r->continuesHumongous()) return false;
5179 if (r->top() > r->bottom()) {
5180 if (r->top() < r->end()) {
5181 Copy::fill_to_words(r->top(),
5182 pointer_delta(r->end(), r->top()));
5183 }
5184 r->set_zero_fill_allocated();
5185 } else {
5186 assert(r->is_empty(), "tautology");
5187 if (r->popular()) {
5188 if (r->zero_fill_state() != HeapRegion::Allocated) {
5189 r->ensure_zero_filled_locked();
5190 r->set_zero_fill_allocated();
5191 }
5192 } else {
5193 _n++;
5194 switch (r->zero_fill_state()) {
5195 case HeapRegion::NotZeroFilled:
5196 case HeapRegion::ZeroFilling:
5197 _g1->put_region_on_unclean_list_locked(r);
5198 break;
5199 case HeapRegion::Allocated:
5200 r->set_zero_fill_complete();
5201 // no break; go on to put on free list.
5202 case HeapRegion::ZeroFilled:
5203 _g1->put_free_region_on_list_locked(r);
5204 break;
5205 }
5206 }
5207 }
5208 return false;
5209 }
5211 int getFreeRegionCount() { return _n; }
5212 };
5214 // Done at the end of full GC.
5215 void G1CollectedHeap::rebuild_region_lists() {
5216 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5217 // This needs to go at the end of the full GC.
5218 RegionResetter rs;
5219 heap_region_iterate(&rs);
5220 _free_regions = rs.getFreeRegionCount();
5221 // Tell the ZF thread it may have work to do.
5222 if (should_zf()) ZF_mon->notify_all();
5223 }
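// Used by set_used_regions_to_need_zero_fill() below to flag every region
// that currently contains data as needing zero fill again.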
5225 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
5226 G1CollectedHeap* _g1;
5227 int _n;
5228 public:
5229 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
5230 bool doHeapRegion(HeapRegion* r) {
5231 if (r->continuesHumongous()) return false;
5232 if (r->top() > r->bottom()) {
5233 // There are assertions in "set_zero_fill_needed()" below that
5234 // require top() == bottom(), so this is technically illegal.
5235 // We'll skirt the law here, by making that true temporarily.
5236 DEBUG_ONLY(HeapWord* save_top = r->top();
5237 r->set_top(r->bottom()));
5238 r->set_zero_fill_needed();
5239 DEBUG_ONLY(r->set_top(save_top));
5240 }
5241 return false;
5242 }
5243 };
5245 // Done at the start of full GC.
5246 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
5247 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5248 // This needs to go at the start of the full GC.
5249 UsedRegionsNeedZeroFillSetter rs;
5250 heap_region_iterate(&rs);
5251 }
5253 class CountObjClosure: public ObjectClosure {
5254 size_t _n;
5255 public:
5256 CountObjClosure() : _n(0) {}
5257 void do_object(oop obj) { _n++; }
5258 size_t n() { return _n; }
5259 };
5261 size_t G1CollectedHeap::pop_object_used_objs() {
5262 size_t sum_objs = 0;
5263 for (int i = 0; i < G1NumPopularRegions; i++) {
5264 CountObjClosure cl;
5265 _hrs->at(i)->object_iterate(&cl);
5266 sum_objs += cl.n();
5267 }
5268 return sum_objs;
5269 }
5271 size_t G1CollectedHeap::pop_object_used_bytes() {
5272 size_t sum_bytes = 0;
5273 for (int i = 0; i < G1NumPopularRegions; i++) {
5274 sum_bytes += _hrs->at(i)->used();
5275 }
5276 return sum_bytes;
5277 }
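// Bump-allocates a popular object in the dedicated popular regions (the
// first G1NumPopularRegions regions of the heap), advancing to the next
// popular region once the current one is full. Exits the VM if all popular
// regions are exhausted (see the XXX below).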
5282 HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) {
5283 while (_cur_pop_hr_index < G1NumPopularRegions) {
5284 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
5285 HeapWord* res = cur_pop_region->allocate(word_size);
5286 if (res != NULL) {
5287 // We account for popular objs directly in the used summary:
5288 _summary_bytes_used += (word_size * HeapWordSize);
5289 return res;
5290 }
5291 // Otherwise, try the next region (first making sure that we remember
5292 // the last "top" value as the "next_top_at_mark_start", so that
5293 // objects made popular during markings aren't automatically considered
5294 // live).
5295 cur_pop_region->note_end_of_copying();
5297 _cur_pop_hr_index++;
5298 }
5299 // XXX: For now !!!
5300 vm_exit_out_of_memory(word_size,
5301 "Not enough pop obj space (To Be Fixed)");
5302 return NULL;
5303 }
5305 class HeapRegionList: public CHeapObj {
5306 public:
5307 HeapRegion* hr;
5308 HeapRegionList* next;
5309 };
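// Marks "r" popular-pending and links it onto the list of regions awaiting a
// popular-object evacuation pause, unless it is already pending or already in
// the collection set. Protected by ParGCRareEvent_lock since this can be
// reached from parallel GC workers.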
5311 void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) {
5312 // This might happen during parallel GC, so protect by this lock.
5313 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
5314 // We don't schedule regions whose evacuations are already pending, or
5315 // are already being evacuated.
5316 if (!r->popular_pending() && !r->in_collection_set()) {
5317 r->set_popular_pending(true);
5318 if (G1TracePopularity) {
5319 gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" "
5320 "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.",
5321 r, r->bottom(), r->end());
5322 }
5323 HeapRegionList* hrl = new HeapRegionList;
5324 hrl->hr = r;
5325 hrl->next = _popular_regions_to_be_evacuated;
5326 _popular_regions_to_be_evacuated = hrl;
5327 }
5328 }
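// Pops regions off the pending list until one still qualifies, i.e. its
// remembered-set occupancy is still at or above G1RSPopLimit (the limit may
// have been raised since the region was scheduled). Regions that no longer
// qualify are unscheduled; returns NULL if the list empties first.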
5330 HeapRegion* G1CollectedHeap::popular_region_to_evac() {
5331 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
5332 HeapRegion* res = NULL;
5333 while (_popular_regions_to_be_evacuated != NULL && res == NULL) {
5334 HeapRegionList* hrl = _popular_regions_to_be_evacuated;
5335 _popular_regions_to_be_evacuated = hrl->next;
5336 res = hrl->hr;
5337 // The G1RSPopLimit may have increased, so recheck here...
5338 if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) {
5339 // Hah: don't need to schedule.
5340 if (G1TracePopularity) {
5341 gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" "
5342 "["PTR_FORMAT", "PTR_FORMAT") "
5343 "for pop-object evacuation (size %d < limit %d)",
5344 res, res->bottom(), res->end(),
5345 res->rem_set()->occupied(), G1RSPopLimit);
5346 }
5347 res->set_popular_pending(false);
5348 res = NULL;
5349 }
5350 // We do not reset res->popular() here; if we did so, it would allow
5351 // the region to be "rescheduled" for popularity evacuation. Instead,
5352 // this is done in the collection pause, with the world stopped.
5353 // So the invariant is that the regions in the list have the popularity
5354 // boolean set, but having the boolean set does not imply membership
5355 // on the list (though there can at most one such pop-pending region
5356 // not on the list at any time).
5357 delete hrl;
5358 }
5359 return res;
5360 }
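// Requests a stop-world popularity pause for "hr" via
// VM_G1PopRegionCollectionPause, retrying if the pause's prologue fails, and
// giving up early if the region's remembered-set occupancy has meanwhile
// dropped below G1RSPopLimit.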
5362 void G1CollectedHeap::evac_popular_region(HeapRegion* hr) {
5363 while (true) {
5364 // Don't want to do a GC pause while cleanup is being completed!
5365 wait_for_cleanup_complete();
5367 // Read the GC count while holding the Heap_lock
5368 int gc_count_before = SharedHeap::heap()->total_collections();
5369 g1_policy()->record_stop_world_start();
5371 {
5372 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
5373 VM_G1PopRegionCollectionPause op(gc_count_before, hr);
5374 VMThread::execute(&op);
5376 // If the prolog succeeded, we didn't do a GC for this.
5377 if (op.prologue_succeeded()) break;
5378 }
5379 // Otherwise we didn't. We should recheck the size, though, since
5380 // the limit may have increased...
5381 if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) {
5382 hr->set_popular_pending(false);
5383 break;
5384 }
5385 }
5386 }
5388 void G1CollectedHeap::atomic_inc_obj_rc(oop obj) {
5389 Atomic::inc(obj_rc_addr(obj));
5390 }
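// Increments a per-object reference count (kept in the side "RC region"; see
// compute_reference_counts_and_evac_popular below) for each reference found
// by the into-collection-set scan. In the parallel case the count saturates
// at G1ObjPopLimit to limit contention.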
5392 class CountRCClosure: public OopsInHeapRegionClosure {
5393 G1CollectedHeap* _g1h;
5394 bool _parallel;
5395 public:
5396 CountRCClosure(G1CollectedHeap* g1h) :
5397 _g1h(g1h), _parallel(ParallelGCThreads > 0)
5398 {}
5399 void do_oop(narrowOop* p) {
5400 guarantee(false, "NYI");
5401 }
5402 void do_oop(oop* p) {
5403 oop obj = *p;
5404 assert(obj != NULL, "Precondition.");
5405 if (_parallel) {
5406 // We go sticky at the limit to avoid excess contention.
5407 // If we want to track the actual RC's further, we'll need to keep a
5408 // per-thread hash table or something for the popular objects.
5409 if (_g1h->obj_rc(obj) < G1ObjPopLimit) {
5410 _g1h->atomic_inc_obj_rc(obj);
5411 }
5412 } else {
5413 _g1h->inc_obj_rc(obj);
5414 }
5415 }
5416 };
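// Applied to every object in the popular region after reference counting:
// objects whose count reached G1ObjPopLimit are copied into a popular region,
// forwarded, have the new copy's references rescanned, and (if marking is in
// progress) are marked/grayed. Also tracks the maximum count observed.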
5418 class EvacPopObjClosure: public ObjectClosure {
5419 G1CollectedHeap* _g1h;
5420 size_t _pop_objs;
5421 size_t _max_rc;
5422 public:
5423 EvacPopObjClosure(G1CollectedHeap* g1h) :
5424 _g1h(g1h), _pop_objs(0), _max_rc(0) {}
5426 void do_object(oop obj) {
5427 size_t rc = _g1h->obj_rc(obj);
5428 _max_rc = MAX2(rc, _max_rc);
5429 if (rc >= (size_t) G1ObjPopLimit) {
5430 _g1h->_pop_obj_rc_at_copy.add((double)rc);
5431 size_t word_sz = obj->size();
5432 HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz);
5433 oop new_pop_obj = (oop)new_pop_loc;
5434 Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz);
5435 obj->forward_to(new_pop_obj);
5436 G1ScanAndBalanceClosure scan_and_balance(_g1h);
5437 new_pop_obj->oop_iterate_backwards(&scan_and_balance);
5438 // preserve "next" mark bit if marking is in progress.
5439 if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) {
5440 _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj);
5441 }
5443 if (G1TracePopularity) {
5444 gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT
5445 " pop (%d), move to " PTR_FORMAT,
5446 (void*) obj, word_sz,
5447 _g1h->obj_rc(obj), (void*) new_pop_obj);
5448 }
5449 _pop_objs++;
5450 }
5451 }
5452 size_t pop_objs() { return _pop_objs; }
5453 size_t max_rc() { return _max_rc; }
5454 };
5456 class G1ParCountRCTask : public AbstractGangTask {
5457 G1CollectedHeap* _g1h;
5458 BitMap _bm;
5460 size_t getNCards() {
5461 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
5462 / G1BlockOffsetSharedArray::N_bytes;
5463 }
5464 CountRCClosure _count_rc_closure;
5465 public:
5466 G1ParCountRCTask(G1CollectedHeap* g1h) :
5467 AbstractGangTask("G1 Par RC Count task"),
5468 _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h)
5469 {}
5471 void work(int i) {
5472 ResourceMark rm;
5473 HandleMark hm;
5474 _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i);
5475 }
5476 };
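// Sets up a popularity pause: makes "popular_region" a single-region
// collection set and tries to evacuate its popular objects. If no object
// turned out to be popular enough, doubles G1RSPopLimit (up to a cap) and
// empties the collection set so the rest of the pause does nothing.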
5478 void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) {
5479 // We're evacuating a single region (for popularity).
5480 if (G1TracePopularity) {
5481 gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")",
5482 popular_region->bottom(), popular_region->end());
5483 }
5484 g1_policy()->set_single_region_collection_set(popular_region);
5485 size_t max_rc;
5486 if (!compute_reference_counts_and_evac_popular(popular_region,
5487 &max_rc)) {
5488 // We didn't evacuate any popular objects.
5489 // We increase the RS popularity limit, to prevent this from
5490 // happening in the future.
5491 if (G1RSPopLimit < (1 << 30)) {
5492 G1RSPopLimit *= 2;
5493 }
5494 // For now, interesting enough for a message:
5495 #if 1
5496 gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), "
5497 "failed to find a pop object (max = %d).",
5498 popular_region->bottom(), popular_region->end(),
5499 max_rc);
5500 gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit);
5501 #endif // 0
5502 // Also, we reset the collection set to NULL, to make the rest of
5503 // the collection do nothing.
5504 assert(popular_region->next_in_collection_set() == NULL,
5505 "should be single-region.");
5506 popular_region->set_in_collection_set(false);
5507 popular_region->set_popular_pending(false);
5508 g1_policy()->clear_collection_set();
5509 }
5510 }
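// Counts, per object in "popular_region", the references into it found by the
// remembered-set scan, then evacuates the objects whose count reaches
// G1ObjPopLimit. The counts live in a temporary "RC region" (a fresh heap
// region if one is available, otherwise a C-heap buffer of the same size);
// _rc_region_above/_rc_region_diff record how object addresses map to their
// counter slots. Returns true iff at least one object was evacuated, and
// reports the maximum count seen via "max_rc".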
5512 bool G1CollectedHeap::
5513 compute_reference_counts_and_evac_popular(HeapRegion* popular_region,
5514 size_t* max_rc) {
5515 HeapWord* rc_region_bot;
5516 HeapWord* rc_region_end;
5518 // Set up the reference count region.
5519 HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords);
5520 if (rc_region != NULL) {
5521 rc_region_bot = rc_region->bottom();
5522 rc_region_end = rc_region->end();
5523 } else {
5524 rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords);
5525 if (rc_region_bot == NULL) {
5526 vm_exit_out_of_memory(HeapRegion::GrainWords,
5527 "No space for RC region.");
5528 }
5529 rc_region_end = rc_region_bot + HeapRegion::GrainWords;
5530 }
5532 if (G1TracePopularity)
5533 gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")",
5534 rc_region_bot, rc_region_end);
5535 if (rc_region_bot > popular_region->bottom()) {
5536 _rc_region_above = true;
5537 _rc_region_diff =
5538 pointer_delta(rc_region_bot, popular_region->bottom(), 1);
5539 } else {
5540 assert(rc_region_bot < popular_region->bottom(), "Can't be equal.");
5541 _rc_region_above = false;
5542 _rc_region_diff =
5543 pointer_delta(popular_region->bottom(), rc_region_bot, 1);
5544 }
5545 g1_policy()->record_pop_compute_rc_start();
5546 // Count external references.
5547 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5548 if (ParallelGCThreads > 0) {
5550 set_par_threads(workers()->total_workers());
5551 G1ParCountRCTask par_count_rc_task(this);
5552 workers()->run_task(&par_count_rc_task);
5553 set_par_threads(0);
5555 } else {
5556 CountRCClosure count_rc_closure(this);
5557 g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0);
5558 }
5559 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5560 g1_policy()->record_pop_compute_rc_end();
5562 // Now evacuate popular objects.
5563 g1_policy()->record_pop_evac_start();
5564 EvacPopObjClosure evac_pop_obj_cl(this);
5565 popular_region->object_iterate(&evac_pop_obj_cl);
5566 *max_rc = evac_pop_obj_cl.max_rc();
5568 // Make sure the last "top" value of the current popular region is copied
5569 // as the "next_top_at_mark_start", so that objects made popular during
5570 // markings aren't automatically considered live.
5571 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
5572 cur_pop_region->note_end_of_copying();
5574 if (rc_region != NULL) {
5575 free_region(rc_region);
5576 } else {
5577 FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot);
5578 }
5579 g1_policy()->record_pop_evac_end();
5581 return evac_pop_obj_cl.pop_objs() > 0;
5582 }
5584 class CountPopObjInfoClosure: public HeapRegionClosure {
5585 size_t _objs;
5586 size_t _bytes;
5588 class CountObjClosure: public ObjectClosure {
5589 int _n;
5590 public:
5591 CountObjClosure() : _n(0) {}
5592 void do_object(oop obj) { _n++; }
5593 size_t n() { return _n; }
5594 };
5596 public:
5597 CountPopObjInfoClosure() : _objs(0), _bytes(0) {}
5598 bool doHeapRegion(HeapRegion* r) {
5599 _bytes += r->used();
5600 CountObjClosure blk;
5601 r->object_iterate(&blk);
5602 _objs += blk.n();
5603 return false;
5604 }
5605 size_t objs() { return _objs; }
5606 size_t bytes() { return _bytes; }
5607 };
5610 void G1CollectedHeap::print_popularity_summary_info() const {
5611 CountPopObjInfoClosure blk;
5612 for (int i = 0; i <= _cur_pop_hr_index; i++) {
5613 blk.doHeapRegion(_hrs->at(i));
5614 }
5615 gclog_or_tty->print_cr("\nPopular objects: " SIZE_FORMAT " objs, " SIZE_FORMAT " bytes.",
5616 blk.objs(), blk.bytes());
5617 gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].",
5618 _pop_obj_rc_at_copy.avg(),
5619 _pop_obj_rc_at_copy.maximum(),
5620 _pop_obj_rc_at_copy.sd());
5621 }
5623 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5624 _refine_cte_cl->set_concurrent(concurrent);
5625 }
5627 #ifndef PRODUCT
5629 class PrintHeapRegionClosure: public HeapRegionClosure {
5630 public:
5631 bool doHeapRegion(HeapRegion *r) {
5632 gclog_or_tty->print("Region: "PTR_FORMAT":", r);
5633 if (r != NULL) {
5634 if (r->is_on_free_list())
5635 gclog_or_tty->print("Free ");
5636 if (r->is_young())
5637 gclog_or_tty->print("Young ");
5638 if (r->isHumongous())
5639 gclog_or_tty->print("Is Humongous ");
5640 r->print();
5641 }
5642 return false;
5643 }
5644 };
5646 class SortHeapRegionClosure : public HeapRegionClosure {
5647 size_t young_regions, free_regions, unclean_regions;
5648 size_t hum_regions, count;
5649 size_t unaccounted, cur_unclean, cur_alloc;
5650 size_t total_free;
5651 HeapRegion* cur;
5652 public:
5653 SortHeapRegionClosure(HeapRegion *_cur) : young_regions(0), free_regions(0),
5654 unclean_regions(0), hum_regions(0),
5655 count(0), unaccounted(0),
5656 cur_unclean(0), cur_alloc(0),
5657 total_free(0), cur(_cur)
5658 {}
5659 bool doHeapRegion(HeapRegion *r) {
5660 count++;
5661 if (r->is_on_free_list()) free_regions++;
5662 else if (r->is_on_unclean_list()) unclean_regions++;
5663 else if (r->isHumongous()) hum_regions++;
5664 else if (r->is_young()) young_regions++;
5665 else if (r == cur) cur_alloc++;
5666 else unaccounted++;
5667 return false;
5668 }
5669 void print() {
5670 total_free = free_regions + unclean_regions;
5671 gclog_or_tty->print(SIZE_FORMAT " regions\n", count);
5672 gclog_or_tty->print(SIZE_FORMAT " free: free_list = " SIZE_FORMAT " unclean = " SIZE_FORMAT "\n",
5673 total_free, free_regions, unclean_regions);
5674 gclog_or_tty->print(SIZE_FORMAT " humongous " SIZE_FORMAT " young\n",
5675 hum_regions, young_regions);
5676 gclog_or_tty->print(SIZE_FORMAT " cur_alloc\n", cur_alloc);
5677 gclog_or_tty->print("UHOH unaccounted = " SIZE_FORMAT "\n", unaccounted);
5678 }
5679 };
5681 void G1CollectedHeap::print_region_counts() {
5682 SortHeapRegionClosure sc(_cur_alloc_region);
5683 PrintHeapRegionClosure cl;
5684 heap_region_iterate(&cl);
5685 heap_region_iterate(&sc);
5686 sc.print();
5687 print_region_accounting_info();
5688 }
5690 bool G1CollectedHeap::regions_accounted_for() {
5691 // TODO: regions accounting for young/survivor/tenured
5692 return true;
5693 }
5695 bool G1CollectedHeap::print_region_accounting_info() {
5696 gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions);
5697 gclog_or_tty->print_cr("Free regions: " SIZE_FORMAT " (count: " SIZE_FORMAT " count list " SIZE_FORMAT ") "
5698 "(clean: " SIZE_FORMAT " unclean: " SIZE_FORMAT ").",
5699 free_regions(), count_free_regions(), count_free_regions_list(),
5700 _free_region_list_size, _unclean_region_list.sz());
5701 gclog_or_tty->print_cr("cur_alloc: %d.",
5702 (_cur_alloc_region == NULL ? 0 : 1));
5703 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
5705 // TODO: check regions accounting for young/survivor/tenured
5706 return true;
5707 }
5709 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5710 HeapRegion* hr = heap_region_containing(p);
5711 if (hr == NULL) {
5712 return is_in_permanent(p);
5713 } else {
5714 return hr->is_in(p);
5715 }
5716 }
5717 #endif // PRODUCT
5719 void G1CollectedHeap::g1_unimplemented() {
5720 // Unimplemented();
5721 }
5724 // Local Variables: ***
5725 // c-indentation-style: gnu ***
5726 // End: ***