Mon, 16 Mar 2009 08:01:32 -0700
6817419: G1: Enable extensive verification for humongous regions
Summary: Enabled full verification for humongous regions. Also ensured that VerifyAfterGC works correctly with deferred updates and G1HRRSFlushLogBuffersOnVerify.
Reviewed-by: tonyp

/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1CollectedHeap.cpp.incl"

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define SCAN_ONLY_VERBOSE 0
// CURRENT STATUS
// This file is under construction. Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in
// mem_allocate_work, which all such allocation functions call.
// (Note that this does not apply to TLAB allocation, which is not part
// of this interface: it is done by clients of this interface.)
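//
// An illustrative sketch (not part of the original file) of the
// serialization pattern the note above describes; mem_allocate() further
// down follows exactly this shape:
//
//   Heap_lock->lock();
//   HeapWord* result = attempt_allocation(word_size);
//   if (result != NULL) {
//     // attempt_allocation() releases the Heap_lock on success.
//     return result;
//   }
//   Heap_lock->unlock();   // give it up before scheduling a GC VM-op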

// Local to this file.

// Finds the first HeapRegion.
// No longer used, but might be handy someday.

class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};
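
// A sketch of hypothetical usage (this closure currently has no callers),
// driven through the region iterator defined further down; doHeapRegion()
// returning true stops the iteration at the first region:
//
//   FindFirstRegionClosure cl;
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   HeapRegion* first = cl.result();  // NULL only if the heap has no regions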

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
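
// For context: an instance of this closure is created in initialize() below
// and installed as the shared dirty-card-queue closure, i.e.
//
//   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
//
// so every completed log buffer is refined one card at a time, yielding to
// safepoints when running concurrently.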

class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};

class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};

YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
    _length(0), _scan_only_length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  hr->set_young();
  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;

  ++_survivor_length;
}

HeapRegion* YoungList::pop_region() {
  while (_head != NULL) {
    assert( length() > 0, "list should not be empty" );
    HeapRegion* ret = _head;
    _head = ret->get_next_young_region();
    ret->set_next_young_region(NULL);
    --_length;
    assert(ret->is_young(), "region should be very young");

    // Replace 'Survivor' region type with 'Young'. So the region will
    // be treated as a young region and will not be 'confused' with
    // newly created survivor regions.
    if (ret->is_survivor()) {
      ret->set_young();
    }

    if (!ret->is_scan_only()) {
      return ret;
    }

    // scan-only, we'll add it to the scan-only list
    if (_scan_only_tail == NULL) {
      guarantee( _scan_only_head == NULL, "invariant" );

      _scan_only_head = ret;
      _curr_scan_only = ret;
    } else {
      guarantee( _scan_only_head != NULL, "invariant" );
      _scan_only_tail->set_next_young_region(ret);
    }
    guarantee( ret->get_next_young_region() == NULL, "invariant" );
    _scan_only_tail = ret;

    // no need to be tagged as scan-only any more
    ret->set_young();

    ++_scan_only_length;
  }
  assert( length() == 0, "list should be empty" );
  return NULL;
}
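
// A sketch of the drain loop a caller would use (illustrative only):
//
//   HeapRegion* hr;
//   while ((hr = young_list->pop_region()) != NULL) {
//     // process a to-be-collected young region; scan-only regions are
//     // diverted onto the scan-only list as a side effect and are never
//     // returned here
//   }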

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_scan_only_head);
  _scan_only_head = NULL;
  _scan_only_tail = NULL;
  _scan_only_length = 0;
  _curr_scan_only = NULL;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _length is %d",
                           length, _length);
  }

  bool scan_only_ret = true;
  length = 0;
  curr = _scan_only_head;
  last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      scan_only_ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  scan_only_ret = scan_only_ret && (length == _scan_only_length);

  if ( (last != _scan_only_tail) ||
       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
    gclog_or_tty->print_cr("### _scan_only_tail is set incorrectly");
    scan_only_ret = false;
  }

  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
    scan_only_ret = false;
  }

  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
    gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d",
                           length, _scan_only_length);
  }

  return ret && scan_only_ret;
}

bool YoungList::check_list_empty(bool ignore_scan_only_list,
                                 bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  if (ignore_scan_only_list)
    return ret;

  bool scan_only_ret = true;
  if (_scan_only_length != 0) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
                           _scan_only_length);
    scan_only_ret = false;
  }
  if (_scan_only_head != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
    scan_only_ret = false;
  }
  if (_scan_only_tail != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
    scan_only_ret = false;
  }
  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
  }

  return ret && scan_only_ret;
}

void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr = _head;
}

bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}

void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  _sampled_rs_lengths += _curr->rem_set()->occupied();
  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
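
// How a caller drives the sampling protocol above -- an illustrative sketch,
// not a verbatim copy of any actual caller:
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();  // accumulates rem-set sizes
//   }
//   // _last_sampled_rs_lengths now holds the total for the whole list.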

void
YoungList::reset_auxilary_lists() {
  // We could have just "moved" the scan-only list to the young list.
  // However, the scan-only list is ordered according to the region
  // age in descending order, so, by moving one entry at a time, we
  // ensure that it is recreated in ascending order.

  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  if (_survivor_head != NULL) {
    _head = _survivor_head;
    _length = _survivor_length + _scan_only_length;
    _survivor_tail->set_next_young_region(_scan_only_head);
  } else {
    _head = _scan_only_head;
    _length = _scan_only_length;
  }

  for (HeapRegion* curr = _scan_only_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    curr->recalculate_age_in_surv_rate_group();
  }
  _scan_only_head = NULL;
  _scan_only_tail = NULL;
  _scan_only_length = 0;
  _curr_scan_only = NULL;

  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;
  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}

void YoungList::print() {
  HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head};
  const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr(" empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, s-o: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_scan_only(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->cg1rThread()->stop();
  _czft->stop();
  _cmThread->stop();
}

void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

// Finds a HeapRegion that can be used to allocate a block of the given size.

HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
                                                 bool do_expand,
                                                 bool zero_filled) {
  ConcurrentZFThread::note_region_alloc();
  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
  if (res == NULL && do_expand) {
    expand(word_size * HeapWordSize);
    res = alloc_free_region_from_lists(zero_filled);
    assert(res == NULL ||
           (!res->isHumongous() &&
            (!zero_filled ||
             res->zero_fill_state() == HeapRegion::Allocated)),
           "Alloc Regions must be zero filled (and non-H)");
  }
  if (res != NULL && res->is_empty()) _free_regions--;
  assert(res == NULL ||
         (!res->isHumongous() &&
          (!zero_filled ||
           res->zero_fill_state() == HeapRegion::Allocated)),
         "Non-young alloc Regions must be zero filled (and non-H)");

  if (G1TraceRegions) {
    if (res != NULL) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                             "top "PTR_FORMAT,
                             res->hrs_index(), res->bottom(), res->end(), res->top());
    }
  }

  return res;
}

HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
                                                         size_t word_size,
                                                         bool zero_filled) {
  HeapRegion* alloc_region = NULL;
  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
      alloc_region->set_survivor();
    }
    ++_gc_alloc_region_counts[purpose];
  } else {
    g1_policy()->note_alloc_region_limit_reached(purpose);
  }
  return alloc_region;
}

// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list. (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
    wait_for_cleanup_complete();

  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
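  // For example (illustrative; assumes, for the arithmetic only, 1 MB
  // regions, i.e. GrainWords == 1M / HeapWordSize): a 2.5 MB object has
  // word_size rounded up to 3 * GrainWords, so num_regions == 3.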
  // Special case if < one region???

  // Remember the ft size.
  size_t x_size = expansion_regions();

  HeapWord* res = NULL;
  bool eliminated_allocated_from_lists = false;

  // Can the allocation potentially fit in the free regions?
  if (free_regions() >= num_regions) {
    res = _hrs->obj_allocate(word_size);
  }
  if (res == NULL) {
    // Try expansion.
    size_t fs = _hrs->free_suffix();
    if (fs + x_size >= num_regions) {
      expand((num_regions - fs) * HeapRegion::GrainBytes);
      res = _hrs->obj_allocate(word_size);
      assert(res != NULL, "This should have worked.");
    } else {
      // Expansion won't help. Are there enough free regions if we get rid
      // of reservations?
      size_t avail = free_regions();
      if (avail >= num_regions) {
        res = _hrs->obj_allocate(word_size);
        if (res != NULL) {
          remove_allocated_regions_from_lists();
          eliminated_allocated_from_lists = true;
        }
      }
    }
  }
  if (res != NULL) {
    // Increment by the number of regions allocated.
    // FIXME: Assumes regions all of size GrainBytes.
#ifndef PRODUCT
    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
                                           HeapRegion::GrainWords));
#endif
    if (!eliminated_allocated_from_lists)
      remove_allocated_regions_from_lists();
    _summary_bytes_used += word_size * HeapWordSize;
    _free_regions -= num_regions;
    _num_humongous_regions += (int) num_regions;
  }
  assert(regions_accounted_for(), "Region Leakage");
  return res;
}

HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // We may have concurrent cleanup working at the time. Wait for it
    // to complete. In the future we would probably want to make the
    // concurrent cleanup truly concurrent by decoupling it from the
    // allocation.
    if (!SafepointSynchronize::is_at_safepoint())
      wait_for_cleanup_complete();
    // If we do a collection pause, this will be reset to a non-NULL
    // value. If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      res = _cur_alloc_region->allocate(word_size);
    }
  }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      Heap_lock->unlock();
    }

    if (allocated_young_region != NULL) {
      HeapRegion* hr = allocated_young_region;
      HeapWord* bottom = hr->bottom();
      HeapWord* end = hr->end();
      MemRegion mr(bottom, end);
      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );

  return res;
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool is_noref,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded) {
  debug_only(check_for_valid_allocation_state());
  assert(no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    int gc_count_before;
    {
      Heap_lock->lock();
      result = attempt_allocation(word_size);
      if (result != NULL) {
        // attempt_allocation should have unlocked the heap lock
        assert(is_in(result), "result not in heap");
        return result;
      }
      // Read the gc count while the heap lock is held.
      gc_count_before = SharedHeap::heap()->total_collections();
      Heap_lock->unlock();
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before);

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate_work retries %d times",
              try_count);
    }
  }
}

void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
    if (_cur_alloc_region->is_empty()) {
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}

void G1CollectedHeap::abandon_gc_alloc_regions() {
  // first, make sure that the GC alloc region list is empty (it should!)
  assert(_gc_alloc_region_list == NULL, "invariant");
  release_gc_alloc_regions(true /* totally */);
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    r->reset_gc_time_stamp();
    if (r->continuesHumongous())
      return false;
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs != NULL) hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region. But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
    return false;
  }
};

class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->used_region().word_size() != 0) {
      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
    }
    return false;
  }
};

class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  UpdateRSOopClosure _cl;
  int _worker_i;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};

void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                    size_t word_size) {
  ResourceMark rm;

  if (full && DisableExplicitGC) {
    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

  if (GC_locker::is_active()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  {
    IsGCActiveMark x;

    // Timing
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);

    double start = os::elapsedTime();
    GCOverheadReporter::recordSTWStart(start);
    g1_policy()->record_full_collection_start();

    gc_prologue(true);
    increment_total_collections();

    size_t g1h_prev_used = used();
    assert(used() == recalculate_used(), "Should be equal");

    if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      prepare_for_verify();
      gclog_or_tty->print(" VerifyBeforeGC:");
      Universe::verify(true);
    }
    assert(regions_accounted_for(), "Region leakage!");

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    // We want to discover references, but not process them yet.
    // This mode is disabled in
    // instanceRefKlass::process_discovered_references if the
    // generation does some collection work, or
    // instanceRefKlass::enqueue_discovered_references if the
    // generation returns without doing any work.
    ref_processor()->disable_discovery();
    ref_processor()->abandon_partial_discovery();
    ref_processor()->verify_no_references_recorded();

    // Abandon current iterations of concurrent marking and concurrent
    // refinement, if any are in progress.
    concurrent_mark()->abort();

    // Make sure we'll choose a new allocation region afterwards.
    abandon_cur_alloc_region();
    abandon_gc_alloc_regions();
    assert(_cur_alloc_region == NULL, "Invariant.");
    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
    tear_down_region_lists();
    set_used_regions_to_need_zero_fill();
    if (g1_policy()->in_young_gc_mode()) {
      empty_young_list();
      g1_policy()->set_full_young_gcs(true);
    }

    // Temporarily make reference _discovery_ single threaded (non-MT).
    ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);

    // Temporarily make refs discovery atomic
    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);

    // Temporarily clear _is_alive_non_header
    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_soft_refs);

    // Do collection work
    {
      HandleMark hm;  // Discard invalid handles created during gc
      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
    }
    // Because freeing humongous regions may have added some unclean
    // regions, it is necessary to tear down again before rebuilding.
    tear_down_region_lists();
    rebuild_region_lists();

    _summary_bytes_used = recalculate_used();

    ref_processor()->enqueue_discovered_references();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      gclog_or_tty->print(" VerifyAfterGC:");
      prepare_for_verify();
      Universe::verify(false);
    }
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    reset_gc_time_stamp();
    // Since everything potentially moved, we will clear all remembered
    // sets, and clear all cards. Later we will rebuild remembered
    // sets. We will also reset the GC time stamps of the regions.
    PostMCRemSetClearClosure rs_clear(mr_bs());
    heap_region_iterate(&rs_clear);

    // Resize the heap if necessary.
    resize_if_necessary_after_full_collection(full ? 0 : word_size);

    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    // Rebuild remembered sets of all regions.
    if (ParallelGCThreads > 0) {
      ParRebuildRSTask rebuild_rs_task(this);
      assert(check_heap_region_claim_values(
             HeapRegion::InitialClaimValue), "sanity check");
      set_par_threads(workers()->total_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
      assert(check_heap_region_claim_values(
             HeapRegion::RebuildRSClaimValue), "sanity check");
      reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    double end = os::elapsedTime();
    GCOverheadReporter::recordSTWEnd(end);
    g1_policy()->record_full_collection_end();

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(true);

    // Abandon concurrent refinement. This must happen last: in the
    // dirty-card logging system, some cards may be dirty by weak-ref
    // processing, and may be enqueued. But the whole card table is
    // dirtied, so this should abandon those logs, and set "do_traversal"
    // to true.
    concurrent_g1_refine()->set_pya_restart();
    assert(!G1DeferredRSUpdate
           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    assert( check_young_list_empty(false, false),
            "young list should be empty at this point");
  }
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true, clear_all_soft_refs, 0);
}

// This code is mostly copied from TenuredGeneration.
void
G1CollectedHeap::
resize_if_necessary_after_full_collection(size_t word_size) {
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");

  // Include the current allocation, if any, and bytes that will be
  // pre-allocated to support collections, as "used".
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const size_t free_after_gc = capacity_after_gc - used_after_gc;

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
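
  // Worked example (illustrative numbers only): with MinHeapFreeRatio = 40
  // and MaxHeapFreeRatio = 70, maximum_used_percentage = 0.60 and
  // minimum_used_percentage = 0.30. If used_after_gc is 120M, the heap is
  // steered into the range [120M / 0.60, 120M / 0.30] = [200M, 400M].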
  // Don't shrink less than the initial size.
  minimum_desired_capacity =
    MAX2(minimum_desired_capacity,
         collector_policy()->initial_heap_byte_size());
  maximum_desired_capacity =
    MAX2(maximum_desired_capacity,
         collector_policy()->initial_heap_byte_size());

  // We are failing here because minimum_desired_capacity is
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity();
    gclog_or_tty->print_cr("Computing new size after full GC ");
    gclog_or_tty->print_cr(" "
                           " minimum_free_percentage: %6.2f",
                           minimum_free_percentage);
    gclog_or_tty->print_cr(" "
                           " maximum_free_percentage: %6.2f",
                           maximum_free_percentage);
    gclog_or_tty->print_cr(" "
                           " capacity: %6.1fK"
                           " minimum_desired_capacity: %6.1fK"
                           " maximum_desired_capacity: %6.1fK",
                           capacity() / (double) K,
                           minimum_desired_capacity / (double) K,
                           maximum_desired_capacity / (double) K);
    gclog_or_tty->print_cr(" "
                           " free_after_gc : %6.1fK"
                           " used_after_gc : %6.1fK",
                           free_after_gc / (double) K,
                           used_after_gc / (double) K);
    gclog_or_tty->print_cr(" "
                           " free_percentage: %6.2f",
                           free_percentage);
  }
  if (capacity() < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    expand(expand_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" expanding:"
                             " minimum_desired_capacity: %6.1fK"
                             " expand_bytes: %6.1fK",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K);
    }

    // No expansion, now see if we want to shrink
  } else if (capacity() > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    shrink(shrink_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
                             " shrinking:"
                             " initSize: %.1fK"
                             " maximum_desired_capacity: %.1fK",
                             collector_policy()->initial_heap_byte_size() / (double) K,
                             maximum_desired_capacity / (double) K);
      gclog_or_tty->print_cr(" "
                             " shrink_bytes: %.1fK",
                             shrink_bytes / (double) K);
    }
  }
}

HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses. Therefore, at least for now, we'll favor
  // expansion over collection. (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // OK, I guess we have to try collection.

  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);

  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // Try collecting soft references.
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Attempt to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;
}

size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  size_t pre_used = 0;
  size_t cleared_h_regions = 0;
  size_t freed_regions = 0;
  UncleanRegionList local_list;
  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
                                    freed_regions, &local_list);

  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
                          &local_list);
  return pre_used;
}

void
G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
                                                   size_t& pre_used,
                                                   size_t& cleared_h,
                                                   size_t& freed_regions,
                                                   UncleanRegionList* list,
                                                   bool par) {
  assert(!hr->continuesHumongous(), "should have filtered these out");
  size_t res = 0;
  if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) {
    if (!hr->is_young()) {
      if (G1PolicyVerbose > 0)
        gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
                               " during cleanup", hr, hr->used());
      free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
    }
  }
}

// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}
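
// Illustrative arithmetic for the alignment above (assuming, for the numbers
// only, 1 MB HeapRegion::GrainBytes): a request for 1.5M is page-aligned and
// then rounded up to 2M, so the loop commits exactly two new regions, each
// added to either the free list or the unclean (to-be-zero-filled) list.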

void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  _free_regions -= num_regions_deleted;
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions(true /* totally */);
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}

// Public methods.

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  SharedHeap(policy_),
  _g1_policy(policy_),
  _ref_processor(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  _cur_alloc_region(NULL),
  _refine_cte_cl(NULL),
  _free_region_list(NULL), _free_region_list_size(0),
  _free_regions(0),
  _popular_object_boundary(NULL),
  _cur_pop_hr_index(0),
  _popular_regions_to_be_evacuated(NULL),
  _pop_obj_rc_at_copy(),
  _full_collection(false),
  _unclean_region_list(),
  _unclean_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _surviving_young_words(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL) {
  _g1h = this; // To catch bugs.
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  int n_queues = MAX2((int)ParallelGCThreads, 1);
  _task_queues = new RefToScanQueueSet(n_queues);

  int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  HeapRegionRemSetIterator** iter_arr =
    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  for (int i = 0; i < n_queues; i++) {
    iter_arr[i] = new HeapRegionRemSetIterator();
  }
  _rem_set_iterator = iter_arr;

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
    _task_queues->register_queue(i, q);
  }

  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    _gc_alloc_regions[ap] = NULL;
    _gc_alloc_region_counts[ap] = 0;
    _retained_gc_alloc_regions[ap] = NULL;
    // by default, we do not retain a GC alloc region for each ap;
    // we'll override this, when appropriate, below
    _retain_gc_alloc_region[ap] = false;
  }

  // We will try to remember the last half-full tenured region we
  // allocated to at the end of a collection so that we can re-use it
  // during the next collection.
  _retain_gc_alloc_region[GCAllocForTenured] = true;

  guarantee(_task_queues != NULL, "task_queues allocation failure.");
}

jint G1CollectedHeap::initialize() {
  os::enable_vtime();

  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");

  // We allocate this in any case, but only do no work if the command line
  // param is off.
  _cg1r = new ConcurrentG1Refine();

  // Reserve the maximum.
  PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  // Includes the perm-gen.
  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                        HeapRegion::GrainBytes,
                        false /*ism*/);

  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (I've actually seen this
  // happen in asserts: DLD.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

  _expansion_regions = max_byte_size/HeapRegion::GrainBytes;

  _num_humongous_regions = 0;

  // Create the gen rem set (and barrier set) for the entire reserved region.
  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  set_barrier_set(rem_set()->bs());
  if (barrier_set()->is_a(BarrierSet::ModRef)) {
    _mr_bs = (ModRefBarrierSet*)_barrier_set;
  } else {
    vm_exit_during_initialization("G1 requires a mod ref bs.");
    return JNI_ENOMEM;
  }

  // Also create a G1 rem set.
  if (G1UseHRIntoRS) {
    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
    } else {
      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
      return JNI_ENOMEM;
    }
  } else {
    _g1_rem_set = new StupidG1RemSet(this);
  }

  // Carve out the G1 part of the heap.

  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
                           g1_rs.size()/HeapWordSize);
  ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);

  _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());

  _g1_storage.initialize(g1_rs, 0);
  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  _g1_max_committed = _g1_committed;
  _hrs = new HeapRegionSeq(_expansion_regions);
  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  guarantee(_cur_alloc_region == NULL, "from constructor");

  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                             heap_word_size(init_byte_size));

  _g1h = this;

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm = new ConcurrentMark(heap_rs, (int) max_regions());
  _cmThread = _cm->cmThread();

  // ...and the concurrent zero-fill thread, if necessary.
  if (G1ConcZeroFill) {
    _czft = new ConcurrentZFThread();
  }

  // Allocate the popular regions; take them off free lists.
  size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes;
  expand(pop_byte_size);
  _popular_object_boundary =
    _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords);
  for (int i = 0; i < G1NumPopularRegions; i++) {
    HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords);
    // assert(hr != NULL && hr->bottom() < _popular_object_boundary,
    //        "Should be enough, and all should be below boundary.");
    hr->set_popular(true);
  }
  assert(_cur_pop_hr_index == 0, "Start allocating at the first region.");

  // Initialize the from_card cache structure of HeapRegionRemSet.
  HeapRegionRemSet::init_heap(max_regions());

  // Now expand into the rest of the initial heap size.
  expand(init_byte_size - pop_byte_size);

  // Perform any initialization actions delegated to the policy.
  g1_policy()->init();

  g1_policy()->note_start_of_mark_thread();

  _refine_cte_cl =
    new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
                                    g1_rem_set(),
                                    concurrent_g1_refine());
  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);

  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               0,
                                               Shared_SATB_Q_lock);
  if (G1RSBarrierUseQueue) {
    JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                  DirtyCardQ_FL_lock,
                                                  G1DirtyCardQueueMax,
                                                  Shared_DirtyCardQ_lock);
  }
  if (G1DeferredRSUpdate) {
    dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                      DirtyCardQ_FL_lock,
                                      0,
                                      Shared_DirtyCardQ_lock,
                                      &JavaThread::dirty_card_queue_set());
  }
  // In case we're keeping closure specialization stats, initialize those
  // counts and that mechanism.
  SpecializationStats::clear();

  _gc_alloc_region_list = NULL;

  // Do later initialization work for concurrent refinement.
  _cg1r->init();

  const char* group_names[] = { "CR", "ZF", "CM", "CL" };
  GCOverheadReporter::initGCOverheadReporter(4, group_names);

  return JNI_OK;
}

void G1CollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,    // span
    false, // Reference discovery is not atomic
           // (though it shouldn't matter here.)
    true,  // mt_discovery
    NULL,  // is alive closure: need to fill this in for efficiency
    ParallelGCThreads,
    ParallelRefProcEnabled,
    true); // Setting next fields of discovered
           // lists requires a barrier.
}

size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}

void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
                                                 int worker_i) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->record_update_rs_processed_buffers(worker_i,
                                                  (double) n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  // Finish up the queue...
  if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
                                                            g1_rem_set());
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}

// Computes the sum of the storage used by the various regions.

size_t G1CollectedHeap::used() const {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  if (_cur_alloc_region != NULL)
    result += _cur_alloc_region->used();
  return result;
}

class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _used += r->used();
    }
    return false;
  }
  size_t result() { return _used; }
};

size_t G1CollectedHeap::recalculate_used() const {
  SumUsedClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}

#ifndef PRODUCT
class SumUsedRegionsClosure: public HeapRegionClosure {
  size_t _num;
public:
  // _num is set to 1 to account for the popular region
  SumUsedRegionsClosure() : _num(G1NumPopularRegions) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
      _num += 1;
    }
    return false;
  }
  size_t result() { return _num; }
};

size_t G1CollectedHeap::recalculate_used_regions() const {
  SumUsedRegionsClosure blk;
  _hrs->iterate(&blk);
  return blk.result();
}
#endif // PRODUCT

size_t G1CollectedHeap::unsafe_max_alloc() {
  if (_free_regions > 0) return HeapRegion::GrainBytes;
  // otherwise, is there space in the current allocation region?

  // We need to store the current allocation region in a local variable
  // here. The problem is that this method doesn't take any locks and
  // there may be other threads which overwrite the current allocation
  // region field. attempt_allocation(), for example, sets it to NULL
  // and this can happen *after* the NULL check here but before the call
  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  // to be a problem in the optimized build, since the two loads of the
  // current allocation region field are optimized away.
  HeapRegion* car = _cur_alloc_region;

  // FIXME: should iterate over all regions?
  if (car == NULL) {
    return 0;
  }
  return car->free();
}
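
// The hazard described above, in miniature (illustrative; this is the buggy
// shape the local snapshot avoids):
//
//   if (_cur_alloc_region != NULL)       // another thread may NULL the field
//     return _cur_alloc_region->free();  // between these two loads -> SIGSEGV
//
// Reading the field once into 'car' makes the check and the use consistent.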

void G1CollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause);
}

void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false); // don't clear all soft refs
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
  // Don't want to do a GC until cleanup is completed.
  wait_for_cleanup_complete();

  // Read the GC count while holding the Heap_lock
  int gc_count_before = SharedHeap::heap()->total_collections();
  {
    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
    VM_G1CollectFull op(gc_count_before, cause);
    VMThread::execute(&op);
  }
}

bool G1CollectedHeap::is_in(const void* p) const {
  if (_g1_committed.contains(p)) {
    HeapRegion* hr = _hrs->addr_to_region(p);
    return hr->is_in(p);
  } else {
    return _perm_gen->as_gen()->is_in(p);
  }
}

// Iteration functions.

// Iterates an OopClosure over all ref-containing fields of objects
// within a HeapRegion.

class IterateOopClosureRegionClosure: public HeapRegionClosure {
  MemRegion _mr;
  OopClosure* _cl;
public:
  IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
    : _mr(mr), _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    if (! r->continuesHumongous()) {
      r->oop_iterate(_cl);
    }
    return false;
  }
};

void G1CollectedHeap::oop_iterate(OopClosure* cl) {
  IterateOopClosureRegionClosure blk(_g1_committed, cl);
  _hrs->iterate(&blk);
}

void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  IterateOopClosureRegionClosure blk(mr, cl);
  _hrs->iterate(&blk);
}

// Iterates an ObjectClosure over all objects within a HeapRegion.

class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  ObjectClosure* _cl;
public:
  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    if (! r->continuesHumongous()) {
      r->object_iterate(_cl);
    }
    return false;
  }
};

void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  IterateObjectClosureRegionClosure blk(cl);
  _hrs->iterate(&blk);
}

void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  // FIXME: is this right?
  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}

// Calls a SpaceClosure on a HeapRegion.

class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    _cl->do_space(r);
    return false;
  }
};

void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  _hrs->iterate(&blk);
}

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  _hrs->iterate(cl);
}

void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
                                               HeapRegionClosure* cl) {
  _hrs->iterate_from(r, cl);
}

void
G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  _hrs->iterate_from(idx, cl);
}

HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
1803 void
1804 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
1805 int worker,
1806 jint claim_value) {
1807 const size_t regions = n_regions();
1808 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
1809 // try to spread out the starting points of the workers
1810 const size_t start_index = regions / worker_num * (size_t) worker;
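  // Worked example with made-up numbers: with regions == 100 and
  // worker_num == 4, workers 0..3 start at indices 0, 25, 50 and 75;
  // each then walks all 100 regions modulo regions, so every region is
  // examined by every worker while contention on claiming is spread out.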
1812 // each worker will actually look at all regions
1813 for (size_t count = 0; count < regions; ++count) {
1814 const size_t index = (start_index + count) % regions;
1815 assert(0 <= index && index < regions, "sanity");
1816 HeapRegion* r = region_at(index);
1817 // we'll ignore "continues humongous" regions (we'll process them
1818 // when we come across their corresponding "start humongous"
1819 // region) and regions already claimed
1820 if (r->claim_value() == claim_value || r->continuesHumongous()) {
1821 continue;
1822 }
1823 // OK, try to claim it
1824 if (r->claimHeapRegion(claim_value)) {
1825 // success!
1826 assert(!r->continuesHumongous(), "sanity");
1827 if (r->startsHumongous()) {
1828 // If the region is "starts humongous" we'll iterate over its
1829 // "continues humongous" first; in fact we'll do them
1830 // first. The order is important. In on case, calling the
1831 // closure on the "starts humongous" region might de-allocate
1832 // and clear all its "continues humongous" regions and, as a
1833 // result, we might end up processing them twice. So, we'll do
1834 // them first (notice: most closures will ignore them anyway) and
1835 // then we'll do the "starts humongous" region.
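        // Illustration (hypothetical layout): for a humongous object
        // spanning regions [i, i+k], the worker that claims region i will
        // claim and apply the closure to regions i+1 .. i+k first, and
        // only then apply it to region i itself.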
1836 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
1837 HeapRegion* chr = region_at(ch_index);
1839 // if the region has already been claimed or it's not
1840 // "continues humongous" we're done
1841 if (chr->claim_value() == claim_value ||
1842 !chr->continuesHumongous()) {
1843 break;
1844 }
1846           // No one should have claimed it directly. We can assert that,
1847           // given that we claimed its "starts humongous" region.
1848 assert(chr->claim_value() != claim_value, "sanity");
1849 assert(chr->humongous_start_region() == r, "sanity");
1851 if (chr->claimHeapRegion(claim_value)) {
1852             // we should always be able to claim it; no one else should
1853 // be trying to claim this region
1855 bool res2 = cl->doHeapRegion(chr);
1856 assert(!res2, "Should not abort");
1858 // Right now, this holds (i.e., no closure that actually
1859 // does something with "continues humongous" regions
1860 // clears them). We might have to weaken it in the future,
1861 // but let's leave these two asserts here for extra safety.
1862 assert(chr->continuesHumongous(), "should still be the case");
1863 assert(chr->humongous_start_region() == r, "sanity");
1864 } else {
1865 guarantee(false, "we should not reach here");
1866 }
1867 }
1868 }
1870 assert(!r->continuesHumongous(), "sanity");
1871 bool res = cl->doHeapRegion(r);
1872 assert(!res, "Should not abort");
1873 }
1874 }
1875 }
1877 class ResetClaimValuesClosure: public HeapRegionClosure {
1878 public:
1879 bool doHeapRegion(HeapRegion* r) {
1880 r->set_claim_value(HeapRegion::InitialClaimValue);
1881 return false;
1882 }
1883 };
1885 void
1886 G1CollectedHeap::reset_heap_region_claim_values() {
1887 ResetClaimValuesClosure blk;
1888 heap_region_iterate(&blk);
1889 }
1891 #ifdef ASSERT
1892 // This checks whether all regions in the heap have the correct claim
1893 // value. We also piggy-back on it a check to ensure that the
1894 // humongous_start_region() information on "continues humongous"
1895 // regions is correct.
1897 class CheckClaimValuesClosure : public HeapRegionClosure {
1898 private:
1899 jint _claim_value;
1900 size_t _failures;
1901 HeapRegion* _sh_region;
1902 public:
1903 CheckClaimValuesClosure(jint claim_value) :
1904 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
1905 bool doHeapRegion(HeapRegion* r) {
1906 if (r->claim_value() != _claim_value) {
1907 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
1908 "claim value = %d, should be %d",
1909 r->bottom(), r->end(), r->claim_value(),
1910 _claim_value);
1911 ++_failures;
1912 }
1913 if (!r->isHumongous()) {
1914 _sh_region = NULL;
1915 } else if (r->startsHumongous()) {
1916 _sh_region = r;
1917 } else if (r->continuesHumongous()) {
1918 if (r->humongous_start_region() != _sh_region) {
1919 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
1920 "HS = "PTR_FORMAT", should be "PTR_FORMAT,
1921 r->bottom(), r->end(),
1922 r->humongous_start_region(),
1923 _sh_region);
1924 ++_failures;
1925 }
1926 }
1927 return false;
1928 }
1929 size_t failures() {
1930 return _failures;
1931 }
1932 };
1934 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
1935 CheckClaimValuesClosure cl(claim_value);
1936 heap_region_iterate(&cl);
1937 return cl.failures() == 0;
1938 }
1939 #endif // ASSERT
1941 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
1942 HeapRegion* r = g1_policy()->collection_set();
1943 while (r != NULL) {
1944 HeapRegion* next = r->next_in_collection_set();
1945 if (cl->doHeapRegion(r)) {
1946 cl->incomplete();
1947 return;
1948 }
1949 r = next;
1950 }
1951 }
1953 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
1954 HeapRegionClosure *cl) {
1955 assert(r->in_collection_set(),
1956 "Start region must be a member of the collection set.");
1957 HeapRegion* cur = r;
1958 while (cur != NULL) {
1959 HeapRegion* next = cur->next_in_collection_set();
1960 if (cl->doHeapRegion(cur) && false) {
1961 cl->incomplete();
1962 return;
1963 }
1964 cur = next;
1965 }
1966 cur = g1_policy()->collection_set();
1967 while (cur != r) {
1968 HeapRegion* next = cur->next_in_collection_set();
1969 if (cl->doHeapRegion(cur) && false) {
1970 cl->incomplete();
1971 return;
1972 }
1973 cur = next;
1974 }
1975 }
1977 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
1978 return _hrs->length() > 0 ? _hrs->at(0) : NULL;
1979 }
1982 Space* G1CollectedHeap::space_containing(const void* addr) const {
1983 Space* res = heap_region_containing(addr);
1984 if (res == NULL)
1985 res = perm_gen()->space_containing(addr);
1986 return res;
1987 }
1989 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
1990 Space* sp = space_containing(addr);
1991 if (sp != NULL) {
1992 return sp->block_start(addr);
1993 }
1994 return NULL;
1995 }
1997 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
1998 Space* sp = space_containing(addr);
1999 assert(sp != NULL, "block_size of address outside of heap");
2000 return sp->block_size(addr);
2001 }
2003 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2004 Space* sp = space_containing(addr);
2005 return sp->block_is_obj(addr);
2006 }
2008 bool G1CollectedHeap::supports_tlab_allocation() const {
2009 return true;
2010 }
2012 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2013 return HeapRegion::GrainBytes;
2014 }
2016 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2017 // Return the remaining space in the cur alloc region, but not less than
2018 // the min TLAB size.
2019 // Also, no more than half the region size, since we can't allow tlabs to
2020   // grow big enough to accommodate humongous objects.
2022   // We need to store it locally, since it might change between when we
2023 // test for NULL and when we use it later.
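  // Worked example with made-up numbers, assuming 1M regions: with 600K
  // free in the current alloc region the result is
  //   MAX2(MIN2(600K, 512K), MinTLABSize) == 512K,
  // i.e. the half-region cap applies; with very little free space the
  // MinTLABSize lower bound wins instead.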
2024 ContiguousSpace* cur_alloc_space = _cur_alloc_region;
2025 if (cur_alloc_space == NULL) {
2026 return HeapRegion::GrainBytes/2;
2027 } else {
2028 return MAX2(MIN2(cur_alloc_space->free(),
2029 (size_t)(HeapRegion::GrainBytes/2)),
2030 (size_t)MinTLABSize);
2031 }
2032 }
2034 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
2035 bool dummy;
2036 return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
2037 }
2039 bool G1CollectedHeap::allocs_are_zero_filled() {
2040 return false;
2041 }
2043 size_t G1CollectedHeap::large_typearray_limit() {
2044 // FIXME
2045 return HeapRegion::GrainBytes/HeapWordSize;
2046 }
2048 size_t G1CollectedHeap::max_capacity() const {
2049 return _g1_committed.byte_size();
2050 }
2052 jlong G1CollectedHeap::millis_since_last_gc() {
2053 // assert(false, "NYI");
2054 return 0;
2055 }
2058 void G1CollectedHeap::prepare_for_verify() {
2059 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2060 ensure_parsability(false);
2061 }
2062 g1_rem_set()->prepare_for_verify();
2063 }
2065 class VerifyLivenessOopClosure: public OopClosure {
2066 G1CollectedHeap* g1h;
2067 public:
2068 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
2069 g1h = _g1h;
2070 }
2071 void do_oop(narrowOop *p) {
2072 guarantee(false, "NYI");
2073 }
2074 void do_oop(oop *p) {
2075 oop obj = *p;
2076 assert(obj == NULL || !g1h->is_obj_dead(obj),
2077 "Dead object referenced by a not dead object");
2078 }
2079 };
2081 class VerifyObjsInRegionClosure: public ObjectClosure {
2082 G1CollectedHeap* _g1h;
2083 size_t _live_bytes;
2084 HeapRegion *_hr;
2085 public:
2086 VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
2087 _g1h = G1CollectedHeap::heap();
2088 }
2089 void do_object(oop o) {
2090 VerifyLivenessOopClosure isLive(_g1h);
2091 assert(o != NULL, "Huh?");
2092 if (!_g1h->is_obj_dead(o)) {
2093 o->oop_iterate(&isLive);
2094 if (!_hr->obj_allocated_since_prev_marking(o))
2095 _live_bytes += (o->size() * HeapWordSize);
2096 }
2097 }
2098 size_t live_bytes() { return _live_bytes; }
2099 };
2101 class PrintObjsInRegionClosure : public ObjectClosure {
2102 HeapRegion *_hr;
2103 G1CollectedHeap *_g1;
2104 public:
2105 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
2106 _g1 = G1CollectedHeap::heap();
2107 };
2109 void do_object(oop o) {
2110 if (o != NULL) {
2111 HeapWord *start = (HeapWord *) o;
2112 size_t word_sz = o->size();
2113 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
2114 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
2115 (void*) o, word_sz,
2116 _g1->isMarkedPrev(o),
2117 _g1->isMarkedNext(o),
2118 _hr->obj_allocated_since_prev_marking(o));
2119 HeapWord *end = start + word_sz;
2120 HeapWord *cur;
2121 int *val;
2122 for (cur = start; cur < end; cur++) {
2123 val = (int *) cur;
2124 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
2125 }
2126 }
2127 }
2128 };
2130 class VerifyRegionClosure: public HeapRegionClosure {
2131 public:
2132 bool _allow_dirty;
2133 bool _par;
2134 VerifyRegionClosure(bool allow_dirty, bool par = false)
2135 : _allow_dirty(allow_dirty), _par(par) {}
2136 bool doHeapRegion(HeapRegion* r) {
2137 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
2138 "Should be unclaimed at verify points.");
2139 if (!r->continuesHumongous()) {
2140 VerifyObjsInRegionClosure not_dead_yet_cl(r);
2141 r->verify(_allow_dirty);
2142       r->object_iterate(&not_dead_yet_cl);
2143 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
2144 "More live objects than counted in last complete marking.");
2145 }
2146 return false;
2147 }
2148 };
2150 class VerifyRootsClosure: public OopsInGenClosure {
2151 private:
2152 G1CollectedHeap* _g1h;
2153 bool _failures;
2155 public:
2156 VerifyRootsClosure() :
2157 _g1h(G1CollectedHeap::heap()), _failures(false) { }
2159 bool failures() { return _failures; }
2161 void do_oop(narrowOop* p) {
2162 guarantee(false, "NYI");
2163 }
2165 void do_oop(oop* p) {
2166 oop obj = *p;
2167 if (obj != NULL) {
2168 if (_g1h->is_obj_dead(obj)) {
2169 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
2170 "points to dead obj "PTR_FORMAT, p, (void*) obj);
2171 obj->print_on(gclog_or_tty);
2172 _failures = true;
2173 }
2174 }
2175 }
2176 };
2178 // This is the task used for parallel heap verification.
2180 class G1ParVerifyTask: public AbstractGangTask {
2181 private:
2182 G1CollectedHeap* _g1h;
2183 bool _allow_dirty;
2185 public:
2186 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
2187 AbstractGangTask("Parallel verify task"),
2188 _g1h(g1h), _allow_dirty(allow_dirty) { }
2190 void work(int worker_i) {
2191 HandleMark hm;
2192 VerifyRegionClosure blk(_allow_dirty, true);
2193 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
2194 HeapRegion::ParVerifyClaimValue);
2195 }
2196 };
2198 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
2199 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2200 if (!silent) { gclog_or_tty->print("roots "); }
2201 VerifyRootsClosure rootsCl;
2202 process_strong_roots(false,
2203 SharedHeap::SO_AllClasses,
2204 &rootsCl,
2205 &rootsCl);
2206 rem_set()->invalidate(perm_gen()->used_region(), false);
2207 if (!silent) { gclog_or_tty->print("heapRegions "); }
2208 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
2209 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2210 "sanity check");
2212 G1ParVerifyTask task(this, allow_dirty);
2213 int n_workers = workers()->total_workers();
2214 set_par_threads(n_workers);
2215 workers()->run_task(&task);
2216 set_par_threads(0);
2218 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
2219 "sanity check");
2221 reset_heap_region_claim_values();
2223 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2224 "sanity check");
2225 } else {
2226 VerifyRegionClosure blk(allow_dirty);
2227 _hrs->iterate(&blk);
2228 }
2229 if (!silent) gclog_or_tty->print("remset ");
2230 rem_set()->verify();
2231 guarantee(!rootsCl.failures(), "should not have had failures");
2232 } else {
2233 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
2234 }
2235 }
2237 class PrintRegionClosure: public HeapRegionClosure {
2238 outputStream* _st;
2239 public:
2240 PrintRegionClosure(outputStream* st) : _st(st) {}
2241 bool doHeapRegion(HeapRegion* r) {
2242 r->print_on(_st);
2243 return false;
2244 }
2245 };
2247 void G1CollectedHeap::print() const { print_on(gclog_or_tty); }
2249 void G1CollectedHeap::print_on(outputStream* st) const {
2250 PrintRegionClosure blk(st);
2251 _hrs->iterate(&blk);
2252 }
2254 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2255 if (ParallelGCThreads > 0) {
2256 workers()->print_worker_threads();
2257 }
2258 st->print("\"G1 concurrent mark GC Thread\" ");
2259 _cmThread->print();
2260 st->cr();
2261 st->print("\"G1 concurrent refinement GC Thread\" ");
2262 _cg1r->cg1rThread()->print_on(st);
2263 st->cr();
2264 st->print("\"G1 zero-fill GC Thread\" ");
2265 _czft->print_on(st);
2266 st->cr();
2267 }
2269 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2270 if (ParallelGCThreads > 0) {
2271 workers()->threads_do(tc);
2272 }
2273 tc->do_thread(_cmThread);
2274 tc->do_thread(_cg1r->cg1rThread());
2275 tc->do_thread(_czft);
2276 }
2278 void G1CollectedHeap::print_tracing_info() const {
2279 concurrent_g1_refine()->print_final_card_counts();
2281 // We'll overload this to mean "trace GC pause statistics."
2282 if (TraceGen0Time || TraceGen1Time) {
2283 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
2284 // to that.
2285 g1_policy()->print_tracing_info();
2286 }
2287 if (SummarizeG1RSStats) {
2288 g1_rem_set()->print_summary_info();
2289 }
2290 if (SummarizeG1ConcMark) {
2291 concurrent_mark()->print_summary_info();
2292 }
2293 if (SummarizeG1ZFStats) {
2294 ConcurrentZFThread::print_summary_info();
2295 }
2296 if (G1SummarizePopularity) {
2297 print_popularity_summary_info();
2298 }
2299 g1_policy()->print_yg_surv_rate_info();
2301 GCOverheadReporter::printGCOverhead();
2303 SpecializationStats::print();
2304 }
2307 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
2308 HeapRegion* hr = heap_region_containing(addr);
2309 if (hr == NULL) {
2310 return 0;
2311 } else {
2312 return 1;
2313 }
2314 }
2316 G1CollectedHeap* G1CollectedHeap::heap() {
2317 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
2318 "not a garbage-first heap");
2319 return _g1h;
2320 }
2322 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2323   if (PrintHeapAtGC) {
2324 gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections());
2325 Universe::print();
2326 }
2327 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2328 // Call allocation profiler
2329 AllocationProfiler::iterate_since_last_gc();
2330 // Fill TLAB's and such
2331 ensure_parsability(true);
2332 }
2334 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
2335 // FIXME: what is this about?
2336 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2337 // is set.
2338 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
2339 "derived pointer present"));
2341   if (PrintHeapAtGC) {
2342 gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections());
2343 Universe::print();
2344 gclog_or_tty->print("} ");
2345 }
2346 }
2348 void G1CollectedHeap::do_collection_pause() {
2349 // Read the GC count while holding the Heap_lock
2350 // we need to do this _before_ wait_for_cleanup_complete(), to
2351 // ensure that we do not give up the heap lock and potentially
2352 // pick up the wrong count
2353 int gc_count_before = SharedHeap::heap()->total_collections();
2355 // Don't want to do a GC pause while cleanup is being completed!
2356 wait_for_cleanup_complete();
2358 g1_policy()->record_stop_world_start();
2359 {
2360 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
2361 VM_G1IncCollectionPause op(gc_count_before);
2362 VMThread::execute(&op);
2363 }
2364 }
2366 void
2367 G1CollectedHeap::doConcurrentMark() {
2368 if (G1ConcMark) {
2369 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2370 if (!_cmThread->in_progress()) {
2371 _cmThread->set_started();
2372 CGC_lock->notify();
2373 }
2374 }
2375 }
2377 class VerifyMarkedObjsClosure: public ObjectClosure {
2378 G1CollectedHeap* _g1h;
2379 public:
2380 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
2381 void do_object(oop obj) {
2382 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
2383 "markandsweep mark should agree with concurrent deadness");
2384 }
2385 };
2387 void
2388 G1CollectedHeap::checkConcurrentMark() {
2389 VerifyMarkedObjsClosure verifycl(this);
2390 // MutexLockerEx x(getMarkBitMapLock(),
2391 // Mutex::_no_safepoint_check_flag);
2392 object_iterate(&verifycl);
2393 }
2395 void G1CollectedHeap::do_sync_mark() {
2396 _cm->checkpointRootsInitial();
2397 _cm->markFromRoots();
2398 _cm->checkpointRootsFinal(false);
2399 }
2401 // <NEW PREDICTION>
2403 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
2404 bool young) {
2405 return _g1_policy->predict_region_elapsed_time_ms(hr, young);
2406 }
2408 void G1CollectedHeap::check_if_region_is_too_expensive(double
2409 predicted_time_ms) {
2410 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
2411 }
2413 size_t G1CollectedHeap::pending_card_num() {
2414 size_t extra_cards = 0;
2415 JavaThread *curr = Threads::first();
2416 while (curr != NULL) {
2417 DirtyCardQueue& dcq = curr->dirty_card_queue();
2418 extra_cards += dcq.size();
2419 curr = curr->next();
2420 }
2421 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2422 size_t buffer_size = dcqs.buffer_size();
2423 size_t buffer_num = dcqs.completed_buffers_num();
2424 return buffer_size * buffer_num + extra_cards;
2425 }
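// Worked example with made-up numbers: 10 completed buffers of 256 cards
// each, plus 300 cards still sitting in the per-thread queues, yields
// 10 * 256 + 300 == 2860 pending cards.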
2427 size_t G1CollectedHeap::max_pending_card_num() {
2428 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2429 size_t buffer_size = dcqs.buffer_size();
2430 size_t buffer_num = dcqs.completed_buffers_num();
2431 int thread_num = Threads::number_of_threads();
2432 return (buffer_num + thread_num) * buffer_size;
2433 }
2435 size_t G1CollectedHeap::cards_scanned() {
2436 HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
2437 return g1_rset->cardsScanned();
2438 }
2440 void
2441 G1CollectedHeap::setup_surviving_young_words() {
2442 guarantee( _surviving_young_words == NULL, "pre-condition" );
2443 size_t array_length = g1_policy()->young_cset_length();
2444 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
2445 if (_surviving_young_words == NULL) {
2446 vm_exit_out_of_memory(sizeof(size_t) * array_length,
2447 "Not enough space for young surv words summary.");
2448 }
2449 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
2450 for (size_t i = 0; i < array_length; ++i) {
2451 guarantee( _surviving_young_words[i] == 0, "invariant" );
2452 }
2453 }
2455 void
2456 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
2457 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2458 size_t array_length = g1_policy()->young_cset_length();
2459 for (size_t i = 0; i < array_length; ++i)
2460 _surviving_young_words[i] += surv_young_words[i];
2461 }
2463 void
2464 G1CollectedHeap::cleanup_surviving_young_words() {
2465 guarantee( _surviving_young_words != NULL, "pre-condition" );
2466 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
2467 _surviving_young_words = NULL;
2468 }
2470 // </NEW PREDICTION>
2472 void
2473 G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
2474 char verbose_str[128];
2475 sprintf(verbose_str, "GC pause ");
2476 if (popular_region != NULL)
2477 strcat(verbose_str, "(popular)");
2478 else if (g1_policy()->in_young_gc_mode()) {
2479 if (g1_policy()->full_young_gcs())
2480 strcat(verbose_str, "(young)");
2481 else
2482 strcat(verbose_str, "(partial)");
2483 }
2484 bool reset_should_initiate_conc_mark = false;
2485 if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) {
2486 // we currently do not allow an initial mark phase to be piggy-backed
2487 // on a popular pause
2488 reset_should_initiate_conc_mark = true;
2489 g1_policy()->unset_should_initiate_conc_mark();
2490 }
2491 if (g1_policy()->should_initiate_conc_mark())
2492 strcat(verbose_str, " (initial-mark)");
2494 GCCauseSetter x(this, (popular_region == NULL ?
2495 GCCause::_g1_inc_collection_pause :
2496 GCCause::_g1_pop_region_collection_pause));
2498 // if PrintGCDetails is on, we'll print long statistics information
2499 // in the collector policy code, so let's not print this as the output
2500 // is messy if we do.
2501 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
2502 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
2503 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
2505 ResourceMark rm;
2506 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
2507 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
2508 guarantee(!is_gc_active(), "collection is not reentrant");
2509 assert(regions_accounted_for(), "Region leakage!");
2511 increment_gc_time_stamp();
2513 if (g1_policy()->in_young_gc_mode()) {
2514 assert(check_young_list_well_formed(),
2515 "young list should be well formed");
2516 }
2518 if (GC_locker::is_active()) {
2519 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
2520 }
2522 bool abandoned = false;
2523 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2524 IsGCActiveMark x;
2526 gc_prologue(false);
2527 increment_total_collections();
2529 #if G1_REM_SET_LOGGING
2530 gclog_or_tty->print_cr("\nJust chose CS, heap:");
2531 print();
2532 #endif
2534 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
2535 HandleMark hm; // Discard invalid handles created during verification
2536 prepare_for_verify();
2537 gclog_or_tty->print(" VerifyBeforeGC:");
2538 Universe::verify(false);
2539 }
2541 COMPILER2_PRESENT(DerivedPointerTable::clear());
2543     // We want to turn off ref discovery, if necessary, and turn it back
2544     // on again later if we do.
2545 bool was_enabled = ref_processor()->discovery_enabled();
2546 if (was_enabled) ref_processor()->disable_discovery();
2548 // Forget the current alloc region (we might even choose it to be part
2549 // of the collection set!).
2550 abandon_cur_alloc_region();
2552     // The elapsed time measured from the start time below deliberately
2553     // excludes the time spent in the possible verification above.
2554 double start_time_sec = os::elapsedTime();
2555 GCOverheadReporter::recordSTWStart(start_time_sec);
2556 size_t start_used_bytes = used();
2557 if (!G1ConcMark) {
2558 do_sync_mark();
2559 }
2561 g1_policy()->record_collection_pause_start(start_time_sec,
2562 start_used_bytes);
2564 guarantee(_in_cset_fast_test == NULL, "invariant");
2565 guarantee(_in_cset_fast_test_base == NULL, "invariant");
2566 _in_cset_fast_test_length = max_regions();
2567 _in_cset_fast_test_base =
2568 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
2569 memset(_in_cset_fast_test_base, false,
2570 _in_cset_fast_test_length * sizeof(bool));
2571 // We're biasing _in_cset_fast_test to avoid subtracting the
2572 // beginning of the heap every time we want to index; basically
2573     // it's the same as what we do with the card table.
2574 _in_cset_fast_test = _in_cset_fast_test_base -
2575 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
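    // Illustrative sketch (not part of the original change): with this
    // bias, collection-set membership for an address addr can be tested as
    //   bool in_cset =
    //     _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes];
    // without first subtracting _g1_reserved.start().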
2577 #if SCAN_ONLY_VERBOSE
2578 _young_list->print();
2579 #endif // SCAN_ONLY_VERBOSE
2581 if (g1_policy()->should_initiate_conc_mark()) {
2582 concurrent_mark()->checkpointRootsInitialPre();
2583 }
2584 save_marks();
2586 // We must do this before any possible evacuation that should propagate
2587 // marks, including evacuation of popular objects in a popular pause.
2588 if (mark_in_progress()) {
2589 double start_time_sec = os::elapsedTime();
2591 _cm->drainAllSATBBuffers();
2592 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
2593 g1_policy()->record_satb_drain_time(finish_mark_ms);
2595 }
2596 // Record the number of elements currently on the mark stack, so we
2597 // only iterate over these. (Since evacuation may add to the mark
2598 // stack, doing more exposes race conditions.) If no mark is in
2599 // progress, this will be zero.
2600 _cm->set_oops_do_bound();
2602 assert(regions_accounted_for(), "Region leakage.");
2606 if (mark_in_progress())
2607 concurrent_mark()->newCSet();
2609 // Now choose the CS.
2610 if (popular_region == NULL) {
2611 g1_policy()->choose_collection_set();
2612 } else {
2613 // We may be evacuating a single region (for popularity).
2614 g1_policy()->record_popular_pause_preamble_start();
2615 popularity_pause_preamble(popular_region);
2616 g1_policy()->record_popular_pause_preamble_end();
2617 abandoned = (g1_policy()->collection_set() == NULL);
2618 // Now we allow more regions to be added (we have to collect
2619 // all popular regions).
2620 if (!abandoned) {
2621 g1_policy()->choose_collection_set(popular_region);
2622 }
2623 }
2624 // We may abandon a pause if we find no region that will fit in the MMU
2625 // pause.
2626 abandoned = (g1_policy()->collection_set() == NULL);
2628 // Nothing to do if we were unable to choose a collection set.
2629 if (!abandoned) {
2630 #if G1_REM_SET_LOGGING
2631 gclog_or_tty->print_cr("\nAfter pause, heap:");
2632 print();
2633 #endif
2635 setup_surviving_young_words();
2637 // Set up the gc allocation regions.
2638 get_gc_alloc_regions();
2640 // Actually do the work...
2641 evacuate_collection_set();
2642 free_collection_set(g1_policy()->collection_set());
2643 g1_policy()->clear_collection_set();
2645 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
2646 // this is more for peace of mind; we're nulling them here and
2647 // we're expecting them to be null at the beginning of the next GC
2648 _in_cset_fast_test = NULL;
2649 _in_cset_fast_test_base = NULL;
2651 if (popular_region != NULL) {
2652 // We have to wait until now, because we don't want the region to
2653 // be rescheduled for pop-evac during RS update.
2654 popular_region->set_popular_pending(false);
2655 }
2657 release_gc_alloc_regions(false /* totally */);
2659 cleanup_surviving_young_words();
2661 if (g1_policy()->in_young_gc_mode()) {
2662 _young_list->reset_sampled_info();
2663 assert(check_young_list_empty(true),
2664 "young list should be empty");
2666 #if SCAN_ONLY_VERBOSE
2667 _young_list->print();
2668 #endif // SCAN_ONLY_VERBOSE
2670 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
2671 _young_list->first_survivor_region(),
2672 _young_list->last_survivor_region());
2673 _young_list->reset_auxilary_lists();
2674 }
2675 } else {
2676 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
2677 }
2679 if (evacuation_failed()) {
2680 _summary_bytes_used = recalculate_used();
2681 } else {
2682 // The "used" of the the collection set have already been subtracted
2683 // when they were freed. Add in the bytes evacuated.
2684 _summary_bytes_used += g1_policy()->bytes_in_to_space();
2685 }
2687 if (g1_policy()->in_young_gc_mode() &&
2688 g1_policy()->should_initiate_conc_mark()) {
2689 concurrent_mark()->checkpointRootsInitialPost();
2690 set_marking_started();
2691 doConcurrentMark();
2692 }
2694 #if SCAN_ONLY_VERBOSE
2695 _young_list->print();
2696 #endif // SCAN_ONLY_VERBOSE
2698 double end_time_sec = os::elapsedTime();
2699 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
2700 g1_policy()->record_pause_time_ms(pause_time_ms);
2701 GCOverheadReporter::recordSTWEnd(end_time_sec);
2702 g1_policy()->record_collection_pause_end(popular_region != NULL,
2703 abandoned);
2705 assert(regions_accounted_for(), "Region leakage.");
2707 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
2708 HandleMark hm; // Discard invalid handles created during verification
2709 gclog_or_tty->print(" VerifyAfterGC:");
2710 prepare_for_verify();
2711 Universe::verify(false);
2712 }
2714 if (was_enabled) ref_processor()->enable_discovery();
2716 {
2717 size_t expand_bytes = g1_policy()->expansion_amount();
2718 if (expand_bytes > 0) {
2719 size_t bytes_before = capacity();
2720 expand(expand_bytes);
2721 }
2722 }
2724 if (mark_in_progress()) {
2725 concurrent_mark()->update_g1_committed();
2726 }
2728 #ifdef TRACESPINNING
2729 ParallelTaskTerminator::print_termination_counts();
2730 #endif
2732 gc_epilogue(false);
2733 }
2735 assert(verify_region_lists(), "Bad region lists.");
2737 if (reset_should_initiate_conc_mark)
2738 g1_policy()->set_should_initiate_conc_mark();
2740 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
2741 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
2742 print_tracing_info();
2743 vm_exit(-1);
2744 }
2745 }
2747 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
2748 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
2749 // make sure we don't call set_gc_alloc_region() multiple times on
2750 // the same region
2751 assert(r == NULL || !r->is_gc_alloc_region(),
2752 "shouldn't already be a GC alloc region");
2753 HeapWord* original_top = NULL;
2754 if (r != NULL)
2755 original_top = r->top();
2757 // We will want to record the used space in r as being there before gc.
2758   // Once we install it as a GC alloc region it's eligible for allocation.
2759 // So record it now and use it later.
2760 size_t r_used = 0;
2761 if (r != NULL) {
2762 r_used = r->used();
2764 if (ParallelGCThreads > 0) {
2765 // need to take the lock to guard against two threads calling
2766 // get_gc_alloc_region concurrently (very unlikely but...)
2767 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
2768 r->save_marks();
2769 }
2770 }
2771 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
2772 _gc_alloc_regions[purpose] = r;
2773 if (old_alloc_region != NULL) {
2774 // Replace aliases too.
2775 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2776 if (_gc_alloc_regions[ap] == old_alloc_region) {
2777 _gc_alloc_regions[ap] = r;
2778 }
2779 }
2780 }
2781 if (r != NULL) {
2782 push_gc_alloc_region(r);
2783 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
2784 // We are using a region as a GC alloc region after it has been used
2785 // as a mutator allocation region during the current marking cycle.
2786 // The mutator-allocated objects are currently implicitly marked, but
2787       // when we move hr->next_top_at_mark_start() forward at the end
2788 // of the GC pause, they won't be. We therefore mark all objects in
2789 // the "gap". We do this object-by-object, since marking densely
2790 // does not currently work right with marking bitmap iteration. This
2791 // means we rely on TLAB filling at the start of pauses, and no
2792 // "resuscitation" of filled TLAB's. If we want to do this, we need
2793 // to fix the marking bitmap iteration.
2794 HeapWord* curhw = r->next_top_at_mark_start();
2795 HeapWord* t = original_top;
2797 while (curhw < t) {
2798 oop cur = (oop)curhw;
2799 // We'll assume parallel for generality. This is rare code.
2800 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
2801 curhw = curhw + cur->size();
2802 }
2803 assert(curhw == t, "Should have parsed correctly.");
2804 }
2805 if (G1PolicyVerbose > 1) {
2806 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
2807 "for survivors:", r->bottom(), original_top, r->end());
2808 r->print();
2809 }
2810 g1_policy()->record_before_bytes(r_used);
2811 }
2812 }
2814 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
2815 assert(Thread::current()->is_VM_thread() ||
2816 par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
2817 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
2818 "Precondition.");
2819 hr->set_is_gc_alloc_region(true);
2820 hr->set_next_gc_alloc_region(_gc_alloc_region_list);
2821 _gc_alloc_region_list = hr;
2822 }
2824 #ifdef G1_DEBUG
2825 class FindGCAllocRegion: public HeapRegionClosure {
2826 public:
2827 bool doHeapRegion(HeapRegion* r) {
2828 if (r->is_gc_alloc_region()) {
2829 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
2830 r->hrs_index(), r->bottom());
2831 }
2832 return false;
2833 }
2834 };
2835 #endif // G1_DEBUG
2837 void G1CollectedHeap::forget_alloc_region_list() {
2838 assert(Thread::current()->is_VM_thread(), "Precondition");
2839 while (_gc_alloc_region_list != NULL) {
2840 HeapRegion* r = _gc_alloc_region_list;
2841 assert(r->is_gc_alloc_region(), "Invariant.");
2842 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
2843 // newly allocated data in order to be able to apply deferred updates
2844     // newly allocated data in order to be able to apply deferred updates
2845     // before the GC is done for verification purposes (i.e. to allow
2846 // collection.
2847 r->ContiguousSpace::set_saved_mark();
2848 _gc_alloc_region_list = r->next_gc_alloc_region();
2849 r->set_next_gc_alloc_region(NULL);
2850 r->set_is_gc_alloc_region(false);
2851 if (r->is_survivor()) {
2852 if (r->is_empty()) {
2853 r->set_not_young();
2854 } else {
2855 _young_list->add_survivor_region(r);
2856 }
2857 }
2858 if (r->is_empty()) {
2859 ++_free_regions;
2860 }
2861 }
2862 #ifdef G1_DEBUG
2863 FindGCAllocRegion fa;
2864 heap_region_iterate(&fa);
2865 #endif // G1_DEBUG
2866 }
2869 bool G1CollectedHeap::check_gc_alloc_regions() {
2870 // TODO: allocation regions check
2871 return true;
2872 }
2874 void G1CollectedHeap::get_gc_alloc_regions() {
2875   // First, let's check that the GC alloc region list is empty (it should be)
2876 assert(_gc_alloc_region_list == NULL, "invariant");
2878 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2879 assert(_gc_alloc_regions[ap] == NULL, "invariant");
2881 // Create new GC alloc regions.
2882 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
2883 _retained_gc_alloc_regions[ap] = NULL;
2885 if (alloc_region != NULL) {
2886 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
2888 // let's make sure that the GC alloc region is not tagged as such
2889 // outside a GC operation
2890 assert(!alloc_region->is_gc_alloc_region(), "sanity");
2892 if (alloc_region->in_collection_set() ||
2893 alloc_region->top() == alloc_region->end() ||
2894 alloc_region->top() == alloc_region->bottom()) {
2895 // we will discard the current GC alloc region if it's in the
2896 // collection set (it can happen!), if it's already full (no
2897 // point in using it), or if it's empty (this means that it
2898 // was emptied during a cleanup and it should be on the free
2899 // list now).
2901 alloc_region = NULL;
2902 }
2903 }
2905 if (alloc_region == NULL) {
2906 // we will get a new GC alloc region
2907 alloc_region = newAllocRegionWithExpansion(ap, 0);
2908 }
2910 if (alloc_region != NULL) {
2911 assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
2912 set_gc_alloc_region(ap, alloc_region);
2913 }
2915 assert(_gc_alloc_regions[ap] == NULL ||
2916 _gc_alloc_regions[ap]->is_gc_alloc_region(),
2917 "the GC alloc region should be tagged as such");
2918 assert(_gc_alloc_regions[ap] == NULL ||
2919 _gc_alloc_regions[ap] == _gc_alloc_region_list,
2920 "the GC alloc region should be the same as the GC alloc list head");
2921 }
2922 // Set alternative regions for allocation purposes that have reached
2923 // their limit.
2924 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2925 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
2926 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
2927 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
2928 }
2929 }
2930 assert(check_gc_alloc_regions(), "alloc regions messed up");
2931 }
2933 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
2934 // We keep a separate list of all regions that have been alloc regions in
2935 // the current collection pause. Forget that now. This method will
2936 // untag the GC alloc regions and tear down the GC alloc region
2937 // list. It's desirable that no regions are tagged as GC alloc
2938 // outside GCs.
2939 forget_alloc_region_list();
2941 // The current alloc regions contain objs that have survived
2942 // collection. Make them no longer GC alloc regions.
2943 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2944 HeapRegion* r = _gc_alloc_regions[ap];
2945 _retained_gc_alloc_regions[ap] = NULL;
2947 if (r != NULL) {
2948 // we retain nothing on _gc_alloc_regions between GCs
2949 set_gc_alloc_region(ap, NULL);
2950 _gc_alloc_region_counts[ap] = 0;
2952 if (r->is_empty()) {
2953 // we didn't actually allocate anything in it; let's just put
2954 // it on the free list
2955 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
2956 r->set_zero_fill_complete();
2957 put_free_region_on_list_locked(r);
2958 } else if (_retain_gc_alloc_region[ap] && !totally) {
2959 // retain it so that we can use it at the beginning of the next GC
2960 _retained_gc_alloc_regions[ap] = r;
2961 }
2962 }
2963 }
2964 }
2966 #ifndef PRODUCT
2967 // Useful for debugging
2969 void G1CollectedHeap::print_gc_alloc_regions() {
2970 gclog_or_tty->print_cr("GC alloc regions");
2971 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2972 HeapRegion* r = _gc_alloc_regions[ap];
2973 if (r == NULL) {
2974 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL);
2975 } else {
2976 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT,
2977 ap, r->bottom(), r->used());
2978 }
2979 }
2980 }
2981 #endif // PRODUCT
2983 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
2984 _drain_in_progress = false;
2985 set_evac_failure_closure(cl);
2986 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
2987 }
2989 void G1CollectedHeap::finalize_for_evac_failure() {
2990 assert(_evac_failure_scan_stack != NULL &&
2991 _evac_failure_scan_stack->length() == 0,
2992 "Postcondition");
2993 assert(!_drain_in_progress, "Postcondition");
2994 // Don't have to delete, since the scan stack is a resource object.
2995 _evac_failure_scan_stack = NULL;
2996 }
3000 // *** Sequential G1 Evacuation
3002 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
3003 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
3004 // let the caller handle alloc failure
3005 if (alloc_region == NULL) return NULL;
3006 assert(isHumongous(word_size) || !alloc_region->isHumongous(),
3007 "Either the object is humongous or the region isn't");
3008 HeapWord* block = alloc_region->allocate(word_size);
3009 if (block == NULL) {
3010 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
3011 }
3012 return block;
3013 }
3015 class G1IsAliveClosure: public BoolObjectClosure {
3016 G1CollectedHeap* _g1;
3017 public:
3018 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3019 void do_object(oop p) { assert(false, "Do not call."); }
3020 bool do_object_b(oop p) {
3021 // It is reachable if it is outside the collection set, or is inside
3022 // and forwarded.
3024 #ifdef G1_DEBUG
3025 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
3026 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
3027 !_g1->obj_in_cs(p) || p->is_forwarded());
3028 #endif // G1_DEBUG
3030 return !_g1->obj_in_cs(p) || p->is_forwarded();
3031 }
3032 };
3034 class G1KeepAliveClosure: public OopClosure {
3035 G1CollectedHeap* _g1;
3036 public:
3037 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3038 void do_oop(narrowOop* p) {
3039 guarantee(false, "NYI");
3040 }
3041 void do_oop(oop* p) {
3042 oop obj = *p;
3043 #ifdef G1_DEBUG
3044 if (PrintGC && Verbose) {
3045 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
3046 p, (void*) obj, (void*) *p);
3047 }
3048 #endif // G1_DEBUG
3050 if (_g1->obj_in_cs(obj)) {
3051 assert( obj->is_forwarded(), "invariant" );
3052 *p = obj->forwardee();
3054 #ifdef G1_DEBUG
3055 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
3056 (void*) obj, (void*) *p);
3057 #endif // G1_DEBUG
3058 }
3059 }
3060 };
3062 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
3063 private:
3064 G1CollectedHeap* _g1;
3065 G1RemSet* _g1_rem_set;
3066 public:
3067 UpdateRSetImmediate(G1CollectedHeap* g1) :
3068 _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
3070 void do_oop(narrowOop* p) {
3071 guarantee(false, "NYI");
3072 }
3073 void do_oop(oop* p) {
3074 assert(_from->is_in_reserved(p), "paranoia");
3075 if (*p != NULL && !_from->is_survivor()) {
3076 _g1_rem_set->par_write_ref(_from, p, 0);
3077 }
3078 }
3079 };
3081 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
3082 private:
3083 G1CollectedHeap* _g1;
3084 DirtyCardQueue *_dcq;
3085 CardTableModRefBS* _ct_bs;
3087 public:
3088 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3089 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3091 void do_oop(narrowOop* p) {
3092 guarantee(false, "NYI");
3093 }
3094 void do_oop(oop* p) {
3095 assert(_from->is_in_reserved(p), "paranoia");
3096 if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
3097 size_t card_index = _ct_bs->index_for(p);
3098 if (_ct_bs->mark_card_deferred(card_index)) {
3099 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3100 }
3101 }
3102 }
3103 };
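// A note on the two closures above: UpdateRSetImmediate applies each
// interesting reference to the remembered set right away (par_write_ref),
// whereas UpdateRSetDeferred only marks the corresponding card as deferred
// and enqueues it, so the remembered set update can be applied later. The
// choice between them is made via G1DeferredRSUpdate in
// remove_self_forwarding_pointers() below.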
3107 class RemoveSelfPointerClosure: public ObjectClosure {
3108 private:
3109 G1CollectedHeap* _g1;
3110 ConcurrentMark* _cm;
3111 HeapRegion* _hr;
3112 size_t _prev_marked_bytes;
3113 size_t _next_marked_bytes;
3114 OopsInHeapRegionClosure *_cl;
3115 public:
3116 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
3117 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
3118 _next_marked_bytes(0), _cl(cl) {}
3120 size_t prev_marked_bytes() { return _prev_marked_bytes; }
3121 size_t next_marked_bytes() { return _next_marked_bytes; }
3123 // The original idea here was to coalesce evacuated and dead objects.
3124 // However that caused complications with the block offset table (BOT).
3125 // In particular if there were two TLABs, one of them partially refined.
3126 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
3127 // The BOT entries of the unrefined part of TLAB_2 point to the start
3128 // of TLAB_2. If the last object of the TLAB_1 and the first object
3129 // of TLAB_2 are coalesced, then the cards of the unrefined part
3130 // would point into middle of the filler object.
3131 //
3132 // The current approach is to not coalesce and leave the BOT contents intact.
3133 void do_object(oop obj) {
3134 if (obj->is_forwarded() && obj->forwardee() == obj) {
3135 // The object failed to move.
3136 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
3137 _cm->markPrev(obj);
3138 assert(_cm->isPrevMarked(obj), "Should be marked!");
3139 _prev_marked_bytes += (obj->size() * HeapWordSize);
3140 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
3141 _cm->markAndGrayObjectIfNecessary(obj);
3142 }
3143 obj->set_mark(markOopDesc::prototype());
3144 // While we were processing RSet buffers during the
3145 // collection, we actually didn't scan any cards on the
3146       // collection set, since we didn't want to update remembered
3147       // sets with entries that point into the collection set, given
3148       // that live objects from the collection set are about to move
3149 // and such entries will be stale very soon. This change also
3150 // dealt with a reliability issue which involved scanning a
3151 // card in the collection set and coming across an array that
3152 // was being chunked and looking malformed. The problem is
3153 // that, if evacuation fails, we might have remembered set
3154 // entries missing given that we skipped cards on the
3155 // collection set. So, we'll recreate such entries now.
3156 obj->oop_iterate(_cl);
3157 assert(_cm->isPrevMarked(obj), "Should be marked!");
3158 } else {
3159 // The object has been either evacuated or is dead. Fill it with a
3160 // dummy object.
3161 MemRegion mr((HeapWord*)obj, obj->size());
3162 CollectedHeap::fill_with_object(mr);
3163 _cm->clearRangeBothMaps(mr);
3164 }
3165 }
3166 };
3168 void G1CollectedHeap::remove_self_forwarding_pointers() {
3169 UpdateRSetImmediate immediate_update(_g1h);
3170 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
3171 UpdateRSetDeferred deferred_update(_g1h, &dcq);
3172 OopsInHeapRegionClosure *cl;
3173 if (G1DeferredRSUpdate) {
3174 cl = &deferred_update;
3175 } else {
3176 cl = &immediate_update;
3177 }
3178 HeapRegion* cur = g1_policy()->collection_set();
3179 while (cur != NULL) {
3180 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3182 RemoveSelfPointerClosure rspc(_g1h, cl);
3183 if (cur->evacuation_failed()) {
3184 assert(cur->in_collection_set(), "bad CS");
3185 cl->set_region(cur);
3186 cur->object_iterate(&rspc);
3188 // A number of manipulations to make the TAMS be the current top,
3189 // and the marked bytes be the ones observed in the iteration.
3190 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
3191 // The comments below are the postconditions achieved by the
3192 // calls. Note especially the last such condition, which says that
3193 // the count of marked bytes has been properly restored.
3194 cur->note_start_of_marking(false);
3195 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3196 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
3197 // _next_marked_bytes == prev_marked_bytes.
3198 cur->note_end_of_marking();
3199 // _prev_top_at_mark_start == top(),
3200 // _prev_marked_bytes == prev_marked_bytes
3201 }
3202 // If there is no mark in progress, we modified the _next variables
3203 // above needlessly, but harmlessly.
3204 if (_g1h->mark_in_progress()) {
3205 cur->note_start_of_marking(false);
3206 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3207 // _next_marked_bytes == next_marked_bytes.
3208 }
3210 // Now make sure the region has the right index in the sorted array.
3211 g1_policy()->note_change_in_marked_bytes(cur);
3212 }
3213 cur = cur->next_in_collection_set();
3214 }
3215 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3217 // Now restore saved marks, if any.
3218 if (_objs_with_preserved_marks != NULL) {
3219 assert(_preserved_marks_of_objs != NULL, "Both or none.");
3220 assert(_objs_with_preserved_marks->length() ==
3221 _preserved_marks_of_objs->length(), "Both or none.");
3222 guarantee(_objs_with_preserved_marks->length() ==
3223 _preserved_marks_of_objs->length(), "Both or none.");
3224 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
3225 oop obj = _objs_with_preserved_marks->at(i);
3226 markOop m = _preserved_marks_of_objs->at(i);
3227 obj->set_mark(m);
3228 }
3229 // Delete the preserved marks growable arrays (allocated on the C heap).
3230 delete _objs_with_preserved_marks;
3231 delete _preserved_marks_of_objs;
3232 _objs_with_preserved_marks = NULL;
3233 _preserved_marks_of_objs = NULL;
3234 }
3235 }
3237 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
3238 _evac_failure_scan_stack->push(obj);
3239 }
3241 void G1CollectedHeap::drain_evac_failure_scan_stack() {
3242 assert(_evac_failure_scan_stack != NULL, "precondition");
3244 while (_evac_failure_scan_stack->length() > 0) {
3245 oop obj = _evac_failure_scan_stack->pop();
3246 _evac_failure_closure->set_region(heap_region_containing(obj));
3247 obj->oop_iterate_backwards(_evac_failure_closure);
3248 }
3249 }
3251 void G1CollectedHeap::handle_evacuation_failure(oop old) {
3252 markOop m = old->mark();
3253 // forward to self
3254 assert(!old->is_forwarded(), "precondition");
3256 old->forward_to(old);
3257 handle_evacuation_failure_common(old, m);
3258 }
3260 oop
3261 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
3262 oop old) {
3263 markOop m = old->mark();
3264 oop forward_ptr = old->forward_to_atomic(old);
3265 if (forward_ptr == NULL) {
3266 // Forward-to-self succeeded.
3267 if (_evac_failure_closure != cl) {
3268 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
3269 assert(!_drain_in_progress,
3270 "Should only be true while someone holds the lock.");
3271 // Set the global evac-failure closure to the current thread's.
3272 assert(_evac_failure_closure == NULL, "Or locking has failed.");
3273 set_evac_failure_closure(cl);
3274 // Now do the common part.
3275 handle_evacuation_failure_common(old, m);
3276 // Reset to NULL.
3277 set_evac_failure_closure(NULL);
3278 } else {
3279 // The lock is already held, and this is recursive.
3280 assert(_drain_in_progress, "This should only be the recursive case.");
3281 handle_evacuation_failure_common(old, m);
3282 }
3283 return old;
3284 } else {
3285 // Someone else had a place to copy it.
3286 return forward_ptr;
3287 }
3288 }
3290 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
3291 set_evacuation_failed(true);
3293 preserve_mark_if_necessary(old, m);
3295 HeapRegion* r = heap_region_containing(old);
3296 if (!r->evacuation_failed()) {
3297 r->set_evacuation_failed(true);
3298 if (G1TraceRegions) {
3299 gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
3300 "["PTR_FORMAT","PTR_FORMAT")\n",
3301 r, r->bottom(), r->end());
3302 }
3303 }
3305 push_on_evac_failure_scan_stack(old);
3307 if (!_drain_in_progress) {
3308 // prevent recursion in copy_to_survivor_space()
3309 _drain_in_progress = true;
3310 drain_evac_failure_scan_stack();
3311 _drain_in_progress = false;
3312 }
3313 }
3315 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
3316 if (m != markOopDesc::prototype()) {
3317 if (_objs_with_preserved_marks == NULL) {
3318 assert(_preserved_marks_of_objs == NULL, "Both or none.");
3319 _objs_with_preserved_marks =
3320 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
3321 _preserved_marks_of_objs =
3322 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
3323 }
3324 _objs_with_preserved_marks->push(obj);
3325 _preserved_marks_of_objs->push(m);
3326 }
3327 }
3329 // *** Parallel G1 Evacuation
3331 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
3332 size_t word_size) {
3333 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
3334 // let the caller handle alloc failure
3335 if (alloc_region == NULL) return NULL;
3337 HeapWord* block = alloc_region->par_allocate(word_size);
3338 if (block == NULL) {
3339 MutexLockerEx x(par_alloc_during_gc_lock(),
3340 Mutex::_no_safepoint_check_flag);
3341 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
3342 }
3343 return block;
3344 }
3346 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
3347 bool par) {
3348 // Another thread might have obtained alloc_region for the given
3349 // purpose, and might be attempting to allocate in it, and might
3350 // succeed. Therefore, we can't do the "finalization" stuff on the
3351 // region below until we're sure the last allocation has happened.
3352 // We ensure this by allocating the remaining space with a garbage
3353 // object.
3354 if (par) par_allocate_remaining_space(alloc_region);
3355 // Now we can do the post-GC stuff on the region.
3356 alloc_region->note_end_of_copying();
3357 g1_policy()->record_after_bytes(alloc_region->used());
3358 }
3360 HeapWord*
3361 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
3362 HeapRegion* alloc_region,
3363 bool par,
3364 size_t word_size) {
3365 HeapWord* block = NULL;
3366 // In the parallel case, a previous thread to obtain the lock may have
3367 // already assigned a new gc_alloc_region.
3368 if (alloc_region != _gc_alloc_regions[purpose]) {
3369 assert(par, "But should only happen in parallel case.");
3370 alloc_region = _gc_alloc_regions[purpose];
3371 if (alloc_region == NULL) return NULL;
3372 block = alloc_region->par_allocate(word_size);
3373 if (block != NULL) return block;
3374 // Otherwise, continue; the new region could not satisfy the allocation either.
3375 }
3376 assert(alloc_region != NULL, "We better have an allocation region");
3377 retire_alloc_region(alloc_region, par);
3379 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
3380 // Cannot allocate more regions for the given purpose.
3381 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
3382 // Is there an alternative?
3383 if (purpose != alt_purpose) {
3384 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
3385 // Try the alternative region, unless it is already aliased to ours or doesn't exist.
3386 if (alloc_region != alt_region && alt_region != NULL) {
3387 // Try to allocate in the alternative region.
3388 if (par) {
3389 block = alt_region->par_allocate(word_size);
3390 } else {
3391 block = alt_region->allocate(word_size);
3392 }
3393 // Make an alias.
3394 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
3395 if (block != NULL) {
3396 return block;
3397 }
3398 retire_alloc_region(alt_region, par);
3399 }
3400 // Both the allocation region and the alternative one are full
3401 // and aliased; replace them with a new allocation region.
3402 purpose = alt_purpose;
3403 } else {
3404 set_gc_alloc_region(purpose, NULL);
3405 return NULL;
3406 }
3407 }
3409 // Now allocate a new region for allocation.
3410 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
3412 // let the caller handle alloc failure
3413 if (alloc_region != NULL) {
3415 assert(check_gc_alloc_regions(), "alloc regions messed up");
3416 assert(alloc_region->saved_mark_at_top(),
3417 "Mark should have been saved already.");
3418 // We used to assert that the region was zero-filled here, but no
3419 // longer.
3421 // This must be done last: once it's installed, other threads may
3422 // allocate in it (without holding the lock).
3423 set_gc_alloc_region(purpose, alloc_region);
3425 if (par) {
3426 block = alloc_region->par_allocate(word_size);
3427 } else {
3428 block = alloc_region->allocate(word_size);
3429 }
3430 // Caller handles alloc failure.
3431 } else {
3432 // This also NULLs out any aliases that used the same old alloc region.
3433 set_gc_alloc_region(purpose, NULL);
3434 }
3435 return block; // May be NULL.
3436 }
3438 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
3439 HeapWord* block = NULL;
3440 size_t free_words;
3441 do {
3442 free_words = r->free()/HeapWordSize;
3443 // If there's too little space, no one can allocate, so we're done.
3444 if (free_words < (size_t)oopDesc::header_size()) return;
3445 // Otherwise, try to claim it.
3446 block = r->par_allocate(free_words);
3447 } while (block == NULL);
3448 fill_with_object(block, free_words);
3449 }
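// Note on the retry loop above: between reading r->free() and the CAS inside
// par_allocate() another thread may allocate from the region, so the claimed
// size can be stale. The loop simply re-reads the (now smaller) free size and
// tries again, until either the claim succeeds or the tail is too small to
// hold even an object header.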
3451 #define use_local_bitmaps 1
3452 #define verify_local_bitmaps 0
3454 #ifndef PRODUCT
3456 class GCLabBitMap;
3457 class GCLabBitMapClosure: public BitMapClosure {
3458 private:
3459 ConcurrentMark* _cm;
3460 GCLabBitMap* _bitmap;
3462 public:
3463 GCLabBitMapClosure(ConcurrentMark* cm,
3464 GCLabBitMap* bitmap) {
3465 _cm = cm;
3466 _bitmap = bitmap;
3467 }
3469 virtual bool do_bit(size_t offset);
3470 };
3472 #endif // PRODUCT
3474 #define oop_buffer_length 256
3476 class GCLabBitMap: public BitMap {
3477 private:
3478 ConcurrentMark* _cm;
3480 int _shifter;
3481 size_t _bitmap_word_covers_words;
3483 // beginning of the heap
3484 HeapWord* _heap_start;
3486 // this is the actual start of the GCLab
3487 HeapWord* _real_start_word;
3489 // this is the actual end of the GCLab
3490 HeapWord* _real_end_word;
3492 // this is the first word, possibly located before the actual start
3493 // of the GCLab, that corresponds to the first bit of the bitmap
3494 HeapWord* _start_word;
3496 // size of a GCLab in words
3497 size_t _gclab_word_size;
3499 static int shifter() {
3500 return MinObjAlignment - 1;
3501 }
3503 // how many heap words does a single bitmap word correspond to?
3504 static size_t bitmap_word_covers_words() {
3505 return BitsPerWord << shifter();
3506 }
3508 static size_t gclab_word_size() {
3509 return ParallelGCG1AllocBufferSize / HeapWordSize;
3510 }
3512 static size_t bitmap_size_in_bits() {
3513 size_t bits_in_bitmap = gclab_word_size() >> shifter();
3514 // We are going to ensure that the beginning of a word in this
3515 // bitmap also corresponds to the beginning of a word in the
3516 // global marking bitmap. To handle the case where a GCLab
3517 // starts in the middle of a bitmap word, we need to add enough
3518 // space (i.e. up to a bitmap word) to ensure that we have
3519 // enough bits in the bitmap.
3520 return bits_in_bitmap + BitsPerWord - 1;
3521 }
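  // Worked sizing example (hypothetical 32-bit values: BitsPerWord == 32,
  // HeapWordSize == 4, MinObjAlignment == 2, and assuming
  // ParallelGCG1AllocBufferSize == 16K):
  //   shifter()                  = 2 - 1          = 1
  //   bitmap_word_covers_words() = 32 << 1        = 64 heap words
  //   gclab_word_size()          = 16384 / 4      = 4096 heap words
  //   bits_in_bitmap             = 4096 >> 1      = 2048 bits
  //   bitmap_size_in_bits()      = 2048 + 32 - 1  = 2079 bits
  //   bitmap_size_in_words()     = (2079 + 31)/32 = 65 bitmap words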
3522 public:
3523 GCLabBitMap(HeapWord* heap_start)
3524 : BitMap(bitmap_size_in_bits()),
3525 _cm(G1CollectedHeap::heap()->concurrent_mark()),
3526 _shifter(shifter()),
3527 _bitmap_word_covers_words(bitmap_word_covers_words()),
3528 _heap_start(heap_start),
3529 _gclab_word_size(gclab_word_size()),
3530 _real_start_word(NULL),
3531 _real_end_word(NULL),
3532 _start_word(NULL)
3533 {
3534 guarantee( size_in_words() >= bitmap_size_in_words(),
3535 "just making sure");
3536 }
3538 inline unsigned heapWordToOffset(HeapWord* addr) {
3539 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
3540 assert(offset < size(), "offset should be within bounds");
3541 return offset;
3542 }
3544 inline HeapWord* offsetToHeapWord(size_t offset) {
3545 HeapWord* addr = _start_word + (offset << _shifter);
3546 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
3547 return addr;
3548 }
3550 bool fields_well_formed() {
3551 bool ret1 = (_real_start_word == NULL) &&
3552 (_real_end_word == NULL) &&
3553 (_start_word == NULL);
3554 if (ret1)
3555 return true;
3557 bool ret2 = _real_start_word >= _start_word &&
3558 _start_word < _real_end_word &&
3559 (_real_start_word + _gclab_word_size) == _real_end_word &&
3560 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
3561 > _real_end_word;
3562 return ret2;
3563 }
3565 inline bool mark(HeapWord* addr) {
3566 guarantee(use_local_bitmaps, "invariant");
3567 assert(fields_well_formed(), "invariant");
3569 if (addr >= _real_start_word && addr < _real_end_word) {
3570 assert(!isMarked(addr), "should not have already been marked");
3572 // first mark it on the bitmap
3573 at_put(heapWordToOffset(addr), true);
3575 return true;
3576 } else {
3577 return false;
3578 }
3579 }
3581 inline bool isMarked(HeapWord* addr) {
3582 guarantee(use_local_bitmaps, "invariant");
3583 assert(fields_well_formed(), "invariant");
3585 return at(heapWordToOffset(addr));
3586 }
3588 void set_buffer(HeapWord* start) {
3589 guarantee(use_local_bitmaps, "invariant");
3590 clear();
3592 assert(start != NULL, "invariant");
3593 _real_start_word = start;
3594 _real_end_word = start + _gclab_word_size;
3596 size_t diff =
3597 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
3598 _start_word = start - diff;
3600 assert(fields_well_formed(), "invariant");
3601 }
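  // Worked example of the alignment above (hypothetical numbers, reusing
  // bitmap_word_covers_words() == 64): for a GCLab starting 100 heap words
  // past _heap_start,
  //   diff        = 100 % 64 = 36
  //   _start_word = start - 36
  // so _start_word sits on a global-bitmap-word boundary, which lets retire()
  // union whole bitmap words into the global mark bitmap; the BitsPerWord - 1
  // slack bits from bitmap_size_in_bits() keep the tail of the GCLab covered.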
3603 #ifndef PRODUCT
3604 void verify() {
3605 // verify that the marks have been propagated
3606 GCLabBitMapClosure cl(_cm, this);
3607 iterate(&cl);
3608 }
3609 #endif // PRODUCT
3611 void retire() {
3612 guarantee(use_local_bitmaps, "invariant");
3613 assert(fields_well_formed(), "invariant");
3615 if (_start_word != NULL) {
3616 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
3618 // this means that the bitmap was set up for the GCLab
3619 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
3621 mark_bitmap->mostly_disjoint_range_union(this,
3622 0, // always start from the start of the bitmap
3623 _start_word,
3624 size_in_words());
3625 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
3627 #ifndef PRODUCT
3628 if (use_local_bitmaps && verify_local_bitmaps)
3629 verify();
3630 #endif // PRODUCT
3631 } else {
3632 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
3633 }
3634 }
3636 static size_t bitmap_size_in_words() {
3637 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
3638 }
3639 };
3641 #ifndef PRODUCT
3643 bool GCLabBitMapClosure::do_bit(size_t offset) {
3644 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
3645 guarantee(_cm->isMarked(oop(addr)), "it should be!");
3646 return true;
3647 }
3649 #endif // PRODUCT
3651 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
3652 private:
3653 bool _retired;
3654 bool _during_marking;
3655 GCLabBitMap _bitmap;
3657 public:
3658 G1ParGCAllocBuffer() :
3659 ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize),
3660 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
3661 _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
3662 _retired(false)
3663 { }
3665 inline bool mark(HeapWord* addr) {
3666 guarantee(use_local_bitmaps, "invariant");
3667 assert(_during_marking, "invariant");
3668 return _bitmap.mark(addr);
3669 }
3671 inline void set_buf(HeapWord* buf) {
3672 if (use_local_bitmaps && _during_marking)
3673 _bitmap.set_buffer(buf);
3674 ParGCAllocBuffer::set_buf(buf);
3675 _retired = false;
3676 }
3678 inline void retire(bool end_of_gc, bool retain) {
3679 if (_retired)
3680 return;
3681 if (use_local_bitmaps && _during_marking) {
3682 _bitmap.retire();
3683 }
3684 ParGCAllocBuffer::retire(end_of_gc, retain);
3685 _retired = true;
3686 }
3687 };
3690 class G1ParScanThreadState : public StackObj {
3691 protected:
3692 G1CollectedHeap* _g1h;
3693 RefToScanQueue* _refs;
3694 DirtyCardQueue _dcq;
3695 CardTableModRefBS* _ct_bs;
3696 G1RemSet* _g1_rem;
3698 typedef GrowableArray<oop*> OverflowQueue;
3699 OverflowQueue* _overflowed_refs;
3701 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
3702 ageTable _age_table;
3704 size_t _alloc_buffer_waste;
3705 size_t _undo_waste;
3707 OopsInHeapRegionClosure* _evac_failure_cl;
3708 G1ParScanHeapEvacClosure* _evac_cl;
3709 G1ParScanPartialArrayClosure* _partial_scan_cl;
3711 int _hash_seed;
3712 int _queue_num;
3714 int _term_attempts;
3715 #if G1_DETAILED_STATS
3716 int _pushes, _pops, _steals, _steal_attempts;
3717 int _overflow_pushes;
3718 #endif
3720 double _start;
3721 double _start_strong_roots;
3722 double _strong_roots_time;
3723 double _start_term;
3724 double _term_time;
3726 // Map from young-age-index (0 == not young, 1 is youngest) to
3727 // surviving words. "base" is the pointer we get back from the malloc call.
3728 size_t* _surviving_young_words_base;
3729 // this points into the array, as we use the first few entries for padding
3730 size_t* _surviving_young_words;
3732 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
3734 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
3736 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
3738 DirtyCardQueue& dirty_card_queue() { return _dcq; }
3739 CardTableModRefBS* ctbs() { return _ct_bs; }
3741 void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
3742 if (!from->is_survivor()) {
3743 _g1_rem->par_write_ref(from, p, tid);
3744 }
3745 }
3747 void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
3748 // If the new value of the field points into the same region or
3749 // into the to-space, we don't need to include it in the RSet updates.
3750 if (!from->is_in_reserved(*p) && !from->is_survivor()) {
3751 size_t card_index = ctbs()->index_for(p);
3752 // If the card hasn't been added to the buffer, do it.
3753 if (ctbs()->mark_card_deferred(card_index)) {
3754 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
3755 }
3756 }
3757 }
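  // Illustrative trace (hypothetical): a field p in region A is updated to
  // point into region B. Since *p is outside A and A is not a survivor
  // region, index_for(p) names p's card; the first update to that card wins
  // mark_card_deferred() and enqueues it, while later updates to the same
  // card see the deferred bit and enqueue nothing, so each card is flushed
  // at most once when the deferred buffers are processed.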
3759 public:
3760 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3761 : _g1h(g1h),
3762 _refs(g1h->task_queue(queue_num)),
3763 _dcq(&g1h->dirty_card_queue_set()),
3764 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3765 _g1_rem(g1h->g1_rem_set()),
3766 _hash_seed(17), _queue_num(queue_num),
3767 _term_attempts(0),
3768 _age_table(false),
3769 #if G1_DETAILED_STATS
3770 _pushes(0), _pops(0), _steals(0),
3771 _steal_attempts(0), _overflow_pushes(0),
3772 #endif
3773 _strong_roots_time(0), _term_time(0),
3774 _alloc_buffer_waste(0), _undo_waste(0)
3775 {
3776 // we allocate one entry per young CSet region plus one, since
3777 // we "sacrifice" entry 0 to keep track of surviving bytes for
3778 // non-young regions (where the age is -1)
3779 // We also add a few elements at the beginning and at the end in
3780 // an attempt to eliminate cache contention
3781 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
3782 size_t array_length = PADDING_ELEM_NUM +
3783 real_length +
3784 PADDING_ELEM_NUM;
3785 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
3786 if (_surviving_young_words_base == NULL)
3787 vm_exit_out_of_memory(array_length * sizeof(size_t),
3788 "Not enough space for young surv histo.");
3789 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
3790 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
3792 _overflowed_refs = new OverflowQueue(10);
3794 _start = os::elapsedTime();
3795 }
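  // Illustrative layout (hypothetical values: PADDING_ELEM_NUM == 8 and
  // young_cset_length() == 4, so real_length == 5):
  //
  //   _surviving_young_words_base: [ 8 pad | e0 e1 e2 e3 e4 | 8 pad ]
  //   _surviving_young_words:                ^
  //
  // e0 accumulates words surviving in non-young (age -1) regions, e1..e4
  // belong to young-age-indexes 1..4; the pad entries are never touched and
  // only keep neighbouring threads' counters on separate cache lines.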
3797 ~G1ParScanThreadState() {
3798 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
3799 }
3801 RefToScanQueue* refs() { return _refs; }
3802 OverflowQueue* overflowed_refs() { return _overflowed_refs; }
3803 ageTable* age_table() { return &_age_table; }
3805 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
3806 return &_alloc_buffers[purpose];
3807 }
3809 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
3810 size_t undo_waste() { return _undo_waste; }
3812 void push_on_queue(oop* ref) {
3813 assert(ref != NULL, "invariant");
3814 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
3816 if (!refs()->push(ref)) {
3817 overflowed_refs()->push(ref);
3818 IF_G1_DETAILED_STATS(note_overflow_push());
3819 } else {
3820 IF_G1_DETAILED_STATS(note_push());
3821 }
3822 }
3824 void pop_from_queue(oop*& ref) {
3825 if (!refs()->pop_local(ref)) {
3826 ref = NULL;
3827 } else {
3828 assert(ref != NULL, "invariant");
3829 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
3830 "invariant");
3832 IF_G1_DETAILED_STATS(note_pop());
3833 }
3834 }
3836 void pop_from_overflow_queue(oop*& ref) {
3837 ref = overflowed_refs()->pop();
3838 }
3840 int refs_to_scan() { return refs()->size(); }
3841 int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
3843 void update_rs(HeapRegion* from, oop* p, int tid) {
3844 if (G1DeferredRSUpdate) {
3845 deferred_rs_update(from, p, tid);
3846 } else {
3847 immediate_rs_update(from, p, tid);
3848 }
3849 }
3851 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
3853 HeapWord* obj = NULL;
3854 if (word_sz * 100 <
3855 (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) *
3856 ParallelGCBufferWastePct) {
3857 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
3858 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
3859 alloc_buf->retire(false, false);
3861 HeapWord* buf =
3862 _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize);
3863 if (buf == NULL) return NULL; // Let caller handle allocation failure.
3864 // Otherwise.
3865 alloc_buf->set_buf(buf);
3867 obj = alloc_buf->allocate(word_sz);
3868 assert(obj != NULL, "buffer was definitely big enough...");
3869 } else {
3870 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
3871 }
3872 return obj;
3873 }
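  // Worked example of the waste threshold above (hypothetical values: a
  // 4096-word buffer and ParallelGCBufferWastePct == 10): the test
  //   word_sz * 100 < 4096 * 10
  // sends objects smaller than ~410 words through a fresh alloc buffer and
  // larger ones directly to par_allocate_during_gc(). Since the old buffer
  // is only retired when the failed request was under the threshold, at most
  // ~10% of a buffer's capacity is ever discarded by retiring it early.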
3875 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
3876 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
3877 if (obj != NULL) return obj;
3878 return allocate_slow(purpose, word_sz);
3879 }
3881 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
3882 if (alloc_buffer(purpose)->contains(obj)) {
3883 guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
3884 "should contain whole object");
3885 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
3886 } else {
3887 CollectedHeap::fill_with_object(obj, word_sz);
3888 add_to_undo_waste(word_sz);
3889 }
3890 }
3892 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
3893 _evac_failure_cl = evac_failure_cl;
3894 }
3895 OopsInHeapRegionClosure* evac_failure_closure() {
3896 return _evac_failure_cl;
3897 }
3899 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
3900 _evac_cl = evac_cl;
3901 }
3903 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
3904 _partial_scan_cl = partial_scan_cl;
3905 }
3907 int* hash_seed() { return &_hash_seed; }
3908 int queue_num() { return _queue_num; }
3910 int term_attempts() { return _term_attempts; }
3911 void note_term_attempt() { _term_attempts++; }
3913 #if G1_DETAILED_STATS
3914 int pushes() { return _pushes; }
3915 int pops() { return _pops; }
3916 int steals() { return _steals; }
3917 int steal_attempts() { return _steal_attempts; }
3918 int overflow_pushes() { return _overflow_pushes; }
3920 void note_push() { _pushes++; }
3921 void note_pop() { _pops++; }
3922 void note_steal() { _steals++; }
3923 void note_steal_attempt() { _steal_attempts++; }
3924 void note_overflow_push() { _overflow_pushes++; }
3925 #endif
3927 void start_strong_roots() {
3928 _start_strong_roots = os::elapsedTime();
3929 }
3930 void end_strong_roots() {
3931 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
3932 }
3933 double strong_roots_time() { return _strong_roots_time; }
3935 void start_term_time() {
3936 note_term_attempt();
3937 _start_term = os::elapsedTime();
3938 }
3939 void end_term_time() {
3940 _term_time += (os::elapsedTime() - _start_term);
3941 }
3942 double term_time() { return _term_time; }
3944 double elapsed() {
3945 return os::elapsedTime() - _start;
3946 }
3948 size_t* surviving_young_words() {
3949 // Callers add 1 to skip entry 0, which accumulates surviving words
3950 // for age -1 regions (i.e. non-young ones)
3951 return _surviving_young_words;
3952 }
3954 void retire_alloc_buffers() {
3955 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
3956 size_t waste = _alloc_buffers[ap].words_remaining();
3957 add_to_alloc_buffer_waste(waste);
3958 _alloc_buffers[ap].retire(true, false);
3959 }
3960 }
3962 private:
3963 void deal_with_reference(oop* ref_to_scan) {
3964 if (has_partial_array_mask(ref_to_scan)) {
3965 _partial_scan_cl->do_oop_nv(ref_to_scan);
3966 } else {
3967 // Note: we can use "raw" versions of "region_containing" because
3968 // "obj_to_scan" is definitely in the heap, and is not in a
3969 // humongous region.
3970 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
3971 _evac_cl->set_region(r);
3972 _evac_cl->do_oop_nv(ref_to_scan);
3973 }
3974 }
3976 public:
3977 void trim_queue() {
3978 // The loop is written out twice: first to drain the overflow
3979 // queue, then to drain the task queue. This is better than
3980 // having a single loop that checks both conditions and, inside
3981 // it, pops either the overflow queue or the task queue, as each
3982 // loop is tighter. Also, the decision to drain the overflow queue
3983 // first is not arbitrary, as the overflow queue is not visible
3984 // to the other workers, whereas the task queue is. So, we want to
3985 // drain the "invisible" entries first, while allowing the other
3986 // workers to potentially steal the "visible" entries.
3988 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
3989 while (overflowed_refs_to_scan() > 0) {
3990 oop *ref_to_scan = NULL;
3991 pop_from_overflow_queue(ref_to_scan);
3992 assert(ref_to_scan != NULL, "invariant");
3993 // We shouldn't have pushed it on the queue if it was not
3994 // pointing into the CSet.
3996 assert(has_partial_array_mask(ref_to_scan) ||
3997 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3999 deal_with_reference(ref_to_scan);
4000 }
4002 while (refs_to_scan() > 0) {
4003 oop *ref_to_scan = NULL;
4004 pop_from_queue(ref_to_scan);
4006 if (ref_to_scan != NULL) {
4007 // We shouldn't have pushed it on the queue if it was not
4008 // pointing into the CSet.
4009 assert(has_partial_array_mask(ref_to_scan) ||
4010 _g1h->obj_in_cs(*ref_to_scan), "sanity");
4012 deal_with_reference(ref_to_scan);
4013 }
4014 }
4015 }
4016 }
4017 };
4019 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
4020 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4021 _par_scan_state(par_scan_state) { }
4023 // This closure is applied to the fields of the objects that have just been copied.
4024 // Should probably be made inline and moved into g1OopClosures.inline.hpp.
4025 void G1ParScanClosure::do_oop_nv(oop* p) {
4026 oop obj = *p;
4028 if (obj != NULL) {
4029 if (_g1->in_cset_fast_test(obj)) {
4030 // We're not going to even bother checking whether the object is
4031 // already forwarded or not, as this usually causes an immediate
4032 // stall. We'll try to prefetch the object (for write, given that
4033 // we might need to install the forwarding reference) and we'll
4034 // get back to it when we pop it from the queue.
4035 Prefetch::write(obj->mark_addr(), 0);
4036 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
4038 // slightly paranoid test; I'm trying to catch potential
4039 // problems before we go into push_on_queue so we know where the
4040 // problem is coming from
4041 assert(obj == *p, "the value of *p should not have changed");
4042 _par_scan_state->push_on_queue(p);
4043 } else {
4044 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4045 }
4046 }
4047 }
4049 void G1ParCopyHelper::mark_forwardee(oop* p) {
4050 // This is called _after_ do_oop_work has been called, hence after
4051 // the object has been relocated to its new location and *p points
4052 // to its new location.
4054 oop thisOop = *p;
4055 if (thisOop != NULL) {
4056 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
4057 "shouldn't still be in the CSet if evacuation didn't fail.");
4058 HeapWord* addr = (HeapWord*)thisOop;
4059 if (_g1->is_in_g1_reserved(addr))
4060 _cm->grayRoot(oop(addr));
4061 }
4062 }
4064 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
4065 size_t word_sz = old->size();
4066 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4067 // +1 to make the -1 indexes valid...
4068 int young_index = from_region->young_index_in_cset()+1;
4069 assert( (from_region->is_young() && young_index > 0) ||
4070 (!from_region->is_young() && young_index == 0), "invariant" );
4071 G1CollectorPolicy* g1p = _g1->g1_policy();
4072 markOop m = old->mark();
4073 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4074 : m->age();
4075 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4076 word_sz);
4077 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4078 oop obj = oop(obj_ptr);
4080 if (obj_ptr == NULL) {
4081 // This will either forward-to-self, or detect that someone else has
4082 // installed a forwarding pointer.
4083 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4084 return _g1->handle_evacuation_failure_par(cl, old);
4085 }
4087 // We're going to allocate linearly, so might as well prefetch ahead.
4088 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4090 oop forward_ptr = old->forward_to_atomic(obj);
4091 if (forward_ptr == NULL) {
4092 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4093 if (g1p->track_object_age(alloc_purpose)) {
4094 // We could simply do obj->incr_age(). However, this causes a
4095 // performance issue. obj->incr_age() will first check whether
4096 // the object has a displaced mark by checking its mark word;
4097 // getting the mark word from the new location of the object
4098 // stalls. So, given that we already have the mark word and we
4099 // are about to install it anyway, it's better to increase the
4100 // age on the mark word, when the object does not have a
4101 // displaced mark word. We're not expecting many objects to have
4102 // a displaced mark word, so that case is not optimized
4103 // further (it could be...) and we simply call obj->incr_age().
4105 if (m->has_displaced_mark_helper()) {
4106 // in this case, we have to install the mark word first,
4107 // otherwise obj looks to be forwarded (the old mark word,
4108 // which contains the forward pointer, was copied)
4109 obj->set_mark(m);
4110 obj->incr_age();
4111 } else {
4112 m = m->incr_age();
4113 obj->set_mark(m);
4114 }
4115 _par_scan_state->age_table()->add(obj, word_sz);
4116 } else {
4117 obj->set_mark(m);
4118 }
4120 // preserve "next" mark bit
4121 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
4122 if (!use_local_bitmaps ||
4123 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
4124 // if we couldn't mark it on the local bitmap (this happens when
4125 // the object was not allocated in the GCLab), we have to bite
4126 // the bullet and do the standard parallel mark
4127 _cm->markAndGrayObjectIfNecessary(obj);
4128 }
4129 #if 1
4130 if (_g1->isMarkedNext(old)) {
4131 _cm->nextMarkBitMap()->parClear((HeapWord*)old);
4132 }
4133 #endif
4134 }
4136 size_t* surv_young_words = _par_scan_state->surviving_young_words();
4137 surv_young_words[young_index] += word_sz;
4139 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4140 arrayOop(old)->set_length(0);
4141 _par_scan_state->push_on_queue(set_partial_array_mask(old));
4142 } else {
4143 // No point in using the slower heap_region_containing() method,
4144 // given that we know obj is in the heap.
4145 _scanner->set_region(_g1->heap_region_containing_raw(obj));
4146 obj->oop_iterate_backwards(_scanner);
4147 }
4148 } else {
4149 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4150 obj = forward_ptr;
4151 }
4152 return obj;
4153 }
4155 template<bool do_gen_barrier, G1Barrier barrier,
4156 bool do_mark_forwardee, bool skip_cset_test>
4157 void G1ParCopyClosure<do_gen_barrier, barrier,
4158 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
4159 oop obj = *p;
4160 assert(barrier != G1BarrierRS || obj != NULL,
4161 "Precondition: G1BarrierRS implies obj is nonNull");
4163 // The only time we skip the cset test is when we're scanning
4164 // references popped from the queue. And we only push on the queue
4165 // references that we know point into the cset, so no point in
4166 // checking again. But we'll leave an assert here for peace of mind.
4167 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
4169 // here the NULL check is implicit in the in_cset_fast_test() call
4170 if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
4171 #if G1_REM_SET_LOGGING
4172 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
4173 "into CS.", p, (void*) obj);
4174 #endif
4175 if (obj->is_forwarded()) {
4176 *p = obj->forwardee();
4177 } else {
4178 *p = copy_to_survivor_space(obj);
4179 }
4180 // When scanning the RS, we only care about objs in CS.
4181 if (barrier == G1BarrierRS) {
4182 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4183 }
4184 }
4186 // When scanning moved objs, must look at all oops.
4187 if (barrier == G1BarrierEvac && obj != NULL) {
4188 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
4189 }
4191 if (do_gen_barrier && obj != NULL) {
4192 par_do_barrier(p);
4193 }
4194 }
4196 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
4198 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
4199 oop obj, int start, int end) {
4200 // process our set of indices (include header in first chunk)
4201 assert(start < end, "invariant");
4202 T* const base = (T*)objArrayOop(obj)->base();
4203 T* const start_addr = (start == 0) ? (T*) obj : base + start;
4204 T* const end_addr = base + end;
4205 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
4206 _scanner.set_region(_g1->heap_region_containing(obj));
4207 obj->oop_iterate(&_scanner, mr);
4208 }
4210 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
4211 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
4212 assert(has_partial_array_mask(p), "invariant");
4213 oop old = clear_partial_array_mask(p);
4214 assert(old->is_objArray(), "must be obj array");
4215 assert(old->is_forwarded(), "must be forwarded");
4216 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
4218 objArrayOop obj = objArrayOop(old->forwardee());
4219 assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
4220 // Process ParGCArrayScanChunk elements now
4221 // and push the remainder back onto queue
4222 int start = arrayOop(old)->length();
4223 int end = obj->length();
4224 int remainder = end - start;
4225 assert(start <= end, "just checking");
4226 if (remainder > 2 * ParGCArrayScanChunk) {
4227 // Test above combines last partial chunk with a full chunk
4228 end = start + ParGCArrayScanChunk;
4229 arrayOop(old)->set_length(end);
4230 // Push remainder.
4231 _par_scan_state->push_on_queue(set_partial_array_mask(old));
4232 } else {
4233 // Restore length so that the heap remains parsable in
4234 // case of evacuation failure.
4235 arrayOop(old)->set_length(end);
4236 }
4238 // process our set of indices (include header in first chunk)
4239 process_array_chunk<oop>(obj, start, end);
4240 }
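// Worked example of the chunking protocol above (hypothetical value
// ParGCArrayScanChunk == 50): for an objArray of true length 170, the
// copying code sets old's length to 0 and pushes the masked oop; each call
// here then advances the cursor stored in old's length field:
//   call 1: start =   0, remainder = 170 > 100  -> scan [0, 50),    re-push
//   call 2: start =  50, remainder = 120 > 100  -> scan [50, 100),  re-push
//   call 3: start = 100, remainder =  70 <= 100 -> scan [100, 170), done
// The final call restores the true length, keeping the heap parsable should
// evacuation fail.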
4242 int G1ScanAndBalanceClosure::_nq = 0;
4244 class G1ParEvacuateFollowersClosure : public VoidClosure {
4245 protected:
4246 G1CollectedHeap* _g1h;
4247 G1ParScanThreadState* _par_scan_state;
4248 RefToScanQueueSet* _queues;
4249 ParallelTaskTerminator* _terminator;
4251 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4252 RefToScanQueueSet* queues() { return _queues; }
4253 ParallelTaskTerminator* terminator() { return _terminator; }
4255 public:
4256 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4257 G1ParScanThreadState* par_scan_state,
4258 RefToScanQueueSet* queues,
4259 ParallelTaskTerminator* terminator)
4260 : _g1h(g1h), _par_scan_state(par_scan_state),
4261 _queues(queues), _terminator(terminator) {}
4263 void do_void() {
4264 G1ParScanThreadState* pss = par_scan_state();
4265 while (true) {
4266 oop* ref_to_scan;
4267 pss->trim_queue();
4268 IF_G1_DETAILED_STATS(pss->note_steal_attempt());
4269 if (queues()->steal(pss->queue_num(),
4270 pss->hash_seed(),
4271 ref_to_scan)) {
4272 IF_G1_DETAILED_STATS(pss->note_steal());
4274 // slightly paranoid tests; I'm trying to catch potential
4275 // problems before we go into push_on_queue so we know where the
4276 // problem is coming from
4277 assert(ref_to_scan != NULL, "invariant");
4278 assert(has_partial_array_mask(ref_to_scan) ||
4279 _g1h->obj_in_cs(*ref_to_scan), "invariant");
4280 pss->push_on_queue(ref_to_scan);
4281 continue;
4282 }
4283 pss->start_term_time();
4284 if (terminator()->offer_termination()) break;
4285 pss->end_term_time();
4286 }
4287 pss->end_term_time();
4288 pss->retire_alloc_buffers();
4289 }
4290 };
4292 class G1ParTask : public AbstractGangTask {
4293 protected:
4294 G1CollectedHeap* _g1h;
4295 RefToScanQueueSet *_queues;
4296 ParallelTaskTerminator _terminator;
4298 Mutex _stats_lock;
4299 Mutex* stats_lock() { return &_stats_lock; }
4301 size_t getNCards() {
4302 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4303 / G1BlockOffsetSharedArray::N_bytes;
4304 }
4306 public:
4307 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
4308 : AbstractGangTask("G1 collection"),
4309 _g1h(g1h),
4310 _queues(task_queues),
4311 _terminator(workers, _queues),
4312 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4313 {}
4315 RefToScanQueueSet* queues() { return _queues; }
4317 RefToScanQueue *work_queue(int i) {
4318 return queues()->queue(i);
4319 }
4321 void work(int i) {
4322 ResourceMark rm;
4323 HandleMark hm;
4325 G1ParScanThreadState pss(_g1h, i);
4326 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
4327 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
4328 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
4330 pss.set_evac_closure(&scan_evac_cl);
4331 pss.set_evac_failure_closure(&evac_failure_cl);
4332 pss.set_partial_scan_closure(&partial_scan_cl);
4334 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
4335 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
4336 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
4338 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
4339 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
4340 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
4342 OopsInHeapRegionClosure *scan_root_cl;
4343 OopsInHeapRegionClosure *scan_perm_cl;
4344 OopsInHeapRegionClosure *scan_so_cl;
4346 if (_g1h->g1_policy()->should_initiate_conc_mark()) {
4347 scan_root_cl = &scan_mark_root_cl;
4348 scan_perm_cl = &scan_mark_perm_cl;
4349 scan_so_cl = &scan_mark_heap_rs_cl;
4350 } else {
4351 scan_root_cl = &only_scan_root_cl;
4352 scan_perm_cl = &only_scan_perm_cl;
4353 scan_so_cl = &only_scan_heap_rs_cl;
4354 }
4356 pss.start_strong_roots();
4357 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4358 SharedHeap::SO_AllClasses,
4359 scan_root_cl,
4360 &only_scan_heap_rs_cl,
4361 scan_so_cl,
4362 scan_perm_cl,
4363 i);
4364 pss.end_strong_roots();
4365 {
4366 double start = os::elapsedTime();
4367 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4368 evac.do_void();
4369 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4370 double term_ms = pss.term_time()*1000.0;
4371 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
4372 _g1h->g1_policy()->record_termination_time(i, term_ms);
4373 }
4374 if (G1UseSurvivorSpace) {
4375 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4376 }
4377 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4379 // Clean up any par-expanded rem sets.
4380 HeapRegionRemSet::par_cleanup();
4382 MutexLocker x(stats_lock());
4383 if (ParallelGCVerbose) {
4384 gclog_or_tty->print("Thread %d complete:\n", i);
4385 #if G1_DETAILED_STATS
4386 gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n",
4387 pss.pushes(),
4388 pss.pops(),
4389 pss.overflow_pushes(),
4390 pss.steals(),
4391 pss.steal_attempts());
4392 #endif
4393 double elapsed = pss.elapsed();
4394 double strong_roots = pss.strong_roots_time();
4395 double term = pss.term_time();
4396 gclog_or_tty->print(" Elapsed: %7.2f ms.\n"
4397 " Strong roots: %7.2f ms (%6.2f%%)\n"
4398 " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n",
4399 elapsed * 1000.0,
4400 strong_roots * 1000.0, (strong_roots*100.0/elapsed),
4401 term * 1000.0, (term*100.0/elapsed),
4402 pss.term_attempts());
4403 size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
4404 gclog_or_tty->print(" Waste: %8dK\n"
4405 " Alloc Buffer: %8dK\n"
4406 " Undo: %8dK\n",
4407 (total_waste * HeapWordSize) / K,
4408 (pss.alloc_buffer_waste() * HeapWordSize) / K,
4409 (pss.undo_waste() * HeapWordSize) / K);
4410 }
4412 assert(pss.refs_to_scan() == 0, "Task queue should be empty");
4413 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
4414 }
4415 };
4417 // *** Common G1 Evacuation Stuff
4419 class G1CountClosure: public OopsInHeapRegionClosure {
4420 public:
4421 int n;
4422 G1CountClosure() : n(0) {}
4423 void do_oop(narrowOop* p) {
4424 guarantee(false, "NYI");
4425 }
4426 void do_oop(oop* p) {
4427 oop obj = *p;
4428 assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
4429 "Rem set closure called on non-rem-set pointer.");
4430 n++;
4431 }
4432 };
4434 static G1CountClosure count_closure;
4436 void
4437 G1CollectedHeap::
4438 g1_process_strong_roots(bool collecting_perm_gen,
4439 SharedHeap::ScanningOption so,
4440 OopClosure* scan_non_heap_roots,
4441 OopsInHeapRegionClosure* scan_rs,
4442 OopsInHeapRegionClosure* scan_so,
4443 OopsInGenClosure* scan_perm,
4444 int worker_i) {
4445 // First scan the strong roots, including the perm gen.
4446 double ext_roots_start = os::elapsedTime();
4447 double closure_app_time_sec = 0.0;
4449 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4450 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4451 buf_scan_perm.set_generation(perm_gen());
4453 process_strong_roots(collecting_perm_gen, so,
4454 &buf_scan_non_heap_roots,
4455 &buf_scan_perm);
4456 // Finish up any enqueued closure apps.
4457 buf_scan_non_heap_roots.done();
4458 buf_scan_perm.done();
4459 double ext_roots_end = os::elapsedTime();
4460 g1_policy()->reset_obj_copy_time(worker_i);
4461 double obj_copy_time_sec =
4462 buf_scan_non_heap_roots.closure_app_seconds() +
4463 buf_scan_perm.closure_app_seconds();
4464 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4465 double ext_root_time_ms =
4466 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4467 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4469 // Scan strong roots in mark stack.
4470 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
4471 concurrent_mark()->oops_do(scan_non_heap_roots);
4472 }
4473 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4474 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
4476 // XXX What should this be doing in the parallel case?
4477 g1_policy()->record_collection_pause_end_CH_strong_roots();
4478 if (G1VerifyRemSet) {
4479 // :::: FIXME ::::
4480 // The stupid remembered set doesn't know how to filter out dead
4481 // objects, which the smart one does, so when the two are built
4482 // and compared, the number of entries in each differs and the
4483 // verification code fails.
4484 guarantee(false, "verification code is broken, see note");
4486 // Let's make sure that the current rem set agrees with the stupidest
4487 // one possible!
4488 bool refs_enabled = ref_processor()->discovery_enabled();
4489 if (refs_enabled) ref_processor()->disable_discovery();
4490 StupidG1RemSet stupid(this);
4491 count_closure.n = 0;
4492 stupid.oops_into_collection_set_do(&count_closure, worker_i);
4493 int stupid_n = count_closure.n;
4494 count_closure.n = 0;
4495 g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i);
4496 guarantee(count_closure.n == stupid_n, "Old and new rem sets differ.");
4497 gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n);
4498 if (refs_enabled) ref_processor()->enable_discovery();
4499 }
4500 if (scan_so != NULL) {
4501 scan_scan_only_set(scan_so, worker_i);
4502 }
4503 // Now scan the complement of the collection set.
4504 if (scan_rs != NULL) {
4505 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4506 }
4507 // Finish with the ref_processor roots.
4508 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4509 ref_processor()->oops_do(scan_non_heap_roots);
4510 }
4511 g1_policy()->record_collection_pause_end_G1_strong_roots();
4512 _process_strong_tasks->all_tasks_completed();
4513 }
4515 void
4516 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
4517 OopsInHeapRegionClosure* oc,
4518 int worker_i) {
4519 HeapWord* startAddr = r->bottom();
4520 HeapWord* endAddr = r->used_region().end();
4522 oc->set_region(r);
4524 HeapWord* p = r->bottom();
4525 HeapWord* t = r->top();
4526 guarantee( p == r->next_top_at_mark_start(), "invariant" );
4527 while (p < t) {
4528 oop obj = oop(p);
4529 p += obj->oop_iterate(oc);
4530 }
4531 }
4533 void
4534 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
4535 int worker_i) {
4536 double start = os::elapsedTime();
4538 BufferingOopsInHeapRegionClosure boc(oc);
4540 FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
4541 FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
4543 OopsInHeapRegionClosure *foc;
4544 if (g1_policy()->should_initiate_conc_mark())
4545 foc = &scan_and_mark;
4546 else
4547 foc = &scan_only;
4549 HeapRegion* hr;
4550 int n = 0;
4551 while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
4552 scan_scan_only_region(hr, foc, worker_i);
4553 ++n;
4554 }
4555 boc.done();
4557 double closure_app_s = boc.closure_app_seconds();
4558 g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
4559 double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
4560 g1_policy()->record_scan_only_time(worker_i, ms, n);
4561 }
4563 void
4564 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4565 OopClosure* non_root_closure) {
4566 SharedHeap::process_weak_roots(root_closure, non_root_closure);
4567 }
4570 class SaveMarksClosure: public HeapRegionClosure {
4571 public:
4572 bool doHeapRegion(HeapRegion* r) {
4573 r->save_marks();
4574 return false;
4575 }
4576 };
4578 void G1CollectedHeap::save_marks() {
4579 if (ParallelGCThreads == 0) {
4580 SaveMarksClosure sm;
4581 heap_region_iterate(&sm);
4582 }
4583 // We do this even in the parallel case
4584 perm_gen()->save_marks();
4585 }
4587 void G1CollectedHeap::evacuate_collection_set() {
4588 set_evacuation_failed(false);
4590 g1_rem_set()->prepare_for_oops_into_collection_set_do();
4591 concurrent_g1_refine()->set_use_cache(false);
4592 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
4593 set_par_threads(n_workers);
4594 G1ParTask g1_par_task(this, n_workers, _task_queues);
4596 init_for_evac_failure(NULL);
4598 change_strong_roots_parity(); // In preparation for parallel strong roots.
4599 rem_set()->prepare_for_younger_refs_iterate(true);
4601 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4602 double start_par = os::elapsedTime();
4603 if (ParallelGCThreads > 0) {
4604 // The individual threads will set their evac-failure closures.
4605 workers()->run_task(&g1_par_task);
4606 } else {
4607 g1_par_task.work(0);
4608 }
4610 double par_time = (os::elapsedTime() - start_par) * 1000.0;
4611 g1_policy()->record_par_time(par_time);
4612 set_par_threads(0);
4613 // Is this the right thing to do here? We don't save marks
4614 // on individual heap regions when we allocate from
4615 // them in parallel, so this seems like the correct place for this.
4616 retire_all_alloc_regions();
4617 {
4618 G1IsAliveClosure is_alive(this);
4619 G1KeepAliveClosure keep_alive(this);
4620 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4621 }
4622 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4624 concurrent_g1_refine()->set_use_cache(true);
4626 finalize_for_evac_failure();
4628 // Must do this before removing self-forwarding pointers, which clears
4629 // the per-region evac-failure flags.
4630 concurrent_mark()->complete_marking_in_collection_set();
4632 if (evacuation_failed()) {
4633 remove_self_forwarding_pointers();
4634 if (PrintGCDetails) {
4635 gclog_or_tty->print(" (evacuation failed)");
4636 } else if (PrintGC) {
4637 gclog_or_tty->print("--");
4638 }
4639 }
4641 if (G1DeferredRSUpdate) {
4642 RedirtyLoggedCardTableEntryFastClosure redirty;
4643 dirty_card_queue_set().set_closure(&redirty);
4644 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
4645 JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
4646 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
4647 }
4649 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
4650 }
4652 void G1CollectedHeap::free_region(HeapRegion* hr) {
4653 size_t pre_used = 0;
4654 size_t cleared_h_regions = 0;
4655 size_t freed_regions = 0;
4656 UncleanRegionList local_list;
4658 HeapWord* start = hr->bottom();
4659 HeapWord* end = hr->prev_top_at_mark_start();
4660 size_t used_bytes = hr->used();
4661 size_t live_bytes = hr->max_live_bytes();
4662 if (used_bytes > 0) {
4663 guarantee( live_bytes <= used_bytes, "invariant" );
4664 } else {
4665 guarantee( live_bytes == 0, "invariant" );
4666 }
4668 size_t garbage_bytes = used_bytes - live_bytes;
4669 if (garbage_bytes > 0)
4670 g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
4672 free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
4673 &local_list);
4674 finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
4675 &local_list);
4676 }
4678 void
4679 G1CollectedHeap::free_region_work(HeapRegion* hr,
4680 size_t& pre_used,
4681 size_t& cleared_h_regions,
4682 size_t& freed_regions,
4683 UncleanRegionList* list,
4684 bool par) {
4685 assert(!hr->popular(), "should not free popular regions");
4686 pre_used += hr->used();
4687 if (hr->isHumongous()) {
4688 assert(hr->startsHumongous(),
4689 "Only the start of a humongous region should be freed.");
4690 int ind = _hrs->find(hr);
4691 assert(ind != -1, "Should have an index.");
4692 // Clear the start region.
4693 hr->hr_clear(par, true /*clear_space*/);
4694 list->insert_before_head(hr);
4695 cleared_h_regions++;
4696 freed_regions++;
4697 // Clear any continued regions.
4698 ind++;
4699 while ((size_t)ind < n_regions()) {
4700 HeapRegion* hrc = _hrs->at(ind);
4701 if (!hrc->continuesHumongous()) break;
4702 // Otherwise, it continues the humongous region.
4703 assert(hrc->humongous_start_region() == hr, "Huh?");
4704 hrc->hr_clear(par, true /*clear_space*/);
4705 cleared_h_regions++;
4706 freed_regions++;
4707 list->insert_before_head(hrc);
4708 ind++;
4709 }
4710 } else {
4711 hr->hr_clear(par, true /*clear_space*/);
4712 list->insert_before_head(hr);
4713 freed_regions++;
4714 // If we're using clear2, this should not be enabled.
4715 // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
4716 }
4717 }
4719 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
4720 size_t cleared_h_regions,
4721 size_t freed_regions,
4722 UncleanRegionList* list) {
4723 if (list != NULL && list->sz() > 0) {
4724 prepend_region_list_on_unclean_list(list);
4725 }
4726 // Acquire a lock, if we're parallel, to update possibly-shared
4727 // variables.
4728 Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
4729 {
4730 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
4731 _summary_bytes_used -= pre_used;
4732 _num_humongous_regions -= (int) cleared_h_regions;
4733 _free_regions += freed_regions;
4734 }
4735 }
4738 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
4739 while (list != NULL) {
4740 guarantee( list->is_young(), "invariant" );
4742 HeapWord* bottom = list->bottom();
4743 HeapWord* end = list->end();
4744 MemRegion mr(bottom, end);
4745 ct_bs->dirty(mr);
4747 list = list->get_next_young_region();
4748 }
4749 }
4751 void G1CollectedHeap::cleanUpCardTable() {
4752 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
4753 double start = os::elapsedTime();
4755 ct_bs->clear(_g1_committed);
4757 // now, redirty the cards of the scan-only and survivor regions
4758 // (it seemed faster to do it this way, instead of iterating over
4759 // all regions and then clearing / dirtying as appropriate)
4760 dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
4761 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
4763 double elapsed = os::elapsedTime() - start;
4764 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4765 }
4768 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4769 // First do any popular regions.
4770 HeapRegion* hr;
4771 while ((hr = popular_region_to_evac()) != NULL) {
4772 evac_popular_region(hr);
4773 }
4774 // Now do heuristic pauses.
4775 if (g1_policy()->should_do_collection_pause(word_size)) {
4776 do_collection_pause();
4777 }
4778 }
4780 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4781 double young_time_ms = 0.0;
4782 double non_young_time_ms = 0.0;
4784 G1CollectorPolicy* policy = g1_policy();
4786 double start_sec = os::elapsedTime();
4787 bool non_young = true;
4789 HeapRegion* cur = cs_head;
4790 int age_bound = -1;
4791 size_t rs_lengths = 0;
4793 while (cur != NULL) {
4794 if (non_young) {
4795 if (cur->is_young()) {
4796 double end_sec = os::elapsedTime();
4797 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4798 non_young_time_ms += elapsed_ms;
4800 start_sec = os::elapsedTime();
4801 non_young = false;
4802 }
4803 } else {
4804 if (!cur->is_on_free_list()) {
4805 double end_sec = os::elapsedTime();
4806 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4807 young_time_ms += elapsed_ms;
4809 start_sec = os::elapsedTime();
4810 non_young = true;
4811 }
4812 }
4814 rs_lengths += cur->rem_set()->occupied();
4816 HeapRegion* next = cur->next_in_collection_set();
4817 assert(cur->in_collection_set(), "bad CS");
4818 cur->set_next_in_collection_set(NULL);
4819 cur->set_in_collection_set(false);
4821 if (cur->is_young()) {
4822 int index = cur->young_index_in_cset();
4823 guarantee( index != -1, "invariant" );
4824 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
4825 size_t words_survived = _surviving_young_words[index];
4826 cur->record_surv_words_in_group(words_survived);
4827 } else {
4828 int index = cur->young_index_in_cset();
4829 guarantee( index == -1, "invariant" );
4830 }
4832 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
4833 (!cur->is_young() && cur->young_index_in_cset() == -1),
4834 "invariant" );
4836 if (!cur->evacuation_failed()) {
4837 // And the region is non-empty.
4838 assert(!cur->is_empty(),
4839 "Should not have empty regions in a CS.");
4840 free_region(cur);
4841 } else {
4842 guarantee( !cur->is_scan_only(), "should not be scan only" );
4843 cur->uninstall_surv_rate_group();
4844 if (cur->is_young())
4845 cur->set_young_index_in_cset(-1);
4846 cur->set_not_young();
4847 cur->set_evacuation_failed(false);
4848 }
4849 cur = next;
4850 }
4852 policy->record_max_rs_lengths(rs_lengths);
4853 policy->cset_regions_freed();
4855 double end_sec = os::elapsedTime();
4856 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4857 if (non_young)
4858 non_young_time_ms += elapsed_ms;
4859 else
4860 young_time_ms += elapsed_ms;
4862 policy->record_young_free_cset_time_ms(young_time_ms);
4863 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
4864 }
4866 HeapRegion*
4867 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
4868 assert(ZF_mon->owned_by_self(), "Precondition");
4869 HeapRegion* res = pop_unclean_region_list_locked();
4870 if (res != NULL) {
4871 assert(!res->continuesHumongous() &&
4872 res->zero_fill_state() != HeapRegion::Allocated,
4873 "Only free regions on unclean list.");
4874 if (zero_filled) {
4875 res->ensure_zero_filled_locked();
4876 res->set_zero_fill_allocated();
4877 }
4878 }
4879 return res;
4880 }
4882 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
4883 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
4884 return alloc_region_from_unclean_list_locked(zero_filled);
4885 }
4887 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
4888 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4889 put_region_on_unclean_list_locked(r);
4890 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4891 }
4893 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
4894 MutexLockerEx x(Cleanup_mon);
4895 set_unclean_regions_coming_locked(b);
4896 }
4898 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
4899 assert(Cleanup_mon->owned_by_self(), "Precondition");
4900 _unclean_regions_coming = b;
4901 // Wake up mutator threads that might be waiting for completeCleanup to
4902 // finish.
4903 if (!b) Cleanup_mon->notify_all();
4904 }
4906 void G1CollectedHeap::wait_for_cleanup_complete() {
4907 MutexLockerEx x(Cleanup_mon);
4908 wait_for_cleanup_complete_locked();
4909 }
4911 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
4912 assert(Cleanup_mon->owned_by_self(), "precondition");
4913 while (_unclean_regions_coming) {
4914 Cleanup_mon->wait();
4915 }
4916 }
4918 void
4919 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
4920 assert(ZF_mon->owned_by_self(), "precondition.");
4921 _unclean_region_list.insert_before_head(r);
4922 }
4924 void
4925 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
4926 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4927 prepend_region_list_on_unclean_list_locked(list);
4928 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
4929 }
4931 void
4932 G1CollectedHeap::
4933 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
4934 assert(ZF_mon->owned_by_self(), "precondition.");
4935 _unclean_region_list.prepend_list(list);
4936 }
4938 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
4939 assert(ZF_mon->owned_by_self(), "precondition.");
4940 HeapRegion* res = _unclean_region_list.pop();
4941 if (res != NULL) {
4942 // Inform ZF thread that there's a new unclean head.
4943 if (_unclean_region_list.hd() != NULL && should_zf())
4944 ZF_mon->notify_all();
4945 }
4946 return res;
4947 }
4949 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
4950 assert(ZF_mon->owned_by_self(), "precondition.");
4951 return _unclean_region_list.hd();
4952 }
4955 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
4956 assert(ZF_mon->owned_by_self(), "Precondition");
4957 HeapRegion* r = peek_unclean_region_list_locked();
4958 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
4959 // Result of below must be equal to "r", since we hold the lock.
4960 (void)pop_unclean_region_list_locked();
4961 put_free_region_on_list_locked(r);
4962 return true;
4963 } else {
4964 return false;
4965 }
4966 }
4968 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
4969 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4970 return move_cleaned_region_to_free_list_locked();
4971 }
4974 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
4975 assert(ZF_mon->owned_by_self(), "precondition.");
4976 assert(_free_region_list_size == free_region_list_length(), "Inv");
4977 assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
4978 "Regions on free list must be zero filled");
4979 assert(!r->isHumongous(), "Must not be humongous.");
4980 assert(r->is_empty(), "Better be empty");
4981 assert(!r->is_on_free_list(),
4982 "Better not already be on free list");
4983 assert(!r->is_on_unclean_list(),
4984 "Better not already be on unclean list");
4985 r->set_on_free_list(true);
4986 r->set_next_on_free_list(_free_region_list);
4987 _free_region_list = r;
4988 _free_region_list_size++;
4989 assert(_free_region_list_size == free_region_list_length(), "Inv");
4990 }
4992 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
4993 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
4994 put_free_region_on_list_locked(r);
4995 }
4997 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
4998 assert(ZF_mon->owned_by_self(), "precondition.");
4999 assert(_free_region_list_size == free_region_list_length(), "Inv");
5000 HeapRegion* res = _free_region_list;
5001 if (res != NULL) {
5002 _free_region_list = res->next_from_free_list();
5003 _free_region_list_size--;
5004 res->set_on_free_list(false);
5005 res->set_next_on_free_list(NULL);
5006 assert(_free_region_list_size == free_region_list_length(), "Inv");
5007 }
5008 return res;
5009 }
5012 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
5013 // By self, or on behalf of self.
5014 assert(Heap_lock->is_locked(), "Precondition");
5015 HeapRegion* res = NULL;
5016 bool first = true;
5017 while (res == NULL) {
5018 if (zero_filled || !first) {
5019 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5020 res = pop_free_region_list_locked();
5021 if (res != NULL) {
5022 assert(!res->zero_fill_is_allocated(),
5023 "No allocated regions on free list.");
5024 res->set_zero_fill_allocated();
5025 } else if (!first) {
5026 break; // We tried both, time to return NULL.
5027 }
5028 }
5030 if (res == NULL) {
5031 res = alloc_region_from_unclean_list(zero_filled);
5032 }
5033 assert(res == NULL ||
5034 !zero_filled ||
5035 res->zero_fill_is_allocated(),
5036 "We must have allocated the region we're returning");
5037 first = false;
5038 }
5039 return res;
5040 }
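// Illustrative trace of the loop above (hypothetical): with
// zero_filled == true,
//   pass 1: pop_free_region_list_locked()          -> hit? return it;
//           else alloc_region_from_unclean_list()  -> zero-fills on demand;
//   pass 2: try the free list once more; if it is still empty, give up
//           and return NULL.
// With zero_filled == false, the free list is skipped on the first pass, so
// an unclean region can be handed out without paying for zero-filling.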
5042 void G1CollectedHeap::remove_allocated_regions_from_lists() {
5043 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5044 {
5045 HeapRegion* prev = NULL;
5046 HeapRegion* cur = _unclean_region_list.hd();
5047 while (cur != NULL) {
5048 HeapRegion* next = cur->next_from_unclean_list();
5049 if (cur->zero_fill_is_allocated()) {
5050 // Remove from the list.
5051 if (prev == NULL) {
5052 (void)_unclean_region_list.pop();
5053 } else {
5054 _unclean_region_list.delete_after(prev);
5055 }
5056 cur->set_on_unclean_list(false);
5057 cur->set_next_on_unclean_list(NULL);
5058 } else {
5059 prev = cur;
5060 }
5061 cur = next;
5062 }
5063 assert(_unclean_region_list.sz() == unclean_region_list_length(),
5064 "Inv");
5065 }
5067 {
5068 HeapRegion* prev = NULL;
5069 HeapRegion* cur = _free_region_list;
5070 while (cur != NULL) {
5071 HeapRegion* next = cur->next_from_free_list();
5072 if (cur->zero_fill_is_allocated()) {
5073 // Remove from the list.
5074 if (prev == NULL) {
5075 _free_region_list = cur->next_from_free_list();
5076 } else {
5077 prev->set_next_on_free_list(cur->next_from_free_list());
5078 }
5079 cur->set_on_free_list(false);
5080 cur->set_next_on_free_list(NULL);
5081 _free_region_list_size--;
5082 } else {
5083 prev = cur;
5084 }
5085 cur = next;
5086 }
5087 assert(_free_region_list_size == free_region_list_length(), "Inv");
5088 }
5089 }
5091 bool G1CollectedHeap::verify_region_lists() {
5092 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5093 return verify_region_lists_locked();
5094 }
5096 bool G1CollectedHeap::verify_region_lists_locked() {
5097 HeapRegion* unclean = _unclean_region_list.hd();
5098 while (unclean != NULL) {
5099 guarantee(unclean->is_on_unclean_list(), "Well, it is!");
5100 guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
5101 guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
5102 "Everything else is possible.");
5103 unclean = unclean->next_from_unclean_list();
5104 }
5105 guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
5107 HeapRegion* free_r = _free_region_list;
5108 while (free_r != NULL) {
5109 assert(free_r->is_on_free_list(), "Well, it is!");
5110 assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
5111 switch (free_r->zero_fill_state()) {
5112 case HeapRegion::NotZeroFilled:
5113 case HeapRegion::ZeroFilling:
5114 guarantee(false, "Should not be on free list.");
5115 break;
5116 default:
5117 // Everything else is possible.
5118 break;
5119 }
5120 free_r = free_r->next_from_free_list();
5121 }
5122 guarantee(_free_region_list_size == free_region_list_length(), "Inv");
5123 // If we didn't do an assertion...
5124 return true;
5125 }
5127 size_t G1CollectedHeap::free_region_list_length() {
5128 assert(ZF_mon->owned_by_self(), "precondition.");
5129 size_t len = 0;
5130 HeapRegion* cur = _free_region_list;
5131 while (cur != NULL) {
5132 len++;
5133 cur = cur->next_from_free_list();
5134 }
5135 return len;
5136 }
5138 size_t G1CollectedHeap::unclean_region_list_length() {
5139 assert(ZF_mon->owned_by_self(), "precondition.");
5140 return _unclean_region_list.length();
5141 }
5143 size_t G1CollectedHeap::n_regions() {
5144 return _hrs->length();
5145 }
5147 size_t G1CollectedHeap::max_regions() {
5148 return
5149 (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
5150 HeapRegion::GrainBytes;
5151 }
5153 size_t G1CollectedHeap::free_regions() {
5154 /* Possibly-expensive assert.
5155 assert(_free_regions == count_free_regions(),
5156 "_free_regions is off.");
5157 */
5158 return _free_regions;
5159 }
5161 bool G1CollectedHeap::should_zf() {
5162 return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
5163 }
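// should_zf() throttles the concurrent zero-fill thread: zero-filling
// is worthwhile while fewer than G1ConcZFMaxRegions pre-zeroed regions
// sit on the free list (cf. the ZF_mon->notify_all() in
// rebuild_region_lists() below).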
5165 class RegionCounter: public HeapRegionClosure {
5166 size_t _n;
5167 public:
5168 RegionCounter() : _n(0) {}
5169 bool doHeapRegion(HeapRegion* r) {
5170 if (r->is_empty() && !r->popular()) {
5171 assert(!r->isHumongous(), "H regions should not be empty.");
5172 _n++;
5173 }
5174 return false;
5175 }
5176 size_t res() { return _n; }
5177 };
5179 size_t G1CollectedHeap::count_free_regions() {
5180 RegionCounter rc;
5181 heap_region_iterate(&rc);
5182 size_t n = rc.res();
5183 if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
5184 n--;
5185 return n;
5186 }
5188 size_t G1CollectedHeap::count_free_regions_list() {
5189 size_t n = 0;
5191 ZF_mon->lock_without_safepoint_check();
5192 HeapRegion* cur = _free_region_list;
5193 while (cur != NULL) {
5194 cur = cur->next_from_free_list();
5195 n++;
5196 }
5197 size_t m = unclean_region_list_length();
5198 ZF_mon->unlock();
5199 return n + m;
5200 }
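// count_free_regions() (above) counts empty regions by walking the
// whole heap, while count_free_regions_list() counts by walking the
// free and unclean lists; print_region_accounting_info() (below) prints
// both alongside the cached _free_regions so that any discrepancy in
// the accounting shows up.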
5202 bool G1CollectedHeap::should_set_young_locked() {
5203 assert(heap_lock_held_for_gc(),
5204 "the heap lock should already be held by or for this thread");
5205 return (g1_policy()->in_young_gc_mode() &&
5206 g1_policy()->should_add_next_region_to_young_list());
5207 }
5209 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5210 assert(heap_lock_held_for_gc(),
5211 "the heap lock should already be held by or for this thread");
5212 _young_list->push_region(hr);
5213 g1_policy()->set_region_short_lived(hr);
5214 }
5216 class NoYoungRegionsClosure: public HeapRegionClosure {
5217 private:
5218 bool _success;
5219 public:
5220 NoYoungRegionsClosure() : _success(true) { }
5221 bool doHeapRegion(HeapRegion* r) {
5222 if (r->is_young()) {
5223 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
5224 r->bottom(), r->end());
5225 _success = false;
5226 }
5227 return false;
5228 }
5229 bool success() { return _success; }
5230 };
5232 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
5233 bool check_sample) {
5234 bool ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
5237 if (!ignore_scan_only_list) {
5238 NoYoungRegionsClosure closure;
5239 heap_region_iterate(&closure);
5240 ret = ret && closure.success();
5241 }
5243 return ret;
5244 }
5246 void G1CollectedHeap::empty_young_list() {
5247 assert(heap_lock_held_for_gc(),
5248 "the heap lock should already be held by or for this thread");
5249 assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
5251 _young_list->empty_list();
5252 }
5254 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
5255 bool no_allocs = true;
5256 for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
5257 HeapRegion* r = _gc_alloc_regions[ap];
5258 no_allocs = r == NULL || r->saved_mark_at_top();
5259 }
5260 return no_allocs;
5261 }
5263 void G1CollectedHeap::retire_all_alloc_regions() {
5264 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
5265 HeapRegion* r = _gc_alloc_regions[ap];
5266 if (r != NULL) {
5267 // Check for aliases.
5268 bool has_processed_alias = false;
5269 for (int i = 0; i < ap; ++i) {
5270 if (_gc_alloc_regions[i] == r) {
5271 has_processed_alias = true;
5272 break;
5273 }
5274 }
5275 if (!has_processed_alias) {
5276 retire_alloc_region(r, false /* par */);
5277 }
5278 }
5279 }
5280 }
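// The alias check above guards against retiring the same region twice:
// several GC allocation purposes may share one _gc_alloc_regions entry
// (for example, when one region is reused for more than one purpose),
// so a region is retired only the first time it is encountered in the
// array.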
5283 // Done at the start of full GC.
5284 void G1CollectedHeap::tear_down_region_lists() {
5285 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5286 while (pop_unclean_region_list_locked() != NULL) ;
5287 assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
5288 "Postconditions of loop.")
5289 while (pop_free_region_list_locked() != NULL) ;
5290 assert(_free_region_list == NULL, "Postcondition of loop.");
5291 if (_free_region_list_size != 0) {
5292 gclog_or_tty->print_cr("Size is "SIZE_FORMAT".", _free_region_list_size);
5293 print();
5294 }
5295 assert(_free_region_list_size == 0, "Postcondition of loop.");
5296 }
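// Used by rebuild_region_lists() below. For each non-empty region the
// closure re-zeroes the space above top() and marks the region
// Allocated; each empty, non-popular region is re-added to the unclean
// or free list according to its zero-fill state, and counted so that
// the _free_regions cache can be re-established.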
5299 class RegionResetter: public HeapRegionClosure {
5300 G1CollectedHeap* _g1;
5301 int _n;
5302 public:
5303 RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
5304 bool doHeapRegion(HeapRegion* r) {
5305 if (r->continuesHumongous()) return false;
5306 if (r->top() > r->bottom()) {
5307 if (r->top() < r->end()) {
5308 Copy::fill_to_words(r->top(),
5309 pointer_delta(r->end(), r->top()));
5310 }
5311 r->set_zero_fill_allocated();
5312 } else {
5313 assert(r->is_empty(), "tautology");
5314 if (r->popular()) {
5315 if (r->zero_fill_state() != HeapRegion::Allocated) {
5316 r->ensure_zero_filled_locked();
5317 r->set_zero_fill_allocated();
5318 }
5319 } else {
5320 _n++;
5321 switch (r->zero_fill_state()) {
5322 case HeapRegion::NotZeroFilled:
5323 case HeapRegion::ZeroFilling:
5324 _g1->put_region_on_unclean_list_locked(r);
5325 break;
5326 case HeapRegion::Allocated:
5327 r->set_zero_fill_complete();
5328 // no break; go on to put on free list.
5329 case HeapRegion::ZeroFilled:
5330 _g1->put_free_region_on_list_locked(r);
5331 break;
5332 }
5333 }
5334 }
5335 return false;
5336 }
5338 int getFreeRegionCount() {return _n;}
5339 };
5341 // Done at the end of full GC.
5342 void G1CollectedHeap::rebuild_region_lists() {
5343 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5344 // This needs to go at the end of the full GC.
5345 RegionResetter rs;
5346 heap_region_iterate(&rs);
5347 _free_regions = rs.getFreeRegionCount();
5348 // Tell the ZF thread it may have work to do.
5349 if (should_zf()) ZF_mon->notify_all();
5350 }
5352 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
5353 G1CollectedHeap* _g1;
5354 int _n;
5355 public:
5356 UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
5357 bool doHeapRegion(HeapRegion* r) {
5358 if (r->continuesHumongous()) return false;
5359 if (r->top() > r->bottom()) {
5360 // There are assertions in "set_zero_fill_needed()" below that
5361 // require top() == bottom(), so this is technically illegal.
5362 // We'll skirt the law here, by making that true temporarily.
5363 DEBUG_ONLY(HeapWord* save_top = r->top();
5364 r->set_top(r->bottom()));
5365 r->set_zero_fill_needed();
5366 DEBUG_ONLY(r->set_top(save_top));
5367 }
5368 return false;
5369 }
5370 };
5372 // Done at the start of full GC.
5373 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
5374 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
5375 // This needs to go at the start of the full GC.
5376 UsedRegionsNeedZeroFillSetter rs;
5377 heap_region_iterate(&rs);
5378 }
5380 class CountObjClosure: public ObjectClosure {
5381 size_t _n;
5382 public:
5383 CountObjClosure() : _n(0) {}
5384 void do_object(oop obj) { _n++; }
5385 size_t n() { return _n; }
5386 };
5388 size_t G1CollectedHeap::pop_object_used_objs() {
5389 size_t sum_objs = 0;
5390 for (int i = 0; i < G1NumPopularRegions; i++) {
5391 CountObjClosure cl;
5392 _hrs->at(i)->object_iterate(&cl);
5393 sum_objs += cl.n();
5394 }
5395 return sum_objs;
5396 }
5398 size_t G1CollectedHeap::pop_object_used_bytes() {
5399 size_t sum_bytes = 0;
5400 for (int i = 0; i < G1NumPopularRegions; i++) {
5401 sum_bytes += _hrs->at(i)->used();
5402 }
5403 return sum_bytes;
5404 }
5409 HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) {
5410 while (_cur_pop_hr_index < G1NumPopularRegions) {
5411 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
5412 HeapWord* res = cur_pop_region->allocate(word_size);
5413 if (res != NULL) {
5414 // We account for popular objs directly in the used summary:
5415 _summary_bytes_used += (word_size * HeapWordSize);
5416 return res;
5417 }
5418 // Otherwise, try the next region (first making sure that we remember
5419 // the last "top" value as the "next_top_at_mark_start", so that
5420 // objects made popular during markings aren't automatically considered
5421 // live).
5422 cur_pop_region->note_end_of_copying();
5424 _cur_pop_hr_index++;
5425 }
5426 // XXX: For now !!!
5427 vm_exit_out_of_memory(word_size,
5428 "Not enough pop obj space (To Be Fixed)");
5429 return NULL;
5430 }
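// Popular objects are bump-pointer allocated out of the first
// G1NumPopularRegions regions of the heap; _cur_pop_hr_index tracks the
// region currently being filled, and exhausting all popular regions is
// fatal for now (see the XXX above). Allocations are accounted directly
// in _summary_bytes_used since these are not ordinary alloc regions.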
5432 class HeapRegionList: public CHeapObj {
5433 public:
5434 HeapRegion* hr;
5435 HeapRegionList* next;
5436 };
5438 void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) {
5439 // This might happen during parallel GC, so protect by this lock.
5440 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
5441 // We don't schedule regions whose evacuations are already pending, or
5442 // are already being evacuated.
5443 if (!r->popular_pending() && !r->in_collection_set()) {
5444 r->set_popular_pending(true);
5445 if (G1TracePopularity) {
5446 gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" "
5447 "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.",
5448 r, r->bottom(), r->end());
5449 }
5450 HeapRegionList* hrl = new HeapRegionList;
5451 hrl->hr = r;
5452 hrl->next = _popular_regions_to_be_evacuated;
5453 _popular_regions_to_be_evacuated = hrl;
5454 }
5455 }
5457 HeapRegion* G1CollectedHeap::popular_region_to_evac() {
5458 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
5459 HeapRegion* res = NULL;
5460 while (_popular_regions_to_be_evacuated != NULL && res == NULL) {
5461 HeapRegionList* hrl = _popular_regions_to_be_evacuated;
5462 _popular_regions_to_be_evacuated = hrl->next;
5463 res = hrl->hr;
5464 // The G1RSPopLimit may have increased, so recheck here...
5465 if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) {
5466 // Hah: don't need to schedule.
5467 if (G1TracePopularity) {
5468 gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" "
5469 "["PTR_FORMAT", "PTR_FORMAT") "
5470 "for pop-object evacuation (size %d < limit %d)",
5471 res, res->bottom(), res->end(),
5472 res->rem_set()->occupied(), G1RSPopLimit);
5473 }
5474 res->set_popular_pending(false);
5475 res = NULL;
5476 }
5477 // We do not reset res->popular() here; if we did so, it would allow
5478 // the region to be "rescheduled" for popularity evacuation. Instead,
5479 // this is done in the collection pause, with the world stopped.
5480 // So the invariant is that the regions in the list have the popularity
5481 // boolean set, but having the boolean set does not imply membership
5482 // on the list (though there can be at most one such pop-pending region
5483 // not on the list at any time).
5484 delete hrl;
5485 }
5486 return res;
5487 }
5489 void G1CollectedHeap::evac_popular_region(HeapRegion* hr) {
5490 while (true) {
5491 // Don't want to do a GC pause while cleanup is being completed!
5492 wait_for_cleanup_complete();
5494 // Read the GC count while holding the Heap_lock
5495 int gc_count_before = SharedHeap::heap()->total_collections();
5496 g1_policy()->record_stop_world_start();
5498 {
5499 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
5500 VM_G1PopRegionCollectionPause op(gc_count_before, hr);
5501 VMThread::execute(&op);
5503 // If the prolog succeeded, we didn't do a GC for this.
5504 if (op.prologue_succeeded()) break;
5505 }
5506 // Otherwise we didn't. We should recheck the size, though, since
5507 // the limit may have increased...
5508 if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) {
5509 hr->set_popular_pending(false);
5510 break;
5511 }
5512 }
5513 }
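// The retry loop above follows the usual GC-pause protocol: the VM
// operation's prologue can fail if another collection intervened (the
// total_collections() count changed), in which case the remembered-set
// size is rechecked against the possibly-raised G1RSPopLimit before the
// pause is scheduled again.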
5515 void G1CollectedHeap::atomic_inc_obj_rc(oop obj) {
5516 Atomic::inc(obj_rc_addr(obj));
5517 }
5519 class CountRCClosure: public OopsInHeapRegionClosure {
5520 G1CollectedHeap* _g1h;
5521 bool _parallel;
5522 public:
5523 CountRCClosure(G1CollectedHeap* g1h) :
5524 _g1h(g1h), _parallel(ParallelGCThreads > 0)
5525 {}
5526 void do_oop(narrowOop* p) {
5527 guarantee(false, "NYI");
5528 }
5529 void do_oop(oop* p) {
5530 oop obj = *p;
5531 assert(obj != NULL, "Precondition.");
5532 if (_parallel) {
5533 // We go sticky at the limit to avoid excess contention.
5534 // If we want to track the actual RC's further, we'll need to keep a
5535 // per-thread hash table or something for the popular objects.
5536 if (_g1h->obj_rc(obj) < G1ObjPopLimit) {
5537 _g1h->atomic_inc_obj_rc(obj);
5538 }
5539 } else {
5540 _g1h->inc_obj_rc(obj);
5541 }
5542 }
5543 };
5545 class EvacPopObjClosure: public ObjectClosure {
5546 G1CollectedHeap* _g1h;
5547 size_t _pop_objs;
5548 size_t _max_rc;
5549 public:
5550 EvacPopObjClosure(G1CollectedHeap* g1h) :
5551 _g1h(g1h), _pop_objs(0), _max_rc(0) {}
5553 void do_object(oop obj) {
5554 size_t rc = _g1h->obj_rc(obj);
5555 _max_rc = MAX2(rc, _max_rc);
5556 if (rc >= (size_t) G1ObjPopLimit) {
5557 _g1h->_pop_obj_rc_at_copy.add((double)rc);
5558 size_t word_sz = obj->size();
5559 HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz);
5560 oop new_pop_obj = (oop)new_pop_loc;
5561 Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz);
5562 obj->forward_to(new_pop_obj);
5563 G1ScanAndBalanceClosure scan_and_balance(_g1h);
5564 new_pop_obj->oop_iterate_backwards(&scan_and_balance);
5565 // preserve "next" mark bit if marking is in progress.
5566 if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) {
5567 _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj);
5568 }
5570 if (G1TracePopularity) {
5571 gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT
5572 " pop (%d), move to " PTR_FORMAT,
5573 (void*) obj, word_sz,
5574 _g1h->obj_rc(obj), (void*) new_pop_obj);
5575 }
5576 _pop_objs++;
5577 }
5578 }
5579 size_t pop_objs() { return _pop_objs; }
5580 size_t max_rc() { return _max_rc; }
5581 };
5583 class G1ParCountRCTask : public AbstractGangTask {
5584 G1CollectedHeap* _g1h;
5585 BitMap _bm;
5587 size_t getNCards() {
5588 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
5589 / G1BlockOffsetSharedArray::N_bytes;
5590 }
5591 CountRCClosure _count_rc_closure;
5592 public:
5593 G1ParCountRCTask(G1CollectedHeap* g1h) :
5594 AbstractGangTask("G1 Par RC Count task"),
5595 _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h)
5596 {}
5598 void work(int i) {
5599 ResourceMark rm;
5600 HandleMark hm;
5601 _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i);
5602 }
5603 };
5605 void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) {
5606 // We're evacuating a single region (for popularity).
5607 if (G1TracePopularity) {
5608 gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")",
5609 popular_region->bottom(), popular_region->end());
5610 }
5611 g1_policy()->set_single_region_collection_set(popular_region);
5612 size_t max_rc;
5613 if (!compute_reference_counts_and_evac_popular(popular_region,
5614 &max_rc)) {
5615 // We didn't evacuate any popular objects.
5616 // We increase the RS popularity limit, to prevent this from
5617 // happening in the future.
5618 if (G1RSPopLimit < (1 << 30)) {
5619 G1RSPopLimit *= 2;
5620 }
5621 // For now, interesting enough for a message:
5622 #if 1
5623 gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), "
5624 "failed to find a pop object (max = "SIZE_FORMAT").",
5625 popular_region->bottom(), popular_region->end(),
5626 max_rc);
5627 gclog_or_tty->print_cr("Increased G1RSPopLimit to "INTX_FORMAT".", G1RSPopLimit);
5628 #endif // 1
5629 // Also, we reset the collection set to NULL, to make the rest of
5630 // the collection do nothing.
5631 assert(popular_region->next_in_collection_set() == NULL,
5632 "should be single-region.");
5633 popular_region->set_in_collection_set(false);
5634 popular_region->set_popular_pending(false);
5635 g1_policy()->clear_collection_set();
5636 }
5637 }
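// Reference counts for the popular region are kept in a same-sized
// scratch "RC region" (a fresh heap region if one is available,
// otherwise a C-heap array). The count for an object is stored at the
// same offset within the RC region as the object has within the popular
// region; _rc_region_diff and _rc_region_above record the byte distance
// and direction of the translation. A sketch of the implied address
// mapping (hypothetical; the real obj_rc_addr() is defined elsewhere):
//   jint* obj_rc_addr(oop obj) {
//     char* a = (char*)obj;
//     return (jint*)(_rc_region_above ? a + _rc_region_diff
//                                     : a - _rc_region_diff);
//   }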
5639 bool G1CollectedHeap::
5640 compute_reference_counts_and_evac_popular(HeapRegion* popular_region,
5641 size_t* max_rc) {
5642 HeapWord* rc_region_bot;
5643 HeapWord* rc_region_end;
5645 // Set up the reference count region.
5646 HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords);
5647 if (rc_region != NULL) {
5648 rc_region_bot = rc_region->bottom();
5649 rc_region_end = rc_region->end();
5650 } else {
5651 rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords);
5652 if (rc_region_bot == NULL) {
5653 vm_exit_out_of_memory(HeapRegion::GrainWords,
5654 "No space for RC region.");
5655 }
5656 rc_region_end = rc_region_bot + HeapRegion::GrainWords;
5657 }
5659 if (G1TracePopularity)
5660 gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")",
5661 rc_region_bot, rc_region_end);
5662 if (rc_region_bot > popular_region->bottom()) {
5663 _rc_region_above = true;
5664 _rc_region_diff =
5665 pointer_delta(rc_region_bot, popular_region->bottom(), 1);
5666 } else {
5667 assert(rc_region_bot < popular_region->bottom(), "Can't be equal.");
5668 _rc_region_above = false;
5669 _rc_region_diff =
5670 pointer_delta(popular_region->bottom(), rc_region_bot, 1);
5671 }
5672 g1_policy()->record_pop_compute_rc_start();
5673 // Count external references.
5674 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5675 if (ParallelGCThreads > 0) {
5677 set_par_threads(workers()->total_workers());
5678 G1ParCountRCTask par_count_rc_task(this);
5679 workers()->run_task(&par_count_rc_task);
5680 set_par_threads(0);
5682 } else {
5683 CountRCClosure count_rc_closure(this);
5684 g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0);
5685 }
5686 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5687 g1_policy()->record_pop_compute_rc_end();
5689 // Now evacuate popular objects.
5690 g1_policy()->record_pop_evac_start();
5691 EvacPopObjClosure evac_pop_obj_cl(this);
5692 popular_region->object_iterate(&evac_pop_obj_cl);
5693 *max_rc = evac_pop_obj_cl.max_rc();
5695 // Make sure the last "top" value of the current popular region is copied
5696 // as the "next_top_at_mark_start", so that objects made popular during
5697 // markings aren't automatically considered live.
5698 HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index);
5699 cur_pop_region->note_end_of_copying();
5701 if (rc_region != NULL) {
5702 free_region(rc_region);
5703 } else {
5704 FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot);
5705 }
5706 g1_policy()->record_pop_evac_end();
5708 return evac_pop_obj_cl.pop_objs() > 0;
5709 }
5711 class CountPopObjInfoClosure: public HeapRegionClosure {
5712 size_t _objs;
5713 size_t _bytes;
5715 class CountObjClosure: public ObjectClosure {
5716 size_t _n;
5717 public:
5718 CountObjClosure() : _n(0) {}
5719 void do_object(oop obj) { _n++; }
5720 size_t n() { return _n; }
5721 };
5723 public:
5724 CountPopObjInfoClosure() : _objs(0), _bytes(0) {}
5725 bool doHeapRegion(HeapRegion* r) {
5726 _bytes += r->used();
5727 CountObjClosure blk;
5728 r->object_iterate(&blk);
5729 _objs += blk.n();
5730 return false;
5731 }
5732 size_t objs() { return _objs; }
5733 size_t bytes() { return _bytes; }
5734 };
5737 void G1CollectedHeap::print_popularity_summary_info() const {
5738 CountPopObjInfoClosure blk;
5739 for (int i = 0; i <= _cur_pop_hr_index; i++) {
5740 blk.doHeapRegion(_hrs->at(i));
5741 }
5742 gclog_or_tty->print_cr("\nPopular objects: "SIZE_FORMAT" objs, "SIZE_FORMAT" bytes.",
5743 blk.objs(), blk.bytes());
5744 gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].",
5745 _pop_obj_rc_at_copy.avg(),
5746 _pop_obj_rc_at_copy.maximum(),
5747 _pop_obj_rc_at_copy.sd());
5748 }
5750 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5751 _refine_cte_cl->set_concurrent(concurrent);
5752 }
5754 #ifndef PRODUCT
5756 class PrintHeapRegionClosure: public HeapRegionClosure {
5757 public:
5758 bool doHeapRegion(HeapRegion *r) {
5759 gclog_or_tty->print("Region: "PTR_FORMAT":", r);
5760 if (r != NULL) {
5761 if (r->is_on_free_list())
5762 gclog_or_tty->print("Free ");
5763 if (r->is_young())
5764 gclog_or_tty->print("Young ");
5765 if (r->isHumongous())
5766 gclog_or_tty->print("Is Humongous ");
5767 r->print();
5768 }
5769 return false;
5770 }
5771 };
5773 class SortHeapRegionClosure : public HeapRegionClosure {
5774 size_t young_regions, free_regions, unclean_regions;
5775 size_t hum_regions, count;
5776 size_t unaccounted, cur_alloc;
5777 size_t total_free;
5778 HeapRegion* cur;
5779 public:
5780 SortHeapRegionClosure(HeapRegion *_cur) :
5781 young_regions(0), free_regions(0), unclean_regions(0),
5782 hum_regions(0), count(0),
5783 unaccounted(0), cur_alloc(0),
5784 total_free(0), cur(_cur)
5785 {}
5786 bool doHeapRegion(HeapRegion *r) {
5787 count++;
5788 if (r->is_on_free_list()) free_regions++;
5789 else if (r->is_on_unclean_list()) unclean_regions++;
5790 else if (r->isHumongous()) hum_regions++;
5791 else if (r->is_young()) young_regions++;
5792 else if (r == cur) cur_alloc++;
5793 else unaccounted++;
5794 return false;
5795 }
5796 void print() {
5797 total_free = free_regions + unclean_regions;
5798 gclog_or_tty->print(SIZE_FORMAT" regions\n", count);
5799 gclog_or_tty->print(SIZE_FORMAT" free: free_list = "SIZE_FORMAT" unclean = "SIZE_FORMAT"\n",
5800 total_free, free_regions, unclean_regions);
5801 gclog_or_tty->print(SIZE_FORMAT" humongous "SIZE_FORMAT" young\n",
5802 hum_regions, young_regions);
5803 gclog_or_tty->print(SIZE_FORMAT" cur_alloc\n", cur_alloc);
5804 gclog_or_tty->print("UHOH unaccounted = "SIZE_FORMAT"\n", unaccounted);
5805 }
5806 };
5808 void G1CollectedHeap::print_region_counts() {
5809 SortHeapRegionClosure sc(_cur_alloc_region);
5810 PrintHeapRegionClosure cl;
5811 heap_region_iterate(&cl);
5812 heap_region_iterate(&sc);
5813 sc.print();
5814 print_region_accounting_info();
5815 }
5817 bool G1CollectedHeap::regions_accounted_for() {
5818 // TODO: regions accounting for young/survivor/tenured
5819 return true;
5820 }
5822 bool G1CollectedHeap::print_region_accounting_info() {
5823 gclog_or_tty->print_cr("P regions: "INTX_FORMAT".", G1NumPopularRegions);
5824 gclog_or_tty->print_cr("Free regions: "SIZE_FORMAT" (count: "SIZE_FORMAT" count list "SIZE_FORMAT") (clean: "SIZE_FORMAT" unclean: "SIZE_FORMAT").",
5825 free_regions(),
5826 count_free_regions(), count_free_regions_list(),
5827 _free_region_list_size, _unclean_region_list.sz());
5828 gclog_or_tty->print_cr("cur_alloc: %d.",
5829 (_cur_alloc_region == NULL ? 0 : 1));
5830 gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
5832 // TODO: check regions accounting for young/survivor/tenured
5833 return true;
5834 }
5836 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5837 HeapRegion* hr = heap_region_containing(p);
5838 if (hr == NULL) {
5839 return is_in_permanent(p);
5840 } else {
5841 return hr->is_in(p);
5842 }
5843 }
5844 #endif // PRODUCT
5846 void G1CollectedHeap::g1_unimplemented() {
5847 // Unimplemented();
5848 }
5851 // Local Variables: ***
5852 // c-indentation-style: gnu ***
5853 // End: ***