src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author:      tonyp
date:        Fri, 09 Sep 2011 05:20:58 -0400
changeset:   3121:3bddbf0f57d6
parent:      3120:af2ab04e0038
child:       3169:663cb89032b1
permissions: -rw-r--r--

7087717: G1: make the G1PrintRegionLivenessInfo parameter diagnostic
Reviewed-by: brutisso, ysr

ysr@777 1 /*
tonyp@2453 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "code/icBuffer.hpp"
stefank@2314 27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@2314 30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
tonyp@2715 31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
stefank@2314 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 35 #include "gc_implementation/g1/g1MarkSweep.hpp"
stefank@2314 36 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 37 #include "gc_implementation/g1/g1RemSet.inline.hpp"
stefank@2314 38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 39 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 40 #include "gc_implementation/g1/vm_operations_g1.hpp"
stefank@2314 41 #include "gc_implementation/shared/isGCActiveMark.hpp"
stefank@2314 42 #include "memory/gcLocker.inline.hpp"
stefank@2314 43 #include "memory/genOopClosures.inline.hpp"
stefank@2314 44 #include "memory/generationSpec.hpp"
stefank@2314 45 #include "oops/oop.inline.hpp"
stefank@2314 46 #include "oops/oop.pcgc.inline.hpp"
stefank@2314 47 #include "runtime/aprofiler.hpp"
stefank@2314 48 #include "runtime/vmThread.hpp"
ysr@777 49
tonyp@1377 50 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
tonyp@1377 51
ysr@777 52 // turn it on so that the contents of the young list (scan-only /
ysr@777 53 // to-be-collected) are printed at "strategic" points before / during
ysr@777 54 // / after the collection --- this is useful for debugging
johnc@1829 55 #define YOUNG_LIST_VERBOSE 0
ysr@777 56 // CURRENT STATUS
ysr@777 57 // This file is under construction. Search for "FIXME".
ysr@777 58
ysr@777 59 // INVARIANTS/NOTES
ysr@777 60 //
ysr@777 61 // All allocation activity covered by the G1CollectedHeap interface is
tonyp@2315 62 // serialized by acquiring the HeapLock. This happens in mem_allocate
tonyp@2315 63 // and allocate_new_tlab, which are the "entry" points to the
tonyp@2315 64 // allocation code from the rest of the JVM. (Note that this does not
tonyp@2315 65 // apply to TLAB allocation, which is not part of this interface: it
tonyp@2315 66 // is done by clients of this interface.)
ysr@777 67
ysr@777 68 // Local to this file.
ysr@777 69
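// Closure applied to dirty card table entries by the concurrent
// refinement and mutator threads: it refines a single card and asks
// the caller to yield when the suspendible thread set requests it.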
ysr@777 70 class RefineCardTableEntryClosure: public CardTableEntryClosure {
ysr@777 71 SuspendibleThreadSet* _sts;
ysr@777 72 G1RemSet* _g1rs;
ysr@777 73 ConcurrentG1Refine* _cg1r;
ysr@777 74 bool _concurrent;
ysr@777 75 public:
ysr@777 76 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
ysr@777 77 G1RemSet* g1rs,
ysr@777 78 ConcurrentG1Refine* cg1r) :
ysr@777 79 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
ysr@777 80 {}
ysr@777 81 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 82 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
johnc@2060 83 // This path is executed by the concurrent refine or mutator threads,
johnc@2060 84 // concurrently, and so we do not care if card_ptr contains references
johnc@2060 85 // that point into the collection set.
johnc@2060 86 assert(!oops_into_cset, "should be");
johnc@2060 87
ysr@777 88 if (_concurrent && _sts->should_yield()) {
ysr@777 89 // Caller will actually yield.
ysr@777 90 return false;
ysr@777 91 }
ysr@777 92 // Otherwise, we finished successfully; return true.
ysr@777 93 return true;
ysr@777 94 }
ysr@777 95 void set_concurrent(bool b) { _concurrent = b; }
ysr@777 96 };
ysr@777 97
ysr@777 98
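// Debugging closure used by check_ct_logs_at_safepoint(): for every
// logged card that lies in the reserved heap it records the card's
// value in a histogram and then sets the card to -1 (clean).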
ysr@777 99 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
ysr@777 100 int _calls;
ysr@777 101 G1CollectedHeap* _g1h;
ysr@777 102 CardTableModRefBS* _ctbs;
ysr@777 103 int _histo[256];
ysr@777 104 public:
ysr@777 105 ClearLoggedCardTableEntryClosure() :
ysr@777 106 _calls(0)
ysr@777 107 {
ysr@777 108 _g1h = G1CollectedHeap::heap();
ysr@777 109 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
ysr@777 110 for (int i = 0; i < 256; i++) _histo[i] = 0;
ysr@777 111 }
ysr@777 112 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
ysr@777 113 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
ysr@777 114 _calls++;
ysr@777 115 unsigned char* ujb = (unsigned char*)card_ptr;
ysr@777 116 int ind = (int)(*ujb);
ysr@777 117 _histo[ind]++;
ysr@777 118 *card_ptr = -1;
ysr@777 119 }
ysr@777 120 return true;
ysr@777 121 }
ysr@777 122 int calls() { return _calls; }
ysr@777 123 void print_histo() {
ysr@777 124 gclog_or_tty->print_cr("Card table value histogram:");
ysr@777 125 for (int i = 0; i < 256; i++) {
ysr@777 126 if (_histo[i] != 0) {
ysr@777 127 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
ysr@777 128 }
ysr@777 129 }
ysr@777 130 }
ysr@777 131 };
ysr@777 132
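// Debugging counterpart of the closure above: re-dirties (sets to 0)
// every logged card that lies in the reserved heap and counts how
// many cards it touched so the caller can cross-check the two passes.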
ysr@777 133 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
ysr@777 134 int _calls;
ysr@777 135 G1CollectedHeap* _g1h;
ysr@777 136 CardTableModRefBS* _ctbs;
ysr@777 137 public:
ysr@777 138 RedirtyLoggedCardTableEntryClosure() :
ysr@777 139 _calls(0)
ysr@777 140 {
ysr@777 141 _g1h = G1CollectedHeap::heap();
ysr@777 142 _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
ysr@777 143 }
ysr@777 144 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
ysr@777 145 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
ysr@777 146 _calls++;
ysr@777 147 *card_ptr = 0;
ysr@777 148 }
ysr@777 149 return true;
ysr@777 150 }
ysr@777 151 int calls() { return _calls; }
ysr@777 152 };
ysr@777 153
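// Faster variant: unconditionally re-dirties the card, with no range
// check and no call counting.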
iveresov@1051 154 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
iveresov@1051 155 public:
iveresov@1051 156 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
iveresov@1051 157 *card_ptr = CardTableModRefBS::dirty_card_val();
iveresov@1051 158 return true;
iveresov@1051 159 }
iveresov@1051 160 };
iveresov@1051 161
ysr@777 162 YoungList::YoungList(G1CollectedHeap* g1h)
ysr@777 163 : _g1h(g1h), _head(NULL),
johnc@1829 164 _length(0),
ysr@777 165 _last_sampled_rs_lengths(0),
apetrusenko@980 166 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
ysr@777 167 {
ysr@777 168 guarantee( check_list_empty(false), "just making sure..." );
ysr@777 169 }
ysr@777 170
ysr@777 171 void YoungList::push_region(HeapRegion *hr) {
ysr@777 172 assert(!hr->is_young(), "should not already be young");
ysr@777 173 assert(hr->get_next_young_region() == NULL, "cause it should!");
ysr@777 174
ysr@777 175 hr->set_next_young_region(_head);
ysr@777 176 _head = hr;
ysr@777 177
ysr@777 178 hr->set_young();
ysr@777 179 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
ysr@777 180 ++_length;
ysr@777 181 }
ysr@777 182
ysr@777 183 void YoungList::add_survivor_region(HeapRegion* hr) {
apetrusenko@980 184 assert(hr->is_survivor(), "should be flagged as survivor region");
ysr@777 185 assert(hr->get_next_young_region() == NULL, "cause it should!");
ysr@777 186
ysr@777 187 hr->set_next_young_region(_survivor_head);
ysr@777 188 if (_survivor_head == NULL) {
apetrusenko@980 189 _survivor_tail = hr;
ysr@777 190 }
ysr@777 191 _survivor_head = hr;
ysr@777 192
ysr@777 193 ++_survivor_length;
ysr@777 194 }
ysr@777 195
ysr@777 196 void YoungList::empty_list(HeapRegion* list) {
ysr@777 197 while (list != NULL) {
ysr@777 198 HeapRegion* next = list->get_next_young_region();
ysr@777 199 list->set_next_young_region(NULL);
ysr@777 200 list->uninstall_surv_rate_group();
ysr@777 201 list->set_not_young();
ysr@777 202 list = next;
ysr@777 203 }
ysr@777 204 }
ysr@777 205
ysr@777 206 void YoungList::empty_list() {
ysr@777 207 assert(check_list_well_formed(), "young list should be well formed");
ysr@777 208
ysr@777 209 empty_list(_head);
ysr@777 210 _head = NULL;
ysr@777 211 _length = 0;
ysr@777 212
ysr@777 213 empty_list(_survivor_head);
ysr@777 214 _survivor_head = NULL;
apetrusenko@980 215 _survivor_tail = NULL;
ysr@777 216 _survivor_length = 0;
ysr@777 217
ysr@777 218 _last_sampled_rs_lengths = 0;
ysr@777 219
ysr@777 220 assert(check_list_empty(false), "just making sure...");
ysr@777 221 }
ysr@777 222
ysr@777 223 bool YoungList::check_list_well_formed() {
ysr@777 224 bool ret = true;
ysr@777 225
ysr@777 226 size_t length = 0;
ysr@777 227 HeapRegion* curr = _head;
ysr@777 228 HeapRegion* last = NULL;
ysr@777 229 while (curr != NULL) {
johnc@1829 230 if (!curr->is_young()) {
ysr@777 231 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
johnc@1829 232 "incorrectly tagged (y: %d, surv: %d)",
ysr@777 233 curr->bottom(), curr->end(),
johnc@1829 234 curr->is_young(), curr->is_survivor());
ysr@777 235 ret = false;
ysr@777 236 }
ysr@777 237 ++length;
ysr@777 238 last = curr;
ysr@777 239 curr = curr->get_next_young_region();
ysr@777 240 }
ysr@777 241 ret = ret && (length == _length);
ysr@777 242
ysr@777 243 if (!ret) {
ysr@777 244 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
ysr@777 245 gclog_or_tty->print_cr("### list has %d entries, _length is %d",
ysr@777 246 length, _length);
ysr@777 247 }
ysr@777 248
johnc@1829 249 return ret;
ysr@777 250 }
ysr@777 251
johnc@1829 252 bool YoungList::check_list_empty(bool check_sample) {
ysr@777 253 bool ret = true;
ysr@777 254
ysr@777 255 if (_length != 0) {
ysr@777 256 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
ysr@777 257 _length);
ysr@777 258 ret = false;
ysr@777 259 }
ysr@777 260 if (check_sample && _last_sampled_rs_lengths != 0) {
ysr@777 261 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
ysr@777 262 ret = false;
ysr@777 263 }
ysr@777 264 if (_head != NULL) {
ysr@777 265 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
ysr@777 266 ret = false;
ysr@777 267 }
ysr@777 268 if (!ret) {
ysr@777 269 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
ysr@777 270 }
ysr@777 271
johnc@1829 272 return ret;
ysr@777 273 }
ysr@777 274
ysr@777 275 void
ysr@777 276 YoungList::rs_length_sampling_init() {
ysr@777 277 _sampled_rs_lengths = 0;
ysr@777 278 _curr = _head;
ysr@777 279 }
ysr@777 280
ysr@777 281 bool
ysr@777 282 YoungList::rs_length_sampling_more() {
ysr@777 283 return _curr != NULL;
ysr@777 284 }
ysr@777 285
ysr@777 286 void
ysr@777 287 YoungList::rs_length_sampling_next() {
ysr@777 288 assert( _curr != NULL, "invariant" );
johnc@1829 289 size_t rs_length = _curr->rem_set()->occupied();
johnc@1829 290
johnc@1829 291 _sampled_rs_lengths += rs_length;
johnc@1829 292
johnc@1829 293 // The current region may not yet have been added to the
johnc@1829 294 // incremental collection set (it gets added when it is
johnc@1829 295 // retired as the current allocation region).
johnc@1829 296 if (_curr->in_collection_set()) {
johnc@1829 297 // Update the collection set policy information for this region
johnc@1829 298 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
johnc@1829 299 }
johnc@1829 300
ysr@777 301 _curr = _curr->get_next_young_region();
ysr@777 302 if (_curr == NULL) {
ysr@777 303 _last_sampled_rs_lengths = _sampled_rs_lengths;
ysr@777 304 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
ysr@777 305 }
ysr@777 306 }
ysr@777 307
ysr@777 308 void
ysr@777 309 YoungList::reset_auxilary_lists() {
ysr@777 310 guarantee( is_empty(), "young list should be empty" );
ysr@777 311 assert(check_list_well_formed(), "young list should be well formed");
ysr@777 312
ysr@777 313 // Add survivor regions to SurvRateGroup.
ysr@777 314 _g1h->g1_policy()->note_start_adding_survivor_regions();
apetrusenko@980 315 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
johnc@1829 316
ysr@777 317 for (HeapRegion* curr = _survivor_head;
ysr@777 318 curr != NULL;
ysr@777 319 curr = curr->get_next_young_region()) {
ysr@777 320 _g1h->g1_policy()->set_region_survivors(curr);
johnc@1829 321
johnc@1829 322 // The region is a non-empty survivor so let's add it to
johnc@1829 323 // the incremental collection set for the next evacuation
johnc@1829 324 // pause.
johnc@1829 325 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
ysr@777 326 }
ysr@777 327 _g1h->g1_policy()->note_stop_adding_survivor_regions();
ysr@777 328
johnc@1829 329 _head = _survivor_head;
johnc@1829 330 _length = _survivor_length;
ysr@777 331 if (_survivor_head != NULL) {
johnc@1829 332 assert(_survivor_tail != NULL, "cause it shouldn't be");
johnc@1829 333 assert(_survivor_length > 0, "invariant");
johnc@1829 334 _survivor_tail->set_next_young_region(NULL);
johnc@1829 335 }
johnc@1829 336
johnc@1829 337 // Don't clear the survivor list handles until the start of
johnc@1829 338 // the next evacuation pause - we need it in order to re-tag
johnc@1829 339 // the survivor regions from this evacuation pause as 'young'
johnc@1829 340 // at the start of the next.
johnc@1829 341
apetrusenko@980 342 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
ysr@777 343
ysr@777 344 assert(check_list_well_formed(), "young list should be well formed");
ysr@777 345 }
ysr@777 346
ysr@777 347 void YoungList::print() {
johnc@1829 348 HeapRegion* lists[] = {_head, _survivor_head};
johnc@1829 349 const char* names[] = {"YOUNG", "SURVIVOR"};
ysr@777 350
ysr@777 351 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
ysr@777 352 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
ysr@777 353 HeapRegion *curr = lists[list];
ysr@777 354 if (curr == NULL)
ysr@777 355 gclog_or_tty->print_cr(" empty");
ysr@777 356 while (curr != NULL) {
ysr@777 357 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 358 "age: %4d, y: %d, surv: %d",
ysr@777 359 curr->bottom(), curr->end(),
ysr@777 360 curr->top(),
ysr@777 361 curr->prev_top_at_mark_start(),
ysr@777 362 curr->next_top_at_mark_start(),
ysr@777 363 curr->top_at_conc_mark_count(),
ysr@777 364 curr->age_in_surv_rate_group_cond(),
ysr@777 365 curr->is_young(),
ysr@777 366 curr->is_survivor());
ysr@777 367 curr = curr->get_next_young_region();
ysr@777 368 }
ysr@777 369 }
ysr@777 370
ysr@777 371 gclog_or_tty->print_cr("");
ysr@777 372 }
ysr@777 373
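// Lock-free push onto / pop from the global list of regions with
// dirty cards. A region claims the right to be on the list by CAS-ing
// a self pointer into its "next" field; the last element of the list
// also points to itself, so NULL means "not on the list".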
apetrusenko@1231 374 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
apetrusenko@1231 375 {
apetrusenko@1231 376 // Claim the right to put the region on the dirty cards region list
apetrusenko@1231 377 // by installing a self pointer.
apetrusenko@1231 378 HeapRegion* next = hr->get_next_dirty_cards_region();
apetrusenko@1231 379 if (next == NULL) {
apetrusenko@1231 380 HeapRegion* res = (HeapRegion*)
apetrusenko@1231 381 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
apetrusenko@1231 382 NULL);
apetrusenko@1231 383 if (res == NULL) {
apetrusenko@1231 384 HeapRegion* head;
apetrusenko@1231 385 do {
apetrusenko@1231 386 // Put the region to the dirty cards region list.
apetrusenko@1231 387 head = _dirty_cards_region_list;
apetrusenko@1231 388 next = (HeapRegion*)
apetrusenko@1231 389 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
apetrusenko@1231 390 if (next == head) {
apetrusenko@1231 391 assert(hr->get_next_dirty_cards_region() == hr,
apetrusenko@1231 392 "hr->get_next_dirty_cards_region() != hr");
apetrusenko@1231 393 if (next == NULL) {
apetrusenko@1231 394 // The last region in the list points to itself.
apetrusenko@1231 395 hr->set_next_dirty_cards_region(hr);
apetrusenko@1231 396 } else {
apetrusenko@1231 397 hr->set_next_dirty_cards_region(next);
apetrusenko@1231 398 }
apetrusenko@1231 399 }
apetrusenko@1231 400 } while (next != head);
apetrusenko@1231 401 }
apetrusenko@1231 402 }
apetrusenko@1231 403 }
apetrusenko@1231 404
apetrusenko@1231 405 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
apetrusenko@1231 406 {
apetrusenko@1231 407 HeapRegion* head;
apetrusenko@1231 408 HeapRegion* hr;
apetrusenko@1231 409 do {
apetrusenko@1231 410 head = _dirty_cards_region_list;
apetrusenko@1231 411 if (head == NULL) {
apetrusenko@1231 412 return NULL;
apetrusenko@1231 413 }
apetrusenko@1231 414 HeapRegion* new_head = head->get_next_dirty_cards_region();
apetrusenko@1231 415 if (head == new_head) {
apetrusenko@1231 416 // The last region.
apetrusenko@1231 417 new_head = NULL;
apetrusenko@1231 418 }
apetrusenko@1231 419 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
apetrusenko@1231 420 head);
apetrusenko@1231 421 } while (hr != head);
apetrusenko@1231 422 assert(hr != NULL, "invariant");
apetrusenko@1231 423 hr->set_next_dirty_cards_region(NULL);
apetrusenko@1231 424 return hr;
apetrusenko@1231 425 }
apetrusenko@1231 426
ysr@777 427 void G1CollectedHeap::stop_conc_gc_threads() {
iveresov@1229 428 _cg1r->stop();
ysr@777 429 _cmThread->stop();
ysr@777 430 }
ysr@777 431
jmasa@2909 432 #ifdef ASSERT
jmasa@2909 433 // A region is added to the collection set as it is retired
jmasa@2909 434 // so an address p can point to a region which will be in the
jmasa@2909 435 // collection set but has not yet been retired. This method
jmasa@2909 436 // therefore is only accurate during a GC pause after all
jmasa@2909 437 // regions have been retired. It is used for debugging
jmasa@2909 438 // to check if an nmethod has references to objects that can
jmasa@2909 439 // be move during a partial collection. Though it can be
jmasa@2909 440 // inaccurate, it is sufficient for G1 because the conservative
jmasa@2909 441 // implementation of is_scavengable() for G1 will indicate that
jmasa@2909 442 // all nmethods must be scanned during a partial collection.
jmasa@2909 443 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
jmasa@2909 444 HeapRegion* hr = heap_region_containing(p);
jmasa@2909 445 return hr != NULL && hr->in_collection_set();
jmasa@2909 446 }
jmasa@2909 447 #endif
jmasa@2909 448
jmasa@2909 449 // Returns true if the reference points to an object that
jmasa@2909 450 // can move in an incremental collection.
jmasa@2909 451 bool G1CollectedHeap::is_scavengable(const void* p) {
jmasa@2909 452 G1CollectedHeap* g1h = G1CollectedHeap::heap();
jmasa@2909 453 G1CollectorPolicy* g1p = g1h->g1_policy();
jmasa@2909 454 HeapRegion* hr = heap_region_containing(p);
jmasa@2909 455 if (hr == NULL) {
jmasa@2909 456 // perm gen (or null)
jmasa@2909 457 return false;
jmasa@2909 458 } else {
jmasa@2909 459 return !hr->isHumongous();
jmasa@2909 460 }
jmasa@2909 461 }
jmasa@2909 462
ysr@777 463 void G1CollectedHeap::check_ct_logs_at_safepoint() {
ysr@777 464 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 465 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
ysr@777 466
ysr@777 467 // Count the dirty cards at the start.
ysr@777 468 CountNonCleanMemRegionClosure count1(this);
ysr@777 469 ct_bs->mod_card_iterate(&count1);
ysr@777 470 int orig_count = count1.n();
ysr@777 471
ysr@777 472 // First clear the logged cards.
ysr@777 473 ClearLoggedCardTableEntryClosure clear;
ysr@777 474 dcqs.set_closure(&clear);
ysr@777 475 dcqs.apply_closure_to_all_completed_buffers();
ysr@777 476 dcqs.iterate_closure_all_threads(false);
ysr@777 477 clear.print_histo();
ysr@777 478
ysr@777 479 // Now ensure that there are no dirty cards.
ysr@777 480 CountNonCleanMemRegionClosure count2(this);
ysr@777 481 ct_bs->mod_card_iterate(&count2);
ysr@777 482 if (count2.n() != 0) {
ysr@777 483 gclog_or_tty->print_cr("Card table has %d entries; %d originally",
ysr@777 484 count2.n(), orig_count);
ysr@777 485 }
ysr@777 486 guarantee(count2.n() == 0, "Card table should be clean.");
ysr@777 487
ysr@777 488 RedirtyLoggedCardTableEntryClosure redirty;
ysr@777 489 JavaThread::dirty_card_queue_set().set_closure(&redirty);
ysr@777 490 dcqs.apply_closure_to_all_completed_buffers();
ysr@777 491 dcqs.iterate_closure_all_threads(false);
ysr@777 492 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
ysr@777 493 clear.calls(), orig_count);
ysr@777 494 guarantee(redirty.calls() == clear.calls(),
ysr@777 495 "Or else mechanism is broken.");
ysr@777 496
ysr@777 497 CountNonCleanMemRegionClosure count3(this);
ysr@777 498 ct_bs->mod_card_iterate(&count3);
ysr@777 499 if (count3.n() != orig_count) {
ysr@777 500 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
ysr@777 501 orig_count, count3.n());
ysr@777 502 guarantee(count3.n() >= orig_count, "Should have restored them all.");
ysr@777 503 }
ysr@777 504
ysr@777 505 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
ysr@777 506 }
ysr@777 507
ysr@777 508 // Private class members.
ysr@777 509
ysr@777 510 G1CollectedHeap* G1CollectedHeap::_g1h;
ysr@777 511
ysr@777 512 // Private methods.
ysr@777 513
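// Tries to satisfy a region allocation request from the secondary
// free list, waiting on SecondaryFreeList_lock while more free
// regions are still coming or the list is non-empty.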
tonyp@2472 514 HeapRegion*
tonyp@2643 515 G1CollectedHeap::new_region_try_secondary_free_list() {
tonyp@2472 516 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2472 517 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
tonyp@2472 518 if (!_secondary_free_list.is_empty()) {
tonyp@2472 519 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 520 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
tonyp@2472 521 "secondary_free_list has "SIZE_FORMAT" entries",
tonyp@2472 522 _secondary_free_list.length());
tonyp@2472 523 }
tonyp@2472 524 // It looks as if there are free regions available on the
tonyp@2472 525 // secondary_free_list. Let's move them to the free_list and try
tonyp@2472 526 // again to allocate from it.
tonyp@2472 527 append_secondary_free_list();
tonyp@2472 528
tonyp@2472 529 assert(!_free_list.is_empty(), "if the secondary_free_list was not "
tonyp@2472 530 "empty we should have moved at least one entry to the free_list");
tonyp@2472 531 HeapRegion* res = _free_list.remove_head();
tonyp@2472 532 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 533 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
tonyp@2472 534 "allocated "HR_FORMAT" from secondary_free_list",
tonyp@2472 535 HR_FORMAT_PARAMS(res));
tonyp@2472 536 }
tonyp@2472 537 return res;
tonyp@2472 538 }
tonyp@2472 539
tonyp@2472 540 // Wait here until we get notified either when (a) there are no
tonyp@2472 541 // more free regions coming or (b) some regions have been moved on
tonyp@2472 542 // the secondary_free_list.
tonyp@2472 543 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
tonyp@2472 544 }
tonyp@2472 545
tonyp@2472 546 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 547 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
tonyp@2472 548 "could not allocate from secondary_free_list");
tonyp@2472 549 }
tonyp@2472 550 return NULL;
tonyp@2472 551 }
tonyp@2472 552
tonyp@2715 553 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
tonyp@2472 554 assert(!isHumongous(word_size) ||
tonyp@2472 555 word_size <= (size_t) HeapRegion::GrainWords,
tonyp@2472 556 "the only time we use this to allocate a humongous region is "
tonyp@2472 557 "when we are allocating a single humongous region");
tonyp@2472 558
tonyp@2472 559 HeapRegion* res;
tonyp@2472 560 if (G1StressConcRegionFreeing) {
tonyp@2472 561 if (!_secondary_free_list.is_empty()) {
tonyp@2472 562 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 563 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
tonyp@2472 564 "forced to look at the secondary_free_list");
tonyp@2472 565 }
tonyp@2643 566 res = new_region_try_secondary_free_list();
tonyp@2472 567 if (res != NULL) {
tonyp@2472 568 return res;
tonyp@2472 569 }
tonyp@2472 570 }
tonyp@2472 571 }
tonyp@2472 572 res = _free_list.remove_head_or_null();
tonyp@2472 573 if (res == NULL) {
tonyp@2472 574 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 575 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
tonyp@2472 576 "res == NULL, trying the secondary_free_list");
tonyp@2472 577 }
tonyp@2643 578 res = new_region_try_secondary_free_list();
tonyp@2472 579 }
ysr@777 580 if (res == NULL && do_expand) {
tonyp@3114 581 ergo_verbose1(ErgoHeapSizing,
tonyp@3114 582 "attempt heap expansion",
tonyp@3114 583 ergo_format_reason("region allocation request failed")
tonyp@3114 584 ergo_format_byte("allocation request"),
tonyp@3114 585 word_size * HeapWordSize);
johnc@2504 586 if (expand(word_size * HeapWordSize)) {
tonyp@2963 587 // Even though the heap was expanded, it might not have reached
tonyp@2963 588 // the desired size. So, we cannot assume that the allocation
tonyp@2963 589 // will succeed.
tonyp@2963 590 res = _free_list.remove_head_or_null();
johnc@2504 591 }
ysr@777 592 }
ysr@777 593 return res;
ysr@777 594 }
ysr@777 595
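// Finds (and removes from the free list) a contiguous series of
// num_regions regions that can back a humongous object of word_size
// words; returns the index of the first region in the series, or
// G1_NULL_HRS_INDEX if no such series could be found.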
tonyp@2963 596 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
tonyp@2963 597 size_t word_size) {
tonyp@2643 598 assert(isHumongous(word_size), "word_size should be humongous");
tonyp@2643 599 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
tonyp@2643 600
tonyp@2963 601 size_t first = G1_NULL_HRS_INDEX;
tonyp@2472 602 if (num_regions == 1) {
tonyp@2472 603 // Only one region to allocate, no need to go through the slower
tonyp@2472 604 // path. The caller will attempt the expansion if this fails, so
tonyp@2472 605 // let's not try to expand here too.
tonyp@2715 606 HeapRegion* hr = new_region(word_size, false /* do_expand */);
tonyp@2472 607 if (hr != NULL) {
tonyp@2472 608 first = hr->hrs_index();
tonyp@2472 609 } else {
tonyp@2963 610 first = G1_NULL_HRS_INDEX;
tonyp@2472 611 }
tonyp@2472 612 } else {
tonyp@2472 613 // We can't allocate humongous regions while cleanupComplete() is
tonyp@2472 614 // running, since some of the regions we find to be empty might not
tonyp@2472 615 // yet be added to the free list and it is not straightforward to
tonyp@2472 616 // know which list they are on so that we can remove them. Note
tonyp@2472 617 // that we only need to do this if we need to allocate more than
tonyp@2472 618 // one region to satisfy the current humongous allocation
tonyp@2472 619 // request. If we are only allocating one region we use the common
tonyp@2472 620 // region allocation code (see above).
tonyp@2472 621 wait_while_free_regions_coming();
tonyp@2643 622 append_secondary_free_list_if_not_empty_with_lock();
tonyp@2472 623
tonyp@2472 624 if (free_regions() >= num_regions) {
tonyp@2963 625 first = _hrs.find_contiguous(num_regions);
tonyp@2963 626 if (first != G1_NULL_HRS_INDEX) {
tonyp@2963 627 for (size_t i = first; i < first + num_regions; ++i) {
tonyp@2963 628 HeapRegion* hr = region_at(i);
tonyp@2472 629 assert(hr->is_empty(), "sanity");
tonyp@2643 630 assert(is_on_master_free_list(hr), "sanity");
tonyp@2472 631 hr->set_pending_removal(true);
tonyp@2472 632 }
tonyp@2472 633 _free_list.remove_all_pending(num_regions);
tonyp@2472 634 }
tonyp@2472 635 }
tonyp@2472 636 }
tonyp@2472 637 return first;
tonyp@2472 638 }
tonyp@2472 639
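// Sets up the series found above as one "starts humongous" region
// followed by zero or more "continues humongous" regions and returns
// the address of the new object. The ordering of the updates matters
// because concurrent refinement threads may already be scanning these
// regions; see the comments in the body.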
tonyp@2643 640 HeapWord*
tonyp@2963 641 G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
tonyp@2643 642 size_t num_regions,
tonyp@2643 643 size_t word_size) {
tonyp@2963 644 assert(first != G1_NULL_HRS_INDEX, "pre-condition");
tonyp@2643 645 assert(isHumongous(word_size), "word_size should be humongous");
tonyp@2643 646 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
tonyp@2643 647
tonyp@2643 648 // Index of last region in the series + 1.
tonyp@2963 649 size_t last = first + num_regions;
tonyp@2643 650
tonyp@2643 651 // We need to initialize the region(s) we just discovered. This is
tonyp@2643 652 // a bit tricky given that it can happen concurrently with
tonyp@2643 653 // refinement threads refining cards on these regions and
tonyp@2643 654 // potentially wanting to refine the BOT as they are scanning
tonyp@2643 655 // those cards (this can happen shortly after a cleanup; see CR
tonyp@2643 656 // 6991377). So we have to set up the region(s) carefully and in
tonyp@2643 657 // a specific order.
tonyp@2643 658
tonyp@2643 659 // The word size sum of all the regions we will allocate.
tonyp@2643 660 size_t word_size_sum = num_regions * HeapRegion::GrainWords;
tonyp@2643 661 assert(word_size <= word_size_sum, "sanity");
tonyp@2643 662
tonyp@2643 663 // This will be the "starts humongous" region.
tonyp@2963 664 HeapRegion* first_hr = region_at(first);
tonyp@2643 665 // The header of the new object will be placed at the bottom of
tonyp@2643 666 // the first region.
tonyp@2643 667 HeapWord* new_obj = first_hr->bottom();
tonyp@2643 668 // This will be the new end of the first region in the series that
tonyp@2643 669 // should also match the end of the last region in the series.
tonyp@2643 670 HeapWord* new_end = new_obj + word_size_sum;
tonyp@2643 671 // This will be the new top of the first region that will reflect
tonyp@2643 672 // this allocation.
tonyp@2643 673 HeapWord* new_top = new_obj + word_size;
tonyp@2643 674
tonyp@2643 675 // First, we need to zero the header of the space that we will be
tonyp@2643 676 // allocating. When we update top further down, some refinement
tonyp@2643 677 // threads might try to scan the region. By zeroing the header we
tonyp@2643 678 // ensure that any thread that will try to scan the region will
tonyp@2643 679 // come across the zero klass word and bail out.
tonyp@2643 680 //
tonyp@2643 681 // NOTE: It would not have been correct to have used
tonyp@2643 682 // CollectedHeap::fill_with_object() and make the space look like
tonyp@2643 683 // an int array. The thread that is doing the allocation will
tonyp@2643 684 // later update the object header to a potentially different array
tonyp@2643 685 // type and, for a very short period of time, the klass and length
tonyp@2643 686 // fields will be inconsistent. This could cause a refinement
tonyp@2643 687 // thread to calculate the object size incorrectly.
tonyp@2643 688 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
tonyp@2643 689
tonyp@2643 690 // We will set up the first region as "starts humongous". This
tonyp@2643 691 // will also update the BOT covering all the regions to reflect
tonyp@2643 692 // that there is a single object that starts at the bottom of the
tonyp@2643 693 // first region.
tonyp@2643 694 first_hr->set_startsHumongous(new_top, new_end);
tonyp@2643 695
tonyp@2643 696 // Then, if there are any, we will set up the "continues
tonyp@2643 697 // humongous" regions.
tonyp@2643 698 HeapRegion* hr = NULL;
tonyp@2963 699 for (size_t i = first + 1; i < last; ++i) {
tonyp@2963 700 hr = region_at(i);
tonyp@2643 701 hr->set_continuesHumongous(first_hr);
tonyp@2643 702 }
tonyp@2643 703 // If we have "continues humongous" regions (hr != NULL), then the
tonyp@2643 704 // end of the last one should match new_end.
tonyp@2643 705 assert(hr == NULL || hr->end() == new_end, "sanity");
tonyp@2643 706
tonyp@2643 707 // Up to this point no concurrent thread would have been able to
tonyp@2643 708 // do any scanning on any region in this series. All the top
tonyp@2643 709 // fields still point to bottom, so the intersection between
tonyp@2643 710 // [bottom,top] and [card_start,card_end] will be empty. Before we
tonyp@2643 711 // update the top fields, we'll do a storestore to make sure that
tonyp@2643 712 // no thread sees the update to top before the zeroing of the
tonyp@2643 713 // object header and the BOT initialization.
tonyp@2643 714 OrderAccess::storestore();
tonyp@2643 715
tonyp@2643 716 // Now that the BOT and the object header have been initialized,
tonyp@2643 717 // we can update top of the "starts humongous" region.
tonyp@2643 718 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
tonyp@2643 719 "new_top should be in this region");
tonyp@2643 720 first_hr->set_top(new_top);
tonyp@2975 721 if (_hr_printer.is_active()) {
tonyp@2975 722 HeapWord* bottom = first_hr->bottom();
tonyp@2975 723 HeapWord* end = first_hr->orig_end();
tonyp@2975 724 if ((first + 1) == last) {
tonyp@2975 725 // the series has a single humongous region
tonyp@2975 726 _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
tonyp@2975 727 } else {
tonyp@2975 728 // the series has more than one humongous region
tonyp@2975 729 _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
tonyp@2975 730 }
tonyp@2975 731 }
tonyp@2643 732
tonyp@2643 733 // Now, we will update the top fields of the "continues humongous"
tonyp@2643 734 // regions. The reason we need to do this is that, otherwise,
tonyp@2643 735 // these regions would look empty and this will confuse parts of
tonyp@2643 736 // G1. For example, the code that looks for a consecutive number
tonyp@2643 737 // of empty regions will consider them empty and try to
tonyp@2643 738 // re-allocate them. We can extend is_empty() to also include
tonyp@2643 739 // !continuesHumongous(), but it is easier to just update the top
tonyp@2643 740 // fields here. The way we set top for all regions (i.e., top ==
tonyp@2643 741 // end for all regions but the last one, top == new_top for the
tonyp@2643 742 // last one) is actually used when we will free up the humongous
tonyp@2643 743 // region in free_humongous_region().
tonyp@2643 744 hr = NULL;
tonyp@2963 745 for (size_t i = first + 1; i < last; ++i) {
tonyp@2963 746 hr = region_at(i);
tonyp@2643 747 if ((i + 1) == last) {
tonyp@2643 748 // last continues humongous region
tonyp@2643 749 assert(hr->bottom() < new_top && new_top <= hr->end(),
tonyp@2643 750 "new_top should fall on this region");
tonyp@2643 751 hr->set_top(new_top);
tonyp@2975 752 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
tonyp@2643 753 } else {
tonyp@2643 754 // not last one
tonyp@2643 755 assert(new_top > hr->end(), "new_top should be above this region");
tonyp@2643 756 hr->set_top(hr->end());
tonyp@2975 757 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
tonyp@2643 758 }
tonyp@2643 759 }
tonyp@2643 760 // If we have continues humongous regions (hr != NULL), then the
tonyp@2643 761 // end of the last one should match new_end and its top should
tonyp@2643 762 // match new_top.
tonyp@2643 763 assert(hr == NULL ||
tonyp@2643 764 (hr->end() == new_end && hr->top() == new_top), "sanity");
tonyp@2643 765
tonyp@2643 766 assert(first_hr->used() == word_size * HeapWordSize, "invariant");
tonyp@2643 767 _summary_bytes_used += first_hr->used();
tonyp@2643 768 _humongous_set.add(first_hr);
tonyp@2643 769
tonyp@2643 770 return new_obj;
tonyp@2643 771 }
tonyp@2643 772
ysr@777 773 // If could fit into free regions w/o expansion, try.
ysr@777 774 // Otherwise, if can expand, do so.
ysr@777 775 // Otherwise, if using ex regions might help, try with ex given back.
tonyp@2315 776 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
tonyp@2472 777 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
tonyp@2472 778
tonyp@2472 779 verify_region_sets_optional();
ysr@777 780
ysr@777 781 size_t num_regions =
tonyp@2315 782 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
ysr@777 783 size_t x_size = expansion_regions();
tonyp@2963 784 size_t fs = _hrs.free_suffix();
tonyp@2963 785 size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
tonyp@2963 786 if (first == G1_NULL_HRS_INDEX) {
tonyp@2472 787 // The only thing we can do now is attempt expansion.
ysr@777 788 if (fs + x_size >= num_regions) {
johnc@2504 789 // If the number of regions we're trying to allocate for this
johnc@2504 790 // object is at most the number of regions in the free suffix,
johnc@2504 791 // then the call to humongous_obj_allocate_find_first() above
johnc@2504 792 // should have succeeded and we wouldn't be here.
johnc@2504 793 //
johnc@2504 794 // We should only be trying to expand when the free suffix is
johnc@2504 795 // not sufficient for the object _and_ we have some expansion
johnc@2504 796 // room available.
johnc@2504 797 assert(num_regions > fs, "earlier allocation should have succeeded");
johnc@2504 798
tonyp@3114 799 ergo_verbose1(ErgoHeapSizing,
tonyp@3114 800 "attempt heap expansion",
tonyp@3114 801 ergo_format_reason("humongous allocation request failed")
tonyp@3114 802 ergo_format_byte("allocation request"),
tonyp@3114 803 word_size * HeapWordSize);
johnc@2504 804 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
tonyp@2963 805 // Even though the heap was expanded, it might not have
tonyp@2963 806 // reached the desired size. So, we cannot assume that the
tonyp@2963 807 // allocation will succeed.
johnc@2504 808 first = humongous_obj_allocate_find_first(num_regions, word_size);
johnc@2504 809 }
tonyp@2472 810 }
tonyp@2472 811 }
tonyp@2472 812
tonyp@2643 813 HeapWord* result = NULL;
tonyp@2963 814 if (first != G1_NULL_HRS_INDEX) {
tonyp@2643 815 result =
tonyp@2643 816 humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
tonyp@2643 817 assert(result != NULL, "it should always return a valid result");
tonyp@2472 818 }
tonyp@2472 819
tonyp@2472 820 verify_region_sets_optional();
tonyp@2643 821
tonyp@2643 822 return result;
ysr@777 823 }
ysr@777 824
tonyp@2315 825 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
tonyp@2315 826 assert_heap_not_locked_and_not_at_safepoint();
tonyp@2715 827 assert(!isHumongous(word_size), "we do not allow humongous TLABs");
tonyp@2715 828
tonyp@2715 829 unsigned int dummy_gc_count_before;
tonyp@2715 830 return attempt_allocation(word_size, &dummy_gc_count_before);
ysr@777 831 }
ysr@777 832
ysr@777 833 HeapWord*
ysr@777 834 G1CollectedHeap::mem_allocate(size_t word_size,
tonyp@2315 835 bool* gc_overhead_limit_was_exceeded) {
tonyp@2315 836 assert_heap_not_locked_and_not_at_safepoint();
ysr@777 837
tonyp@2715 838 // Loop until the allocation is satisfied, or unsatisfied after GC.
tonyp@2315 839 for (int try_count = 1; /* we'll return */; try_count += 1) {
tonyp@2315 840 unsigned int gc_count_before;
tonyp@2715 841
tonyp@2715 842 HeapWord* result = NULL;
tonyp@2715 843 if (!isHumongous(word_size)) {
tonyp@2715 844 result = attempt_allocation(word_size, &gc_count_before);
tonyp@2715 845 } else {
tonyp@2715 846 result = attempt_allocation_humongous(word_size, &gc_count_before);
tonyp@2715 847 }
tonyp@2715 848 if (result != NULL) {
tonyp@2715 849 return result;
ysr@777 850 }
ysr@777 851
ysr@777 852 // Create the garbage collection operation...
tonyp@2315 853 VM_G1CollectForAllocation op(gc_count_before, word_size);
ysr@777 854 // ...and get the VM thread to execute it.
ysr@777 855 VMThread::execute(&op);
tonyp@2315 856
tonyp@2315 857 if (op.prologue_succeeded() && op.pause_succeeded()) {
tonyp@2315 858 // If the operation was successful we'll return the result even
tonyp@2315 859 // if it is NULL. If the allocation attempt failed immediately
tonyp@2315 860 // after a Full GC, it's unlikely we'll be able to allocate now.
tonyp@2315 861 HeapWord* result = op.result();
tonyp@2315 862 if (result != NULL && !isHumongous(word_size)) {
tonyp@2315 863 // Allocations that take place on VM operations do not do any
tonyp@2315 864 // card dirtying and we have to do it here. We only have to do
tonyp@2315 865 // this for non-humongous allocations, though.
tonyp@2315 866 dirty_young_block(result, word_size);
tonyp@2315 867 }
ysr@777 868 return result;
tonyp@2315 869 } else {
tonyp@2315 870 assert(op.result() == NULL,
tonyp@2315 871 "the result should be NULL if the VM op did not succeed");
ysr@777 872 }
ysr@777 873
ysr@777 874 // Give a warning if we seem to be looping forever.
ysr@777 875 if ((QueuedAllocationWarningCount > 0) &&
ysr@777 876 (try_count % QueuedAllocationWarningCount == 0)) {
tonyp@2315 877 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
ysr@777 878 }
ysr@777 879 }
tonyp@2315 880
tonyp@2315 881 ShouldNotReachHere();
tonyp@2715 882 return NULL;
ysr@777 883 }
ysr@777 884
tonyp@2715 885 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
tonyp@2715 886 unsigned int *gc_count_before_ret) {
tonyp@2715 887 // Make sure you read the note in attempt_allocation_humongous().
tonyp@2715 888
tonyp@2715 889 assert_heap_not_locked_and_not_at_safepoint();
tonyp@2715 890 assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
tonyp@2715 891 "be called for humongous allocation requests");
tonyp@2715 892
tonyp@2715 893 // We should only get here after the first-level allocation attempt
tonyp@2715 894 // (attempt_allocation()) failed to allocate.
tonyp@2715 895
tonyp@2715 896 // We will loop until a) we manage to successfully perform the
tonyp@2715 897 // allocation or b) we successfully schedule a collection which
tonyp@2715 898 // fails to perform the allocation. b) is the only case when we'll
tonyp@2715 899 // return NULL.
tonyp@2715 900 HeapWord* result = NULL;
tonyp@2715 901 for (int try_count = 1; /* we'll return */; try_count += 1) {
tonyp@2715 902 bool should_try_gc;
tonyp@2715 903 unsigned int gc_count_before;
tonyp@2715 904
tonyp@2715 905 {
tonyp@2715 906 MutexLockerEx x(Heap_lock);
tonyp@2715 907
tonyp@2715 908 result = _mutator_alloc_region.attempt_allocation_locked(word_size,
tonyp@2715 909 false /* bot_updates */);
tonyp@2715 910 if (result != NULL) {
tonyp@2715 911 return result;
tonyp@2715 912 }
tonyp@2715 913
tonyp@2715 914 // If we reach here, attempt_allocation_locked() above failed to
tonyp@2715 915 // allocate a new region. So the mutator alloc region should be NULL.
tonyp@2715 916 assert(_mutator_alloc_region.get() == NULL, "only way to get here");
tonyp@2715 917
tonyp@2715 918 if (GC_locker::is_active_and_needs_gc()) {
tonyp@2715 919 if (g1_policy()->can_expand_young_list()) {
tonyp@3114 920 // No need for an ergo verbose message here,
tonyp@3114 921 // can_expand_young_list() does this when it returns true.
tonyp@2715 922 result = _mutator_alloc_region.attempt_allocation_force(word_size,
tonyp@2715 923 false /* bot_updates */);
tonyp@2715 924 if (result != NULL) {
tonyp@2715 925 return result;
tonyp@2715 926 }
tonyp@2715 927 }
tonyp@2715 928 should_try_gc = false;
tonyp@2715 929 } else {
tonyp@2715 930 // Read the GC count while still holding the Heap_lock.
tonyp@2715 931 gc_count_before = SharedHeap::heap()->total_collections();
tonyp@2715 932 should_try_gc = true;
tonyp@2715 933 }
tonyp@2715 934 }
tonyp@2715 935
tonyp@2715 936 if (should_try_gc) {
tonyp@2715 937 bool succeeded;
tonyp@2715 938 result = do_collection_pause(word_size, gc_count_before, &succeeded);
tonyp@2715 939 if (result != NULL) {
tonyp@2715 940 assert(succeeded, "only way to get back a non-NULL result");
tonyp@2715 941 return result;
tonyp@2715 942 }
tonyp@2715 943
tonyp@2715 944 if (succeeded) {
tonyp@2715 945 // If we get here we successfully scheduled a collection which
tonyp@2715 946 // failed to allocate. No point in trying to allocate
tonyp@2715 947 // further. We'll just return NULL.
tonyp@2715 948 MutexLockerEx x(Heap_lock);
tonyp@2715 949 *gc_count_before_ret = SharedHeap::heap()->total_collections();
tonyp@2715 950 return NULL;
tonyp@2715 951 }
tonyp@2715 952 } else {
tonyp@2715 953 GC_locker::stall_until_clear();
tonyp@2715 954 }
tonyp@2715 955
tonyp@2715 956 // We can reach here if we were unsuccessful in scheduling a
tonyp@2715 957 // collection (because another thread beat us to it) or if we were
tonyp@2715 958 // stalled due to the GC locker. In either case we should retry the
tonyp@2715 959 // allocation attempt in case another thread successfully
tonyp@2715 960 // performed a collection and reclaimed enough space. We do the
tonyp@2715 961 // first attempt (without holding the Heap_lock) here and the
tonyp@2715 962 // follow-on attempt will be at the start of the next loop
tonyp@2715 963 // iteration (after taking the Heap_lock).
tonyp@2715 964 result = _mutator_alloc_region.attempt_allocation(word_size,
tonyp@2715 965 false /* bot_updates */);
tonyp@2715 966 if (result != NULL) {
tonyp@2715 967 return result;
tonyp@2715 968 }
tonyp@2715 969
tonyp@2715 970 // Give a warning if we seem to be looping forever.
tonyp@2715 971 if ((QueuedAllocationWarningCount > 0) &&
tonyp@2715 972 (try_count % QueuedAllocationWarningCount == 0)) {
tonyp@2715 973 warning("G1CollectedHeap::attempt_allocation_slow() "
tonyp@2715 974 "retries %d times", try_count);
tonyp@2715 975 }
tonyp@2715 976 }
tonyp@2715 977
tonyp@2715 978 ShouldNotReachHere();
tonyp@2715 979 return NULL;
tonyp@2715 980 }
tonyp@2715 981
tonyp@2715 982 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
tonyp@2715 983 unsigned int * gc_count_before_ret) {
tonyp@2715 984 // The structure of this method has a lot of similarities to
tonyp@2715 985 // attempt_allocation_slow(). The reason these two were not merged
tonyp@2715 986 // into a single one is that such a method would require several "if
tonyp@2715 987 // allocation is not humongous do this, otherwise do that"
tonyp@2715 988 // conditional paths which would obscure its flow. In fact, an early
tonyp@2715 989 // version of this code did use a unified method which was harder to
tonyp@2715 990 // follow and, as a result, it had subtle bugs that were hard to
tonyp@2715 991 // track down. So keeping these two methods separate allows each to
tonyp@2715 992 // be more readable. It will be good to keep these two in sync as
tonyp@2715 993 // much as possible.
tonyp@2715 994
tonyp@2715 995 assert_heap_not_locked_and_not_at_safepoint();
tonyp@2715 996 assert(isHumongous(word_size), "attempt_allocation_humongous() "
tonyp@2715 997 "should only be called for humongous allocations");
tonyp@2715 998
tonyp@2715 999 // We will loop until a) we manage to successfully perform the
tonyp@2715 1000 // allocation or b) we successfully schedule a collection which
tonyp@2715 1001 // fails to perform the allocation. b) is the only case when we'll
tonyp@2715 1002 // return NULL.
tonyp@2715 1003 HeapWord* result = NULL;
tonyp@2715 1004 for (int try_count = 1; /* we'll return */; try_count += 1) {
tonyp@2715 1005 bool should_try_gc;
tonyp@2715 1006 unsigned int gc_count_before;
tonyp@2715 1007
tonyp@2715 1008 {
tonyp@2715 1009 MutexLockerEx x(Heap_lock);
tonyp@2715 1010
tonyp@2715 1011 // Given that humongous objects are not allocated in young
tonyp@2715 1012 // regions, we'll first try to do the allocation without doing a
tonyp@2715 1013 // collection hoping that there's enough space in the heap.
tonyp@2715 1014 result = humongous_obj_allocate(word_size);
tonyp@2715 1015 if (result != NULL) {
tonyp@2715 1016 return result;
tonyp@2715 1017 }
tonyp@2715 1018
tonyp@2715 1019 if (GC_locker::is_active_and_needs_gc()) {
tonyp@2715 1020 should_try_gc = false;
tonyp@2715 1021 } else {
tonyp@2715 1022 // Read the GC count while still holding the Heap_lock.
tonyp@2715 1023 gc_count_before = SharedHeap::heap()->total_collections();
tonyp@2715 1024 should_try_gc = true;
tonyp@2715 1025 }
tonyp@2715 1026 }
tonyp@2715 1027
tonyp@2715 1028 if (should_try_gc) {
tonyp@2715 1029 // If we failed to allocate the humongous object, we should try to
tonyp@2715 1030 // do a collection pause (if we're allowed) in case it reclaims
tonyp@2715 1031 // enough space for the allocation to succeed after the pause.
tonyp@2715 1032
tonyp@2715 1033 bool succeeded;
tonyp@2715 1034 result = do_collection_pause(word_size, gc_count_before, &succeeded);
tonyp@2715 1035 if (result != NULL) {
tonyp@2715 1036 assert(succeeded, "only way to get back a non-NULL result");
tonyp@2715 1037 return result;
tonyp@2715 1038 }
tonyp@2715 1039
tonyp@2715 1040 if (succeeded) {
tonyp@2715 1041 // If we get here we successfully scheduled a collection which
tonyp@2715 1042 // failed to allocate. No point in trying to allocate
tonyp@2715 1043 // further. We'll just return NULL.
tonyp@2715 1044 MutexLockerEx x(Heap_lock);
tonyp@2715 1045 *gc_count_before_ret = SharedHeap::heap()->total_collections();
tonyp@2715 1046 return NULL;
tonyp@2715 1047 }
tonyp@2715 1048 } else {
tonyp@2715 1049 GC_locker::stall_until_clear();
tonyp@2715 1050 }
tonyp@2715 1051
tonyp@2715 1052 // We can reach here if we were unsuccessful in scheduling a
tonyp@2715 1053 // collection (because another thread beat us to it) or if we were
tonyp@2715 1054 // stalled due to the GC locker. In either case we should retry the
tonyp@2715 1055 // allocation attempt in case another thread successfully
tonyp@2715 1056 // performed a collection and reclaimed enough space. Give a
tonyp@2715 1057 // warning if we seem to be looping forever.
tonyp@2715 1058
tonyp@2715 1059 if ((QueuedAllocationWarningCount > 0) &&
tonyp@2715 1060 (try_count % QueuedAllocationWarningCount == 0)) {
tonyp@2715 1061 warning("G1CollectedHeap::attempt_allocation_humongous() "
tonyp@2715 1062 "retries %d times", try_count);
tonyp@2715 1063 }
tonyp@2715 1064 }
tonyp@2715 1065
tonyp@2715 1066 ShouldNotReachHere();
tonyp@2715 1067 return NULL;
tonyp@2715 1068 }
tonyp@2715 1069
tonyp@2715 1070 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
tonyp@2715 1071 bool expect_null_mutator_alloc_region) {
tonyp@2472 1072 assert_at_safepoint(true /* should_be_vm_thread */);
tonyp@2715 1073 assert(_mutator_alloc_region.get() == NULL ||
tonyp@2715 1074 !expect_null_mutator_alloc_region,
tonyp@2715 1075 "the current alloc region was unexpectedly found to be non-NULL");
tonyp@2715 1076
tonyp@2715 1077 if (!isHumongous(word_size)) {
tonyp@2715 1078 return _mutator_alloc_region.attempt_allocation_locked(word_size,
tonyp@2715 1079 false /* bot_updates */);
tonyp@2715 1080 } else {
tonyp@2715 1081 return humongous_obj_allocate(word_size);
tonyp@2715 1082 }
tonyp@2715 1083
tonyp@2715 1084 ShouldNotReachHere();
ysr@777 1085 }
ysr@777 1086
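// Closures used after a full GC to bring the remembered sets and the
// card table back into a consistent state: the first clears each
// region's remembered set and its cards, the second invalidates the
// cards covering the used portion of each region.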
ysr@777 1087 class PostMCRemSetClearClosure: public HeapRegionClosure {
ysr@777 1088 ModRefBarrierSet* _mr_bs;
ysr@777 1089 public:
ysr@777 1090 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
ysr@777 1091 bool doHeapRegion(HeapRegion* r) {
ysr@777 1092 r->reset_gc_time_stamp();
ysr@777 1093 if (r->continuesHumongous())
ysr@777 1094 return false;
ysr@777 1095 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 1096 if (hrrs != NULL) hrrs->clear();
ysr@777 1097 // You might think here that we could clear just the cards
ysr@777 1098 // corresponding to the used region. But no: if we leave a dirty card
ysr@777 1099 // in a region we might allocate into, then it would prevent that card
ysr@777 1100 // from being enqueued, and cause it to be missed.
ysr@777 1101 // Re: the performance cost: we shouldn't be doing full GC anyway!
ysr@777 1102 _mr_bs->clear(MemRegion(r->bottom(), r->end()));
ysr@777 1103 return false;
ysr@777 1104 }
ysr@777 1105 };
ysr@777 1106
ysr@777 1107
ysr@777 1108 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
ysr@777 1109 ModRefBarrierSet* _mr_bs;
ysr@777 1110 public:
ysr@777 1111 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
ysr@777 1112 bool doHeapRegion(HeapRegion* r) {
ysr@777 1113 if (r->continuesHumongous()) return false;
ysr@777 1114 if (r->used_region().word_size() != 0) {
ysr@777 1115 _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
ysr@777 1116 }
ysr@777 1117 return false;
ysr@777 1118 }
ysr@777 1119 };
ysr@777 1120
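// Rebuilds the remembered sets after a full GC by iterating over the
// oops of every region (skipping "continues humongous" ones) and
// feeding them to UpdateRSOopClosure; ParRebuildRSTask spreads this
// work across the GC worker threads using claim values.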
apetrusenko@1061 1121 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
apetrusenko@1061 1122 G1CollectedHeap* _g1h;
apetrusenko@1061 1123 UpdateRSOopClosure _cl;
apetrusenko@1061 1124 int _worker_i;
apetrusenko@1061 1125 public:
apetrusenko@1061 1126 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
johnc@2216 1127 _cl(g1->g1_rem_set(), worker_i),
apetrusenko@1061 1128 _worker_i(worker_i),
apetrusenko@1061 1129 _g1h(g1)
apetrusenko@1061 1130 { }
johnc@2302 1131
apetrusenko@1061 1132 bool doHeapRegion(HeapRegion* r) {
apetrusenko@1061 1133 if (!r->continuesHumongous()) {
apetrusenko@1061 1134 _cl.set_from(r);
apetrusenko@1061 1135 r->oop_iterate(&_cl);
apetrusenko@1061 1136 }
apetrusenko@1061 1137 return false;
apetrusenko@1061 1138 }
apetrusenko@1061 1139 };
apetrusenko@1061 1140
apetrusenko@1061 1141 class ParRebuildRSTask: public AbstractGangTask {
apetrusenko@1061 1142 G1CollectedHeap* _g1;
apetrusenko@1061 1143 public:
apetrusenko@1061 1144 ParRebuildRSTask(G1CollectedHeap* g1)
apetrusenko@1061 1145 : AbstractGangTask("ParRebuildRSTask"),
apetrusenko@1061 1146 _g1(g1)
apetrusenko@1061 1147 { }
apetrusenko@1061 1148
apetrusenko@1061 1149 void work(int i) {
apetrusenko@1061 1150 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
apetrusenko@1061 1151 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
apetrusenko@1061 1152 HeapRegion::RebuildRSClaimValue);
apetrusenko@1061 1153 }
apetrusenko@1061 1154 };
apetrusenko@1061 1155
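// Reports the state of every non-empty region to the G1HRPrinter
// after compaction, distinguishing old, single-humongous,
// starts-humongous and continues-humongous regions.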
tonyp@2975 1156 class PostCompactionPrinterClosure: public HeapRegionClosure {
tonyp@2975 1157 private:
tonyp@2975 1158 G1HRPrinter* _hr_printer;
tonyp@2975 1159 public:
tonyp@2975 1160 bool doHeapRegion(HeapRegion* hr) {
tonyp@2975 1161 assert(!hr->is_young(), "not expecting to find young regions");
tonyp@2975 1162 // We only generate output for non-empty regions.
tonyp@2975 1163 if (!hr->is_empty()) {
tonyp@2975 1164 if (!hr->isHumongous()) {
tonyp@2975 1165 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
tonyp@2975 1166 } else if (hr->startsHumongous()) {
tonyp@2975 1167 if (hr->capacity() == (size_t) HeapRegion::GrainBytes) {
tonyp@2975 1168 // single humongous region
tonyp@2975 1169 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
tonyp@2975 1170 } else {
tonyp@2975 1171 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
tonyp@2975 1172 }
tonyp@2975 1173 } else {
tonyp@2975 1174 assert(hr->continuesHumongous(), "only way to get here");
tonyp@2975 1175 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
tonyp@2975 1176 }
tonyp@2975 1177 }
tonyp@2975 1178 return false;
tonyp@2975 1179 }
tonyp@2975 1180
tonyp@2975 1181 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
tonyp@2975 1182 : _hr_printer(hr_printer) { }
tonyp@2975 1183 };
tonyp@2975 1184
tonyp@2315 1185 bool G1CollectedHeap::do_collection(bool explicit_gc,
tonyp@2011 1186 bool clear_all_soft_refs,
ysr@777 1187 size_t word_size) {
tonyp@2472 1188 assert_at_safepoint(true /* should_be_vm_thread */);
tonyp@2472 1189
tonyp@1794 1190 if (GC_locker::check_active_before_gc()) {
tonyp@2315 1191 return false;
tonyp@1794 1192 }
tonyp@1794 1193
kamg@2445 1194 SvcGCMarker sgcm(SvcGCMarker::FULL);
ysr@777 1195 ResourceMark rm;
ysr@777 1196
tonyp@1273 1197 if (PrintHeapAtGC) {
tonyp@1273 1198 Universe::print_heap_before_gc();
tonyp@1273 1199 }
tonyp@1273 1200
tonyp@2472 1201 verify_region_sets_optional();
ysr@777 1202
jmasa@1822 1203 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
jmasa@1822 1204 collector_policy()->should_clear_all_soft_refs();
jmasa@1822 1205
jmasa@1822 1206 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
jmasa@1822 1207
ysr@777 1208 {
ysr@777 1209 IsGCActiveMark x;
ysr@777 1210
ysr@777 1211 // Timing
tonyp@2011 1212 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
tonyp@2011 1213 assert(!system_gc || explicit_gc, "invariant");
ysr@777 1214 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
ysr@777 1215 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
tonyp@2011 1216 TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
jmasa@1822 1217 PrintGC, true, gclog_or_tty);
ysr@777 1218
jmasa@2821 1219 TraceCollectorStats tcs(g1mm()->full_collection_counters());
fparain@2888 1220 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
tonyp@1524 1221
ysr@777 1222 double start = os::elapsedTime();
ysr@777 1223 g1_policy()->record_full_collection_start();
ysr@777 1224
tonyp@2472 1225 wait_while_free_regions_coming();
tonyp@2643 1226 append_secondary_free_list_if_not_empty_with_lock();
tonyp@2472 1227
ysr@777 1228 gc_prologue(true);
tonyp@1273 1229 increment_total_collections(true /* full gc */);
ysr@777 1230
ysr@777 1231 size_t g1h_prev_used = used();
ysr@777 1232 assert(used() == recalculate_used(), "Should be equal");
ysr@777 1233
ysr@777 1234 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
ysr@777 1235 HandleMark hm; // Discard invalid handles created during verification
tonyp@2715 1236 gclog_or_tty->print(" VerifyBeforeGC:");
ysr@777 1237 prepare_for_verify();
johnc@2969 1238 Universe::verify(/* allow dirty */ true,
johnc@2969 1239 /* silent */ false,
johnc@2969 1240 /* option */ VerifyOption_G1UsePrevMarking);
johnc@2969 1241
ysr@777 1242 }
ysr@3067 1243 pre_full_gc_dump();
ysr@777 1244
ysr@777 1245 COMPILER2_PRESENT(DerivedPointerTable::clear());
ysr@777 1246
ysr@777 1247 // We want to discover references, but not process them yet.
ysr@777 1248 // This mode is disabled in
ysr@777 1249 // instanceRefKlass::process_discovered_references if the
ysr@777 1250 // generation does some collection work, or
ysr@777 1251 // instanceRefKlass::enqueue_discovered_references if the
ysr@777 1252 // generation returns without doing any work.
ysr@777 1253 ref_processor()->disable_discovery();
ysr@777 1254 ref_processor()->abandon_partial_discovery();
ysr@777 1255 ref_processor()->verify_no_references_recorded();
ysr@777 1256
ysr@777 1257 // Abandon current iterations of concurrent marking and concurrent
ysr@777 1258 // refinement, if any are in progress.
ysr@777 1259 concurrent_mark()->abort();
ysr@777 1260
ysr@777 1261 // Make sure we'll choose a new allocation region afterwards.
tonyp@2715 1262 release_mutator_alloc_region();
tonyp@1071 1263 abandon_gc_alloc_regions();
johnc@2216 1264 g1_rem_set()->cleanupHRRS();
ysr@777 1265 tear_down_region_lists();
johnc@1829 1266
tonyp@2975 1267 // We should call this after we retire any currently active alloc
tonyp@2975 1268 // regions so that all the ALLOC / RETIRE events are generated
tonyp@2975 1269 // before the start GC event.
tonyp@2975 1270 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
tonyp@2975 1271
johnc@1829 1272 // We may have added regions to the current incremental collection
johnc@1829 1273 // set between the last GC or pause and now. We need to clear the
johnc@1829 1274 // incremental collection set and then start rebuilding it afresh
johnc@1829 1275 // after this full GC.
johnc@1829 1276 abandon_collection_set(g1_policy()->inc_cset_head());
johnc@1829 1277 g1_policy()->clear_incremental_cset();
johnc@1829 1278 g1_policy()->stop_incremental_cset_building();
johnc@1829 1279
brutisso@3065 1280 empty_young_list();
brutisso@3065 1281 g1_policy()->set_full_young_gcs(true);
ysr@777 1282
johnc@2316 1283 // See the comment in G1CollectedHeap::ref_processing_init() about
johnc@2316 1284 // how reference processing currently works in G1.
johnc@2316 1285
ysr@777 1286 // Temporarily make reference _discovery_ single threaded (non-MT).
ysr@2651 1287 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
ysr@777 1288
ysr@777 1289 // Temporarily make refs discovery atomic
ysr@777 1290 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
ysr@777 1291
ysr@777 1292 // Temporarily clear _is_alive_non_header
ysr@777 1293 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
ysr@777 1294
ysr@777 1295 ref_processor()->enable_discovery();
jmasa@1822 1296 ref_processor()->setup_policy(do_clear_all_soft_refs);
ysr@777 1297 // Do collection work
ysr@777 1298 {
ysr@777 1299 HandleMark hm; // Discard invalid handles created during gc
jmasa@1822 1300 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
ysr@777 1301 }
tonyp@2472 1302 assert(free_regions() == 0, "we should not have added any free regions");
ysr@777 1303 rebuild_region_lists();
ysr@777 1304
ysr@777 1305 _summary_bytes_used = recalculate_used();
ysr@777 1306
ysr@777 1307 ref_processor()->enqueue_discovered_references();
ysr@777 1308
ysr@777 1309 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
ysr@777 1310
tonyp@1524 1311 MemoryService::track_memory_usage();
tonyp@1524 1312
ysr@777 1313 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
ysr@777 1314 HandleMark hm; // Discard invalid handles created during verification
ysr@777 1315 gclog_or_tty->print(" VerifyAfterGC:");
iveresov@1072 1316 prepare_for_verify();
johnc@2969 1317 Universe::verify(/* allow dirty */ false,
johnc@2969 1318 /* silent */ false,
johnc@2969 1319 /* option */ VerifyOption_G1UsePrevMarking);
johnc@2969 1320
ysr@777 1321 }
ysr@777 1322 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
ysr@777 1323
ysr@777 1324 reset_gc_time_stamp();
ysr@777 1325 // Since everything potentially moved, we will clear all remembered
apetrusenko@1061 1326 // sets, and clear all cards. Later we will rebuild remembered
apetrusenko@1061 1327 // sets. We will also reset the GC time stamps of the regions.
ysr@777 1328 PostMCRemSetClearClosure rs_clear(mr_bs());
ysr@777 1329 heap_region_iterate(&rs_clear);
ysr@777 1330
ysr@777 1331 // Resize the heap if necessary.
tonyp@2011 1332 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
ysr@777 1333
tonyp@2975 1334 if (_hr_printer.is_active()) {
tonyp@2975 1335 // We should do this after we potentially resize the heap so
tonyp@2975 1336 // that all the COMMIT / UNCOMMIT events are generated before
tonyp@2975 1337 // the end GC event.
tonyp@2975 1338
tonyp@2975 1339 PostCompactionPrinterClosure cl(hr_printer());
tonyp@2975 1340 heap_region_iterate(&cl);
tonyp@2975 1341
tonyp@2975 1342 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
tonyp@2975 1343 }
tonyp@2975 1344
ysr@777 1345 if (_cg1r->use_cache()) {
ysr@777 1346 _cg1r->clear_and_record_card_counts();
ysr@777 1347 _cg1r->clear_hot_cache();
ysr@777 1348 }
ysr@777 1349
apetrusenko@1061 1350 // Rebuild remembered sets of all regions.
jmasa@2188 1351
jmasa@2188 1352 if (G1CollectedHeap::use_parallel_gc_threads()) {
apetrusenko@1061 1353 ParRebuildRSTask rebuild_rs_task(this);
apetrusenko@1061 1354 assert(check_heap_region_claim_values(
apetrusenko@1061 1355 HeapRegion::InitialClaimValue), "sanity check");
apetrusenko@1061 1356 set_par_threads(workers()->total_workers());
apetrusenko@1061 1357 workers()->run_task(&rebuild_rs_task);
apetrusenko@1061 1358 set_par_threads(0);
apetrusenko@1061 1359 assert(check_heap_region_claim_values(
apetrusenko@1061 1360 HeapRegion::RebuildRSClaimValue), "sanity check");
apetrusenko@1061 1361 reset_heap_region_claim_values();
apetrusenko@1061 1362 } else {
apetrusenko@1061 1363 RebuildRSOutOfRegionClosure rebuild_rs(this);
apetrusenko@1061 1364 heap_region_iterate(&rebuild_rs);
apetrusenko@1061 1365 }
apetrusenko@1061 1366
ysr@777 1367 if (PrintGC) {
ysr@777 1368 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
ysr@777 1369 }
ysr@777 1370
ysr@777 1371 if (true) { // FIXME
ysr@777 1372 // Ask the permanent generation to adjust size for full collections
ysr@777 1373 perm()->compute_new_size();
ysr@777 1374 }
ysr@777 1375
johnc@1829 1376 // Start a new incremental collection set for the next pause
johnc@1829 1377 assert(g1_policy()->collection_set() == NULL, "must be");
johnc@1829 1378 g1_policy()->start_incremental_cset_building();
johnc@1829 1379
johnc@1829 1380 // Clear the _cset_fast_test bitmap in anticipation of adding
johnc@1829 1381 // regions to the incremental collection set for the next
johnc@1829 1382 // evacuation pause.
johnc@1829 1383 clear_cset_fast_test();
johnc@1829 1384
tonyp@2715 1385 init_mutator_alloc_region();
tonyp@2715 1386
ysr@777 1387 double end = os::elapsedTime();
ysr@777 1388 g1_policy()->record_full_collection_end();
ysr@777 1389
jmasa@981 1390 #ifdef TRACESPINNING
jmasa@981 1391 ParallelTaskTerminator::print_termination_counts();
jmasa@981 1392 #endif
jmasa@981 1393
ysr@777 1394 gc_epilogue(true);
ysr@777 1395
iveresov@1229 1396 // Discard all rset updates
iveresov@1229 1397 JavaThread::dirty_card_queue_set().abandon_logs();
iveresov@1051 1398 assert(!G1DeferredRSUpdate
iveresov@1051 1399 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
ysr@777 1400 }
ysr@777 1401
brutisso@3065 1402 _young_list->reset_sampled_info();
brutisso@3065 1403 // At this point there should be no regions in the
brutisso@3065 1404 // entire heap tagged as young.
brutisso@3065 1405 assert( check_young_list_empty(true /* check_heap */),
brutisso@3065 1406 "young list should be empty at this point");
tonyp@1273 1407
tonyp@2011 1408 // Update the number of full collections that have been completed.
tonyp@2372 1409 increment_full_collections_completed(false /* concurrent */);
tonyp@2011 1410
tonyp@2963 1411 _hrs.verify_optional();
tonyp@2472 1412 verify_region_sets_optional();
tonyp@2472 1413
tonyp@1273 1414 if (PrintHeapAtGC) {
tonyp@1273 1415 Universe::print_heap_after_gc();
tonyp@1273 1416 }
jmasa@2821 1417 g1mm()->update_counters();
ysr@3067 1418 post_full_gc_dump();
tonyp@2315 1419
tonyp@2315 1420 return true;
ysr@777 1421 }
ysr@777 1422
ysr@777 1423 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
tonyp@2315 1424 // do_collection() will return whether it succeeded in performing
tonyp@2315 1425 // the GC. Currently, there is no facility on the
tonyp@2315 1426 // do_full_collection() API to notify the caller than the collection
tonyp@2315 1427 // did not succeed (e.g., because it was locked out by the GC
tonyp@2315 1428 // locker). So, right now, we'll ignore the return value.
tonyp@2315 1429 bool dummy = do_collection(true, /* explicit_gc */
tonyp@2315 1430 clear_all_soft_refs,
tonyp@2315 1431 0 /* word_size */);
ysr@777 1432 }
ysr@777 1433
ysr@777 1434 // This code is mostly copied from TenuredGeneration.
ysr@777 1435 void
ysr@777 1436 G1CollectedHeap::
ysr@777 1437 resize_if_necessary_after_full_collection(size_t word_size) {
ysr@777 1438 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
ysr@777 1439
ysr@777 1440 // Include the current allocation, if any, and bytes that will be
ysr@777 1441 // pre-allocated to support collections, as "used".
ysr@777 1442 const size_t used_after_gc = used();
ysr@777 1443 const size_t capacity_after_gc = capacity();
ysr@777 1444 const size_t free_after_gc = capacity_after_gc - used_after_gc;
ysr@777 1445
tonyp@2072 1446 // This is enforced in arguments.cpp.
tonyp@2072 1447 assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
tonyp@2072 1448 "otherwise the code below doesn't make sense");
tonyp@2072 1449
ysr@777 1450 // We don't have floating point command-line arguments
tonyp@2072 1451 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
ysr@777 1452 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
tonyp@2072 1453 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
ysr@777 1454 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
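// A worked example with hypothetical flag values (the real values come
// from the command line): with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70
// we get maximum_used_percentage = 0.60 and minimum_used_percentage = 0.30,
// so a post-GC occupancy of 600 MB suggests a capacity between
// 600 MB / 0.60 = 1000 MB and 600 MB / 0.30 = 2000 MB, before the clamping
// against the min / max heap sizes that is done below.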
ysr@777 1455
tonyp@2072 1456 const size_t min_heap_size = collector_policy()->min_heap_byte_size();
tonyp@2072 1457 const size_t max_heap_size = collector_policy()->max_heap_byte_size();
tonyp@2072 1458
tonyp@2072 1459 // We have to be careful here as these two calculations can overflow
tonyp@2072 1460 // 32-bit size_t's.
tonyp@2072 1461 double used_after_gc_d = (double) used_after_gc;
tonyp@2072 1462 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
tonyp@2072 1463 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
tonyp@2072 1464
tonyp@2072 1465 // Let's make sure that they are both under the max heap size, which
tonyp@2072 1466 // by default will make them fit into a size_t.
tonyp@2072 1467 double desired_capacity_upper_bound = (double) max_heap_size;
tonyp@2072 1468 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
tonyp@2072 1469 desired_capacity_upper_bound);
tonyp@2072 1470 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
tonyp@2072 1471 desired_capacity_upper_bound);
tonyp@2072 1472
tonyp@2072 1473 // We can now safely turn them into size_t's.
tonyp@2072 1474 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
tonyp@2072 1475 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
tonyp@2072 1476
tonyp@2072 1477 // This assert only makes sense here, before we adjust them
tonyp@2072 1478 // with respect to the min and max heap size.
tonyp@2072 1479 assert(minimum_desired_capacity <= maximum_desired_capacity,
tonyp@2072 1480 err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
tonyp@2072 1481 "maximum_desired_capacity = "SIZE_FORMAT,
tonyp@2072 1482 minimum_desired_capacity, maximum_desired_capacity));
tonyp@2072 1483
tonyp@2072 1484 // Should not be greater than the heap max size. No need to adjust
tonyp@2072 1485 // it with respect to the heap min size as it's a lower bound (i.e.,
tonyp@2072 1486 // we'll try to make the capacity larger than it, not smaller).
tonyp@2072 1487 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
tonyp@2072 1488 // Should not be less than the heap min size. No need to adjust it
tonyp@2072 1489 // with respect to the heap max size as it's an upper bound (i.e.,
tonyp@2072 1490 // we'll try to make the capacity smaller than it, not greater).
tonyp@2072 1491 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
ysr@777 1492
tonyp@2072 1493 if (capacity_after_gc < minimum_desired_capacity) {
ysr@777 1494 // Don't expand unless it's significant
ysr@777 1495 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
tonyp@3114 1496 ergo_verbose4(ErgoHeapSizing,
tonyp@3114 1497 "attempt heap expansion",
tonyp@3114 1498 ergo_format_reason("capacity lower than "
tonyp@3114 1499 "min desired capacity after Full GC")
tonyp@3114 1500 ergo_format_byte("capacity")
tonyp@3114 1501 ergo_format_byte("occupancy")
tonyp@3114 1502 ergo_format_byte_perc("min desired capacity"),
tonyp@3114 1503 capacity_after_gc, used_after_gc,
tonyp@3114 1504 minimum_desired_capacity, (double) MinHeapFreeRatio);
tonyp@3114 1505 expand(expand_bytes);
ysr@777 1506
ysr@777 1507 // No expansion, now see if we want to shrink
tonyp@2072 1508 } else if (capacity_after_gc > maximum_desired_capacity) {
ysr@777 1509 // Capacity too large, compute shrinking size
ysr@777 1510 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
tonyp@3114 1511 ergo_verbose4(ErgoHeapSizing,
tonyp@3114 1512 "attempt heap shrinking",
tonyp@3114 1513 ergo_format_reason("capacity higher than "
tonyp@3114 1514 "max desired capacity after Full GC")
tonyp@3114 1515 ergo_format_byte("capacity")
tonyp@3114 1516 ergo_format_byte("occupancy")
tonyp@3114 1517 ergo_format_byte_perc("max desired capacity"),
tonyp@3114 1518 capacity_after_gc, used_after_gc,
tonyp@3114 1519 maximum_desired_capacity, (double) MaxHeapFreeRatio);
ysr@777 1520 shrink(shrink_bytes);
ysr@777 1521 }
ysr@777 1522 }
ysr@777 1523
ysr@777 1524
ysr@777 1525 HeapWord*
tonyp@2315 1526 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
tonyp@2315 1527 bool* succeeded) {
tonyp@2472 1528 assert_at_safepoint(true /* should_be_vm_thread */);
tonyp@2315 1529
tonyp@2315 1530 *succeeded = true;
tonyp@2315 1531 // Let's attempt the allocation first.
tonyp@2715 1532 HeapWord* result =
tonyp@2715 1533 attempt_allocation_at_safepoint(word_size,
tonyp@2715 1534 false /* expect_null_mutator_alloc_region */);
tonyp@2315 1535 if (result != NULL) {
tonyp@2315 1536 assert(*succeeded, "sanity");
tonyp@2315 1537 return result;
tonyp@2315 1538 }
ysr@777 1539
ysr@777 1540 // In a G1 heap, we're supposed to keep allocation from failing by
ysr@777 1541 // incremental pauses. Therefore, at least for now, we'll favor
ysr@777 1542 // expansion over collection. (This might change in the future if we can
ysr@777 1543 // do something smarter than full collection to satisfy a failed alloc.)
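// Summarizing the fallback sequence below: (1) expand the heap and retry the
// allocation, (2) do a Full GC that keeps soft references and retry,
// (3) do a Full GC that clears all soft references and retry, and only then
// return NULL to the caller.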
ysr@777 1544 result = expand_and_allocate(word_size);
ysr@777 1545 if (result != NULL) {
tonyp@2315 1546 assert(*succeeded, "sanity");
ysr@777 1547 return result;
ysr@777 1548 }
ysr@777 1549
tonyp@2315 1550 // Expansion didn't work, we'll try to do a Full GC.
tonyp@2315 1551 bool gc_succeeded = do_collection(false, /* explicit_gc */
tonyp@2315 1552 false, /* clear_all_soft_refs */
tonyp@2315 1553 word_size);
tonyp@2315 1554 if (!gc_succeeded) {
tonyp@2315 1555 *succeeded = false;
tonyp@2315 1556 return NULL;
tonyp@2315 1557 }
tonyp@2315 1558
tonyp@2315 1559 // Retry the allocation
tonyp@2315 1560 result = attempt_allocation_at_safepoint(word_size,
tonyp@2715 1561 true /* expect_null_mutator_alloc_region */);
ysr@777 1562 if (result != NULL) {
tonyp@2315 1563 assert(*succeeded, "sanity");
ysr@777 1564 return result;
ysr@777 1565 }
ysr@777 1566
tonyp@2315 1567 // Then, try a Full GC that will collect all soft references.
tonyp@2315 1568 gc_succeeded = do_collection(false, /* explicit_gc */
tonyp@2315 1569 true, /* clear_all_soft_refs */
tonyp@2315 1570 word_size);
tonyp@2315 1571 if (!gc_succeeded) {
tonyp@2315 1572 *succeeded = false;
tonyp@2315 1573 return NULL;
tonyp@2315 1574 }
tonyp@2315 1575
tonyp@2315 1576 // Retry the allocation once more
tonyp@2315 1577 result = attempt_allocation_at_safepoint(word_size,
tonyp@2715 1578 true /* expect_null_mutator_alloc_region */);
ysr@777 1579 if (result != NULL) {
tonyp@2315 1580 assert(*succeeded, "sanity");
ysr@777 1581 return result;
ysr@777 1582 }
ysr@777 1583
jmasa@1822 1584 assert(!collector_policy()->should_clear_all_soft_refs(),
tonyp@2315 1585 "Flag should have been handled and cleared prior to this point");
jmasa@1822 1586
ysr@777 1587 // What else? We might try synchronous finalization later. If the total
ysr@777 1588 // space available is large enough for the allocation, then a more
ysr@777 1589 // complete compaction phase than we've tried so far might be
ysr@777 1590 // appropriate.
tonyp@2315 1591 assert(*succeeded, "sanity");
ysr@777 1592 return NULL;
ysr@777 1593 }
ysr@777 1594
ysr@777 1595 // Attempts to expand the heap sufficiently
ysr@777 1596 // to support an allocation of the given "word_size". If
ysr@777 1597 // successful, performs the allocation and returns the address of the
ysr@777 1598 // allocated block, or else "NULL".
ysr@777 1599
ysr@777 1600 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
tonyp@2472 1601 assert_at_safepoint(true /* should_be_vm_thread */);
tonyp@2472 1602
tonyp@2472 1603 verify_region_sets_optional();
tonyp@2315 1604
johnc@2504 1605 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
tonyp@3114 1606 ergo_verbose1(ErgoHeapSizing,
tonyp@3114 1607 "attempt heap expansion",
tonyp@3114 1608 ergo_format_reason("allocation request failed")
tonyp@3114 1609 ergo_format_byte("allocation request"),
tonyp@3114 1610 word_size * HeapWordSize);
johnc@2504 1611 if (expand(expand_bytes)) {
tonyp@2963 1612 _hrs.verify_optional();
johnc@2504 1613 verify_region_sets_optional();
johnc@2504 1614 return attempt_allocation_at_safepoint(word_size,
tonyp@2715 1615 false /* expect_null_mutator_alloc_region */);
johnc@2504 1616 }
johnc@2504 1617 return NULL;
ysr@777 1618 }
ysr@777 1619
tonyp@2963 1620 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
tonyp@2963 1621 HeapWord* new_end) {
tonyp@2963 1622 assert(old_end != new_end, "don't call this otherwise");
tonyp@2963 1623 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
tonyp@2963 1624
tonyp@2963 1625 // Update the committed mem region.
tonyp@2963 1626 _g1_committed.set_end(new_end);
tonyp@2963 1627 // Tell the card table about the update.
tonyp@2963 1628 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
tonyp@2963 1629 // Tell the BOT about the update.
tonyp@2963 1630 _bot_shared->resize(_g1_committed.word_size());
tonyp@2963 1631 }
tonyp@2963 1632
johnc@2504 1633 bool G1CollectedHeap::expand(size_t expand_bytes) {
ysr@777 1634 size_t old_mem_size = _g1_storage.committed_size();
johnc@2504 1635 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
ysr@777 1636 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
ysr@777 1637 HeapRegion::GrainBytes);
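// For illustration only (page and region sizes here are hypothetical):
// with 4 KB pages and HeapRegion::GrainBytes = 1 MB, a request of 300 KB
// stays at 300 KB after the page alignment (it is already a multiple of
// 4 KB) and is then rounded up to 1 MB, i.e. one whole region, by the
// GrainBytes alignment above.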
tonyp@3114 1638 ergo_verbose2(ErgoHeapSizing,
tonyp@3114 1639 "expand the heap",
tonyp@3114 1640 ergo_format_byte("requested expansion amount")
tonyp@3114 1641 ergo_format_byte("attempted expansion amount"),
tonyp@3114 1642 expand_bytes, aligned_expand_bytes);
johnc@2504 1643
tonyp@2963 1644 // First commit the memory.
tonyp@2963 1645 HeapWord* old_end = (HeapWord*) _g1_storage.high();
johnc@2504 1646 bool successful = _g1_storage.expand_by(aligned_expand_bytes);
johnc@2504 1647 if (successful) {
tonyp@2963 1648 // Then propagate this update to the necessary data structures.
tonyp@2963 1649 HeapWord* new_end = (HeapWord*) _g1_storage.high();
tonyp@2963 1650 update_committed_space(old_end, new_end);
tonyp@2963 1651
tonyp@2963 1652 FreeRegionList expansion_list("Local Expansion List");
tonyp@2963 1653 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
tonyp@2963 1654 assert(mr.start() == old_end, "post-condition");
tonyp@2963 1655 // mr might be a smaller region than what was requested if
tonyp@2963 1656 // expand_by() was unable to allocate the HeapRegion instances
tonyp@2963 1657 assert(mr.end() <= new_end, "post-condition");
tonyp@2963 1658
tonyp@2963 1659 size_t actual_expand_bytes = mr.byte_size();
tonyp@2963 1660 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
tonyp@2963 1661 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
tonyp@2963 1662 "post-condition");
tonyp@2963 1663 if (actual_expand_bytes < aligned_expand_bytes) {
tonyp@2963 1664 // We could not expand _hrs to the desired size. In this case we
tonyp@2963 1665 // need to shrink the committed space accordingly.
tonyp@2963 1666 assert(mr.end() < new_end, "invariant");
tonyp@2963 1667
tonyp@2963 1668 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
tonyp@2963 1669 // First uncommit the memory.
tonyp@2963 1670 _g1_storage.shrink_by(diff_bytes);
tonyp@2963 1671 // Then propagate this update to the necessary data structures.
tonyp@2963 1672 update_committed_space(new_end, mr.end());
johnc@2504 1673 }
tonyp@2963 1674 _free_list.add_as_tail(&expansion_list);
tonyp@2975 1675
tonyp@2975 1676 if (_hr_printer.is_active()) {
tonyp@2975 1677 HeapWord* curr = mr.start();
tonyp@2975 1678 while (curr < mr.end()) {
tonyp@2975 1679 HeapWord* curr_end = curr + HeapRegion::GrainWords;
tonyp@2975 1680 _hr_printer.commit(curr, curr_end);
tonyp@2975 1681 curr = curr_end;
tonyp@2975 1682 }
tonyp@2975 1683 assert(curr == mr.end(), "post-condition");
tonyp@2975 1684 }
brutisso@3120 1685 g1_policy()->record_new_heap_size(n_regions());
johnc@2504 1686 } else {
tonyp@3114 1687 ergo_verbose0(ErgoHeapSizing,
tonyp@3114 1688 "did not expand the heap",
tonyp@3114 1689 ergo_format_reason("heap expansion operation failed"));
johnc@2504 1690 // The expansion of the virtual storage space was unsuccessful.
johnc@2504 1691 // Let's see if it was because we ran out of swap.
johnc@2504 1692 if (G1ExitOnExpansionFailure &&
johnc@2504 1693 _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
johnc@2504 1694 // We had head room...
johnc@2504 1695 vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
ysr@777 1696 }
ysr@777 1697 }
johnc@2504 1698 return successful;
ysr@777 1699 }
ysr@777 1700
tonyp@2963 1701 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
ysr@777 1702 size_t old_mem_size = _g1_storage.committed_size();
ysr@777 1703 size_t aligned_shrink_bytes =
ysr@777 1704 ReservedSpace::page_align_size_down(shrink_bytes);
ysr@777 1705 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
ysr@777 1706 HeapRegion::GrainBytes);
ysr@777 1707 size_t num_regions_deleted = 0;
tonyp@2963 1708 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
tonyp@2963 1709 HeapWord* old_end = (HeapWord*) _g1_storage.high();
tonyp@2963 1710 assert(mr.end() == old_end, "post-condition");
tonyp@3114 1711
tonyp@3114 1712 ergo_verbose3(ErgoHeapSizing,
tonyp@3114 1713 "shrink the heap",
tonyp@3114 1714 ergo_format_byte("requested shrinking amount")
tonyp@3114 1715 ergo_format_byte("aligned shrinking amount")
tonyp@3114 1716 ergo_format_byte("attempted shrinking amount"),
tonyp@3114 1717 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
tonyp@2963 1718 if (mr.byte_size() > 0) {
tonyp@2975 1719 if (_hr_printer.is_active()) {
tonyp@2975 1720 HeapWord* curr = mr.end();
tonyp@2975 1721 while (curr > mr.start()) {
tonyp@2975 1722 HeapWord* curr_end = curr;
tonyp@2975 1723 curr -= HeapRegion::GrainWords;
tonyp@2975 1724 _hr_printer.uncommit(curr, curr_end);
tonyp@2975 1725 }
tonyp@2975 1726 assert(curr == mr.start(), "post-condition");
tonyp@2975 1727 }
tonyp@2975 1728
ysr@777 1729 _g1_storage.shrink_by(mr.byte_size());
tonyp@2963 1730 HeapWord* new_end = (HeapWord*) _g1_storage.high();
tonyp@2963 1731 assert(mr.start() == new_end, "post-condition");
tonyp@2963 1732
tonyp@2963 1733 _expansion_regions += num_regions_deleted;
tonyp@2963 1734 update_committed_space(old_end, new_end);
tonyp@2963 1735 HeapRegionRemSet::shrink_heap(n_regions());
brutisso@3120 1736 g1_policy()->record_new_heap_size(n_regions());
tonyp@3114 1737 } else {
tonyp@3114 1738 ergo_verbose0(ErgoHeapSizing,
tonyp@3114 1739 "did not shrink the heap",
tonyp@3114 1740 ergo_format_reason("heap shrinking operation failed"));
ysr@777 1741 }
ysr@777 1742 }
ysr@777 1743
ysr@777 1744 void G1CollectedHeap::shrink(size_t shrink_bytes) {
tonyp@2472 1745 verify_region_sets_optional();
tonyp@2472 1746
tonyp@3028 1747 // We should only reach here at the end of a Full GC which means we
tonyp@3028 1748 // should not be holding on to any GC alloc regions. The method
tonyp@3028 1749 // below will make sure of that and do any remaining clean up.
tonyp@3028 1750 abandon_gc_alloc_regions();
tonyp@3028 1751
tonyp@2472 1752 // Instead of tearing down / rebuilding the free lists here, we
tonyp@2472 1753 // could instead use the remove_all_pending() method on free_list to
tonyp@2472 1754 // remove only the ones that we need to remove.
ysr@777 1755 tear_down_region_lists(); // We will rebuild them in a moment.
ysr@777 1756 shrink_helper(shrink_bytes);
ysr@777 1757 rebuild_region_lists();
tonyp@2472 1758
tonyp@2963 1759 _hrs.verify_optional();
tonyp@2472 1760 verify_region_sets_optional();
ysr@777 1761 }
ysr@777 1762
ysr@777 1763 // Public methods.
ysr@777 1764
ysr@777 1765 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 1766 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 1767 #endif // _MSC_VER
ysr@777 1768
ysr@777 1769
ysr@777 1770 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
ysr@777 1771 SharedHeap(policy_),
ysr@777 1772 _g1_policy(policy_),
iveresov@1546 1773 _dirty_card_queue_set(false),
johnc@2060 1774 _into_cset_dirty_card_queue_set(false),
johnc@2379 1775 _is_alive_closure(this),
ysr@777 1776 _ref_processor(NULL),
ysr@777 1777 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
ysr@777 1778 _bot_shared(NULL),
ysr@777 1779 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
ysr@777 1780 _evac_failure_scan_stack(NULL) ,
ysr@777 1781 _mark_in_progress(false),
tonyp@2472 1782 _cg1r(NULL), _summary_bytes_used(0),
ysr@777 1783 _refine_cte_cl(NULL),
ysr@777 1784 _full_collection(false),
tonyp@2472 1785 _free_list("Master Free List"),
tonyp@2472 1786 _secondary_free_list("Secondary Free List"),
tonyp@2472 1787 _humongous_set("Master Humongous Set"),
tonyp@2472 1788 _free_regions_coming(false),
ysr@777 1789 _young_list(new YoungList(this)),
ysr@777 1790 _gc_time_stamp(0),
tonyp@3028 1791 _retained_old_gc_alloc_region(NULL),
tonyp@961 1792 _surviving_young_words(NULL),
tonyp@2011 1793 _full_collections_completed(0),
tonyp@961 1794 _in_cset_fast_test(NULL),
apetrusenko@1231 1795 _in_cset_fast_test_base(NULL),
apetrusenko@1231 1796 _dirty_cards_region_list(NULL) {
ysr@777 1797 _g1h = this; // To catch bugs.
ysr@777 1798 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
ysr@777 1799 vm_exit_during_initialization("Failed necessary allocation.");
ysr@777 1800 }
tonyp@1377 1801
tonyp@1377 1802 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
tonyp@1377 1803
ysr@777 1804 int n_queues = MAX2((int)ParallelGCThreads, 1);
ysr@777 1805 _task_queues = new RefToScanQueueSet(n_queues);
ysr@777 1806
ysr@777 1807 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
ysr@777 1808 assert(n_rem_sets > 0, "Invariant.");
ysr@777 1809
ysr@777 1810 HeapRegionRemSetIterator** iter_arr =
ysr@777 1811 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
ysr@777 1812 for (int i = 0; i < n_queues; i++) {
ysr@777 1813 iter_arr[i] = new HeapRegionRemSetIterator();
ysr@777 1814 }
ysr@777 1815 _rem_set_iterator = iter_arr;
ysr@777 1816
ysr@777 1817 for (int i = 0; i < n_queues; i++) {
ysr@777 1818 RefToScanQueue* q = new RefToScanQueue();
ysr@777 1819 q->initialize();
ysr@777 1820 _task_queues->register_queue(i, q);
ysr@777 1821 }
ysr@777 1822
ysr@777 1823 guarantee(_task_queues != NULL, "task_queues allocation failure.");
ysr@777 1824 }
ysr@777 1825
ysr@777 1826 jint G1CollectedHeap::initialize() {
ysr@1601 1827 CollectedHeap::pre_initialize();
ysr@777 1828 os::enable_vtime();
ysr@777 1829
ysr@777 1830 // Necessary to satisfy locking discipline assertions.
ysr@777 1831
ysr@777 1832 MutexLocker x(Heap_lock);
ysr@777 1833
tonyp@2975 1834 // We have to initialize the printer before committing the heap, as
tonyp@2975 1835 // it will be used then.
tonyp@2975 1836 _hr_printer.set_active(G1PrintHeapRegions);
tonyp@2975 1837
ysr@777 1838 // While there are no constraints in the GC code that HeapWordSize
ysr@777 1839 // be any particular value, there are multiple other areas in the
ysr@777 1840 // system which believe this to be true (e.g. oop->object_size in some
ysr@777 1841 // cases incorrectly returns the size in wordSize units rather than
ysr@777 1842 // HeapWordSize).
ysr@777 1843 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
ysr@777 1844
ysr@777 1845 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
ysr@777 1846 size_t max_byte_size = collector_policy()->max_heap_byte_size();
ysr@777 1847
ysr@777 1848 // Ensure that the sizes are properly aligned.
ysr@777 1849 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
ysr@777 1850 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
ysr@777 1851
ysr@777 1852 _cg1r = new ConcurrentG1Refine();
ysr@777 1853
ysr@777 1854 // Reserve the maximum.
ysr@777 1855 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
ysr@777 1856 // Includes the perm-gen.
kvn@1077 1857
johnc@3022 1858 // When compressed oops are enabled, the preferred heap base
johnc@3022 1859 // is calculated by subtracting the requested size from the
johnc@3022 1860 // 32Gb boundary and using the result as the base address for
johnc@3022 1861 // heap reservation. If the requested size is not aligned to
johnc@3022 1862 // HeapRegion::GrainBytes (i.e. the alignment that is passed
johnc@3022 1863 // into the ReservedHeapSpace constructor) then the actual
johnc@3022 1864 // base of the reserved heap may end up differing from the
johnc@3022 1865 // address that was requested (i.e. the preferred heap base).
johnc@3022 1866 // If this happens then we could end up using a non-optimal
johnc@3022 1867 // compressed oops mode.
johnc@3022 1868
johnc@3022 1869 // Since max_byte_size is aligned to the size of a heap region (checked
johnc@3022 1870 // above), we also need to align the perm gen size as it might not be.
johnc@3022 1871 const size_t total_reserved = max_byte_size +
johnc@3022 1872 align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
johnc@3022 1873 Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
johnc@3022 1874
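// An illustrative (hypothetical) sizing: a 4 GB maximum heap plus a 256 MB
// perm gen that is already region aligned gives total_reserved = 4352 MB,
// so, per the comment above, the preferred unscaled-narrow-oop base would
// be requested at 32 GB - 4352 MB = 28416 MB.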
kvn@1077 1875 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
kvn@1077 1876
johnc@3022 1877 ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
johnc@3022 1878 UseLargePages, addr);
kvn@1077 1879
kvn@1077 1880 if (UseCompressedOops) {
kvn@1077 1881 if (addr != NULL && !heap_rs.is_reserved()) {
kvn@1077 1882 // Failed to reserve at specified address - the requested memory
kvn@1077 1883 // region is taken already, for example, by 'java' launcher.
kvn@1077 1884 // Try again to reserve the heap higher.
kvn@1077 1885 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
johnc@3022 1886
johnc@3022 1887 ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
johnc@3022 1888 UseLargePages, addr);
johnc@3022 1889
kvn@1077 1890 if (addr != NULL && !heap_rs0.is_reserved()) {
kvn@1077 1891 // Failed to reserve at specified address again - give up.
kvn@1077 1892 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
kvn@1077 1893 assert(addr == NULL, "");
johnc@3022 1894
johnc@3022 1895 ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
johnc@3022 1896 UseLargePages, addr);
kvn@1077 1897 heap_rs = heap_rs1;
kvn@1077 1898 } else {
kvn@1077 1899 heap_rs = heap_rs0;
kvn@1077 1900 }
kvn@1077 1901 }
kvn@1077 1902 }
ysr@777 1903
ysr@777 1904 if (!heap_rs.is_reserved()) {
ysr@777 1905 vm_exit_during_initialization("Could not reserve enough space for object heap");
ysr@777 1906 return JNI_ENOMEM;
ysr@777 1907 }
ysr@777 1908
ysr@777 1909 // It is important to do this in a way such that concurrent readers can't
ysr@777 1910 // temporarily think something is in the heap. (I've actually seen this
ysr@777 1911 // happen in asserts: DLD.)
ysr@777 1912 _reserved.set_word_size(0);
ysr@777 1913 _reserved.set_start((HeapWord*)heap_rs.base());
ysr@777 1914 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
ysr@777 1915
ysr@777 1916 _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
ysr@777 1917
ysr@777 1918 // Create the gen rem set (and barrier set) for the entire reserved region.
ysr@777 1919 _rem_set = collector_policy()->create_rem_set(_reserved, 2);
ysr@777 1920 set_barrier_set(rem_set()->bs());
ysr@777 1921 if (barrier_set()->is_a(BarrierSet::ModRef)) {
ysr@777 1922 _mr_bs = (ModRefBarrierSet*)_barrier_set;
ysr@777 1923 } else {
ysr@777 1924 vm_exit_during_initialization("G1 requires a mod ref bs.");
ysr@777 1925 return JNI_ENOMEM;
ysr@777 1926 }
ysr@777 1927
ysr@777 1928 // Also create a G1 rem set.
johnc@2216 1929 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
johnc@2216 1930 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
ysr@777 1931 } else {
johnc@2216 1932 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
johnc@2216 1933 return JNI_ENOMEM;
ysr@777 1934 }
ysr@777 1935
ysr@777 1936 // Carve out the G1 part of the heap.
ysr@777 1937
ysr@777 1938 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
ysr@777 1939 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
ysr@777 1940 g1_rs.size()/HeapWordSize);
ysr@777 1941 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
ysr@777 1942
ysr@777 1943 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
ysr@777 1944
ysr@777 1945 _g1_storage.initialize(g1_rs, 0);
ysr@777 1946 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
tonyp@2963 1947 _hrs.initialize((HeapWord*) _g1_reserved.start(),
tonyp@2963 1948 (HeapWord*) _g1_reserved.end(),
tonyp@2963 1949 _expansion_regions);
ysr@777 1950
johnc@1242 1951 // 6843694 - ensure that the maximum region index can fit
johnc@1242 1952 // in the remembered set structures.
johnc@1242 1953 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
johnc@1242 1954 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
johnc@1242 1955
johnc@1242 1956 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
tonyp@1377 1957 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
tonyp@1377 1958 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
tonyp@1377 1959 "too many cards per region");
johnc@1242 1960
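// As a worked example, assuming (purely for illustration) that both
// RegionIdx_t and CardIdx_t are 16-bit types: the expressions above evaluate
// to (1 << 15) - 1 = 32767, so the guarantees limit the heap to at most
// 32768 regions and each region to fewer than 32767 cards.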
tonyp@2472 1961 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
tonyp@2472 1962
ysr@777 1963 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
ysr@777 1964 heap_word_size(init_byte_size));
ysr@777 1965
ysr@777 1966 _g1h = this;
ysr@777 1967
johnc@1829 1968 _in_cset_fast_test_length = max_regions();
johnc@1829 1969 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
johnc@1829 1970
johnc@1829 1971 // We're biasing _in_cset_fast_test to avoid subtracting the
johnc@1829 1972 // beginning of the heap every time we want to index; basically
johnc@1829 1973 // it's the same with what we do with the card table.
johnc@1829 1974 _in_cset_fast_test = _in_cset_fast_test_base -
johnc@1829 1975 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
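// A minimal sketch of how the biased base can then be used (hedged; the
// actual accessor lives elsewhere in this class): for an address addr in
// the reserved heap,
//   _in_cset_fast_test[((size_t) addr) >> HeapRegion::LogOfHRGrainBytes]
// indexes the entry for addr's region directly, because the subtraction of
// the heap's starting address has been folded into the base above.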
johnc@1829 1976
johnc@1829 1977 // Clear the _cset_fast_test bitmap in anticipation of adding
johnc@1829 1978 // regions to the incremental collection set for the first
johnc@1829 1979 // evacuation pause.
johnc@1829 1980 clear_cset_fast_test();
johnc@1829 1981
ysr@777 1982 // Create the ConcurrentMark data structure and thread.
ysr@777 1983 // (Must do this late, so that "max_regions" is defined.)
ysr@777 1984 _cm = new ConcurrentMark(heap_rs, (int) max_regions());
ysr@777 1985 _cmThread = _cm->cmThread();
ysr@777 1986
ysr@777 1987 // Initialize the from_card cache structure of HeapRegionRemSet.
ysr@777 1988 HeapRegionRemSet::init_heap(max_regions());
ysr@777 1989
apetrusenko@1112 1990 // Now expand into the initial heap size.
johnc@2504 1991 if (!expand(init_byte_size)) {
johnc@2504 1992 vm_exit_during_initialization("Failed to allocate initial heap.");
johnc@2504 1993 return JNI_ENOMEM;
johnc@2504 1994 }
ysr@777 1995
ysr@777 1996 // Perform any initialization actions delegated to the policy.
ysr@777 1997 g1_policy()->init();
ysr@777 1998
ysr@777 1999 g1_policy()->note_start_of_mark_thread();
ysr@777 2000
ysr@777 2001 _refine_cte_cl =
ysr@777 2002 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
ysr@777 2003 g1_rem_set(),
ysr@777 2004 concurrent_g1_refine());
ysr@777 2005 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
ysr@777 2006
ysr@777 2007 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
ysr@777 2008 SATB_Q_FL_lock,
iveresov@1546 2009 G1SATBProcessCompletedThreshold,
ysr@777 2010 Shared_SATB_Q_lock);
iveresov@1229 2011
iveresov@1229 2012 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
iveresov@1229 2013 DirtyCardQ_FL_lock,
iveresov@1546 2014 concurrent_g1_refine()->yellow_zone(),
iveresov@1546 2015 concurrent_g1_refine()->red_zone(),
iveresov@1229 2016 Shared_DirtyCardQ_lock);
iveresov@1229 2017
iveresov@1051 2018 if (G1DeferredRSUpdate) {
iveresov@1051 2019 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
iveresov@1051 2020 DirtyCardQ_FL_lock,
iveresov@1546 2021 -1, // never trigger processing
iveresov@1546 2022 -1, // no limit on length
iveresov@1051 2023 Shared_DirtyCardQ_lock,
iveresov@1051 2024 &JavaThread::dirty_card_queue_set());
iveresov@1051 2025 }
johnc@2060 2026
johnc@2060 2027 // Initialize the card queue set used to hold cards containing
johnc@2060 2028 // references into the collection set.
johnc@2060 2029 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
johnc@2060 2030 DirtyCardQ_FL_lock,
johnc@2060 2031 -1, // never trigger processing
johnc@2060 2032 -1, // no limit on length
johnc@2060 2033 Shared_DirtyCardQ_lock,
johnc@2060 2034 &JavaThread::dirty_card_queue_set());
johnc@2060 2035
ysr@777 2036 // In case we're keeping closure specialization stats, initialize those
ysr@777 2037 // counts and that mechanism.
ysr@777 2038 SpecializationStats::clear();
ysr@777 2039
ysr@777 2040 // Do later initialization work for concurrent refinement.
ysr@777 2041 _cg1r->init();
ysr@777 2042
tonyp@2715 2043 // Here we allocate the dummy full region that is required by the
tonyp@2715 2044 // G1AllocRegion class. If we don't pass an address in the reserved
tonyp@2715 2045 // space here, lots of asserts fire.
tonyp@2963 2046
tonyp@2963 2047 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
tonyp@2963 2048 _g1_reserved.start());
tonyp@2715 2049 // We'll re-use the same region whether the alloc region will
tonyp@2715 2050 // require BOT updates or not and, if it doesn't, then a non-young
tonyp@2715 2051 // region will complain that it cannot support allocations without
tonyp@2715 2052 // BOT updates. So we'll tag the dummy region as young to avoid that.
tonyp@2715 2053 dummy_region->set_young();
tonyp@2715 2054 // Make sure it's full.
tonyp@2715 2055 dummy_region->set_top(dummy_region->end());
tonyp@2715 2056 G1AllocRegion::setup(this, dummy_region);
tonyp@2715 2057
tonyp@2715 2058 init_mutator_alloc_region();
tonyp@2715 2059
jmasa@2821 2060 // Create the monitoring and management support so that
jmasa@2821 2061 // values in the heap have been properly initialized.
jmasa@2821 2062 _g1mm = new G1MonitoringSupport(this, &_g1_storage);
jmasa@2821 2063
ysr@777 2064 return JNI_OK;
ysr@777 2065 }
ysr@777 2066
ysr@777 2067 void G1CollectedHeap::ref_processing_init() {
johnc@2316 2068 // Reference processing in G1 currently works as follows:
johnc@2316 2069 //
johnc@2316 2070 // * There is only one reference processor instance that
johnc@2316 2071 // 'spans' the entire heap. It is created by the code
johnc@2316 2072 // below.
johnc@2316 2073 // * Reference discovery is not enabled during an incremental
johnc@2316 2074 // pause (see 6484982).
johnc@2316 2075 // * Discovered refs are not enqueued nor are they processed
johnc@2316 2076 // during an incremental pause (see 6484982).
johnc@2316 2077 // * Reference discovery is enabled at initial marking.
johnc@2316 2078 // * Reference discovery is disabled and the discovered
johnc@2316 2079 // references processed etc during remarking.
johnc@2316 2080 // * Reference discovery is MT (see below).
johnc@2316 2081 // * Reference discovery requires a barrier (see below).
johnc@2316 2082 // * Reference processing is currently not MT (see 6608385).
johnc@2316 2083 // * A full GC enables (non-MT) reference discovery and
johnc@2316 2084 // processes any discovered references.
johnc@2316 2085
ysr@777 2086 SharedHeap::ref_processing_init();
ysr@777 2087 MemRegion mr = reserved_region();
ysr@2651 2088 _ref_processor =
ysr@2651 2089 new ReferenceProcessor(mr, // span
ysr@2651 2090 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
ysr@2651 2091 (int) ParallelGCThreads, // degree of mt processing
ysr@2651 2092 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery
ysr@2651 2093 (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
ysr@2651 2094 false, // Reference discovery is not atomic
ysr@2651 2095 &_is_alive_closure, // is alive closure for efficiency
ysr@2651 2096 true); // Setting next fields of discovered
ysr@2651 2097 // lists requires a barrier.
ysr@777 2098 }
ysr@777 2099
ysr@777 2100 size_t G1CollectedHeap::capacity() const {
ysr@777 2101 return _g1_committed.byte_size();
ysr@777 2102 }
ysr@777 2103
johnc@2060 2104 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
johnc@2060 2105 DirtyCardQueue* into_cset_dcq,
johnc@2060 2106 bool concurrent,
ysr@777 2107 int worker_i) {
johnc@1324 2108 // Clean cards in the hot card cache
johnc@2060 2109 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
johnc@1324 2110
ysr@777 2111 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 2112 int n_completed_buffers = 0;
johnc@2060 2113 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
ysr@777 2114 n_completed_buffers++;
ysr@777 2115 }
ysr@777 2116 g1_policy()->record_update_rs_processed_buffers(worker_i,
ysr@777 2117 (double) n_completed_buffers);
ysr@777 2118 dcqs.clear_n_completed_buffers();
ysr@777 2119 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
ysr@777 2120 }
ysr@777 2121
ysr@777 2122
ysr@777 2123 // Computes the sum of the storage used by the various regions.
ysr@777 2124
ysr@777 2125 size_t G1CollectedHeap::used() const {
ysr@1297 2126 assert(Heap_lock->owner() != NULL,
ysr@1297 2127 "Should be owned on this thread's behalf.");
ysr@777 2128 size_t result = _summary_bytes_used;
ysr@1280 2129 // Read only once in case it is set to NULL concurrently
tonyp@2715 2130 HeapRegion* hr = _mutator_alloc_region.get();
ysr@1280 2131 if (hr != NULL)
ysr@1280 2132 result += hr->used();
ysr@777 2133 return result;
ysr@777 2134 }
ysr@777 2135
tonyp@1281 2136 size_t G1CollectedHeap::used_unlocked() const {
tonyp@1281 2137 size_t result = _summary_bytes_used;
tonyp@1281 2138 return result;
tonyp@1281 2139 }
tonyp@1281 2140
ysr@777 2141 class SumUsedClosure: public HeapRegionClosure {
ysr@777 2142 size_t _used;
ysr@777 2143 public:
ysr@777 2144 SumUsedClosure() : _used(0) {}
ysr@777 2145 bool doHeapRegion(HeapRegion* r) {
ysr@777 2146 if (!r->continuesHumongous()) {
ysr@777 2147 _used += r->used();
ysr@777 2148 }
ysr@777 2149 return false;
ysr@777 2150 }
ysr@777 2151 size_t result() { return _used; }
ysr@777 2152 };
ysr@777 2153
ysr@777 2154 size_t G1CollectedHeap::recalculate_used() const {
ysr@777 2155 SumUsedClosure blk;
tonyp@2963 2156 heap_region_iterate(&blk);
ysr@777 2157 return blk.result();
ysr@777 2158 }
ysr@777 2159
ysr@777 2160 size_t G1CollectedHeap::unsafe_max_alloc() {
tonyp@2472 2161 if (free_regions() > 0) return HeapRegion::GrainBytes;
ysr@777 2162 // otherwise, is there space in the current allocation region?
ysr@777 2163
ysr@777 2164 // We need to store the current allocation region in a local variable
ysr@777 2165 // here. The problem is that this method doesn't take any locks and
ysr@777 2166 // there may be other threads which overwrite the current allocation
ysr@777 2167 // region field. attempt_allocation(), for example, sets it to NULL
ysr@777 2168 // and this can happen *after* the NULL check here but before the call
ysr@777 2169 // to free(), resulting in a SIGSEGV. Note that this doesn't appear
ysr@777 2170 // to be a problem in the optimized build, since the two loads of the
ysr@777 2171 // current allocation region field are optimized away.
tonyp@2715 2172 HeapRegion* hr = _mutator_alloc_region.get();
tonyp@2715 2173 if (hr == NULL) {
ysr@777 2174 return 0;
ysr@777 2175 }
tonyp@2715 2176 return hr->free();
ysr@777 2177 }
ysr@777 2178
tonyp@2011 2179 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
tonyp@2011 2180 return
tonyp@2011 2181 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
tonyp@2011 2182 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
tonyp@2011 2183 }
tonyp@2011 2184
tonyp@2817 2185 #ifndef PRODUCT
tonyp@2817 2186 void G1CollectedHeap::allocate_dummy_regions() {
tonyp@2817 2187 // Let's fill up most of the region
tonyp@2817 2188 size_t word_size = HeapRegion::GrainWords - 1024;
tonyp@2817 2189 // And as a result the region we'll allocate will be humongous.
tonyp@2817 2190 guarantee(isHumongous(word_size), "sanity");
tonyp@2817 2191
tonyp@2817 2192 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
tonyp@2817 2193 // Let's use the existing mechanism for the allocation
tonyp@2817 2194 HeapWord* dummy_obj = humongous_obj_allocate(word_size);
tonyp@2817 2195 if (dummy_obj != NULL) {
tonyp@2817 2196 MemRegion mr(dummy_obj, word_size);
tonyp@2817 2197 CollectedHeap::fill_with_object(mr);
tonyp@2817 2198 } else {
tonyp@2817 2199 // If we can't allocate once, we probably cannot allocate
tonyp@2817 2200 // again. Let's get out of the loop.
tonyp@2817 2201 break;
tonyp@2817 2202 }
tonyp@2817 2203 }
tonyp@2817 2204 }
tonyp@2817 2205 #endif // !PRODUCT
tonyp@2817 2206
tonyp@2372 2207 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
tonyp@2011 2208 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
tonyp@2011 2209
tonyp@2372 2210 // We assume that if concurrent == true, then the caller is a
tonyp@2372 2211 // concurrent thread that has joined the Suspendible Thread
tonyp@2372 2212 // Set. If there's ever a cheap way to check this, we should add an
tonyp@2372 2213 // assert here.
tonyp@2372 2214
tonyp@2011 2215 // We have already incremented _total_full_collections at the start
tonyp@2011 2216 // of the GC, so total_full_collections() represents how many full
tonyp@2011 2217 // collections have been started.
tonyp@2011 2218 unsigned int full_collections_started = total_full_collections();
tonyp@2011 2219
tonyp@2011 2220 // Given that this method is called at the end of a Full GC or of a
tonyp@2011 2221 // concurrent cycle, and those can be nested (i.e., a Full GC can
tonyp@2011 2222 // interrupt a concurrent cycle), the number of full collections
tonyp@2011 2223 // completed should be either one (in the case where there was no
tonyp@2011 2224 // nesting) or two (when a Full GC interrupted a concurrent cycle)
tonyp@2011 2225 // behind the number of full collections started.
tonyp@2011 2226
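// An illustrative walk-through of the nested case described above: a
// concurrent cycle starts (started = completed + 1), a Full GC then
// interrupts it and bumps the started count again (started = completed + 2).
// The Full GC finishes first and calls in here (the "+ 2" case below),
// increments _full_collections_completed, and the aborted concurrent
// cycle's later call then sees the "+ 1" case.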
tonyp@2011 2227 // This is the case for the inner caller, i.e. a Full GC.
tonyp@2372 2228 assert(concurrent ||
tonyp@2011 2229 (full_collections_started == _full_collections_completed + 1) ||
tonyp@2011 2230 (full_collections_started == _full_collections_completed + 2),
tonyp@2372 2231 err_msg("for inner caller (Full GC): full_collections_started = %u "
tonyp@2011 2232 "is inconsistent with _full_collections_completed = %u",
tonyp@2011 2233 full_collections_started, _full_collections_completed));
tonyp@2011 2234
tonyp@2011 2235 // This is the case for the outer caller, i.e. the concurrent cycle.
tonyp@2372 2236 assert(!concurrent ||
tonyp@2011 2237 (full_collections_started == _full_collections_completed + 1),
tonyp@2372 2238 err_msg("for outer caller (concurrent cycle): "
tonyp@2372 2239 "full_collections_started = %u "
tonyp@2011 2240 "is inconsistent with _full_collections_completed = %u",
tonyp@2011 2241 full_collections_started, _full_collections_completed));
tonyp@2011 2242
tonyp@2011 2243 _full_collections_completed += 1;
tonyp@2011 2244
johnc@2195 2245 // We need to clear the "in_progress" flag in the CM thread before
johnc@2195 2246 // we wake up any waiters (especially when ExplicitInvokesConcurrent
johnc@2195 2247 // is set) so that if a waiter requests another System.gc() it doesn't
johnc@2195 2248 // incorrectly see that a marking cycle is still in progress.
tonyp@2372 2249 if (concurrent) {
johnc@2195 2250 _cmThread->clear_in_progress();
johnc@2195 2251 }
johnc@2195 2252
tonyp@2011 2253 // This notify_all() will ensure that a thread that called
tonyp@2011 2254 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
tonyp@2011 2255 // and it's waiting for a full GC to finish will be woken up. It is
tonyp@2011 2256 // waiting in VM_G1IncCollectionPause::doit_epilogue().
tonyp@2011 2257 FullGCCount_lock->notify_all();
tonyp@2011 2258 }
tonyp@2011 2259
ysr@777 2260 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
tonyp@2472 2261 assert_at_safepoint(true /* should_be_vm_thread */);
ysr@777 2262 GCCauseSetter gcs(this, cause);
ysr@777 2263 switch (cause) {
ysr@777 2264 case GCCause::_heap_inspection:
ysr@777 2265 case GCCause::_heap_dump: {
ysr@777 2266 HandleMark hm;
ysr@777 2267 do_full_collection(false); // don't clear all soft refs
ysr@777 2268 break;
ysr@777 2269 }
ysr@777 2270 default: // XXX FIX ME
ysr@777 2271 ShouldNotReachHere(); // Unexpected use of this function
ysr@777 2272 }
ysr@777 2273 }
ysr@777 2274
ysr@1523 2275 void G1CollectedHeap::collect(GCCause::Cause cause) {
ysr@1523 2276 // The caller doesn't have the Heap_lock
ysr@1523 2277 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
ysr@1523 2278
tonyp@2011 2279 unsigned int gc_count_before;
tonyp@2011 2280 unsigned int full_gc_count_before;
ysr@777 2281 {
ysr@1523 2282 MutexLocker ml(Heap_lock);
tonyp@2315 2283
ysr@1523 2284 // Read the GC count while holding the Heap_lock
ysr@1523 2285 gc_count_before = SharedHeap::heap()->total_collections();
tonyp@2011 2286 full_gc_count_before = SharedHeap::heap()->total_full_collections();
tonyp@2011 2287 }
tonyp@2011 2288
tonyp@2011 2289 if (should_do_concurrent_full_gc(cause)) {
tonyp@2011 2290 // Schedule an initial-mark evacuation pause that will start a
tonyp@2315 2291 // concurrent cycle. We're setting word_size to 0 which means that
tonyp@2315 2292 // we are not requesting a post-GC allocation.
tonyp@2011 2293 VM_G1IncCollectionPause op(gc_count_before,
tonyp@2315 2294 0, /* word_size */
tonyp@2315 2295 true, /* should_initiate_conc_mark */
tonyp@2011 2296 g1_policy()->max_pause_time_ms(),
tonyp@2011 2297 cause);
tonyp@2011 2298 VMThread::execute(&op);
tonyp@2011 2299 } else {
tonyp@2011 2300 if (cause == GCCause::_gc_locker
tonyp@2011 2301 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
tonyp@2011 2302
tonyp@2315 2303 // Schedule a standard evacuation pause. We're setting word_size
tonyp@2315 2304 // to 0 which means that we are not requesting a post-GC allocation.
tonyp@2011 2305 VM_G1IncCollectionPause op(gc_count_before,
tonyp@2315 2306 0, /* word_size */
tonyp@2011 2307 false, /* should_initiate_conc_mark */
tonyp@2011 2308 g1_policy()->max_pause_time_ms(),
tonyp@2011 2309 cause);
ysr@1523 2310 VMThread::execute(&op);
tonyp@2011 2311 } else {
tonyp@2011 2312 // Schedule a Full GC.
tonyp@2011 2313 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
ysr@1523 2314 VMThread::execute(&op);
ysr@1523 2315 }
ysr@777 2316 }
ysr@777 2317 }
ysr@777 2318
ysr@777 2319 bool G1CollectedHeap::is_in(const void* p) const {
tonyp@2963 2320 HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
tonyp@2963 2321 if (hr != NULL) {
ysr@777 2322 return hr->is_in(p);
ysr@777 2323 } else {
ysr@777 2324 return _perm_gen->as_gen()->is_in(p);
ysr@777 2325 }
ysr@777 2326 }
ysr@777 2327
ysr@777 2328 // Iteration functions.
ysr@777 2329
ysr@777 2330 // Iterates an OopClosure over all ref-containing fields of objects
ysr@777 2331 // within a HeapRegion.
ysr@777 2332
ysr@777 2333 class IterateOopClosureRegionClosure: public HeapRegionClosure {
ysr@777 2334 MemRegion _mr;
ysr@777 2335 OopClosure* _cl;
ysr@777 2336 public:
ysr@777 2337 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
ysr@777 2338 : _mr(mr), _cl(cl) {}
ysr@777 2339 bool doHeapRegion(HeapRegion* r) {
ysr@777 2340 if (! r->continuesHumongous()) {
ysr@777 2341 r->oop_iterate(_cl);
ysr@777 2342 }
ysr@777 2343 return false;
ysr@777 2344 }
ysr@777 2345 };
ysr@777 2346
iveresov@1113 2347 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
ysr@777 2348 IterateOopClosureRegionClosure blk(_g1_committed, cl);
tonyp@2963 2349 heap_region_iterate(&blk);
iveresov@1113 2350 if (do_perm) {
iveresov@1113 2351 perm_gen()->oop_iterate(cl);
iveresov@1113 2352 }
ysr@777 2353 }
ysr@777 2354
iveresov@1113 2355 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
ysr@777 2356 IterateOopClosureRegionClosure blk(mr, cl);
tonyp@2963 2357 heap_region_iterate(&blk);
iveresov@1113 2358 if (do_perm) {
iveresov@1113 2359 perm_gen()->oop_iterate(cl);
iveresov@1113 2360 }
ysr@777 2361 }
ysr@777 2362
ysr@777 2363 // Iterates an ObjectClosure over all objects within a HeapRegion.
ysr@777 2364
ysr@777 2365 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
ysr@777 2366 ObjectClosure* _cl;
ysr@777 2367 public:
ysr@777 2368 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
ysr@777 2369 bool doHeapRegion(HeapRegion* r) {
ysr@777 2370 if (! r->continuesHumongous()) {
ysr@777 2371 r->object_iterate(_cl);
ysr@777 2372 }
ysr@777 2373 return false;
ysr@777 2374 }
ysr@777 2375 };
ysr@777 2376
iveresov@1113 2377 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
ysr@777 2378 IterateObjectClosureRegionClosure blk(cl);
tonyp@2963 2379 heap_region_iterate(&blk);
iveresov@1113 2380 if (do_perm) {
iveresov@1113 2381 perm_gen()->object_iterate(cl);
iveresov@1113 2382 }
ysr@777 2383 }
ysr@777 2384
ysr@777 2385 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
ysr@777 2386 // FIXME: is this right?
ysr@777 2387 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
ysr@777 2388 }
ysr@777 2389
ysr@777 2390 // Calls a SpaceClosure on a HeapRegion.
ysr@777 2391
ysr@777 2392 class SpaceClosureRegionClosure: public HeapRegionClosure {
ysr@777 2393 SpaceClosure* _cl;
ysr@777 2394 public:
ysr@777 2395 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
ysr@777 2396 bool doHeapRegion(HeapRegion* r) {
ysr@777 2397 _cl->do_space(r);
ysr@777 2398 return false;
ysr@777 2399 }
ysr@777 2400 };
ysr@777 2401
ysr@777 2402 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
ysr@777 2403 SpaceClosureRegionClosure blk(cl);
tonyp@2963 2404 heap_region_iterate(&blk);
ysr@777 2405 }
ysr@777 2406
tonyp@2963 2407 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
tonyp@2963 2408 _hrs.iterate(cl);
ysr@777 2409 }
ysr@777 2410
ysr@777 2411 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
tonyp@2963 2412 HeapRegionClosure* cl) const {
tonyp@2963 2413 _hrs.iterate_from(r, cl);
ysr@777 2414 }
ysr@777 2415
ysr@777 2416 void
ysr@777 2417 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
ysr@777 2418 int worker,
ysr@777 2419 jint claim_value) {
tonyp@790 2420 const size_t regions = n_regions();
jmasa@2188 2421 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
tonyp@790 2422 // try to spread out the starting points of the workers
tonyp@790 2423 const size_t start_index = regions / worker_num * (size_t) worker;
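// Illustrative example (hypothetical values): with regions == 100 and
// worker_num == 4, worker 0 starts at index 0, worker 1 at 25, worker 2
// at 50 and worker 3 at 75; each worker still visits all 100 regions,
// wrapping around modulo 'regions' in the loop below.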
tonyp@790 2424
tonyp@790 2425 // each worker will actually look at all regions
tonyp@790 2426 for (size_t count = 0; count < regions; ++count) {
tonyp@790 2427 const size_t index = (start_index + count) % regions;
tonyp@790 2428 assert(0 <= index && index < regions, "sanity");
tonyp@790 2429 HeapRegion* r = region_at(index);
tonyp@790 2430 // we'll ignore "continues humongous" regions (we'll process them
tonyp@790 2431 // when we come across their corresponding "starts humongous"
tonyp@790 2432 // region) and regions already claimed
tonyp@790 2433 if (r->claim_value() == claim_value || r->continuesHumongous()) {
tonyp@790 2434 continue;
tonyp@790 2435 }
tonyp@790 2436 // OK, try to claim it
ysr@777 2437 if (r->claimHeapRegion(claim_value)) {
tonyp@790 2438 // success!
tonyp@790 2439 assert(!r->continuesHumongous(), "sanity");
tonyp@790 2440 if (r->startsHumongous()) {
tonyp@790 2441 // If the region is "starts humongous" we'll iterate over its
tonyp@790 2442 // "continues humongous" regions first; in fact we'll do them
tonyp@790 2443 // first. The order is important: in one case, calling the
tonyp@790 2444 // closure on the "starts humongous" region might de-allocate
tonyp@790 2445 // and clear all its "continues humongous" regions and, as a
tonyp@790 2446 // result, we might end up processing them twice. So, we'll do
tonyp@790 2447 // them first (notice: most closures will ignore them anyway) and
tonyp@790 2448 // then we'll do the "starts humongous" region.
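// For example (hypothetical layout): a humongous object spanning three
// regions occupies [SH][CH][CH] at indices i, i+1 and i+2; after claiming
// the SH region at index i, the loop below claims and applies the closure
// to the two CH regions before doHeapRegion() is called on SH itself.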
tonyp@790 2449 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
tonyp@790 2450 HeapRegion* chr = region_at(ch_index);
tonyp@790 2451
tonyp@790 2452 // if the region has already been claimed or it's not
tonyp@790 2453 // "continues humongous" we're done
tonyp@790 2454 if (chr->claim_value() == claim_value ||
tonyp@790 2455 !chr->continuesHumongous()) {
tonyp@790 2456 break;
tonyp@790 2457 }
tonyp@790 2458
tonyp@790 2459 // No one should have claimed it directly, given that we
tonyp@790 2460 // claimed its "starts humongous" region.
tonyp@790 2461 assert(chr->claim_value() != claim_value, "sanity");
tonyp@790 2462 assert(chr->humongous_start_region() == r, "sanity");
tonyp@790 2463
tonyp@790 2464 if (chr->claimHeapRegion(claim_value)) {
tonyp@790 2465 // we should always be able to claim it; no one else should
tonyp@790 2466 // be trying to claim this region
tonyp@790 2467
tonyp@790 2468 bool res2 = cl->doHeapRegion(chr);
tonyp@790 2469 assert(!res2, "Should not abort");
tonyp@790 2470
tonyp@790 2471 // Right now, this holds (i.e., no closure that actually
tonyp@790 2472 // does something with "continues humongous" regions
tonyp@790 2473 // clears them). We might have to weaken it in the future,
tonyp@790 2474 // but let's leave these two asserts here for extra safety.
tonyp@790 2475 assert(chr->continuesHumongous(), "should still be the case");
tonyp@790 2476 assert(chr->humongous_start_region() == r, "sanity");
tonyp@790 2477 } else {
tonyp@790 2478 guarantee(false, "we should not reach here");
tonyp@790 2479 }
tonyp@790 2480 }
tonyp@790 2481 }
tonyp@790 2482
tonyp@790 2483 assert(!r->continuesHumongous(), "sanity");
tonyp@790 2484 bool res = cl->doHeapRegion(r);
tonyp@790 2485 assert(!res, "Should not abort");
tonyp@790 2486 }
tonyp@790 2487 }
tonyp@790 2488 }
tonyp@790 2489
tonyp@825 2490 class ResetClaimValuesClosure: public HeapRegionClosure {
tonyp@825 2491 public:
tonyp@825 2492 bool doHeapRegion(HeapRegion* r) {
tonyp@825 2493 r->set_claim_value(HeapRegion::InitialClaimValue);
tonyp@825 2494 return false;
tonyp@825 2495 }
tonyp@825 2496 };
tonyp@825 2497
tonyp@825 2498 void
tonyp@825 2499 G1CollectedHeap::reset_heap_region_claim_values() {
tonyp@825 2500 ResetClaimValuesClosure blk;
tonyp@825 2501 heap_region_iterate(&blk);
tonyp@825 2502 }
tonyp@825 2503
tonyp@790 2504 #ifdef ASSERT
tonyp@790 2505 // This checks whether all regions in the heap have the correct claim
tonyp@790 2506 // value. We also piggy-back on it a check to ensure that the
tonyp@790 2507 // humongous_start_region() information on "continues humongous"
tonyp@790 2508 // regions is correct.
tonyp@790 2509
tonyp@790 2510 class CheckClaimValuesClosure : public HeapRegionClosure {
tonyp@790 2511 private:
tonyp@790 2512 jint _claim_value;
tonyp@790 2513 size_t _failures;
tonyp@790 2514 HeapRegion* _sh_region;
tonyp@790 2515 public:
tonyp@790 2516 CheckClaimValuesClosure(jint claim_value) :
tonyp@790 2517 _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
tonyp@790 2518 bool doHeapRegion(HeapRegion* r) {
tonyp@790 2519 if (r->claim_value() != _claim_value) {
tonyp@790 2520 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
tonyp@790 2521 "claim value = %d, should be %d",
tonyp@790 2522 r->bottom(), r->end(), r->claim_value(),
tonyp@790 2523 _claim_value);
tonyp@790 2524 ++_failures;
tonyp@790 2525 }
tonyp@790 2526 if (!r->isHumongous()) {
tonyp@790 2527 _sh_region = NULL;
tonyp@790 2528 } else if (r->startsHumongous()) {
tonyp@790 2529 _sh_region = r;
tonyp@790 2530 } else if (r->continuesHumongous()) {
tonyp@790 2531 if (r->humongous_start_region() != _sh_region) {
tonyp@790 2532 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
tonyp@790 2533 "HS = "PTR_FORMAT", should be "PTR_FORMAT,
tonyp@790 2534 r->bottom(), r->end(),
tonyp@790 2535 r->humongous_start_region(),
tonyp@790 2536 _sh_region);
tonyp@790 2537 ++_failures;
ysr@777 2538 }
ysr@777 2539 }
tonyp@790 2540 return false;
tonyp@790 2541 }
tonyp@790 2542 size_t failures() {
tonyp@790 2543 return _failures;
tonyp@790 2544 }
tonyp@790 2545 };
tonyp@790 2546
tonyp@790 2547 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
tonyp@790 2548 CheckClaimValuesClosure cl(claim_value);
tonyp@790 2549 heap_region_iterate(&cl);
tonyp@790 2550 return cl.failures() == 0;
tonyp@790 2551 }
tonyp@790 2552 #endif // ASSERT
ysr@777 2553
ysr@777 2554 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
ysr@777 2555 HeapRegion* r = g1_policy()->collection_set();
ysr@777 2556 while (r != NULL) {
ysr@777 2557 HeapRegion* next = r->next_in_collection_set();
ysr@777 2558 if (cl->doHeapRegion(r)) {
ysr@777 2559 cl->incomplete();
ysr@777 2560 return;
ysr@777 2561 }
ysr@777 2562 r = next;
ysr@777 2563 }
ysr@777 2564 }
ysr@777 2565
ysr@777 2566 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
ysr@777 2567 HeapRegionClosure *cl) {
tonyp@2011 2568 if (r == NULL) {
tonyp@2011 2569 // The CSet is empty so there's nothing to do.
tonyp@2011 2570 return;
tonyp@2011 2571 }
tonyp@2011 2572
ysr@777 2573 assert(r->in_collection_set(),
ysr@777 2574 "Start region must be a member of the collection set.");
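// The iteration below wraps around: we first walk from r to the end of
// the collection set list and then from the head up to (but not
// including) r. For example, if the list is A -> B -> C -> D and r == C,
// the closure is applied to C, D, A and then B.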
ysr@777 2575 HeapRegion* cur = r;
ysr@777 2576 while (cur != NULL) {
ysr@777 2577 HeapRegion* next = cur->next_in_collection_set();
ysr@777 2578 if (cl->doHeapRegion(cur) && false) {
ysr@777 2579 cl->incomplete();
ysr@777 2580 return;
ysr@777 2581 }
ysr@777 2582 cur = next;
ysr@777 2583 }
ysr@777 2584 cur = g1_policy()->collection_set();
ysr@777 2585 while (cur != r) {
ysr@777 2586 HeapRegion* next = cur->next_in_collection_set();
ysr@777 2587 if (cl->doHeapRegion(cur) && false) {
ysr@777 2588 cl->incomplete();
ysr@777 2589 return;
ysr@777 2590 }
ysr@777 2591 cur = next;
ysr@777 2592 }
ysr@777 2593 }
ysr@777 2594
ysr@777 2595 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
tonyp@2963 2596 return n_regions() > 0 ? region_at(0) : NULL;
ysr@777 2597 }
ysr@777 2598
ysr@777 2599
ysr@777 2600 Space* G1CollectedHeap::space_containing(const void* addr) const {
ysr@777 2601 Space* res = heap_region_containing(addr);
ysr@777 2602 if (res == NULL)
ysr@777 2603 res = perm_gen()->space_containing(addr);
ysr@777 2604 return res;
ysr@777 2605 }
ysr@777 2606
ysr@777 2607 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
ysr@777 2608 Space* sp = space_containing(addr);
ysr@777 2609 if (sp != NULL) {
ysr@777 2610 return sp->block_start(addr);
ysr@777 2611 }
ysr@777 2612 return NULL;
ysr@777 2613 }
ysr@777 2614
ysr@777 2615 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
ysr@777 2616 Space* sp = space_containing(addr);
ysr@777 2617 assert(sp != NULL, "block_size of address outside of heap");
ysr@777 2618 return sp->block_size(addr);
ysr@777 2619 }
ysr@777 2620
ysr@777 2621 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
ysr@777 2622 Space* sp = space_containing(addr);
ysr@777 2623 return sp->block_is_obj(addr);
ysr@777 2624 }
ysr@777 2625
ysr@777 2626 bool G1CollectedHeap::supports_tlab_allocation() const {
ysr@777 2627 return true;
ysr@777 2628 }
ysr@777 2629
ysr@777 2630 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
ysr@777 2631 return HeapRegion::GrainBytes;
ysr@777 2632 }
ysr@777 2633
ysr@777 2634 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
ysr@777 2635 // Return the remaining space in the cur alloc region, but not less than
ysr@777 2636 // the min TLAB size.
johnc@1748 2637
johnc@1748 2638 // Also, this value can be at most the humongous object threshold,
johnc@1748 2639 // since we can't allow TLABs to grow big enough to accommodate
johnc@1748 2640 // humongous objects.
johnc@1748 2641
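// Illustrative example (hypothetical sizes): with hr->free() == 4K,
// MinTLABSize == 2K and max_tlab_size == 512K, the expression below yields
// MIN2(MAX2(4K, 2K), 512K) == 4K; if the region were nearly full, say
// hr->free() == 1K, the MAX2 term would lift the result up to MinTLABSize.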
tonyp@2715 2642 HeapRegion* hr = _mutator_alloc_region.get();
johnc@1748 2643 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
tonyp@2715 2644 if (hr == NULL) {
johnc@1748 2645 return max_tlab_size;
ysr@777 2646 } else {
tonyp@2715 2647 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
ysr@777 2648 }
ysr@777 2649 }
ysr@777 2650
ysr@777 2651 size_t G1CollectedHeap::max_capacity() const {
johnc@2504 2652 return _g1_reserved.byte_size();
ysr@777 2653 }
ysr@777 2654
ysr@777 2655 jlong G1CollectedHeap::millis_since_last_gc() {
ysr@777 2656 // assert(false, "NYI");
ysr@777 2657 return 0;
ysr@777 2658 }
ysr@777 2659
ysr@777 2660 void G1CollectedHeap::prepare_for_verify() {
ysr@777 2661 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
ysr@777 2662 ensure_parsability(false);
ysr@777 2663 }
ysr@777 2664 g1_rem_set()->prepare_for_verify();
ysr@777 2665 }
ysr@777 2666
ysr@777 2667 class VerifyLivenessOopClosure: public OopClosure {
johnc@2969 2668 G1CollectedHeap* _g1h;
johnc@2969 2669 VerifyOption _vo;
ysr@777 2670 public:
johnc@2969 2671 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
johnc@2969 2672 _g1h(g1h), _vo(vo)
johnc@2969 2673 { }
ysr@1280 2674 void do_oop(narrowOop *p) { do_oop_work(p); }
ysr@1280 2675 void do_oop( oop *p) { do_oop_work(p); }
ysr@1280 2676
ysr@1280 2677 template <class T> void do_oop_work(T *p) {
ysr@1280 2678 oop obj = oopDesc::load_decode_heap_oop(p);
johnc@2969 2679 guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
ysr@1280 2680 "Dead object referenced by a not dead object");
ysr@777 2681 }
ysr@777 2682 };
ysr@777 2683
ysr@777 2684 class VerifyObjsInRegionClosure: public ObjectClosure {
tonyp@1246 2685 private:
ysr@777 2686 G1CollectedHeap* _g1h;
ysr@777 2687 size_t _live_bytes;
ysr@777 2688 HeapRegion *_hr;
johnc@2969 2689 VerifyOption _vo;
ysr@777 2690 public:
johnc@2969 2691 // _vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 2692 // _vo == UseNextMarking -> use "next" marking information,
johnc@2969 2693 // _vo == UseMarkWord -> use mark word from object header.
johnc@2969 2694 VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
johnc@2969 2695 : _live_bytes(0), _hr(hr), _vo(vo) {
ysr@777 2696 _g1h = G1CollectedHeap::heap();
ysr@777 2697 }
ysr@777 2698 void do_object(oop o) {
johnc@2969 2699 VerifyLivenessOopClosure isLive(_g1h, _vo);
ysr@777 2700 assert(o != NULL, "Huh?");
johnc@2969 2701 if (!_g1h->is_obj_dead_cond(o, _vo)) {
johnc@2969 2702 // If the object is alive according to the mark word,
johnc@2969 2703 // then verify that the marking information agrees.
johnc@2969 2704 // Note we can't verify the contra-positive of the
johnc@2969 2705 // above: if the object is dead (according to the mark
johnc@2969 2706 // word), it may not be marked, or may have been marked
johnc@2969 2707 // but has since become dead, or may have been allocated
johnc@2969 2708 // since the last marking.
johnc@2969 2709 if (_vo == VerifyOption_G1UseMarkWord) {
johnc@2969 2710 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
johnc@2969 2711 }
johnc@2969 2712
ysr@777 2713 o->oop_iterate(&isLive);
johnc@1824 2714 if (!_hr->obj_allocated_since_prev_marking(o)) {
johnc@1824 2715 size_t obj_size = o->size(); // Make sure we don't overflow
johnc@1824 2716 _live_bytes += (obj_size * HeapWordSize);
johnc@1824 2717 }
ysr@777 2718 }
ysr@777 2719 }
ysr@777 2720 size_t live_bytes() { return _live_bytes; }
ysr@777 2721 };
ysr@777 2722
ysr@777 2723 class PrintObjsInRegionClosure : public ObjectClosure {
ysr@777 2724 HeapRegion *_hr;
ysr@777 2725 G1CollectedHeap *_g1;
ysr@777 2726 public:
ysr@777 2727 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
ysr@777 2728 _g1 = G1CollectedHeap::heap();
ysr@777 2729 };
ysr@777 2730
ysr@777 2731 void do_object(oop o) {
ysr@777 2732 if (o != NULL) {
ysr@777 2733 HeapWord *start = (HeapWord *) o;
ysr@777 2734 size_t word_sz = o->size();
ysr@777 2735 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
ysr@777 2736 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
ysr@777 2737 (void*) o, word_sz,
ysr@777 2738 _g1->isMarkedPrev(o),
ysr@777 2739 _g1->isMarkedNext(o),
ysr@777 2740 _hr->obj_allocated_since_prev_marking(o));
ysr@777 2741 HeapWord *end = start + word_sz;
ysr@777 2742 HeapWord *cur;
ysr@777 2743 int *val;
ysr@777 2744 for (cur = start; cur < end; cur++) {
ysr@777 2745 val = (int *) cur;
ysr@777 2746 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
ysr@777 2747 }
ysr@777 2748 }
ysr@777 2749 }
ysr@777 2750 };
ysr@777 2751
ysr@777 2752 class VerifyRegionClosure: public HeapRegionClosure {
tonyp@1246 2753 private:
johnc@2969 2754 bool _allow_dirty;
johnc@2969 2755 bool _par;
johnc@2969 2756 VerifyOption _vo;
johnc@2969 2757 bool _failures;
tonyp@1246 2758 public:
johnc@2969 2759 // _vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 2760 // _vo == UseNextMarking -> use "next" marking information,
johnc@2969 2761 // _vo == UseMarkWord -> use mark word from object header.
johnc@2969 2762 VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo)
ysr@1280 2763 : _allow_dirty(allow_dirty),
ysr@1280 2764 _par(par),
johnc@2969 2765 _vo(vo),
tonyp@1455 2766 _failures(false) {}
tonyp@1455 2767
tonyp@1455 2768 bool failures() {
tonyp@1455 2769 return _failures;
tonyp@1455 2770 }
ysr@1280 2771
ysr@777 2772 bool doHeapRegion(HeapRegion* r) {
tonyp@825 2773 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
tonyp@825 2774 "Should be unclaimed at verify points.");
iveresov@1072 2775 if (!r->continuesHumongous()) {
tonyp@1455 2776 bool failures = false;
johnc@2969 2777 r->verify(_allow_dirty, _vo, &failures);
tonyp@1455 2778 if (failures) {
tonyp@1455 2779 _failures = true;
tonyp@1455 2780 } else {
johnc@2969 2781 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
tonyp@1455 2782 r->object_iterate(&not_dead_yet_cl);
tonyp@1455 2783 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
tonyp@1455 2784 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
tonyp@1455 2785 "max_live_bytes "SIZE_FORMAT" "
tonyp@1455 2786 "< calculated "SIZE_FORMAT,
tonyp@1455 2787 r->bottom(), r->end(),
tonyp@1455 2788 r->max_live_bytes(),
tonyp@1455 2789 not_dead_yet_cl.live_bytes());
tonyp@1455 2790 _failures = true;
tonyp@1455 2791 }
tonyp@1455 2792 }
ysr@777 2793 }
tonyp@1455 2794 return false; // keep iterating over the remaining regions even if we hit a failure
ysr@777 2795 }
ysr@777 2796 };
ysr@777 2797
ysr@777 2798 class VerifyRootsClosure: public OopsInGenClosure {
ysr@777 2799 private:
ysr@777 2800 G1CollectedHeap* _g1h;
johnc@2969 2801 VerifyOption _vo;
ysr@777 2802 bool _failures;
ysr@777 2803 public:
johnc@2969 2804 // _vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 2805 // _vo == UseNextMarking -> use "next" marking information,
johnc@2969 2806 // _vo == UseMarkWord -> use mark word from object header.
johnc@2969 2807 VerifyRootsClosure(VerifyOption vo) :
ysr@1280 2808 _g1h(G1CollectedHeap::heap()),
johnc@2969 2809 _vo(vo),
tonyp@1455 2810 _failures(false) { }
ysr@777 2811
ysr@777 2812 bool failures() { return _failures; }
ysr@777 2813
ysr@1280 2814 template <class T> void do_oop_nv(T* p) {
ysr@1280 2815 T heap_oop = oopDesc::load_heap_oop(p);
ysr@1280 2816 if (!oopDesc::is_null(heap_oop)) {
ysr@1280 2817 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
johnc@2969 2818 if (_g1h->is_obj_dead_cond(obj, _vo)) {
ysr@777 2819 gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
tonyp@1455 2820 "points to dead obj "PTR_FORMAT, p, (void*) obj);
johnc@2969 2821 if (_vo == VerifyOption_G1UseMarkWord) {
johnc@2969 2822 gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
johnc@2969 2823 }
ysr@777 2824 obj->print_on(gclog_or_tty);
ysr@777 2825 _failures = true;
ysr@777 2826 }
ysr@777 2827 }
ysr@777 2828 }
ysr@1280 2829
ysr@1280 2830 void do_oop(oop* p) { do_oop_nv(p); }
ysr@1280 2831 void do_oop(narrowOop* p) { do_oop_nv(p); }
ysr@777 2832 };
ysr@777 2833
tonyp@825 2834 // This is the task used for parallel heap verification.
tonyp@825 2835
tonyp@825 2836 class G1ParVerifyTask: public AbstractGangTask {
tonyp@825 2837 private:
tonyp@825 2838 G1CollectedHeap* _g1h;
johnc@2969 2839 bool _allow_dirty;
johnc@2969 2840 VerifyOption _vo;
johnc@2969 2841 bool _failures;
tonyp@825 2842
tonyp@825 2843 public:
johnc@2969 2844 // _vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 2845 // _vo == UseNextMarking -> use "next" marking information,
johnc@2969 2846 // _vo == UseMarkWord -> use mark word from object header.
johnc@2969 2847 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
tonyp@825 2848 AbstractGangTask("Parallel verify task"),
ysr@1280 2849 _g1h(g1h),
ysr@1280 2850 _allow_dirty(allow_dirty),
johnc@2969 2851 _vo(vo),
tonyp@1455 2852 _failures(false) { }
tonyp@1455 2853
tonyp@1455 2854 bool failures() {
tonyp@1455 2855 return _failures;
tonyp@1455 2856 }
tonyp@825 2857
tonyp@825 2858 void work(int worker_i) {
iveresov@1072 2859 HandleMark hm;
johnc@2969 2860 VerifyRegionClosure blk(_allow_dirty, true, _vo);
tonyp@825 2861 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
tonyp@825 2862 HeapRegion::ParVerifyClaimValue);
tonyp@1455 2863 if (blk.failures()) {
tonyp@1455 2864 _failures = true;
tonyp@1455 2865 }
tonyp@825 2866 }
tonyp@825 2867 };
tonyp@825 2868
ysr@777 2869 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
johnc@2969 2870 verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
tonyp@1246 2871 }
tonyp@1246 2872
tonyp@1246 2873 void G1CollectedHeap::verify(bool allow_dirty,
tonyp@1246 2874 bool silent,
johnc@2969 2875 VerifyOption vo) {
ysr@777 2876 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
ysr@2825 2877 if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
johnc@2969 2878 VerifyRootsClosure rootsCl(vo);
jrose@1424 2879 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
johnc@2969 2880
ysr@2825 2881 // We apply the relevant closures to all the oops in the
ysr@2825 2882 // system dictionary, the string table and the code cache.
ysr@2825 2883 const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
johnc@2969 2884
ysr@2825 2885 process_strong_roots(true, // activate StrongRootsScope
ysr@2825 2886 true, // we set "collecting perm gen" to true,
ysr@2825 2887 // so we don't reset the dirty cards in the perm gen.
ysr@2825 2888 SharedHeap::ScanningOption(so), // roots scanning options
ysr@777 2889 &rootsCl,
jrose@1424 2890 &blobsCl,
ysr@777 2891 &rootsCl);
johnc@2969 2892
johnc@2969 2893 // If we're verifying after the marking phase of a Full GC then we can't
johnc@2969 2894 // treat the perm gen as roots into the G1 heap. Some of the objects in
johnc@2969 2895 // the perm gen may be dead and hence not marked. If one of these dead
johnc@2969 2896 // objects is considered to be a root then we may end up with a false
johnc@2969 2897 // "Root location <x> points to dead obj <y>" failure.
johnc@2969 2898 if (vo != VerifyOption_G1UseMarkWord) {
johnc@2969 2899 // Since we used "collecting_perm_gen" == true above, we will not have
johnc@2969 2900 // checked the refs from perm into the G1-collected heap. We check those
johnc@2969 2901 // references explicitly below. Whether the relevant cards are dirty
johnc@2969 2902 // is checked further below in the rem set verification.
johnc@2969 2903 if (!silent) { gclog_or_tty->print("Permgen roots "); }
johnc@2969 2904 perm_gen()->oop_iterate(&rootsCl);
johnc@2969 2905 }
tonyp@1455 2906 bool failures = rootsCl.failures();
johnc@2969 2907
johnc@2969 2908 if (vo != VerifyOption_G1UseMarkWord) {
johnc@2969 2909 // If we're verifying during a full GC then the region sets
johnc@2969 2910 // will have been torn down at the start of the GC. Therefore
johnc@2969 2911 // verifying the region sets will fail. So we only verify
johnc@2969 2912 // the region sets when not in a full GC.
johnc@2969 2913 if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
johnc@2969 2914 verify_region_sets();
johnc@2969 2915 }
johnc@2969 2916
tonyp@2472 2917 if (!silent) { gclog_or_tty->print("HeapRegions "); }
tonyp@825 2918 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
tonyp@825 2919 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@825 2920 "sanity check");
tonyp@825 2921
johnc@2969 2922 G1ParVerifyTask task(this, allow_dirty, vo);
tonyp@825 2923 int n_workers = workers()->total_workers();
tonyp@825 2924 set_par_threads(n_workers);
tonyp@825 2925 workers()->run_task(&task);
tonyp@825 2926 set_par_threads(0);
tonyp@1455 2927 if (task.failures()) {
tonyp@1455 2928 failures = true;
tonyp@1455 2929 }
tonyp@825 2930
tonyp@825 2931 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
tonyp@825 2932 "sanity check");
tonyp@825 2933
tonyp@825 2934 reset_heap_region_claim_values();
tonyp@825 2935
tonyp@825 2936 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@825 2937 "sanity check");
tonyp@825 2938 } else {
johnc@2969 2939 VerifyRegionClosure blk(allow_dirty, false, vo);
tonyp@2963 2940 heap_region_iterate(&blk);
tonyp@1455 2941 if (blk.failures()) {
tonyp@1455 2942 failures = true;
tonyp@1455 2943 }
tonyp@825 2944 }
tonyp@2472 2945 if (!silent) gclog_or_tty->print("RemSet ");
ysr@777 2946 rem_set()->verify();
tonyp@1455 2947
tonyp@1455 2948 if (failures) {
tonyp@1455 2949 gclog_or_tty->print_cr("Heap:");
tonyp@1455 2950 print_on(gclog_or_tty, true /* extended */);
tonyp@1455 2951 gclog_or_tty->print_cr("");
jcoomes@1902 2952 #ifndef PRODUCT
tonyp@1479 2953 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
tonyp@1823 2954 concurrent_mark()->print_reachable("at-verification-failure",
johnc@2969 2955 vo, false /* all */);
tonyp@1455 2956 }
jcoomes@1902 2957 #endif
tonyp@1455 2958 gclog_or_tty->flush();
tonyp@1455 2959 }
tonyp@1455 2960 guarantee(!failures, "there should not have been any failures");
ysr@777 2961 } else {
ysr@777 2962 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
ysr@777 2963 }
ysr@777 2964 }
ysr@777 2965
ysr@777 2966 class PrintRegionClosure: public HeapRegionClosure {
ysr@777 2967 outputStream* _st;
ysr@777 2968 public:
ysr@777 2969 PrintRegionClosure(outputStream* st) : _st(st) {}
ysr@777 2970 bool doHeapRegion(HeapRegion* r) {
ysr@777 2971 r->print_on(_st);
ysr@777 2972 return false;
ysr@777 2973 }
ysr@777 2974 };
ysr@777 2975
tonyp@1273 2976 void G1CollectedHeap::print() const { print_on(tty); }
ysr@777 2977
ysr@777 2978 void G1CollectedHeap::print_on(outputStream* st) const {
tonyp@1273 2979 print_on(st, PrintHeapAtGCExtended);
tonyp@1273 2980 }
tonyp@1273 2981
tonyp@1273 2982 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
tonyp@1273 2983 st->print(" %-20s", "garbage-first heap");
tonyp@1273 2984 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
tonyp@1281 2985 capacity()/K, used_unlocked()/K);
tonyp@1273 2986 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
tonyp@1273 2987 _g1_storage.low_boundary(),
tonyp@1273 2988 _g1_storage.high(),
tonyp@1273 2989 _g1_storage.high_boundary());
tonyp@1273 2990 st->cr();
tonyp@1273 2991 st->print(" region size " SIZE_FORMAT "K, ",
tonyp@1273 2992 HeapRegion::GrainBytes/K);
tonyp@1273 2993 size_t young_regions = _young_list->length();
tonyp@1273 2994 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
tonyp@1273 2995 young_regions, young_regions * HeapRegion::GrainBytes / K);
tonyp@1273 2996 size_t survivor_regions = g1_policy()->recorded_survivor_regions();
tonyp@1273 2997 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
tonyp@1273 2998 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
tonyp@1273 2999 st->cr();
tonyp@1273 3000 perm()->as_gen()->print_on(st);
tonyp@1273 3001 if (extended) {
tonyp@1455 3002 st->cr();
tonyp@1273 3003 print_on_extended(st);
tonyp@1273 3004 }
tonyp@1273 3005 }
tonyp@1273 3006
tonyp@1273 3007 void G1CollectedHeap::print_on_extended(outputStream* st) const {
ysr@777 3008 PrintRegionClosure blk(st);
tonyp@2963 3009 heap_region_iterate(&blk);
ysr@777 3010 }
ysr@777 3011
ysr@777 3012 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
jmasa@2188 3013 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@1454 3014 workers()->print_worker_threads_on(st);
tonyp@1454 3015 }
tonyp@1454 3016 _cmThread->print_on(st);
ysr@777 3017 st->cr();
tonyp@1454 3018 _cm->print_worker_threads_on(st);
tonyp@1454 3019 _cg1r->print_worker_threads_on(st);
ysr@777 3020 st->cr();
ysr@777 3021 }
ysr@777 3022
ysr@777 3023 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
jmasa@2188 3024 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3025 workers()->threads_do(tc);
ysr@777 3026 }
ysr@777 3027 tc->do_thread(_cmThread);
iveresov@1229 3028 _cg1r->threads_do(tc);
ysr@777 3029 }
ysr@777 3030
ysr@777 3031 void G1CollectedHeap::print_tracing_info() const {
ysr@777 3032 // We'll overload this to mean "trace GC pause statistics."
ysr@777 3033 if (TraceGen0Time || TraceGen1Time) {
ysr@777 3034 // The "G1CollectorPolicy" is keeping track of these stats, so delegate
ysr@777 3035 // to that.
ysr@777 3036 g1_policy()->print_tracing_info();
ysr@777 3037 }
johnc@1186 3038 if (G1SummarizeRSetStats) {
ysr@777 3039 g1_rem_set()->print_summary_info();
ysr@777 3040 }
tonyp@1717 3041 if (G1SummarizeConcMark) {
ysr@777 3042 concurrent_mark()->print_summary_info();
ysr@777 3043 }
ysr@777 3044 g1_policy()->print_yg_surv_rate_info();
ysr@777 3045 SpecializationStats::print();
ysr@777 3046 }
ysr@777 3047
tonyp@2974 3048 #ifndef PRODUCT
tonyp@2974 3049 // Helpful for debugging RSet issues.
tonyp@2974 3050
tonyp@2974 3051 class PrintRSetsClosure : public HeapRegionClosure {
tonyp@2974 3052 private:
tonyp@2974 3053 const char* _msg;
tonyp@2974 3054 size_t _occupied_sum;
tonyp@2974 3055
tonyp@2974 3056 public:
tonyp@2974 3057 bool doHeapRegion(HeapRegion* r) {
tonyp@2974 3058 HeapRegionRemSet* hrrs = r->rem_set();
tonyp@2974 3059 size_t occupied = hrrs->occupied();
tonyp@2974 3060 _occupied_sum += occupied;
tonyp@2974 3061
tonyp@2974 3062 gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
tonyp@2974 3063 HR_FORMAT_PARAMS(r));
tonyp@2974 3064 if (occupied == 0) {
tonyp@2974 3065 gclog_or_tty->print_cr(" RSet is empty");
tonyp@2974 3066 } else {
tonyp@2974 3067 hrrs->print();
tonyp@2974 3068 }
tonyp@2974 3069 gclog_or_tty->print_cr("----------");
tonyp@2974 3070 return false;
tonyp@2974 3071 }
tonyp@2974 3072
tonyp@2974 3073 PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
tonyp@2974 3074 gclog_or_tty->cr();
tonyp@2974 3075 gclog_or_tty->print_cr("========================================");
tonyp@2974 3076 gclog_or_tty->print_cr(msg);
tonyp@2974 3077 gclog_or_tty->cr();
tonyp@2974 3078 }
tonyp@2974 3079
tonyp@2974 3080 ~PrintRSetsClosure() {
tonyp@2974 3081 gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
tonyp@2974 3082 gclog_or_tty->print_cr("========================================");
tonyp@2974 3083 gclog_or_tty->cr();
tonyp@2974 3084 }
tonyp@2974 3085 };
tonyp@2974 3086
tonyp@2974 3087 void G1CollectedHeap::print_cset_rsets() {
tonyp@2974 3088 PrintRSetsClosure cl("Printing CSet RSets");
tonyp@2974 3089 collection_set_iterate(&cl);
tonyp@2974 3090 }
tonyp@2974 3091
tonyp@2974 3092 void G1CollectedHeap::print_all_rsets() {
tonyp@2974 3093 PrintRSetsClosure cl("Printing All RSets");
tonyp@2974 3094 heap_region_iterate(&cl);
tonyp@2974 3095 }
tonyp@2974 3096 #endif // PRODUCT
tonyp@2974 3097
ysr@777 3098 G1CollectedHeap* G1CollectedHeap::heap() {
ysr@777 3099 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
ysr@777 3100 "not a garbage-first heap");
ysr@777 3101 return _g1h;
ysr@777 3102 }
ysr@777 3103
ysr@777 3104 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
ysr@1680 3105 // always_do_update_barrier = false;
ysr@777 3106 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
ysr@777 3107 // Call allocation profiler
ysr@777 3108 AllocationProfiler::iterate_since_last_gc();
ysr@777 3109 // Fill TLAB's and such
ysr@777 3110 ensure_parsability(true);
ysr@777 3111 }
ysr@777 3112
ysr@777 3113 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
ysr@777 3114 // FIXME: what is this about?
ysr@777 3115 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
ysr@777 3116 // is set.
ysr@777 3117 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
ysr@777 3118 "derived pointer present"));
ysr@1680 3119 // always_do_update_barrier = true;
ysr@777 3120 }
ysr@777 3121
tonyp@2315 3122 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
tonyp@2315 3123 unsigned int gc_count_before,
tonyp@2315 3124 bool* succeeded) {
tonyp@2315 3125 assert_heap_not_locked_and_not_at_safepoint();
ysr@777 3126 g1_policy()->record_stop_world_start();
tonyp@2315 3127 VM_G1IncCollectionPause op(gc_count_before,
tonyp@2315 3128 word_size,
tonyp@2315 3129 false, /* should_initiate_conc_mark */
tonyp@2315 3130 g1_policy()->max_pause_time_ms(),
tonyp@2315 3131 GCCause::_g1_inc_collection_pause);
tonyp@2315 3132 VMThread::execute(&op);
tonyp@2315 3133
tonyp@2315 3134 HeapWord* result = op.result();
tonyp@2315 3135 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
tonyp@2315 3136 assert(result == NULL || ret_succeeded,
tonyp@2315 3137 "the result should be NULL if the VM did not succeed");
tonyp@2315 3138 *succeeded = ret_succeeded;
tonyp@2315 3139
tonyp@2315 3140 assert_heap_not_locked();
tonyp@2315 3141 return result;
ysr@777 3142 }
ysr@777 3143
ysr@777 3144 void
ysr@777 3145 G1CollectedHeap::doConcurrentMark() {
ysr@1280 3146 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
ysr@1280 3147 if (!_cmThread->in_progress()) {
ysr@1280 3148 _cmThread->set_started();
ysr@1280 3149 CGC_lock->notify();
ysr@777 3150 }
ysr@777 3151 }
ysr@777 3152
ysr@777 3153 // <NEW PREDICTION>
ysr@777 3154
ysr@777 3155 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
ysr@777 3156 bool young) {
ysr@777 3157 return _g1_policy->predict_region_elapsed_time_ms(hr, young);
ysr@777 3158 }
ysr@777 3159
ysr@777 3160 void G1CollectedHeap::check_if_region_is_too_expensive(double
ysr@777 3161 predicted_time_ms) {
ysr@777 3162 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
ysr@777 3163 }
ysr@777 3164
ysr@777 3165 size_t G1CollectedHeap::pending_card_num() {
ysr@777 3166 size_t extra_cards = 0;
ysr@777 3167 JavaThread *curr = Threads::first();
ysr@777 3168 while (curr != NULL) {
ysr@777 3169 DirtyCardQueue& dcq = curr->dirty_card_queue();
ysr@777 3170 extra_cards += dcq.size();
ysr@777 3171 curr = curr->next();
ysr@777 3172 }
ysr@777 3173 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 3174 size_t buffer_size = dcqs.buffer_size();
ysr@777 3175 size_t buffer_num = dcqs.completed_buffers_num();
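// Illustrative example (hypothetical values): with a buffer_size of 256
// cards, 3 completed buffers and 40 cards still sitting in the per-thread
// queues counted above, this returns 3 * 256 + 40 == 808 pending cards.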
ysr@777 3176 return buffer_size * buffer_num + extra_cards;
ysr@777 3177 }
ysr@777 3178
ysr@777 3179 size_t G1CollectedHeap::max_pending_card_num() {
ysr@777 3180 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 3181 size_t buffer_size = dcqs.buffer_size();
ysr@777 3182 size_t buffer_num = dcqs.completed_buffers_num();
ysr@777 3183 int thread_num = Threads::number_of_threads();
ysr@777 3184 return (buffer_num + thread_num) * buffer_size;
ysr@777 3185 }
ysr@777 3186
ysr@777 3187 size_t G1CollectedHeap::cards_scanned() {
johnc@2216 3188 return g1_rem_set()->cardsScanned();
ysr@777 3189 }
ysr@777 3190
ysr@777 3191 void
ysr@777 3192 G1CollectedHeap::setup_surviving_young_words() {
ysr@777 3193 guarantee( _surviving_young_words == NULL, "pre-condition" );
ysr@777 3194 size_t array_length = g1_policy()->young_cset_length();
ysr@777 3195 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
ysr@777 3196 if (_surviving_young_words == NULL) {
ysr@777 3197 vm_exit_out_of_memory(sizeof(size_t) * array_length,
ysr@777 3198 "Not enough space for young surv words summary.");
ysr@777 3199 }
ysr@777 3200 memset(_surviving_young_words, 0, array_length * sizeof(size_t));
ysr@1280 3201 #ifdef ASSERT
ysr@777 3202 for (size_t i = 0; i < array_length; ++i) {
ysr@1280 3203 assert( _surviving_young_words[i] == 0, "memset above" );
ysr@1280 3204 }
ysr@1280 3205 #endif // ASSERT
ysr@777 3206 }
ysr@777 3207
ysr@777 3208 void
ysr@777 3209 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
ysr@777 3210 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 3211 size_t array_length = g1_policy()->young_cset_length();
ysr@777 3212 for (size_t i = 0; i < array_length; ++i)
ysr@777 3213 _surviving_young_words[i] += surv_young_words[i];
ysr@777 3214 }
ysr@777 3215
ysr@777 3216 void
ysr@777 3217 G1CollectedHeap::cleanup_surviving_young_words() {
ysr@777 3218 guarantee( _surviving_young_words != NULL, "pre-condition" );
ysr@777 3219 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
ysr@777 3220 _surviving_young_words = NULL;
ysr@777 3221 }
ysr@777 3222
ysr@777 3223 // </NEW PREDICTION>
ysr@777 3224
tonyp@2974 3225 #ifdef ASSERT
tonyp@2974 3226 class VerifyCSetClosure: public HeapRegionClosure {
tonyp@2974 3227 public:
tonyp@2974 3228 bool doHeapRegion(HeapRegion* hr) {
tonyp@2974 3229 // Here we check that the CSet region's RSet is ready for parallel
tonyp@2974 3230 // iteration. The fields that we'll verify are only manipulated
tonyp@2974 3231 // when the region is part of a CSet and is collected. Afterwards,
tonyp@2974 3232 // we reset these fields when we clear the region's RSet (when the
tonyp@2974 3233 // region is freed) so they are ready when the region is
tonyp@2974 3234 // re-allocated. The only exception to this is if there's an
tonyp@2974 3235 // evacuation failure and instead of freeing the region we leave
tonyp@2974 3236 // it in the heap. In that case, we reset these fields during
tonyp@2974 3237 // evacuation failure handling.
tonyp@2974 3238 guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
tonyp@2974 3239
tonyp@2974 3240 // Here's a good place to add any other checks we'd like to
tonyp@2974 3241 // perform on CSet regions.
iveresov@1696 3242 return false;
iveresov@1696 3243 }
iveresov@1696 3244 };
tonyp@2974 3245 #endif // ASSERT
iveresov@1696 3246
jcoomes@2064 3247 #if TASKQUEUE_STATS
jcoomes@2064 3248 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
jcoomes@2064 3249 st->print_raw_cr("GC Task Stats");
jcoomes@2064 3250 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
jcoomes@2064 3251 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
jcoomes@2064 3252 }
jcoomes@2064 3253
jcoomes@2064 3254 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
jcoomes@2064 3255 print_taskqueue_stats_hdr(st);
jcoomes@2064 3256
jcoomes@2064 3257 TaskQueueStats totals;
jcoomes@2110 3258 const int n = workers() != NULL ? workers()->total_workers() : 1;
jcoomes@2064 3259 for (int i = 0; i < n; ++i) {
jcoomes@2064 3260 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
jcoomes@2064 3261 totals += task_queue(i)->stats;
jcoomes@2064 3262 }
jcoomes@2064 3263 st->print_raw("tot "); totals.print(st); st->cr();
jcoomes@2064 3264
jcoomes@2064 3265 DEBUG_ONLY(totals.verify());
jcoomes@2064 3266 }
jcoomes@2064 3267
jcoomes@2064 3268 void G1CollectedHeap::reset_taskqueue_stats() {
jcoomes@2110 3269 const int n = workers() != NULL ? workers()->total_workers() : 1;
jcoomes@2064 3270 for (int i = 0; i < n; ++i) {
jcoomes@2064 3271 task_queue(i)->stats.reset();
jcoomes@2064 3272 }
jcoomes@2064 3273 }
jcoomes@2064 3274 #endif // TASKQUEUE_STATS
jcoomes@2064 3275
tonyp@2315 3276 bool
tonyp@2011 3277 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
tonyp@2472 3278 assert_at_safepoint(true /* should_be_vm_thread */);
tonyp@2472 3279 guarantee(!is_gc_active(), "collection is not reentrant");
tonyp@2472 3280
tonyp@1794 3281 if (GC_locker::check_active_before_gc()) {
tonyp@2315 3282 return false;
tonyp@1794 3283 }
tonyp@1794 3284
kamg@2445 3285 SvcGCMarker sgcm(SvcGCMarker::MINOR);
tonyp@2381 3286 ResourceMark rm;
tonyp@2381 3287
tonyp@1273 3288 if (PrintHeapAtGC) {
tonyp@1273 3289 Universe::print_heap_before_gc();
tonyp@1273 3290 }
tonyp@1273 3291
tonyp@2472 3292 verify_region_sets_optional();
tonyp@2715 3293 verify_dirty_young_regions();
tonyp@2472 3294
tonyp@1273 3295 {
tonyp@1794 3296 // This call will decide whether this pause is an initial-mark
tonyp@1794 3297 // pause. If it is, during_initial_mark_pause() will return true
tonyp@1794 3298 // for the duration of this pause.
tonyp@1794 3299 g1_policy()->decide_on_conc_mark_initiation();
tonyp@1794 3300
tonyp@1273 3301 char verbose_str[128];
tonyp@1273 3302 sprintf(verbose_str, "GC pause ");
brutisso@3065 3303 if (g1_policy()->full_young_gcs()) {
brutisso@3065 3304 strcat(verbose_str, "(young)");
brutisso@3065 3305 } else {
brutisso@3065 3306 strcat(verbose_str, "(partial)");
tonyp@1273 3307 }
tonyp@2011 3308 if (g1_policy()->during_initial_mark_pause()) {
tonyp@1273 3309 strcat(verbose_str, " (initial-mark)");
tonyp@2011 3310 // We are about to start a marking cycle, so we increment the
tonyp@2011 3311 // full collection counter.
tonyp@2011 3312 increment_total_full_collections();
tonyp@2011 3313 }
tonyp@1273 3314
tonyp@1273 3315 // if PrintGCDetails is on, we'll print long statistics information
tonyp@1273 3316 // in the collector policy code, so let's not print this as the output
tonyp@1273 3317 // is messy if we do.
tonyp@1273 3318 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
tonyp@1273 3319 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
tonyp@1273 3320 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
tonyp@1273 3321
jmasa@2821 3322 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
fparain@2888 3323 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
tonyp@1524 3324
tonyp@2643 3325 // If the secondary_free_list is not empty, append it to the
tonyp@2643 3326 // free_list. No need to wait for the cleanup operation to finish;
tonyp@2643 3327 // the region allocation code will check the secondary_free_list
tonyp@2643 3328 // and wait if necessary. If the G1StressConcRegionFreeing flag is
tonyp@2643 3329 // set, skip this step so that the region allocation code has to
tonyp@2643 3330 // get entries from the secondary_free_list.
tonyp@2472 3331 if (!G1StressConcRegionFreeing) {
tonyp@2643 3332 append_secondary_free_list_if_not_empty_with_lock();
tonyp@2472 3333 }
tonyp@1273 3334
brutisso@3065 3335 assert(check_young_list_well_formed(),
brutisso@3065 3336 "young list should be well formed");
tonyp@1273 3337
tonyp@1273 3338 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
tonyp@1273 3339 IsGCActiveMark x;
tonyp@1273 3340
tonyp@1273 3341 gc_prologue(false);
tonyp@1273 3342 increment_total_collections(false /* full gc */);
tonyp@3028 3343 increment_gc_time_stamp();
ysr@777 3344
tonyp@1273 3345 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
tonyp@1273 3346 HandleMark hm; // Discard invalid handles created during verification
tonyp@2715 3347 gclog_or_tty->print(" VerifyBeforeGC:");
tonyp@1273 3348 prepare_for_verify();
johnc@2969 3349 Universe::verify(/* allow dirty */ false,
johnc@2969 3350 /* silent */ false,
johnc@2969 3351 /* option */ VerifyOption_G1UsePrevMarking);
johnc@2969 3352
tonyp@1273 3353 }
tonyp@1273 3354
tonyp@1273 3355 COMPILER2_PRESENT(DerivedPointerTable::clear());
tonyp@1273 3356
johnc@2316 3357 // Please see comment in G1CollectedHeap::ref_processing_init()
johnc@2316 3358 // to see how reference processing currently works in G1.
johnc@2316 3359 //
tonyp@1273 3360 // We want to turn off ref discovery, if necessary, and turn it back
ysr@1280 3361 // on again later if we do. XXX Dubious: why is discovery disabled?
tonyp@1273 3362 bool was_enabled = ref_processor()->discovery_enabled();
tonyp@1273 3363 if (was_enabled) ref_processor()->disable_discovery();
tonyp@1273 3364
tonyp@1273 3365 // Forget the current alloc region (we might even choose it to be part
tonyp@1273 3366 // of the collection set!).
tonyp@2715 3367 release_mutator_alloc_region();
tonyp@1273 3368
tonyp@2975 3369 // We should call this after we retire the mutator alloc
tonyp@2975 3370 // region(s) so that all the ALLOC / RETIRE events are generated
tonyp@2975 3371 // before the start GC event.
tonyp@2975 3372 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
tonyp@2975 3373
tonyp@1273 3374 // The elapsed time induced by the start time below deliberately elides
tonyp@1273 3375 // the possible verification above.
tonyp@1273 3376 double start_time_sec = os::elapsedTime();
tonyp@1273 3377 size_t start_used_bytes = used();
tonyp@1273 3378
johnc@1829 3379 #if YOUNG_LIST_VERBOSE
johnc@1829 3380 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
johnc@1829 3381 _young_list->print();
johnc@1829 3382 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
johnc@1829 3383 #endif // YOUNG_LIST_VERBOSE
johnc@1829 3384
tonyp@1273 3385 g1_policy()->record_collection_pause_start(start_time_sec,
tonyp@1273 3386 start_used_bytes);
tonyp@1273 3387
johnc@1829 3388 #if YOUNG_LIST_VERBOSE
johnc@1829 3389 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
tonyp@1273 3390 _young_list->print();
johnc@1829 3391 #endif // YOUNG_LIST_VERBOSE
ysr@777 3392
tonyp@1794 3393 if (g1_policy()->during_initial_mark_pause()) {
tonyp@1273 3394 concurrent_mark()->checkpointRootsInitialPre();
ysr@777 3395 }
tonyp@3028 3396 perm_gen()->save_marks();
tonyp@1273 3397
tonyp@1273 3398 // We must do this before any possible evacuation that should propagate
tonyp@1273 3399 // marks.
tonyp@1273 3400 if (mark_in_progress()) {
tonyp@1273 3401 double start_time_sec = os::elapsedTime();
tonyp@1273 3402
tonyp@1273 3403 _cm->drainAllSATBBuffers();
tonyp@1273 3404 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
tonyp@1273 3405 g1_policy()->record_satb_drain_time(finish_mark_ms);
tonyp@1273 3406 }
tonyp@1273 3407 // Record the number of elements currently on the mark stack, so we
tonyp@1273 3408 // only iterate over these. (Since evacuation may add to the mark
tonyp@1273 3409 // stack, doing more exposes race conditions.) If no mark is in
tonyp@1273 3410 // progress, this will be zero.
tonyp@1273 3411 _cm->set_oops_do_bound();
tonyp@1273 3412
johnc@2910 3413 if (mark_in_progress()) {
tonyp@1273 3414 concurrent_mark()->newCSet();
johnc@2910 3415 }
tonyp@1273 3416
johnc@1829 3417 #if YOUNG_LIST_VERBOSE
johnc@1829 3418 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
johnc@1829 3419 _young_list->print();
johnc@1829 3420 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
johnc@1829 3421 #endif // YOUNG_LIST_VERBOSE
johnc@1829 3422
tonyp@2062 3423 g1_policy()->choose_collection_set(target_pause_time_ms);
tonyp@1273 3424
tonyp@2975 3425 if (_hr_printer.is_active()) {
tonyp@2975 3426 HeapRegion* hr = g1_policy()->collection_set();
tonyp@2975 3427 while (hr != NULL) {
tonyp@2975 3428 G1HRPrinter::RegionType type;
tonyp@2975 3429 if (!hr->is_young()) {
tonyp@2975 3430 type = G1HRPrinter::Old;
tonyp@2975 3431 } else if (hr->is_survivor()) {
tonyp@2975 3432 type = G1HRPrinter::Survivor;
tonyp@2975 3433 } else {
tonyp@2975 3434 type = G1HRPrinter::Eden;
tonyp@2975 3435 }
tonyp@2975 3436 _hr_printer.cset(hr);
tonyp@2975 3437 hr = hr->next_in_collection_set();
tonyp@2975 3438 }
tonyp@2975 3439 }
tonyp@2975 3440
johnc@2910 3441 // We have chosen the complete collection set. If marking is
johnc@2910 3442 // active then, we clear the region fields of any of the
johnc@2910 3443 // concurrent marking tasks whose region fields point into
johnc@2910 3444 // the collection set as these values will become stale. This
johnc@2910 3445 // will cause the owning marking threads to claim a new region
johnc@2910 3446 // when marking restarts.
johnc@2910 3447 if (mark_in_progress()) {
johnc@2910 3448 concurrent_mark()->reset_active_task_region_fields_in_cset();
johnc@2910 3449 }
johnc@2910 3450
tonyp@2974 3451 #ifdef ASSERT
tonyp@2974 3452 VerifyCSetClosure cl;
tonyp@2974 3453 collection_set_iterate(&cl);
tonyp@2974 3454 #endif // ASSERT
tonyp@2062 3455
tonyp@2062 3456 setup_surviving_young_words();
tonyp@2062 3457
tonyp@3028 3458 // Initialize the GC alloc regions.
tonyp@3028 3459 init_gc_alloc_regions();
tonyp@2062 3460
tonyp@2062 3461 // Actually do the work...
tonyp@2062 3462 evacuate_collection_set();
tonyp@2062 3463
tonyp@2062 3464 free_collection_set(g1_policy()->collection_set());
tonyp@2062 3465 g1_policy()->clear_collection_set();
tonyp@2062 3466
tonyp@2062 3467 cleanup_surviving_young_words();
tonyp@2062 3468
tonyp@2062 3469 // Start a new incremental collection set for the next pause.
tonyp@2062 3470 g1_policy()->start_incremental_cset_building();
tonyp@2062 3471
tonyp@2062 3472 // Clear the _cset_fast_test bitmap in anticipation of adding
tonyp@2062 3473 // regions to the incremental collection set for the next
tonyp@2062 3474 // evacuation pause.
tonyp@2062 3475 clear_cset_fast_test();
tonyp@2062 3476
brutisso@3065 3477 _young_list->reset_sampled_info();
brutisso@3065 3478
brutisso@3065 3479 // Don't check the whole heap at this point as the
brutisso@3065 3480 // GC alloc regions from this pause have been tagged
brutisso@3065 3481 // as survivors and moved on to the survivor list.
brutisso@3065 3482 // Survivor regions will fail the !is_young() check.
brutisso@3065 3483 assert(check_young_list_empty(false /* check_heap */),
brutisso@3065 3484 "young list should be empty");
johnc@1829 3485
johnc@1829 3486 #if YOUNG_LIST_VERBOSE
brutisso@3065 3487 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
brutisso@3065 3488 _young_list->print();
johnc@1829 3489 #endif // YOUNG_LIST_VERBOSE
tonyp@1273 3490
brutisso@3065 3491 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
brutisso@3065 3492 _young_list->first_survivor_region(),
brutisso@3065 3493 _young_list->last_survivor_region());
brutisso@3065 3494
brutisso@3065 3495 _young_list->reset_auxilary_lists();
tonyp@1273 3496
tonyp@1273 3497 if (evacuation_failed()) {
tonyp@1273 3498 _summary_bytes_used = recalculate_used();
tonyp@1273 3499 } else {
tonyp@1273 3500 // The "used" of the collection set has already been subtracted
tonyp@1273 3501 // when they were freed. Add in the bytes evacuated.
tonyp@3028 3502 _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
tonyp@1273 3503 }
tonyp@1273 3504
brutisso@3065 3505 if (g1_policy()->during_initial_mark_pause()) {
tonyp@1273 3506 concurrent_mark()->checkpointRootsInitialPost();
tonyp@1273 3507 set_marking_started();
ysr@1280 3508 // CAUTION: after the doConcurrentMark() call below,
ysr@1280 3509 // the concurrent marking thread(s) could be running
ysr@1280 3510 // concurrently with us. Make sure that anything after
ysr@1280 3511 // this point does not assume that we are the only GC thread
ysr@1280 3512 // running. Note: of course, the actual marking work will
ysr@1280 3513 // not start until the safepoint itself is released in
ysr@1280 3514 // ConcurrentGCThread::safepoint_desynchronize().
tonyp@1273 3515 doConcurrentMark();
tonyp@1273 3516 }
tonyp@1273 3517
tonyp@2817 3518 allocate_dummy_regions();
tonyp@2817 3519
johnc@1829 3520 #if YOUNG_LIST_VERBOSE
johnc@1829 3521 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
tonyp@1273 3522 _young_list->print();
johnc@1829 3523 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
johnc@1829 3524 #endif // YOUNG_LIST_VERBOSE
tonyp@1273 3525
tonyp@2715 3526 init_mutator_alloc_region();
tonyp@2715 3527
brutisso@3120 3528 {
brutisso@3120 3529 size_t expand_bytes = g1_policy()->expansion_amount();
brutisso@3120 3530 if (expand_bytes > 0) {
brutisso@3120 3531 size_t bytes_before = capacity();
brutisso@3120 3532 if (!expand(expand_bytes)) {
brutisso@3120 3533 // We failed to expand the heap so let's verify that
brutisso@3120 3534 // the committed/uncommitted amounts match the backing store
brutisso@3120 3535 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
brutisso@3120 3536 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
brutisso@3120 3537 }
brutisso@3120 3538 }
brutisso@3120 3539 }
brutisso@3120 3540
tonyp@1273 3541 double end_time_sec = os::elapsedTime();
tonyp@1273 3542 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
tonyp@1273 3543 g1_policy()->record_pause_time_ms(pause_time_ms);
tonyp@2062 3544 g1_policy()->record_collection_pause_end();
tonyp@1273 3545
tonyp@1524 3546 MemoryService::track_memory_usage();
tonyp@1524 3547
tonyp@3028 3548 // In prepare_for_verify() below we'll need to scan the deferred
tonyp@3028 3549 // update buffers to bring the RSets up-to-date if
tonyp@3028 3550 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
tonyp@3028 3551 // the update buffers we'll probably need to scan cards on the
tonyp@3028 3552 // regions we just allocated to (i.e., the GC alloc
tonyp@3028 3553 // regions). However, during the last GC we called
tonyp@3028 3554 // set_saved_mark() on all the GC alloc regions, so card
tonyp@3028 3555 // scanning might skip the [saved_mark_word()...top()] area of
tonyp@3028 3556 // those regions (i.e., the area we allocated objects into
tonyp@3028 3557 // during the last GC). But it shouldn't. Given that
tonyp@3028 3558 // saved_mark_word() is conditional on whether the GC time stamp
tonyp@3028 3559 // on the region is current or not, by incrementing the GC time
tonyp@3028 3560 // stamp here we invalidate all the GC time stamps on all the
tonyp@3028 3561 // regions and saved_mark_word() will simply return top() for
tonyp@3028 3562 // all the regions. This is a nicer way of ensuring this rather
tonyp@3028 3563 // than iterating over the regions and fixing them. In fact, the
tonyp@3028 3564 // GC time stamp increment here also ensures that
tonyp@3028 3565 // saved_mark_word() will return top() between pauses, i.e.,
tonyp@3028 3566 // during concurrent refinement. So we don't need the
tonyp@3028 3567 // is_gc_active() check to decide which top to use when
tonyp@3028 3568 // scanning cards (see CR 7039627).
tonyp@3028 3569 increment_gc_time_stamp();
tonyp@3028 3570
tonyp@1273 3571 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
tonyp@1273 3572 HandleMark hm; // Discard invalid handles created during verification
tonyp@1273 3573 gclog_or_tty->print(" VerifyAfterGC:");
tonyp@1273 3574 prepare_for_verify();
johnc@2969 3575 Universe::verify(/* allow dirty */ true,
johnc@2969 3576 /* silent */ false,
johnc@2969 3577 /* option */ VerifyOption_G1UsePrevMarking);
tonyp@1273 3578 }
tonyp@1273 3579
tonyp@1273 3580 if (was_enabled) ref_processor()->enable_discovery();
tonyp@1273 3581
tonyp@1273 3582 {
tonyp@1273 3583 size_t expand_bytes = g1_policy()->expansion_amount();
tonyp@1273 3584 if (expand_bytes > 0) {
tonyp@1273 3585 size_t bytes_before = capacity();
tonyp@3114 3586 // No need for an ergo verbose message here,
tonyp@3114 3587 // expansion_amount() does this when it returns a value > 0.
johnc@2504 3588 if (!expand(expand_bytes)) {
johnc@2504 3589 // We failed to expand the heap so let's verify that
johnc@2504 3590 // the committed/uncommitted amounts match the backing store
johnc@2504 3591 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
johnc@2504 3592 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
johnc@2504 3593 }
tonyp@1273 3594 }
tonyp@1273 3595 }
tonyp@2975 3596
tonyp@2975 3597 // We should do this after we potentially expand the heap so
tonyp@2975 3598 // that all the COMMIT events are generated before the end GC
tonyp@2975 3599 // event, and after we retire the GC alloc regions so that all
tonyp@2975 3600 // RETIRE events are generated before the end GC event.
tonyp@2975 3601 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
tonyp@2975 3602
tonyp@2961 3603 // We have to do this after we decide whether to expand the heap or not.
tonyp@2961 3604 g1_policy()->print_heap_transition();
tonyp@1273 3605
tonyp@1273 3606 if (mark_in_progress()) {
tonyp@1273 3607 concurrent_mark()->update_g1_committed();
tonyp@1273 3608 }
tonyp@1273 3609
tonyp@1273 3610 #ifdef TRACESPINNING
tonyp@1273 3611 ParallelTaskTerminator::print_termination_counts();
tonyp@1273 3612 #endif
tonyp@1273 3613
tonyp@1273 3614 gc_epilogue(false);
ysr@777 3615 }
ysr@777 3616
tonyp@1273 3617 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
tonyp@1273 3618 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
tonyp@1273 3619 print_tracing_info();
tonyp@1273 3620 vm_exit(-1);
ysr@777 3621 }
tonyp@1273 3622 }
tonyp@1273 3623
tonyp@2963 3624 _hrs.verify_optional();
tonyp@2472 3625 verify_region_sets_optional();
tonyp@2472 3626
jcoomes@2064 3627 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
jcoomes@2064 3628 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
jcoomes@2064 3629
tonyp@1273 3630 if (PrintHeapAtGC) {
tonyp@1273 3631 Universe::print_heap_after_gc();
ysr@777 3632 }
jmasa@2821 3633 g1mm()->update_counters();
jmasa@2821 3634
tonyp@1319 3635 if (G1SummarizeRSetStats &&
tonyp@1319 3636 (G1SummarizeRSetStatsPeriod > 0) &&
tonyp@1319 3637 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
tonyp@1319 3638 g1_rem_set()->print_summary_info();
tonyp@1319 3639 }
tonyp@2315 3640
tonyp@2315 3641 return true;
ysr@777 3642 }
ysr@777 3643
apetrusenko@1826 3644 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
apetrusenko@1826 3645 {
apetrusenko@1826 3646 size_t gclab_word_size;
apetrusenko@1826 3647 switch (purpose) {
apetrusenko@1826 3648 case GCAllocForSurvived:
apetrusenko@1826 3649 gclab_word_size = YoungPLABSize;
apetrusenko@1826 3650 break;
apetrusenko@1826 3651 case GCAllocForTenured:
apetrusenko@1826 3652 gclab_word_size = OldPLABSize;
apetrusenko@1826 3653 break;
apetrusenko@1826 3654 default:
apetrusenko@1826 3655 assert(false, "unknown GCAllocPurpose");
apetrusenko@1826 3656 gclab_word_size = OldPLABSize;
apetrusenko@1826 3657 break;
apetrusenko@1826 3658 }
apetrusenko@1826 3659 return gclab_word_size;
apetrusenko@1826 3660 }
apetrusenko@1826 3661
tonyp@2715 3662 void G1CollectedHeap::init_mutator_alloc_region() {
tonyp@2715 3663 assert(_mutator_alloc_region.get() == NULL, "pre-condition");
tonyp@2715 3664 _mutator_alloc_region.init();
tonyp@2715 3665 }
tonyp@2715 3666
tonyp@2715 3667 void G1CollectedHeap::release_mutator_alloc_region() {
tonyp@2715 3668 _mutator_alloc_region.release();
tonyp@2715 3669 assert(_mutator_alloc_region.get() == NULL, "post-condition");
tonyp@2715 3670 }
apetrusenko@1826 3671
tonyp@3028 3672 void G1CollectedHeap::init_gc_alloc_regions() {
tonyp@3028 3673 assert_at_safepoint(true /* should_be_vm_thread */);
tonyp@3028 3674
tonyp@3028 3675 _survivor_gc_alloc_region.init();
tonyp@3028 3676 _old_gc_alloc_region.init();
tonyp@3028 3677 HeapRegion* retained_region = _retained_old_gc_alloc_region;
tonyp@3028 3678 _retained_old_gc_alloc_region = NULL;
tonyp@3028 3679
tonyp@3028 3680 // We will discard the retained old GC alloc region if:
tonyp@3028 3681 // a) it's in the collection set (it can happen!),
tonyp@3028 3682 // b) it's already full (no point in using it),
tonyp@3028 3683 // c) it's empty (this means that it was emptied during
tonyp@3028 3684 // a cleanup and it should be on the free list now), or
tonyp@3028 3685 // d) it's humongous (this means that it was emptied
tonyp@3028 3686 // during a cleanup and was added to the free list, but
tonyp@3028 3687 // has been subsequently used to allocate a humongous
tonyp@3028 3688 // object that may be less than the region size).
tonyp@3028 3689 if (retained_region != NULL &&
tonyp@3028 3690 !retained_region->in_collection_set() &&
tonyp@3028 3691 !(retained_region->top() == retained_region->end()) &&
tonyp@3028 3692 !retained_region->is_empty() &&
tonyp@3028 3693 !retained_region->isHumongous()) {
tonyp@3028 3694 retained_region->set_saved_mark();
tonyp@3028 3695 _old_gc_alloc_region.set(retained_region);
tonyp@3028 3696 _hr_printer.reuse(retained_region);
ysr@777 3697 }
ysr@777 3698 }
ysr@777 3699
tonyp@3028 3700 void G1CollectedHeap::release_gc_alloc_regions() {
tonyp@3028 3701 _survivor_gc_alloc_region.release();
tonyp@3028 3702 // If we have an old GC alloc region to release, we'll save it in
tonyp@3028 3703 // _retained_old_gc_alloc_region. If we don't,
tonyp@3028 3704 // _retained_old_gc_alloc_region will become NULL. This is what we
tonyp@3028 3705 // want either way so no reason to check explicitly for either
tonyp@3028 3706 // condition.
tonyp@3028 3707 _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
ysr@777 3708 }
ysr@777 3709
tonyp@3028 3710 void G1CollectedHeap::abandon_gc_alloc_regions() {
tonyp@3028 3711 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
tonyp@3028 3712 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
tonyp@3028 3713 _retained_old_gc_alloc_region = NULL;
ysr@777 3714 }
ysr@777 3715
ysr@777 3716 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
ysr@777 3717 _drain_in_progress = false;
ysr@777 3718 set_evac_failure_closure(cl);
ysr@777 3719 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
ysr@777 3720 }
ysr@777 3721
ysr@777 3722 void G1CollectedHeap::finalize_for_evac_failure() {
ysr@777 3723 assert(_evac_failure_scan_stack != NULL &&
ysr@777 3724 _evac_failure_scan_stack->length() == 0,
ysr@777 3725 "Postcondition");
ysr@777 3726 assert(!_drain_in_progress, "Postcondition");
apetrusenko@1480 3727 delete _evac_failure_scan_stack;
ysr@777 3728 _evac_failure_scan_stack = NULL;
ysr@777 3729 }
ysr@777 3730
ysr@777 3731 // *** Sequential G1 Evacuation
ysr@777 3732
ysr@777 3733 class G1IsAliveClosure: public BoolObjectClosure {
ysr@777 3734 G1CollectedHeap* _g1;
ysr@777 3735 public:
ysr@777 3736 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
ysr@777 3737 void do_object(oop p) { assert(false, "Do not call."); }
ysr@777 3738 bool do_object_b(oop p) {
ysr@777 3739 // It is reachable if it is outside the collection set, or is inside
ysr@777 3740 // and forwarded.
ysr@777 3741 return !_g1->obj_in_cs(p) || p->is_forwarded();
ysr@777 3742 }
ysr@777 3743 };
ysr@777 3744
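// Used while processing weak roots (e.g. JNI weak references): a referent
// that is still in the collection set must already have been forwarded,
// so we simply redirect the pointer to the forwardee.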
ysr@777 3745 class G1KeepAliveClosure: public OopClosure {
ysr@777 3746 G1CollectedHeap* _g1;
ysr@777 3747 public:
ysr@777 3748 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
ysr@1280 3749 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
ysr@1280 3750 void do_oop( oop* p) {
ysr@777 3751 oop obj = *p;
ysr@777 3752 if (_g1->obj_in_cs(obj)) {
ysr@777 3753 assert( obj->is_forwarded(), "invariant" );
ysr@777 3754 *p = obj->forwardee();
ysr@777 3755 }
ysr@777 3756 }
ysr@777 3757 };
ysr@777 3758
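// Deferred remembered set update: rather than updating the RSet right
// away, mark the card containing the reference as deferred and enqueue
// it on a dirty card queue so the update can be applied later (used
// when G1DeferredRSUpdate is enabled).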
iveresov@1051 3759 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
iveresov@1051 3760 private:
iveresov@1051 3761 G1CollectedHeap* _g1;
iveresov@1051 3762 DirtyCardQueue *_dcq;
iveresov@1051 3763 CardTableModRefBS* _ct_bs;
iveresov@1051 3764
iveresov@1051 3765 public:
iveresov@1051 3766 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
iveresov@1051 3767 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
iveresov@1051 3768
ysr@1280 3769 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 3770 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 3771 template <class T> void do_oop_work(T* p) {
iveresov@1051 3772 assert(_from->is_in_reserved(p), "paranoia");
ysr@1280 3773 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
ysr@1280 3774 !_from->is_survivor()) {
iveresov@1051 3775 size_t card_index = _ct_bs->index_for(p);
iveresov@1051 3776 if (_ct_bs->mark_card_deferred(card_index)) {
iveresov@1051 3777 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
iveresov@1051 3778 }
iveresov@1051 3779 }
iveresov@1051 3780 }
iveresov@1051 3781 };
iveresov@1051 3782
ysr@777 3783 class RemoveSelfPointerClosure: public ObjectClosure {
ysr@777 3784 private:
ysr@777 3785 G1CollectedHeap* _g1;
ysr@777 3786 ConcurrentMark* _cm;
ysr@777 3787 HeapRegion* _hr;
ysr@777 3788 size_t _prev_marked_bytes;
ysr@777 3789 size_t _next_marked_bytes;
iveresov@1051 3790 OopsInHeapRegionClosure *_cl;
ysr@777 3791 public:
tonyp@2453 3792 RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
tonyp@2453 3793 OopsInHeapRegionClosure* cl) :
tonyp@2453 3794 _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
iveresov@1051 3795 _next_marked_bytes(0), _cl(cl) {}
ysr@777 3796
ysr@777 3797 size_t prev_marked_bytes() { return _prev_marked_bytes; }
ysr@777 3798 size_t next_marked_bytes() { return _next_marked_bytes; }
ysr@777 3799
tonyp@2453 3800 // <original comment>
iveresov@787 3801 // The original idea here was to coalesce evacuated and dead objects.
iveresov@787 3802 // However that caused complications with the block offset table (BOT).
iveresov@787 3803 // In particular if there were two TLABs, one of them partially refined.
iveresov@787 3804 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
iveresov@787 3805 // The BOT entries of the unrefined part of TLAB_2 point to the start
iveresov@787 3806 // of TLAB_2. If the last object of the TLAB_1 and the first object
iveresov@787 3807 // of TLAB_2 are coalesced, then the cards of the unrefined part
iveresov@787 3808 // would point into middle of the filler object.
tonyp@2453 3809 // The current approach is to not coalesce and leave the BOT contents intact.
tonyp@2453 3810 // </original comment>
iveresov@787 3811 //
tonyp@2453 3812 // We now reset the BOT when we start the object iteration over the
tonyp@2453 3813 // region and refine its entries for every object we come across. So
tonyp@2453 3814 // the above comment is not really relevant and we should be able
tonyp@2453 3815 // to coalesce dead objects if we want to.
iveresov@787 3816 void do_object(oop obj) {
tonyp@2453 3817 HeapWord* obj_addr = (HeapWord*) obj;
tonyp@2453 3818 assert(_hr->is_in(obj_addr), "sanity");
tonyp@2453 3819 size_t obj_size = obj->size();
tonyp@2453 3820 _hr->update_bot_for_object(obj_addr, obj_size);
iveresov@787 3821 if (obj->is_forwarded() && obj->forwardee() == obj) {
iveresov@787 3822 // The object failed to move.
iveresov@787 3823 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
iveresov@787 3824 _cm->markPrev(obj);
iveresov@787 3825 assert(_cm->isPrevMarked(obj), "Should be marked!");
tonyp@2453 3826 _prev_marked_bytes += (obj_size * HeapWordSize);
iveresov@787 3827 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
iveresov@787 3828 _cm->markAndGrayObjectIfNecessary(obj);
iveresov@787 3829 }
iveresov@787 3830 obj->set_mark(markOopDesc::prototype());
iveresov@787 3831 // While we were processing RSet buffers during the
iveresov@787 3832 // collection, we actually didn't scan any cards on the
iveresov@787 3833 // collection set, since we didn't want to update remembered
iveresov@787 3834 // sets with entries that point into the collection set, given
iveresov@787 3835 // that live objects from the collection set are about to move
iveresov@787 3836 // and such entries will be stale very soon. This change also
iveresov@787 3837 // dealt with a reliability issue which involved scanning a
iveresov@787 3838 // card in the collection set and coming across an array that
iveresov@787 3839 // was being chunked and looking malformed. The problem is
iveresov@787 3840 // that, if evacuation fails, we might have remembered set
iveresov@787 3841 // entries missing given that we skipped cards on the
iveresov@787 3842 // collection set. So, we'll recreate such entries now.
iveresov@1051 3843 obj->oop_iterate(_cl);
iveresov@787 3844 assert(_cm->isPrevMarked(obj), "Should be marked!");
iveresov@787 3845 } else {
iveresov@787 3846 // The object has been either evacuated or is dead. Fill it with a
iveresov@787 3847 // dummy object.
tonyp@2453 3848 MemRegion mr((HeapWord*)obj, obj_size);
jcoomes@916 3849 CollectedHeap::fill_with_object(mr);
ysr@777 3850 _cm->clearRangeBothMaps(mr);
ysr@777 3851 }
ysr@777 3852 }
ysr@777 3853 };
ysr@777 3854
ysr@777 3855 void G1CollectedHeap::remove_self_forwarding_pointers() {
johnc@2060 3856 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
iveresov@1051 3857 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
iveresov@1051 3858 UpdateRSetDeferred deferred_update(_g1h, &dcq);
iveresov@1051 3859 OopsInHeapRegionClosure *cl;
iveresov@1051 3860 if (G1DeferredRSUpdate) {
iveresov@1051 3861 cl = &deferred_update;
iveresov@1051 3862 } else {
iveresov@1051 3863 cl = &immediate_update;
iveresov@1051 3864 }
ysr@777 3865 HeapRegion* cur = g1_policy()->collection_set();
ysr@777 3866 while (cur != NULL) {
ysr@777 3867 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
tonyp@2453 3868 assert(!cur->isHumongous(), "sanity");
tonyp@2453 3869
ysr@777 3870 if (cur->evacuation_failed()) {
ysr@777 3871 assert(cur->in_collection_set(), "bad CS");
tonyp@2453 3872 RemoveSelfPointerClosure rspc(_g1h, cur, cl);
tonyp@2453 3873
tonyp@2974 3874 // In the common case we make sure that this is done when the
tonyp@2974 3875 // region is freed so that it is "ready-to-go" when it's
tonyp@2974 3876 // re-allocated. However, when evacuation failure happens, a
tonyp@2974 3877 // region will remain in the heap and might ultimately be added
tonyp@2974 3878 // to a CSet in the future. So we have to be careful here and
tonyp@2974 3879 // make sure the region's RSet is ready for parallel iteration
tonyp@2974 3880 // whenever this might be required in the future.
tonyp@2974 3881 cur->rem_set()->reset_for_par_iteration();
tonyp@2453 3882 cur->reset_bot();
iveresov@1051 3883 cl->set_region(cur);
ysr@777 3884 cur->object_iterate(&rspc);
ysr@777 3885
ysr@777 3886 // A number of manipulations to make the TAMS be the current top,
ysr@777 3887 // and the marked bytes be the ones observed in the iteration.
ysr@777 3888 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
ysr@777 3889 // The comments below are the postconditions achieved by the
ysr@777 3890 // calls. Note especially the last such condition, which says that
ysr@777 3891 // the count of marked bytes has been properly restored.
ysr@777 3892 cur->note_start_of_marking(false);
ysr@777 3893 // _next_top_at_mark_start == top, _next_marked_bytes == 0
ysr@777 3894 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
ysr@777 3895 // _next_marked_bytes == prev_marked_bytes.
ysr@777 3896 cur->note_end_of_marking();
ysr@777 3897 // _prev_top_at_mark_start == top(),
ysr@777 3898 // _prev_marked_bytes == prev_marked_bytes
ysr@777 3899 }
ysr@777 3900 // If there is no mark in progress, we modified the _next variables
ysr@777 3901 // above needlessly, but harmlessly.
ysr@777 3902 if (_g1h->mark_in_progress()) {
ysr@777 3903 cur->note_start_of_marking(false);
ysr@777 3904 // _next_top_at_mark_start == top, _next_marked_bytes == 0
ysr@777 3905 // _next_marked_bytes == next_marked_bytes.
ysr@777 3906 }
ysr@777 3907
ysr@777 3908 // Now make sure the region has the right index in the sorted array.
ysr@777 3909 g1_policy()->note_change_in_marked_bytes(cur);
ysr@777 3910 }
ysr@777 3911 cur = cur->next_in_collection_set();
ysr@777 3912 }
ysr@777 3913 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
ysr@777 3914
ysr@777 3915 // Now restore saved marks, if any.
ysr@777 3916 if (_objs_with_preserved_marks != NULL) {
ysr@777 3917 assert(_preserved_marks_of_objs != NULL, "Both or none.");
ysr@777 3918 guarantee(_objs_with_preserved_marks->length() ==
ysr@777 3919 _preserved_marks_of_objs->length(), "Both or none.");
ysr@777 3920 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
ysr@777 3921 oop obj = _objs_with_preserved_marks->at(i);
ysr@777 3922 markOop m = _preserved_marks_of_objs->at(i);
ysr@777 3923 obj->set_mark(m);
ysr@777 3924 }
ysr@777 3925 // Delete the preserved marks growable arrays (allocated on the C heap).
ysr@777 3926 delete _objs_with_preserved_marks;
ysr@777 3927 delete _preserved_marks_of_objs;
ysr@777 3928 _objs_with_preserved_marks = NULL;
ysr@777 3929 _preserved_marks_of_objs = NULL;
ysr@777 3930 }
ysr@777 3931 }
ysr@777 3932
ysr@777 3933 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
ysr@777 3934 _evac_failure_scan_stack->push(obj);
ysr@777 3935 }
ysr@777 3936
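// Drain the stack of objects that failed evacuation, applying the
// registered evacuation-failure closure to the fields of each
// self-forwarded object.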
ysr@777 3937 void G1CollectedHeap::drain_evac_failure_scan_stack() {
ysr@777 3938 assert(_evac_failure_scan_stack != NULL, "precondition");
ysr@777 3939
ysr@777 3940 while (_evac_failure_scan_stack->length() > 0) {
ysr@777 3941 oop obj = _evac_failure_scan_stack->pop();
ysr@777 3942 _evac_failure_closure->set_region(heap_region_containing(obj));
ysr@777 3943 obj->oop_iterate_backwards(_evac_failure_closure);
ysr@777 3944 }
ysr@777 3945 }
ysr@777 3946
ysr@777 3947 oop
ysr@777 3948 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
ysr@777 3949 oop old) {
tonyp@2855 3950 assert(obj_in_cs(old),
tonyp@2855 3951 err_msg("obj: "PTR_FORMAT" should still be in the CSet",
tonyp@2855 3952 (HeapWord*) old));
ysr@777 3953 markOop m = old->mark();
ysr@777 3954 oop forward_ptr = old->forward_to_atomic(old);
ysr@777 3955 if (forward_ptr == NULL) {
ysr@777 3956 // Forward-to-self succeeded.
ysr@777 3957 if (_evac_failure_closure != cl) {
ysr@777 3958 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
ysr@777 3959 assert(!_drain_in_progress,
ysr@777 3960 "Should only be true while someone holds the lock.");
ysr@777 3961 // Set the global evac-failure closure to the current thread's.
ysr@777 3962 assert(_evac_failure_closure == NULL, "Or locking has failed.");
ysr@777 3963 set_evac_failure_closure(cl);
ysr@777 3964 // Now do the common part.
ysr@777 3965 handle_evacuation_failure_common(old, m);
ysr@777 3966 // Reset to NULL.
ysr@777 3967 set_evac_failure_closure(NULL);
ysr@777 3968 } else {
ysr@777 3969 // The lock is already held, and this is recursive.
ysr@777 3970 assert(_drain_in_progress, "This should only be the recursive case.");
ysr@777 3971 handle_evacuation_failure_common(old, m);
ysr@777 3972 }
ysr@777 3973 return old;
ysr@777 3974 } else {
tonyp@2855 3975 // Forward-to-self failed. Either someone else managed to allocate
tonyp@2855 3976 // space for this object (old != forward_ptr) or they beat us in
tonyp@2855 3977 // self-forwarding it (old == forward_ptr).
tonyp@2855 3978 assert(old == forward_ptr || !obj_in_cs(forward_ptr),
tonyp@2855 3979 err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
tonyp@2855 3980 "should not be in the CSet",
tonyp@2855 3981 (HeapWord*) old, (HeapWord*) forward_ptr));
ysr@777 3982 return forward_ptr;
ysr@777 3983 }
ysr@777 3984 }
ysr@777 3985
ysr@777 3986 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
ysr@777 3987 set_evacuation_failed(true);
ysr@777 3988
ysr@777 3989 preserve_mark_if_necessary(old, m);
ysr@777 3990
ysr@777 3991 HeapRegion* r = heap_region_containing(old);
ysr@777 3992 if (!r->evacuation_failed()) {
ysr@777 3993 r->set_evacuation_failed(true);
tonyp@2975 3994 _hr_printer.evac_failure(r);
ysr@777 3995 }
ysr@777 3996
ysr@777 3997 push_on_evac_failure_scan_stack(old);
ysr@777 3998
ysr@777 3999 if (!_drain_in_progress) {
ysr@777 4000 // prevent recursion in copy_to_survivor_space()
ysr@777 4001 _drain_in_progress = true;
ysr@777 4002 drain_evac_failure_scan_stack();
ysr@777 4003 _drain_in_progress = false;
ysr@777 4004 }
ysr@777 4005 }
ysr@777 4006
ysr@777 4007 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
ysr@2380 4008 assert(evacuation_failed(), "Oversaving!");
ysr@2380 4009 // We want to call the "for_promotion_failure" version only in the
ysr@2380 4010 // case of a promotion failure.
ysr@2380 4011 if (m->must_be_preserved_for_promotion_failure(obj)) {
ysr@777 4012 if (_objs_with_preserved_marks == NULL) {
ysr@777 4013 assert(_preserved_marks_of_objs == NULL, "Both or none.");
ysr@777 4014 _objs_with_preserved_marks =
ysr@777 4015 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
ysr@777 4016 _preserved_marks_of_objs =
ysr@777 4017 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
ysr@777 4018 }
ysr@777 4019 _objs_with_preserved_marks->push(obj);
ysr@777 4020 _preserved_marks_of_objs->push(m);
ysr@777 4021 }
ysr@777 4022 }
ysr@777 4023
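// Direct (non-PLAB) allocation during an evacuation pause: try the
// requested destination (survivor or old) first and fall back to the
// other one if that fails.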
ysr@777 4024 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
ysr@777 4025 size_t word_size) {
tonyp@3028 4026 if (purpose == GCAllocForSurvived) {
tonyp@3028 4027 HeapWord* result = survivor_attempt_allocation(word_size);
tonyp@3028 4028 if (result != NULL) {
tonyp@3028 4029 return result;
ysr@777 4030 } else {
tonyp@3028 4031 // Let's try to allocate in the old gen in case we can fit the
tonyp@3028 4032 // object there.
tonyp@3028 4033 return old_attempt_allocation(word_size);
ysr@777 4034 }
tonyp@3028 4035 } else {
tonyp@3028 4036 assert(purpose == GCAllocForTenured, "sanity");
tonyp@3028 4037 HeapWord* result = old_attempt_allocation(word_size);
tonyp@3028 4038 if (result != NULL) {
tonyp@3028 4039 return result;
ysr@777 4040 } else {
tonyp@3028 4041 // Let's try to allocate in the survivors in case we can fit the
tonyp@3028 4042 // object there.
tonyp@3028 4043 return survivor_attempt_allocation(word_size);
ysr@777 4044 }
tonyp@3028 4045 }
tonyp@3028 4046
tonyp@3028 4047 ShouldNotReachHere();
tonyp@3028 4048 // Trying to keep some compilers happy.
tonyp@3028 4049 return NULL;
ysr@777 4050 }
ysr@777 4051
ysr@777 4052 #ifndef PRODUCT
ysr@777 4053 bool GCLabBitMapClosure::do_bit(size_t offset) {
ysr@777 4054 HeapWord* addr = _bitmap->offsetToHeapWord(offset);
ysr@777 4055 guarantee(_cm->isMarked(oop(addr)), "it should be!");
ysr@777 4056 return true;
ysr@777 4057 }
ysr@777 4058 #endif // PRODUCT
ysr@777 4059
johnc@3086 4060 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
johnc@3086 4061 ParGCAllocBuffer(gclab_word_size),
johnc@3086 4062 _should_mark_objects(false),
johnc@3086 4063 _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
johnc@3086 4064 _retired(false)
johnc@3086 4065 {
johnc@3086 4066   // _should_mark_objects is set to true when G1ParCopyHelper needs to
johnc@3086 4067 // mark the forwarded location of an evacuated object.
johnc@3086 4068 // We set _should_mark_objects to true if marking is active, i.e. when we
johnc@3086 4069 // need to propagate a mark, or during an initial mark pause, i.e. when we
johnc@3086 4070 // need to mark objects immediately reachable by the roots.
johnc@3086 4071 if (G1CollectedHeap::heap()->mark_in_progress() ||
johnc@3086 4072 G1CollectedHeap::heap()->g1_policy()->during_initial_mark_pause()) {
johnc@3086 4073 _should_mark_objects = true;
johnc@3086 4074 }
johnc@3086 4075 }
johnc@3086 4076
ysr@1280 4077 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
ysr@1280 4078 : _g1h(g1h),
ysr@1280 4079 _refs(g1h->task_queue(queue_num)),
ysr@1280 4080 _dcq(&g1h->dirty_card_queue_set()),
ysr@1280 4081 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
ysr@1280 4082 _g1_rem(g1h->g1_rem_set()),
ysr@1280 4083 _hash_seed(17), _queue_num(queue_num),
ysr@1280 4084 _term_attempts(0),
apetrusenko@1826 4085 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
apetrusenko@1826 4086 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
ysr@1280 4087 _age_table(false),
ysr@1280 4088 _strong_roots_time(0), _term_time(0),
ysr@1280 4089 _alloc_buffer_waste(0), _undo_waste(0)
ysr@1280 4090 {
ysr@1280 4091   // we allocate young_cset_length() + 1 entries, since we
ysr@1280 4092   // "sacrifice" entry 0 to keep track of surviving bytes for
ysr@1280 4093   // non-young regions (where the age is -1).
ysr@1280 4094   // We also add a few padding elements at the beginning and at
ysr@1280 4095   // the end in an attempt to eliminate cache contention.
ysr@1280 4096 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
ysr@1280 4097 size_t array_length = PADDING_ELEM_NUM +
ysr@1280 4098 real_length +
ysr@1280 4099 PADDING_ELEM_NUM;
ysr@1280 4100 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
ysr@1280 4101 if (_surviving_young_words_base == NULL)
ysr@1280 4102 vm_exit_out_of_memory(array_length * sizeof(size_t),
ysr@1280 4103 "Not enough space for young surv histo.");
ysr@1280 4104 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
ysr@1280 4105 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
ysr@1280 4106
apetrusenko@1826 4107 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
apetrusenko@1826 4108 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
apetrusenko@1826 4109
ysr@1280 4110 _start = os::elapsedTime();
ysr@1280 4111 }
ysr@777 4112
jcoomes@2064 4113 void
jcoomes@2064 4114 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
jcoomes@2064 4115 {
jcoomes@2064 4116 st->print_raw_cr("GC Termination Stats");
jcoomes@2064 4117 st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
jcoomes@2064 4118 " ------waste (KiB)------");
jcoomes@2064 4119 st->print_raw_cr("thr ms ms % ms % attempts"
jcoomes@2064 4120 " total alloc undo");
jcoomes@2064 4121 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
jcoomes@2064 4122 " ------- ------- -------");
jcoomes@2064 4123 }
jcoomes@2064 4124
jcoomes@2064 4125 void
jcoomes@2064 4126 G1ParScanThreadState::print_termination_stats(int i,
jcoomes@2064 4127 outputStream* const st) const
jcoomes@2064 4128 {
jcoomes@2064 4129 const double elapsed_ms = elapsed_time() * 1000.0;
jcoomes@2064 4130 const double s_roots_ms = strong_roots_time() * 1000.0;
jcoomes@2064 4131 const double term_ms = term_time() * 1000.0;
jcoomes@2064 4132 st->print_cr("%3d %9.2f %9.2f %6.2f "
jcoomes@2064 4133 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
jcoomes@2064 4134 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
jcoomes@2064 4135 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
jcoomes@2064 4136 term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
jcoomes@2064 4137 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
jcoomes@2064 4138 alloc_buffer_waste() * HeapWordSize / K,
jcoomes@2064 4139 undo_waste() * HeapWordSize / K);
jcoomes@2064 4140 }
jcoomes@2064 4141
jcoomes@2217 4142 #ifdef ASSERT
jcoomes@2217 4143 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
jcoomes@2217 4144 assert(ref != NULL, "invariant");
jcoomes@2217 4145 assert(UseCompressedOops, "sanity");
jcoomes@2217 4146 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
jcoomes@2217 4147 oop p = oopDesc::load_decode_heap_oop(ref);
jcoomes@2217 4148 assert(_g1h->is_in_g1_reserved(p),
jcoomes@2217 4149 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
jcoomes@2217 4150 return true;
jcoomes@2217 4151 }
jcoomes@2217 4152
jcoomes@2217 4153 bool G1ParScanThreadState::verify_ref(oop* ref) const {
jcoomes@2217 4154 assert(ref != NULL, "invariant");
jcoomes@2217 4155 if (has_partial_array_mask(ref)) {
jcoomes@2217 4156 // Must be in the collection set--it's already been copied.
jcoomes@2217 4157 oop p = clear_partial_array_mask(ref);
jcoomes@2217 4158 assert(_g1h->obj_in_cs(p),
jcoomes@2217 4159 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
jcoomes@2217 4160 } else {
jcoomes@2217 4161 oop p = oopDesc::load_decode_heap_oop(ref);
jcoomes@2217 4162 assert(_g1h->is_in_g1_reserved(p),
jcoomes@2217 4163 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
jcoomes@2217 4164 }
jcoomes@2217 4165 return true;
jcoomes@2217 4166 }
jcoomes@2217 4167
jcoomes@2217 4168 bool G1ParScanThreadState::verify_task(StarTask ref) const {
jcoomes@2217 4169 if (ref.is_narrow()) {
jcoomes@2217 4170 return verify_ref((narrowOop*) ref);
jcoomes@2217 4171 } else {
jcoomes@2217 4172 return verify_ref((oop*) ref);
jcoomes@2217 4173 }
jcoomes@2217 4174 }
jcoomes@2217 4175 #endif // ASSERT
jcoomes@2217 4176
jcoomes@2217 4177 void G1ParScanThreadState::trim_queue() {
jcoomes@2217 4178 StarTask ref;
jcoomes@2217 4179 do {
jcoomes@2217 4180 // Drain the overflow stack first, so other threads can steal.
jcoomes@2217 4181 while (refs()->pop_overflow(ref)) {
jcoomes@2217 4182 deal_with_reference(ref);
jcoomes@2217 4183 }
jcoomes@2217 4184 while (refs()->pop_local(ref)) {
jcoomes@2217 4185 deal_with_reference(ref);
jcoomes@2217 4186 }
jcoomes@2217 4187 } while (!refs()->is_empty());
jcoomes@2217 4188 }
jcoomes@2217 4189
ysr@777 4190 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
ysr@777 4191 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
johnc@3086 4192 _par_scan_state(par_scan_state),
johnc@3086 4193 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
johnc@3086 4194 _mark_in_progress(_g1->mark_in_progress()) { }
johnc@3086 4195
johnc@3086 4196 template <class T> void G1ParCopyHelper::mark_object(T* p) {
johnc@3086 4197 // This is called from do_oop_work for objects that are not
johnc@3086 4198 // in the collection set. Objects in the collection set
johnc@3086 4199 // are marked after they have been evacuated.
ysr@777 4200
ysr@1280 4201 T heap_oop = oopDesc::load_heap_oop(p);
ysr@1280 4202 if (!oopDesc::is_null(heap_oop)) {
ysr@1280 4203 oop obj = oopDesc::decode_heap_oop(heap_oop);
ysr@1280 4204 HeapWord* addr = (HeapWord*)obj;
tonyp@2855 4205 if (_g1->is_in_g1_reserved(addr)) {
ysr@777 4206 _cm->grayRoot(oop(addr));
tonyp@2855 4207 }
ysr@777 4208 }
ysr@777 4209 }
ysr@777 4210
johnc@3086 4211 oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_copy) {
ysr@777 4212 size_t word_sz = old->size();
ysr@777 4213 HeapRegion* from_region = _g1->heap_region_containing_raw(old);
ysr@777 4214 // +1 to make the -1 indexes valid...
ysr@777 4215 int young_index = from_region->young_index_in_cset()+1;
ysr@777 4216 assert( (from_region->is_young() && young_index > 0) ||
ysr@777 4217 (!from_region->is_young() && young_index == 0), "invariant" );
ysr@777 4218 G1CollectorPolicy* g1p = _g1->g1_policy();
ysr@777 4219 markOop m = old->mark();
apetrusenko@980 4220 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
apetrusenko@980 4221 : m->age();
apetrusenko@980 4222 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
ysr@777 4223 word_sz);
ysr@777 4224 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
ysr@777 4225 oop obj = oop(obj_ptr);
ysr@777 4226
ysr@777 4227 if (obj_ptr == NULL) {
ysr@777 4228 // This will either forward-to-self, or detect that someone else has
ysr@777 4229 // installed a forwarding pointer.
ysr@777 4230 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
ysr@777 4231 return _g1->handle_evacuation_failure_par(cl, old);
ysr@777 4232 }
ysr@777 4233
tonyp@961 4234 // We're going to allocate linearly, so might as well prefetch ahead.
tonyp@961 4235 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
tonyp@961 4236
ysr@777 4237 oop forward_ptr = old->forward_to_atomic(obj);
ysr@777 4238 if (forward_ptr == NULL) {
ysr@777 4239 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
tonyp@961 4240 if (g1p->track_object_age(alloc_purpose)) {
tonyp@961 4241 // We could simply do obj->incr_age(). However, this causes a
tonyp@961 4242 // performance issue. obj->incr_age() will first check whether
tonyp@961 4243 // the object has a displaced mark by checking its mark word;
tonyp@961 4244 // getting the mark word from the new location of the object
tonyp@961 4245 // stalls. So, given that we already have the mark word and we
tonyp@961 4246 // are about to install it anyway, it's better to increase the
tonyp@961 4247 // age on the mark word, when the object does not have a
tonyp@961 4248 // displaced mark word. We're not expecting many objects to have
tonyp@961 4249 // a displaced marked word, so that case is not optimized
tonyp@961 4250       // a displaced mark word, so that case is not optimized
tonyp@961 4251
tonyp@961 4252 if (m->has_displaced_mark_helper()) {
tonyp@961 4253 // in this case, we have to install the mark word first,
tonyp@961 4254 // otherwise obj looks to be forwarded (the old mark word,
tonyp@961 4255 // which contains the forward pointer, was copied)
tonyp@961 4256 obj->set_mark(m);
tonyp@961 4257 obj->incr_age();
tonyp@961 4258 } else {
tonyp@961 4259 m = m->incr_age();
apetrusenko@980 4260 obj->set_mark(m);
tonyp@961 4261 }
apetrusenko@980 4262 _par_scan_state->age_table()->add(obj, word_sz);
apetrusenko@980 4263 } else {
apetrusenko@980 4264 obj->set_mark(m);
tonyp@961 4265 }
tonyp@961 4266
johnc@3086 4267 // Mark the evacuated object or propagate "next" mark bit
johnc@3086 4268 if (should_mark_copy) {
ysr@777 4269 if (!use_local_bitmaps ||
ysr@777 4270 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
ysr@777 4271 // if we couldn't mark it on the local bitmap (this happens when
ysr@777 4272 // the object was not allocated in the GCLab), we have to bite
ysr@777 4273 // the bullet and do the standard parallel mark
ysr@777 4274 _cm->markAndGrayObjectIfNecessary(obj);
ysr@777 4275 }
johnc@3086 4276
ysr@777 4277 if (_g1->isMarkedNext(old)) {
johnc@3086 4278 // Unmark the object's old location so that marking
johnc@3086 4279 // doesn't think the old object is alive.
ysr@777 4280 _cm->nextMarkBitMap()->parClear((HeapWord*)old);
ysr@777 4281 }
ysr@777 4282 }
ysr@777 4283
ysr@777 4284 size_t* surv_young_words = _par_scan_state->surviving_young_words();
ysr@777 4285 surv_young_words[young_index] += word_sz;
ysr@777 4286
ysr@777 4287 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
ysr@777 4288 arrayOop(old)->set_length(0);
ysr@1280 4289 oop* old_p = set_partial_array_mask(old);
ysr@1280 4290 _par_scan_state->push_on_queue(old_p);
ysr@777 4291 } else {
tonyp@961 4292 // No point in using the slower heap_region_containing() method,
tonyp@961 4293 // given that we know obj is in the heap.
tonyp@961 4294 _scanner->set_region(_g1->heap_region_containing_raw(obj));
ysr@777 4295 obj->oop_iterate_backwards(_scanner);
ysr@777 4296 }
ysr@777 4297 } else {
ysr@777 4298 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
ysr@777 4299 obj = forward_ptr;
ysr@777 4300 }
ysr@777 4301 return obj;
ysr@777 4302 }
ysr@777 4303
johnc@3086 4304 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
ysr@1280 4305 template <class T>
johnc@3086 4306 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
ysr@1280 4307 ::do_oop_work(T* p) {
ysr@1280 4308 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 4309 assert(barrier != G1BarrierRS || obj != NULL,
ysr@777 4310 "Precondition: G1BarrierRS implies obj is nonNull");
ysr@777 4311
johnc@3086 4312 // Marking:
johnc@3086 4313 // If the object is in the collection set, then the thread
johnc@3086 4314 // that copies the object should mark, or propagate the
johnc@3086 4315 // mark to, the evacuated object.
johnc@3086 4316   // If the object is not in the collection set then we only
johnc@3086 4317   // call the mark_object() method if the template parameter
johnc@3086 4318   // do_mark_object is true (which it will be for root
johnc@3086 4319   // scanning closures during an initial mark
johnc@3086 4320   // pause).
johnc@3086 4321 // The mark_object() method first checks whether the object
johnc@3086 4322 // is marked and, if not, attempts to mark the object.
johnc@3086 4323
tonyp@961 4324 // here the null check is implicit in the cset_fast_test() test
iveresov@1696 4325 if (_g1->in_cset_fast_test(obj)) {
tonyp@961 4326 if (obj->is_forwarded()) {
ysr@1280 4327 oopDesc::encode_store_heap_oop(p, obj->forwardee());
johnc@3086 4328 // If we are a root scanning closure during an initial
johnc@3086 4329 // mark pause (i.e. do_mark_object will be true) then
johnc@3086 4330 // we also need to handle marking of roots in the
johnc@3086 4331 // event of an evacuation failure. In the event of an
johnc@3086 4332 // evacuation failure, the object is forwarded to itself
johnc@3086 4333 // and not copied so let's mark it here.
johnc@3086 4334 if (do_mark_object && obj->forwardee() == obj) {
johnc@3086 4335 mark_object(p);
johnc@3086 4336 }
tonyp@961 4337 } else {
johnc@3086 4338 // We need to mark the copied object if we're a root scanning
johnc@3086 4339 // closure during an initial mark pause (i.e. do_mark_object
johnc@3086 4340 // will be true), or the object is already marked and we need
johnc@3086 4341 // to propagate the mark to the evacuated copy.
johnc@3086 4342 bool should_mark_copy = do_mark_object ||
johnc@3086 4343 _during_initial_mark ||
johnc@3086 4344 (_mark_in_progress && !_g1->is_obj_ill(obj));
johnc@3086 4345
johnc@3086 4346 oop copy_oop = copy_to_survivor_space(obj, should_mark_copy);
ysr@1280 4347 oopDesc::encode_store_heap_oop(p, copy_oop);
ysr@777 4348 }
tonyp@961 4349 // When scanning the RS, we only care about objs in CS.
tonyp@961 4350 if (barrier == G1BarrierRS) {
iveresov@1051 4351 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
ysr@777 4352 }
johnc@3086 4353 } else {
johnc@3086 4354 // The object is not in collection set. If we're a root scanning
johnc@3086 4355 // closure during an initial mark pause (i.e. do_mark_object will
johnc@3086 4356 // be true) then attempt to mark the object.
johnc@3086 4357 if (do_mark_object) {
johnc@3086 4358 mark_object(p);
johnc@3086 4359 }
tonyp@961 4360 }
tonyp@961 4361
tonyp@961 4362 if (barrier == G1BarrierEvac && obj != NULL) {
iveresov@1051 4363 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
tonyp@961 4364 }
tonyp@961 4365
tonyp@961 4366 if (do_gen_barrier && obj != NULL) {
tonyp@961 4367 par_do_barrier(p);
tonyp@961 4368 }
tonyp@961 4369 }
tonyp@961 4370
iveresov@1696 4371 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
iveresov@1696 4372 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
ysr@1280 4373
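// Object arrays are scanned in chunks of ParGCArrayScanChunk elements.
// The length field of the from-space copy serves as a cursor recording
// how far scanning has progressed; the to-space copy keeps the real length.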
ysr@1280 4374 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
tonyp@961 4375 assert(has_partial_array_mask(p), "invariant");
tonyp@961 4376 oop old = clear_partial_array_mask(p);
ysr@777 4377 assert(old->is_objArray(), "must be obj array");
ysr@777 4378 assert(old->is_forwarded(), "must be forwarded");
ysr@777 4379 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
ysr@777 4380
ysr@777 4381 objArrayOop obj = objArrayOop(old->forwardee());
ysr@777 4382 assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
ysr@777 4383 // Process ParGCArrayScanChunk elements now
ysr@777 4384 // and push the remainder back onto queue
ysr@777 4385 int start = arrayOop(old)->length();
ysr@777 4386 int end = obj->length();
ysr@777 4387 int remainder = end - start;
ysr@777 4388 assert(start <= end, "just checking");
ysr@777 4389 if (remainder > 2 * ParGCArrayScanChunk) {
ysr@777 4390 // Test above combines last partial chunk with a full chunk
ysr@777 4391 end = start + ParGCArrayScanChunk;
ysr@777 4392 arrayOop(old)->set_length(end);
ysr@777 4393 // Push remainder.
ysr@1280 4394 oop* old_p = set_partial_array_mask(old);
ysr@1280 4395 assert(arrayOop(old)->length() < obj->length(), "Empty push?");
ysr@1280 4396 _par_scan_state->push_on_queue(old_p);
ysr@777 4397 } else {
ysr@777 4398 // Restore length so that the heap remains parsable in
ysr@777 4399 // case of evacuation failure.
ysr@777 4400 arrayOop(old)->set_length(end);
ysr@777 4401 }
ysr@1280 4402 _scanner.set_region(_g1->heap_region_containing_raw(obj));
ysr@777 4403 // process our set of indices (include header in first chunk)
ysr@1280 4404 obj->oop_iterate_range(&_scanner, start, end);
ysr@777 4405 }
ysr@777 4406
ysr@777 4407 class G1ParEvacuateFollowersClosure : public VoidClosure {
ysr@777 4408 protected:
ysr@777 4409 G1CollectedHeap* _g1h;
ysr@777 4410 G1ParScanThreadState* _par_scan_state;
ysr@777 4411 RefToScanQueueSet* _queues;
ysr@777 4412 ParallelTaskTerminator* _terminator;
ysr@777 4413
ysr@777 4414 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
ysr@777 4415 RefToScanQueueSet* queues() { return _queues; }
ysr@777 4416 ParallelTaskTerminator* terminator() { return _terminator; }
ysr@777 4417
ysr@777 4418 public:
ysr@777 4419 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
ysr@777 4420 G1ParScanThreadState* par_scan_state,
ysr@777 4421 RefToScanQueueSet* queues,
ysr@777 4422 ParallelTaskTerminator* terminator)
ysr@777 4423 : _g1h(g1h), _par_scan_state(par_scan_state),
ysr@777 4424 _queues(queues), _terminator(terminator) {}
ysr@777 4425
jcoomes@2217 4426 void do_void();
jcoomes@2217 4427
jcoomes@2217 4428 private:
jcoomes@2217 4429 inline bool offer_termination();
jcoomes@2217 4430 };
jcoomes@2217 4431
jcoomes@2217 4432 bool G1ParEvacuateFollowersClosure::offer_termination() {
jcoomes@2217 4433 G1ParScanThreadState* const pss = par_scan_state();
jcoomes@2217 4434 pss->start_term_time();
jcoomes@2217 4435 const bool res = terminator()->offer_termination();
jcoomes@2217 4436 pss->end_term_time();
jcoomes@2217 4437 return res;
jcoomes@2217 4438 }
jcoomes@2217 4439
jcoomes@2217 4440 void G1ParEvacuateFollowersClosure::do_void() {
jcoomes@2217 4441 StarTask stolen_task;
jcoomes@2217 4442 G1ParScanThreadState* const pss = par_scan_state();
jcoomes@2217 4443 pss->trim_queue();
jcoomes@2217 4444
jcoomes@2217 4445 do {
jcoomes@2217 4446 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
jcoomes@2217 4447 assert(pss->verify_task(stolen_task), "sanity");
jcoomes@2217 4448 if (stolen_task.is_narrow()) {
tonyp@2238 4449 pss->deal_with_reference((narrowOop*) stolen_task);
jcoomes@2217 4450 } else {
tonyp@2238 4451 pss->deal_with_reference((oop*) stolen_task);
jcoomes@2217 4452 }
tonyp@2238 4453
tonyp@2238 4454 // We've just processed a reference and we might have made
tonyp@2238 4455 // available new entries on the queues. So we have to make sure
tonyp@2238 4456 // we drain the queues as necessary.
ysr@777 4457 pss->trim_queue();
ysr@777 4458 }
jcoomes@2217 4459 } while (!offer_termination());
jcoomes@2217 4460
jcoomes@2217 4461 pss->retire_alloc_buffers();
jcoomes@2217 4462 }
ysr@777 4463
ysr@777 4464 class G1ParTask : public AbstractGangTask {
ysr@777 4465 protected:
ysr@777 4466 G1CollectedHeap* _g1h;
ysr@777 4467 RefToScanQueueSet *_queues;
ysr@777 4468 ParallelTaskTerminator _terminator;
ysr@1280 4469 int _n_workers;
ysr@777 4470
ysr@777 4471 Mutex _stats_lock;
ysr@777 4472 Mutex* stats_lock() { return &_stats_lock; }
ysr@777 4473
ysr@777 4474 size_t getNCards() {
ysr@777 4475 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
ysr@777 4476 / G1BlockOffsetSharedArray::N_bytes;
ysr@777 4477 }
ysr@777 4478
ysr@777 4479 public:
ysr@777 4480 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
ysr@777 4481 : AbstractGangTask("G1 collection"),
ysr@777 4482 _g1h(g1h),
ysr@777 4483 _queues(task_queues),
ysr@777 4484 _terminator(workers, _queues),
ysr@1280 4485 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
ysr@1280 4486 _n_workers(workers)
ysr@777 4487 {}
ysr@777 4488
ysr@777 4489 RefToScanQueueSet* queues() { return _queues; }
ysr@777 4490
ysr@777 4491 RefToScanQueue *work_queue(int i) {
ysr@777 4492 return queues()->queue(i);
ysr@777 4493 }
ysr@777 4494
ysr@777 4495 void work(int i) {
ysr@1280 4496 if (i >= _n_workers) return; // no work needed this round
tonyp@1966 4497
tonyp@1966 4498 double start_time_ms = os::elapsedTime() * 1000.0;
tonyp@1966 4499 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
tonyp@1966 4500
ysr@777 4501 ResourceMark rm;
ysr@777 4502 HandleMark hm;
ysr@777 4503
tonyp@961 4504 G1ParScanThreadState pss(_g1h, i);
tonyp@961 4505 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
tonyp@961 4506 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
tonyp@961 4507 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
ysr@777 4508
ysr@777 4509 pss.set_evac_closure(&scan_evac_cl);
ysr@777 4510 pss.set_evac_failure_closure(&evac_failure_cl);
ysr@777 4511 pss.set_partial_scan_closure(&partial_scan_cl);
ysr@777 4512
ysr@777 4513 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
ysr@777 4514 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
ysr@777 4515 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
iveresov@1696 4516 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
iveresov@1051 4517
ysr@777 4518 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
ysr@777 4519 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
ysr@777 4520 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
ysr@777 4521
ysr@777 4522 OopsInHeapRegionClosure *scan_root_cl;
ysr@777 4523 OopsInHeapRegionClosure *scan_perm_cl;
ysr@777 4524
tonyp@1794 4525 if (_g1h->g1_policy()->during_initial_mark_pause()) {
ysr@777 4526 scan_root_cl = &scan_mark_root_cl;
ysr@777 4527 scan_perm_cl = &scan_mark_perm_cl;
ysr@777 4528 } else {
ysr@777 4529 scan_root_cl = &only_scan_root_cl;
ysr@777 4530 scan_perm_cl = &only_scan_perm_cl;
ysr@777 4531 }
ysr@777 4532
ysr@777 4533 pss.start_strong_roots();
ysr@777 4534 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
ysr@777 4535 SharedHeap::SO_AllClasses,
ysr@777 4536 scan_root_cl,
iveresov@1696 4537 &push_heap_rs_cl,
ysr@777 4538 scan_perm_cl,
ysr@777 4539 i);
ysr@777 4540 pss.end_strong_roots();
johnc@3021 4541
ysr@777 4542 {
ysr@777 4543 double start = os::elapsedTime();
ysr@777 4544 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
ysr@777 4545 evac.do_void();
ysr@777 4546 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
ysr@777 4547 double term_ms = pss.term_time()*1000.0;
ysr@777 4548 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
tonyp@1966 4549 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
ysr@777 4550 }
tonyp@1717 4551 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
ysr@777 4552 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
ysr@777 4553
ysr@777 4554 // Clean up any par-expanded rem sets.
ysr@777 4555 HeapRegionRemSet::par_cleanup();
ysr@777 4556
ysr@777 4557 if (ParallelGCVerbose) {
jcoomes@2064 4558 MutexLocker x(stats_lock());
jcoomes@2064 4559 pss.print_termination_stats(i);
ysr@777 4560 }
ysr@777 4561
jcoomes@2217 4562 assert(pss.refs()->is_empty(), "should be empty");
tonyp@1966 4563 double end_time_ms = os::elapsedTime() * 1000.0;
tonyp@1966 4564 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
ysr@777 4565 }
ysr@777 4566 };
ysr@777 4567
ysr@777 4568 // *** Common G1 Evacuation Stuff
ysr@777 4569
jmasa@2188 4570 // This method is run in a GC worker.
jmasa@2188 4571
ysr@777 4572 void
ysr@777 4573 G1CollectedHeap::
ysr@777 4574 g1_process_strong_roots(bool collecting_perm_gen,
ysr@777 4575 SharedHeap::ScanningOption so,
ysr@777 4576 OopClosure* scan_non_heap_roots,
ysr@777 4577 OopsInHeapRegionClosure* scan_rs,
ysr@777 4578 OopsInGenClosure* scan_perm,
ysr@777 4579 int worker_i) {
ysr@777 4580 // First scan the strong roots, including the perm gen.
ysr@777 4581 double ext_roots_start = os::elapsedTime();
ysr@777 4582 double closure_app_time_sec = 0.0;
ysr@777 4583
ysr@777 4584 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
ysr@777 4585 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
ysr@777 4586 buf_scan_perm.set_generation(perm_gen());
ysr@777 4587
jrose@1424 4588 // Walk the code cache w/o buffering, because StarTask cannot handle
jrose@1424 4589 // unaligned oop locations.
jrose@1424 4590 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
jrose@1424 4591
jrose@1424 4592 process_strong_roots(false, // no scoping; this is parallel code
jrose@1424 4593 collecting_perm_gen, so,
ysr@777 4594 &buf_scan_non_heap_roots,
jrose@1424 4595 &eager_scan_code_roots,
ysr@777 4596 &buf_scan_perm);
johnc@1829 4597
johnc@3021 4598 // Now the ref_processor roots.
ysr@777 4599 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
johnc@2316 4600     // We need to treat the discovered reference lists as roots and
johnc@2316 4601     // keep the entries on them (which are added by the marking
johnc@2316 4602     // threads) live until they can be processed at the end of marking.
johnc@3021 4603 ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
johnc@3021 4604 }
johnc@3021 4605
johnc@3021 4606 // Finish up any enqueued closure apps (attributed as object copy time).
johnc@3021 4607 buf_scan_non_heap_roots.done();
johnc@3021 4608 buf_scan_perm.done();
johnc@3021 4609
johnc@3021 4610 double ext_roots_end = os::elapsedTime();
johnc@3021 4611
johnc@3021 4612 g1_policy()->reset_obj_copy_time(worker_i);
johnc@3021 4613 double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
johnc@3021 4614 buf_scan_non_heap_roots.closure_app_seconds();
johnc@3021 4615 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
johnc@3021 4616
johnc@3021 4617 double ext_root_time_ms =
johnc@3021 4618 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
johnc@3021 4619
johnc@3021 4620 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
johnc@3021 4621
johnc@3021 4622 // Scan strong roots in mark stack.
johnc@3021 4623 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
johnc@3021 4624 concurrent_mark()->oops_do(scan_non_heap_roots);
johnc@3021 4625 }
johnc@3021 4626 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
johnc@3021 4627 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
johnc@3021 4628
johnc@3021 4629 // Now scan the complement of the collection set.
johnc@3021 4630 if (scan_rs != NULL) {
johnc@3021 4631 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
johnc@3021 4632 }
johnc@3021 4633
ysr@777 4634 _process_strong_tasks->all_tasks_completed();
ysr@777 4635 }
ysr@777 4636
ysr@777 4637 void
ysr@777 4638 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
ysr@777 4639 OopClosure* non_root_closure) {
jrose@1424 4640 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
jrose@1424 4641 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
ysr@777 4642 }
ysr@777 4643
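// The copying phase of an evacuation pause: scan strong roots and
// remembered sets, evacuate the live objects in the collection set
// (in parallel if enabled), process weak roots, and then handle any
// evacuation failures and deferred remembered set updates.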
ysr@777 4644 void G1CollectedHeap::evacuate_collection_set() {
ysr@777 4645 set_evacuation_failed(false);
ysr@777 4646
ysr@777 4647 g1_rem_set()->prepare_for_oops_into_collection_set_do();
ysr@777 4648 concurrent_g1_refine()->set_use_cache(false);
johnc@1324 4649 concurrent_g1_refine()->clear_hot_cache_claimed_index();
johnc@1324 4650
ysr@777 4651 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
ysr@777 4652 set_par_threads(n_workers);
ysr@777 4653 G1ParTask g1_par_task(this, n_workers, _task_queues);
ysr@777 4654
ysr@777 4655 init_for_evac_failure(NULL);
ysr@777 4656
ysr@777 4657 rem_set()->prepare_for_younger_refs_iterate(true);
iveresov@1051 4658
iveresov@1051 4659 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
ysr@777 4660 double start_par = os::elapsedTime();
jmasa@2188 4661 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 4662 // The individual threads will set their evac-failure closures.
jrose@1424 4663 StrongRootsScope srs(this);
jcoomes@2064 4664 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
ysr@777 4665 workers()->run_task(&g1_par_task);
ysr@777 4666 } else {
jrose@1424 4667 StrongRootsScope srs(this);
ysr@777 4668 g1_par_task.work(0);
ysr@777 4669 }
ysr@777 4670
ysr@777 4671 double par_time = (os::elapsedTime() - start_par) * 1000.0;
ysr@777 4672 g1_policy()->record_par_time(par_time);
ysr@777 4673 set_par_threads(0);
johnc@2316 4674
johnc@2316 4675 // Weak root processing.
johnc@2316 4676 // Note: when JSR 292 is enabled and code blobs can contain
johnc@2316 4677 // non-perm oops then we will need to process the code blobs
johnc@2316 4678 // here too.
ysr@777 4679 {
ysr@777 4680 G1IsAliveClosure is_alive(this);
ysr@777 4681 G1KeepAliveClosure keep_alive(this);
ysr@777 4682 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
ysr@777 4683 }
tonyp@3028 4684 release_gc_alloc_regions();
ysr@777 4685 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
iveresov@1051 4686
johnc@1324 4687 concurrent_g1_refine()->clear_hot_cache();
ysr@777 4688 concurrent_g1_refine()->set_use_cache(true);
ysr@777 4689
ysr@777 4690 finalize_for_evac_failure();
ysr@777 4691
ysr@777 4692 // Must do this before removing self-forwarding pointers, which clears
ysr@777 4693 // the per-region evac-failure flags.
ysr@777 4694 concurrent_mark()->complete_marking_in_collection_set();
ysr@777 4695
ysr@777 4696 if (evacuation_failed()) {
ysr@777 4697 remove_self_forwarding_pointers();
ysr@777 4698 if (PrintGCDetails) {
tonyp@2074 4699 gclog_or_tty->print(" (to-space overflow)");
ysr@777 4700 } else if (PrintGC) {
ysr@777 4701 gclog_or_tty->print("--");
ysr@777 4702 }
ysr@777 4703 }
ysr@777 4704
iveresov@1051 4705 if (G1DeferredRSUpdate) {
iveresov@1051 4706 RedirtyLoggedCardTableEntryFastClosure redirty;
iveresov@1051 4707 dirty_card_queue_set().set_closure(&redirty);
iveresov@1051 4708 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
iveresov@1546 4709
iveresov@1546 4710 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
iveresov@1546 4711 dcq.merge_bufferlists(&dirty_card_queue_set());
iveresov@1051 4712 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
iveresov@1051 4713 }
ysr@777 4714 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
ysr@777 4715 }
ysr@777 4716
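// Called during cleanup: a non-young region that is in use but has no
// live data is freed (via free_humongous_region() if it is humongous);
// otherwise only its remembered set is given a chance to do cleanup work.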
tonyp@2493 4717 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
tonyp@2472 4718 size_t* pre_used,
tonyp@2472 4719 FreeRegionList* free_list,
tonyp@2472 4720 HumongousRegionSet* humongous_proxy_set,
tonyp@2493 4721 HRRSCleanupTask* hrrs_cleanup_task,
tonyp@2472 4722 bool par) {
tonyp@2472 4723 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
tonyp@2472 4724 if (hr->isHumongous()) {
tonyp@2472 4725 assert(hr->startsHumongous(), "we should only see starts humongous");
tonyp@2472 4726 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
tonyp@2472 4727 } else {
tonyp@2472 4728 free_region(hr, pre_used, free_list, par);
tonyp@2472 4729 }
tonyp@2493 4730 } else {
tonyp@2493 4731 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
tonyp@2472 4732 }
ysr@777 4733 }
ysr@777 4734
tonyp@2472 4735 void G1CollectedHeap::free_region(HeapRegion* hr,
tonyp@2472 4736 size_t* pre_used,
tonyp@2472 4737 FreeRegionList* free_list,
ysr@777 4738 bool par) {
tonyp@2472 4739 assert(!hr->isHumongous(), "this is only for non-humongous regions");
tonyp@2472 4740 assert(!hr->is_empty(), "the region should not be empty");
tonyp@2472 4741 assert(free_list != NULL, "pre-condition");
tonyp@2472 4742
tonyp@2472 4743 *pre_used += hr->used();
tonyp@2472 4744 hr->hr_clear(par, true /* clear_space */);
tonyp@2714 4745 free_list->add_as_head(hr);
tonyp@2472 4746 }
tonyp@2472 4747
tonyp@2472 4748 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
tonyp@2472 4749 size_t* pre_used,
tonyp@2472 4750 FreeRegionList* free_list,
tonyp@2472 4751 HumongousRegionSet* humongous_proxy_set,
tonyp@2472 4752 bool par) {
tonyp@2472 4753 assert(hr->startsHumongous(), "this is only for starts humongous regions");
tonyp@2472 4754 assert(free_list != NULL, "pre-condition");
tonyp@2472 4755 assert(humongous_proxy_set != NULL, "pre-condition");
tonyp@2472 4756
tonyp@2472 4757 size_t hr_used = hr->used();
tonyp@2472 4758 size_t hr_capacity = hr->capacity();
tonyp@2472 4759 size_t hr_pre_used = 0;
tonyp@2472 4760 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
tonyp@2472 4761 hr->set_notHumongous();
tonyp@2472 4762 free_region(hr, &hr_pre_used, free_list, par);
tonyp@2472 4763
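  // Also free the "continues humongous" regions that immediately follow
  // the "starts humongous" region.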
tonyp@2963 4764 size_t i = hr->hrs_index() + 1;
tonyp@2472 4765 size_t num = 1;
tonyp@2963 4766 while (i < n_regions()) {
tonyp@2963 4767 HeapRegion* curr_hr = region_at(i);
tonyp@2472 4768 if (!curr_hr->continuesHumongous()) {
tonyp@2472 4769 break;
ysr@777 4770 }
tonyp@2472 4771 curr_hr->set_notHumongous();
tonyp@2472 4772 free_region(curr_hr, &hr_pre_used, free_list, par);
tonyp@2472 4773 num += 1;
tonyp@2472 4774 i += 1;
tonyp@2472 4775 }
tonyp@2472 4776 assert(hr_pre_used == hr_used,
tonyp@2472 4777 err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
tonyp@2472 4778 "should be the same", hr_pre_used, hr_used));
tonyp@2472 4779 *pre_used += hr_pre_used;
ysr@777 4780 }
ysr@777 4781
tonyp@2472 4782 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
tonyp@2472 4783 FreeRegionList* free_list,
tonyp@2472 4784 HumongousRegionSet* humongous_proxy_set,
tonyp@2472 4785 bool par) {
tonyp@2472 4786 if (pre_used > 0) {
tonyp@2472 4787 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
ysr@777 4788 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
tonyp@2472 4789 assert(_summary_bytes_used >= pre_used,
tonyp@2472 4790 err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
tonyp@2472 4791 "should be >= pre_used: "SIZE_FORMAT,
tonyp@2472 4792 _summary_bytes_used, pre_used));
ysr@777 4793 _summary_bytes_used -= pre_used;
tonyp@2472 4794 }
tonyp@2472 4795 if (free_list != NULL && !free_list->is_empty()) {
tonyp@2472 4796 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2714 4797 _free_list.add_as_head(free_list);
tonyp@2472 4798 }
tonyp@2472 4799 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
tonyp@2472 4800 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
tonyp@2472 4801 _humongous_set.update_from_proxy(humongous_proxy_set);
ysr@777 4802 }
ysr@777 4803 }
ysr@777 4804
apetrusenko@1231 4805 class G1ParCleanupCTTask : public AbstractGangTask {
apetrusenko@1231 4806 CardTableModRefBS* _ct_bs;
apetrusenko@1231 4807 G1CollectedHeap* _g1h;
apetrusenko@1375 4808 HeapRegion* volatile _su_head;
apetrusenko@1231 4809 public:
apetrusenko@1231 4810 G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
tonyp@3028 4811 G1CollectedHeap* g1h) :
apetrusenko@1231 4812 AbstractGangTask("G1 Par Cleanup CT Task"),
tonyp@3028 4813 _ct_bs(ct_bs), _g1h(g1h) { }
apetrusenko@1231 4814
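  // Drain the global list of regions with dirty cards, clearing the
  // card table for each region popped.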
apetrusenko@1231 4815 void work(int i) {
apetrusenko@1231 4816 HeapRegion* r;
apetrusenko@1231 4817     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
apetrusenko@1231 4818 clear_cards(r);
apetrusenko@1231 4819 }
apetrusenko@1375 4820 }
apetrusenko@1375 4821
apetrusenko@1231 4822 void clear_cards(HeapRegion* r) {
tonyp@3028 4823 // Cards of the survivors should have already been dirtied.
johnc@1829 4824 if (!r->is_survivor()) {
apetrusenko@1231 4825 _ct_bs->clear(MemRegion(r->bottom(), r->end()));
apetrusenko@1231 4826 }
apetrusenko@1231 4827 }
apetrusenko@1231 4828 };
apetrusenko@1231 4829
apetrusenko@1375 4830 #ifndef PRODUCT
apetrusenko@1375 4831 class G1VerifyCardTableCleanup: public HeapRegionClosure {
tonyp@2849 4832 G1CollectedHeap* _g1h;
apetrusenko@1375 4833 CardTableModRefBS* _ct_bs;
apetrusenko@1375 4834 public:
tonyp@2849 4835 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
tonyp@2849 4836 : _g1h(g1h), _ct_bs(ct_bs) { }
tonyp@2715 4837 virtual bool doHeapRegion(HeapRegion* r) {
johnc@1829 4838 if (r->is_survivor()) {
tonyp@2849 4839 _g1h->verify_dirty_region(r);
apetrusenko@1375 4840 } else {
tonyp@2849 4841 _g1h->verify_not_dirty_region(r);
apetrusenko@1375 4842 }
apetrusenko@1375 4843 return false;
apetrusenko@1375 4844 }
apetrusenko@1375 4845 };
tonyp@2715 4846
tonyp@2849 4847 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
tonyp@2849 4848 // All of the region should be clean.
tonyp@2849 4849 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
tonyp@2849 4850 MemRegion mr(hr->bottom(), hr->end());
tonyp@2849 4851 ct_bs->verify_not_dirty_region(mr);
tonyp@2849 4852 }
tonyp@2849 4853
tonyp@2849 4854 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
tonyp@2849 4855 // We cannot guarantee that [bottom(),end()] is dirty. Threads
tonyp@2849 4856 // dirty allocated blocks as they allocate them. The thread that
tonyp@2849 4857 // retires each region and replaces it with a new one will do a
tonyp@2849 4858 // maximal allocation to fill in [pre_dummy_top(),end()] but will
tonyp@2849 4859 // not dirty that area (one less thing to have to do while holding
tonyp@2849 4860 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
tonyp@2849 4861 // is dirty.
tonyp@2849 4862 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
tonyp@2849 4863 MemRegion mr(hr->bottom(), hr->pre_dummy_top());
tonyp@2849 4864 ct_bs->verify_dirty_region(mr);
tonyp@2849 4865 }
tonyp@2849 4866
tonyp@2715 4867 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
tonyp@2849 4868 CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
tonyp@2715 4869 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
tonyp@2849 4870 verify_dirty_region(hr);
tonyp@2715 4871 }
tonyp@2715 4872 }
tonyp@2715 4873
tonyp@2715 4874 void G1CollectedHeap::verify_dirty_young_regions() {
tonyp@2715 4875 verify_dirty_young_list(_young_list->first_region());
tonyp@2715 4876 verify_dirty_young_list(_young_list->first_survivor_region());
tonyp@2715 4877 }
apetrusenko@1375 4878 #endif
apetrusenko@1375 4879
ysr@777 4880 void G1CollectedHeap::cleanUpCardTable() {
ysr@777 4881 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
ysr@777 4882 double start = os::elapsedTime();
ysr@777 4883
apetrusenko@1231 4884 // Iterate over the dirty cards region list.
tonyp@3028 4885 G1ParCleanupCTTask cleanup_task(ct_bs, this);
johnc@1829 4886
apetrusenko@1231 4887 if (ParallelGCThreads > 0) {
apetrusenko@1231 4888 set_par_threads(workers()->total_workers());
apetrusenko@1231 4889 workers()->run_task(&cleanup_task);
apetrusenko@1231 4890 set_par_threads(0);
apetrusenko@1231 4891 } else {
apetrusenko@1231 4892 while (_dirty_cards_region_list) {
apetrusenko@1231 4893 HeapRegion* r = _dirty_cards_region_list;
apetrusenko@1231 4894 cleanup_task.clear_cards(r);
apetrusenko@1231 4895 _dirty_cards_region_list = r->get_next_dirty_cards_region();
apetrusenko@1231 4896 if (_dirty_cards_region_list == r) {
apetrusenko@1231 4897 // The last region.
apetrusenko@1231 4898 _dirty_cards_region_list = NULL;
apetrusenko@1231 4899 }
apetrusenko@1231 4900 r->set_next_dirty_cards_region(NULL);
apetrusenko@1231 4901 }
apetrusenko@1375 4902 }
johnc@1829 4903
ysr@777 4904 double elapsed = os::elapsedTime() - start;
ysr@777 4905 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
apetrusenko@1375 4906 #ifndef PRODUCT
apetrusenko@1375 4907 if (G1VerifyCTCleanup || VerifyAfterGC) {
tonyp@2849 4908 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
apetrusenko@1375 4909 heap_region_iterate(&cleanup_verifier);
apetrusenko@1375 4910 }
apetrusenko@1375 4911 #endif
ysr@777 4912 }
ysr@777 4913
ysr@777 4914 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
tonyp@2472 4915 size_t pre_used = 0;
tonyp@2472 4916 FreeRegionList local_free_list("Local List for CSet Freeing");
tonyp@2472 4917
ysr@777 4918 double young_time_ms = 0.0;
ysr@777 4919 double non_young_time_ms = 0.0;
ysr@777 4920
johnc@1829 4921 // Since the collection set is a superset of the young list,
johnc@1829 4922 // all we need to do to clear the young list is clear its
johnc@1829 4923 // head and length, and unlink any young regions in the code below.
johnc@1829 4924 _young_list->clear();
johnc@1829 4925
ysr@777 4926 G1CollectorPolicy* policy = g1_policy();
ysr@777 4927
ysr@777 4928 double start_sec = os::elapsedTime();
ysr@777 4929 bool non_young = true;
ysr@777 4930
ysr@777 4931 HeapRegion* cur = cs_head;
ysr@777 4932 int age_bound = -1;
ysr@777 4933 size_t rs_lengths = 0;
ysr@777 4934
ysr@777 4935 while (cur != NULL) {
tonyp@2643 4936 assert(!is_on_master_free_list(cur), "sanity");
tonyp@2472 4937
ysr@777 4938 if (non_young) {
ysr@777 4939 if (cur->is_young()) {
ysr@777 4940 double end_sec = os::elapsedTime();
ysr@777 4941 double elapsed_ms = (end_sec - start_sec) * 1000.0;
ysr@777 4942 non_young_time_ms += elapsed_ms;
ysr@777 4943
ysr@777 4944 start_sec = os::elapsedTime();
ysr@777 4945 non_young = false;
ysr@777 4946 }
ysr@777 4947 } else {
tonyp@2472 4948 double end_sec = os::elapsedTime();
tonyp@2472 4949 double elapsed_ms = (end_sec - start_sec) * 1000.0;
tonyp@2472 4950 young_time_ms += elapsed_ms;
tonyp@2472 4951
tonyp@2472 4952 start_sec = os::elapsedTime();
tonyp@2472 4953 non_young = true;
ysr@777 4954 }
ysr@777 4955
ysr@777 4956 rs_lengths += cur->rem_set()->occupied();
ysr@777 4957
ysr@777 4958 HeapRegion* next = cur->next_in_collection_set();
ysr@777 4959 assert(cur->in_collection_set(), "bad CS");
ysr@777 4960 cur->set_next_in_collection_set(NULL);
ysr@777 4961 cur->set_in_collection_set(false);
ysr@777 4962
ysr@777 4963 if (cur->is_young()) {
ysr@777 4964 int index = cur->young_index_in_cset();
ysr@777 4965 guarantee( index != -1, "invariant" );
ysr@777 4966 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
ysr@777 4967 size_t words_survived = _surviving_young_words[index];
ysr@777 4968 cur->record_surv_words_in_group(words_survived);
johnc@1829 4969
johnc@1829 4970 // At this point we have 'popped' cur from the collection set
johnc@1829 4971 // (linked via next_in_collection_set()) but it is still in the
johnc@1829 4972 // young list (linked via next_young_region()). Clear the
johnc@1829 4973 // _next_young_region field.
johnc@1829 4974 cur->set_next_young_region(NULL);
ysr@777 4975 } else {
ysr@777 4976 int index = cur->young_index_in_cset();
ysr@777 4977 guarantee( index == -1, "invariant" );
ysr@777 4978 }
ysr@777 4979
ysr@777 4980 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
ysr@777 4981 (!cur->is_young() && cur->young_index_in_cset() == -1),
ysr@777 4982 "invariant" );
ysr@777 4983
ysr@777 4984 if (!cur->evacuation_failed()) {
ysr@777 4985 // Evacuation succeeded for this (non-empty) region, so free it.
tonyp@2472 4986 assert(!cur->is_empty(), "Should not have empty regions in a CS.");
tonyp@2472 4987 free_region(cur, &pre_used, &local_free_list, false /* par */);
ysr@777 4988 } else {
ysr@777 4989 cur->uninstall_surv_rate_group();
ysr@777 4990 if (cur->is_young())
ysr@777 4991 cur->set_young_index_in_cset(-1);
ysr@777 4992 cur->set_not_young();
ysr@777 4993 cur->set_evacuation_failed(false);
ysr@777 4994 }
ysr@777 4995 cur = next;
ysr@777 4996 }
ysr@777 4997
ysr@777 4998 policy->record_max_rs_lengths(rs_lengths);
ysr@777 4999 policy->cset_regions_freed();
ysr@777 5000
ysr@777 5001 double end_sec = os::elapsedTime();
ysr@777 5002 double elapsed_ms = (end_sec - start_sec) * 1000.0;
ysr@777 5003 if (non_young)
ysr@777 5004 non_young_time_ms += elapsed_ms;
ysr@777 5005 else
ysr@777 5006 young_time_ms += elapsed_ms;
ysr@777 5007
tonyp@2472 5008 update_sets_after_freeing_regions(pre_used, &local_free_list,
tonyp@2472 5009 NULL /* humongous_proxy_set */,
tonyp@2472 5010 false /* par */);
ysr@777 5011 policy->record_young_free_cset_time_ms(young_time_ms);
ysr@777 5012 policy->record_non_young_free_cset_time_ms(non_young_time_ms);
ysr@777 5013 }
ysr@777 5014
johnc@1829 5015 // This routine is similar to the above but does not record
johnc@1829 5016 // any policy statistics or update free lists; we are abandoning
johnc@1829 5017 // the current incremental collection set in preparation for a
johnc@1829 5018 // full collection. After the full GC we will start to build up
johnc@1829 5019 // the incremental collection set again.
johnc@1829 5020 // This is only called when we're doing a full collection
johnc@1829 5021 // and is immediately followed by the tearing down of the young list.
johnc@1829 5022
johnc@1829 5023 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
johnc@1829 5024 HeapRegion* cur = cs_head;
johnc@1829 5025
johnc@1829 5026 while (cur != NULL) {
johnc@1829 5027 HeapRegion* next = cur->next_in_collection_set();
johnc@1829 5028 assert(cur->in_collection_set(), "bad CS");
johnc@1829 5029 cur->set_next_in_collection_set(NULL);
johnc@1829 5030 cur->set_in_collection_set(false);
johnc@1829 5031 cur->set_young_index_in_cset(-1);
johnc@1829 5032 cur = next;
johnc@1829 5033 }
johnc@1829 5034 }
johnc@1829 5035
tonyp@2472 5036 void G1CollectedHeap::set_free_regions_coming() {
tonyp@2472 5037 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 5038 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
tonyp@2472 5039 "setting free regions coming");
tonyp@2472 5040 }
tonyp@2472 5041
tonyp@2472 5042 assert(!free_regions_coming(), "pre-condition");
tonyp@2472 5043 _free_regions_coming = true;
tonyp@2472 5044 }
tonyp@2472 5045
tonyp@2472 5046 void G1CollectedHeap::reset_free_regions_coming() {
tonyp@2472 5047 {
tonyp@2472 5048 assert(free_regions_coming(), "pre-condition");
tonyp@2472 5049 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2472 5050 _free_regions_coming = false;
tonyp@2472 5051 SecondaryFreeList_lock->notify_all();
tonyp@2472 5052 }
tonyp@2472 5053
tonyp@2472 5054 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 5055 gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
tonyp@2472 5056 "reset free regions coming");
tonyp@2472 5057 }
tonyp@2472 5058 }
tonyp@2472 5059
tonyp@2472 5060 void G1CollectedHeap::wait_while_free_regions_coming() {
tonyp@2472 5061 // Most of the time we won't have to wait, so let's do a quick test
tonyp@2472 5062 // first before we take the lock.
tonyp@2472 5063 if (!free_regions_coming()) {
tonyp@2472 5064 return;
tonyp@2472 5065 }
tonyp@2472 5066
tonyp@2472 5067 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 5068 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
tonyp@2472 5069 "waiting for free regions");
tonyp@2472 5070 }
tonyp@2472 5071
tonyp@2472 5072 {
tonyp@2472 5073 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2472 5074 while (free_regions_coming()) {
tonyp@2472 5075 SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
ysr@777 5076 }
ysr@777 5077 }
tonyp@2472 5078
tonyp@2472 5079 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 5080 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
tonyp@2472 5081 "done waiting for free regions");
tonyp@2472 5082 }
ysr@777 5083 }
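
// The three methods above implement a simple handshake: the concurrent
// cleanup thread sets a flag while it is still handing regions over to the
// secondary free list, clears it and notifies under SecondaryFreeList_lock
// when it is done, and anyone who needs those regions checks the flag
// without the lock first and only blocks on the monitor when it is set.
// Below is a stand-alone analogue of the same protocol using standard C++
// primitives instead of HotSpot's Mutex / MutexLockerEx (the class and
// method names are hypothetical).
#if 0 // illustrative only -- not compiled
#include <atomic>
#include <condition_variable>
#include <mutex>

class FreeRegionsComingFlag {
  std::mutex              _lock;
  std::condition_variable _cv;
  std::atomic<bool>       _coming;
public:
  FreeRegionsComingFlag() : _coming(false) { }

  // Cleanup thread: about to start publishing regions.
  void set() { _coming.store(true); }

  // Cleanup thread: done publishing; wake up any waiters.
  void reset() {
    {
      std::lock_guard<std::mutex> x(_lock);
      _coming.store(false);
    }
    _cv.notify_all();
  }

  // Everyone else: quick unlocked test first, block on the monitor only
  // when the cleanup thread really is still at work.
  void wait_while_coming() {
    if (!_coming.load()) {
      return;
    }
    std::unique_lock<std::mutex> x(_lock);
    _cv.wait(x, [this] { return !_coming.load(); });
  }
};
#endif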
ysr@777 5084
ysr@777 5085 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
ysr@777 5086 assert(heap_lock_held_for_gc(),
ysr@777 5087 "the heap lock should already be held by or for this thread");
ysr@777 5088 _young_list->push_region(hr);
ysr@777 5089 g1_policy()->set_region_short_lived(hr);
ysr@777 5090 }
ysr@777 5091
ysr@777 5092 class NoYoungRegionsClosure: public HeapRegionClosure {
ysr@777 5093 private:
ysr@777 5094 bool _success;
ysr@777 5095 public:
ysr@777 5096 NoYoungRegionsClosure() : _success(true) { }
ysr@777 5097 bool doHeapRegion(HeapRegion* r) {
ysr@777 5098 if (r->is_young()) {
ysr@777 5099 gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
ysr@777 5100 r->bottom(), r->end());
ysr@777 5101 _success = false;
ysr@777 5102 }
ysr@777 5103 return false;
ysr@777 5104 }
ysr@777 5105 bool success() { return _success; }
ysr@777 5106 };
ysr@777 5107
johnc@1829 5108 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
johnc@1829 5109 bool ret = _young_list->check_list_empty(check_sample);
johnc@1829 5110
johnc@1829 5111 if (check_heap) {
ysr@777 5112 NoYoungRegionsClosure closure;
ysr@777 5113 heap_region_iterate(&closure);
ysr@777 5114 ret = ret && closure.success();
ysr@777 5115 }
ysr@777 5116
ysr@777 5117 return ret;
ysr@777 5118 }
ysr@777 5119
ysr@777 5120 void G1CollectedHeap::empty_young_list() {
ysr@777 5121 assert(heap_lock_held_for_gc(),
ysr@777 5122 "the heap lock should already be held by or for this thread");
ysr@777 5123
ysr@777 5124 _young_list->empty_list();
ysr@777 5125 }
ysr@777 5126
ysr@777 5127 // Done at the start of full GC.
ysr@777 5128 void G1CollectedHeap::tear_down_region_lists() {
tonyp@2472 5129 _free_list.remove_all();
ysr@777 5130 }
ysr@777 5131
ysr@777 5132 class RegionResetter: public HeapRegionClosure {
tonyp@2472 5133 G1CollectedHeap* _g1h;
tonyp@2472 5134 FreeRegionList _local_free_list;
tonyp@2472 5135
ysr@777 5136 public:
tonyp@2472 5137 RegionResetter() : _g1h(G1CollectedHeap::heap()),
tonyp@2472 5138 _local_free_list("Local Free List for RegionResetter") { }
tonyp@2472 5139
ysr@777 5140 bool doHeapRegion(HeapRegion* r) {
ysr@777 5141 if (r->continuesHumongous()) return false;
ysr@777 5142 if (r->top() > r->bottom()) {
ysr@777 5143 if (r->top() < r->end()) {
ysr@777 5144 Copy::fill_to_words(r->top(),
ysr@777 5145 pointer_delta(r->end(), r->top()));
ysr@777 5146 }
ysr@777 5147 } else {
ysr@777 5148 assert(r->is_empty(), "tautology");
tonyp@2472 5149 _local_free_list.add_as_tail(r);
ysr@777 5150 }
ysr@777 5151 return false;
ysr@777 5152 }
ysr@777 5153
tonyp@2472 5154 void update_free_lists() {
tonyp@2472 5155 _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
tonyp@2472 5156 false /* par */);
tonyp@2472 5157 }
ysr@777 5158 };
ysr@777 5159
ysr@777 5160 // Done at the end of full GC.
ysr@777 5161 void G1CollectedHeap::rebuild_region_lists() {
ysr@777 5162 // This needs to go at the end of the full GC.
ysr@777 5163 RegionResetter rs;
ysr@777 5164 heap_region_iterate(&rs);
tonyp@2472 5165 rs.update_free_lists();
ysr@777 5166 }
ysr@777 5167
ysr@777 5168 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
ysr@777 5169 _refine_cte_cl->set_concurrent(concurrent);
ysr@777 5170 }
ysr@777 5171
ysr@777 5172 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
ysr@777 5173 HeapRegion* hr = heap_region_containing(p);
ysr@777 5174 if (hr == NULL) {
ysr@777 5175 return is_in_permanent(p);
ysr@777 5176 } else {
ysr@777 5177 return hr->is_in(p);
ysr@777 5178 }
ysr@777 5179 }
tonyp@2472 5180
tonyp@3028 5181 // Methods for the mutator alloc region
tonyp@3028 5182
tonyp@2715 5183 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
tonyp@2715 5184 bool force) {
tonyp@2715 5185 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
tonyp@2715 5186 assert(!force || g1_policy()->can_expand_young_list(),
tonyp@2715 5187 "if force is true we should be able to expand the young list");
tonyp@2975 5188 bool young_list_full = g1_policy()->is_young_list_full();
tonyp@2975 5189 if (force || !young_list_full) {
tonyp@2715 5190 HeapRegion* new_alloc_region = new_region(word_size,
tonyp@2715 5191 false /* do_expand */);
tonyp@2715 5192 if (new_alloc_region != NULL) {
tonyp@2715 5193 g1_policy()->update_region_num(true /* next_is_young */);
tonyp@2715 5194 set_region_short_lived_locked(new_alloc_region);
tonyp@2975 5195 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
jmasa@2821 5196 g1mm()->update_eden_counters();
tonyp@2715 5197 return new_alloc_region;
tonyp@2715 5198 }
tonyp@2715 5199 }
tonyp@2715 5200 return NULL;
tonyp@2715 5201 }
tonyp@2715 5202
tonyp@2715 5203 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
tonyp@2715 5204 size_t allocated_bytes) {
tonyp@2715 5205 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
tonyp@2715 5206 assert(alloc_region->is_young(), "all mutator alloc regions should be young");
tonyp@2715 5207
tonyp@2715 5208 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
tonyp@2715 5209 _summary_bytes_used += allocated_bytes;
tonyp@2975 5210 _hr_printer.retire(alloc_region);
tonyp@2715 5211 }
tonyp@2715 5212
tonyp@2715 5213 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
tonyp@2715 5214 bool force) {
tonyp@2715 5215 return _g1h->new_mutator_alloc_region(word_size, force);
tonyp@2715 5216 }
tonyp@2715 5217
tonyp@2715 5218 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
tonyp@2715 5219 size_t allocated_bytes) {
tonyp@2715 5220 _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
tonyp@2715 5221 }
tonyp@2715 5222
tonyp@3028 5223 // Methods for the GC alloc regions
tonyp@3028 5224
tonyp@3028 5225 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
tonyp@3028 5226 size_t count,
tonyp@3028 5227 GCAllocPurpose ap) {
tonyp@3028 5228 assert(FreeList_lock->owned_by_self(), "pre-condition");
tonyp@3028 5229
tonyp@3028 5230 if (count < g1_policy()->max_regions(ap)) {
tonyp@3028 5231 HeapRegion* new_alloc_region = new_region(word_size,
tonyp@3028 5232 true /* do_expand */);
tonyp@3028 5233 if (new_alloc_region != NULL) {
tonyp@3028 5234 // We really only need to do this for old regions given that we
tonyp@3028 5235 // should never scan survivors. But it doesn't hurt to do it
tonyp@3028 5236 // for survivors too.
tonyp@3028 5237 new_alloc_region->set_saved_mark();
tonyp@3028 5238 if (ap == GCAllocForSurvived) {
tonyp@3028 5239 new_alloc_region->set_survivor();
tonyp@3028 5240 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
tonyp@3028 5241 } else {
tonyp@3028 5242 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
tonyp@3028 5243 }
tonyp@3028 5244 return new_alloc_region;
tonyp@3028 5245 } else {
tonyp@3028 5246 g1_policy()->note_alloc_region_limit_reached(ap);
tonyp@3028 5247 }
tonyp@3028 5248 }
tonyp@3028 5249 return NULL;
tonyp@3028 5250 }
tonyp@3028 5251
tonyp@3028 5252 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
tonyp@3028 5253 size_t allocated_bytes,
tonyp@3028 5254 GCAllocPurpose ap) {
tonyp@3028 5255 alloc_region->note_end_of_copying();
tonyp@3028 5256 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
tonyp@3028 5257 if (ap == GCAllocForSurvived) {
tonyp@3028 5258 young_list()->add_survivor_region(alloc_region);
tonyp@3028 5259 }
tonyp@3028 5260 _hr_printer.retire(alloc_region);
tonyp@3028 5261 }
tonyp@3028 5262
tonyp@3028 5263 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
tonyp@3028 5264 bool force) {
tonyp@3028 5265 assert(!force, "not supported for GC alloc regions");
tonyp@3028 5266 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
tonyp@3028 5267 }
tonyp@3028 5268
tonyp@3028 5269 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
tonyp@3028 5270 size_t allocated_bytes) {
tonyp@3028 5271 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
tonyp@3028 5272 GCAllocForSurvived);
tonyp@3028 5273 }
tonyp@3028 5274
tonyp@3028 5275 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
tonyp@3028 5276 bool force) {
tonyp@3028 5277 assert(!force, "not supported for GC alloc regions");
tonyp@3028 5278 return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
tonyp@3028 5279 }
tonyp@3028 5280
tonyp@3028 5281 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
tonyp@3028 5282 size_t allocated_bytes) {
tonyp@3028 5283 _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
tonyp@3028 5284 GCAllocForTenured);
tonyp@3028 5285 }
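
// The allocate_new_region() / retire_region() overrides above plug
// per-purpose policy (mutator, survivor, old) into a common alloc-region
// wrapper: the wrapper asks its subclass for a fresh region when the
// current one is exhausted and hands the old one back through
// retire_region(). A much simplified, hypothetical rendering of that
// protocol is sketched below; the real wrapper class lives in its own
// header and carries more state than this.
#if 0 // illustrative only -- not compiled
class SketchAllocRegion {
protected:
  HeapRegion* _current;       // region we are currently allocating out of
  size_t      _used_bytes;    // bytes handed out from _current so far

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(HeapRegion* alloc_region,
                             size_t allocated_bytes) = 0;

  // Retire the current region, if any, and try to replace it with a new
  // one obtained from the subclass.
  HeapRegion* replace_current(size_t word_size, bool force) {
    if (_current != NULL) {
      retire_region(_current, _used_bytes);
      _current = NULL;
      _used_bytes = 0;
    }
    _current = allocate_new_region(word_size, force);
    return _current;
  }
};
#endif
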
tonyp@2715 5286 // Heap region set verification
tonyp@2715 5287
tonyp@2472 5288 class VerifyRegionListsClosure : public HeapRegionClosure {
tonyp@2472 5289 private:
tonyp@2472 5290 HumongousRegionSet* _humongous_set;
tonyp@2472 5291 FreeRegionList* _free_list;
tonyp@2472 5292 size_t _region_count;
tonyp@2472 5293
tonyp@2472 5294 public:
tonyp@2472 5295 VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
tonyp@2472 5296 FreeRegionList* free_list) :
tonyp@2472 5297 _humongous_set(humongous_set), _free_list(free_list),
tonyp@2472 5298 _region_count(0) { }
tonyp@2472 5299
tonyp@2472 5300 size_t region_count() { return _region_count; }
tonyp@2472 5301
tonyp@2472 5302 bool doHeapRegion(HeapRegion* hr) {
tonyp@2472 5303 _region_count += 1;
tonyp@2472 5304
tonyp@2472 5305 if (hr->continuesHumongous()) {
tonyp@2472 5306 return false;
tonyp@2472 5307 }
tonyp@2472 5308
tonyp@2472 5309 if (hr->is_young()) {
tonyp@2472 5310 // TODO
tonyp@2472 5311 } else if (hr->startsHumongous()) {
tonyp@2472 5312 _humongous_set->verify_next_region(hr);
tonyp@2472 5313 } else if (hr->is_empty()) {
tonyp@2472 5314 _free_list->verify_next_region(hr);
tonyp@2472 5315 }
tonyp@2472 5316 return false;
tonyp@2472 5317 }
tonyp@2472 5318 };
tonyp@2472 5319
tonyp@2963 5320 HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
tonyp@2963 5321 HeapWord* bottom) {
tonyp@2963 5322 HeapWord* end = bottom + HeapRegion::GrainWords;
tonyp@2963 5323 MemRegion mr(bottom, end);
tonyp@2963 5324 assert(_g1_reserved.contains(mr), "invariant");
tonyp@2963 5325 // This might return NULL if the allocation fails
tonyp@2963 5326 return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
tonyp@2963 5327 }
tonyp@2963 5328
tonyp@2472 5329 void G1CollectedHeap::verify_region_sets() {
tonyp@2472 5330 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
tonyp@2472 5331
tonyp@2472 5332 // First, check the explicit lists.
tonyp@2472 5333 _free_list.verify();
tonyp@2472 5334 {
tonyp@2472 5335 // Given that a concurrent operation might be adding regions to
tonyp@2472 5336 // the secondary free list we have to take the lock before
tonyp@2472 5337 // verifying it.
tonyp@2472 5338 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2472 5339 _secondary_free_list.verify();
tonyp@2472 5340 }
tonyp@2472 5341 _humongous_set.verify();
tonyp@2472 5342
tonyp@2472 5343 // If a concurrent region freeing operation is in progress it will
tonyp@2472 5344 // be difficult to correctly attribute any free regions we come
tonyp@2472 5345 // across to the correct free list given that they might belong to
tonyp@2472 5346 // one of several (free_list, secondary_free_list, any local lists,
tonyp@2472 5347 // etc.). So, if that's the case we will skip the rest of the
tonyp@2472 5348 // verification operation. The alternative, waiting for the concurrent
tonyp@2472 5349 // operation to complete, would have a non-trivial effect on the GC's
tonyp@2472 5350 // operation (no concurrent operation will last longer than the
tonyp@2472 5351 // interval between two calls to verification) and might hide
tonyp@2472 5352 // any issues that we would like to catch during testing.
tonyp@2472 5353 if (free_regions_coming()) {
tonyp@2472 5354 return;
tonyp@2472 5355 }
tonyp@2472 5356
tonyp@2643 5357 // Make sure we append the secondary_free_list on the free_list so
tonyp@2643 5358 // that all free regions we will come across can be safely
tonyp@2643 5359 // attributed to the free_list.
tonyp@2643 5360 append_secondary_free_list_if_not_empty_with_lock();
tonyp@2472 5361
tonyp@2472 5362 // Finally, make sure that the region accounting in the lists is
tonyp@2472 5363 // consistent with what we see in the heap.
tonyp@2472 5364 _humongous_set.verify_start();
tonyp@2472 5365 _free_list.verify_start();
tonyp@2472 5366
tonyp@2472 5367 VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
tonyp@2472 5368 heap_region_iterate(&cl);
tonyp@2472 5369
tonyp@2472 5370 _humongous_set.verify_end();
tonyp@2472 5371 _free_list.verify_end();
ysr@777 5372 }
