/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/vmThread.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

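// Editorial note (hedged): this threshold is filled in during heap
// initialization; in this vintage of G1 it is typically set to half a
// region (HeapRegion::GrainWords / 2), and isHumongous(word_size)
// treats any request of at least this many words as humongous.
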
// Turn this on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define YOUNG_LIST_VERBOSE 0
// CURRENT STATUS
// This file is under construction. Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the Heap_lock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to allocation from within a TLAB, which is not part of this
// interface: it is done by the clients of this interface.)

// Local to this file.

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.
    assert(!oops_into_cset, "should be");

    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
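
// How this closure gets wired up (a sketch inferred from this file):
// an instance is registered as the closure of the global dirty card
// queue set, along the lines of
//
//   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
//
// which is exactly the registration that check_ct_logs_at_safepoint()
// below restores after its verification pass.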

class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};

class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};

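// Editorial note (an assumption about CardTableModRefBS's card
// values): -1 is the "clean" card value and 0 is the "dirty" one, so
// the raw writes above (*card_ptr = -1 and *card_ptr = 0) clean and
// re-dirty a card respectively; the fast closure spells the latter
// with the named constant dirty_card_val().
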
YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  hr->set_young();
  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;

  ++_survivor_length;
}

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (y: %d, surv: %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_survivor());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
                           length, _length);
  }

  return ret;
}

bool YoungList::check_list_empty(bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  return ret;
}

void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr               = _head;
}

bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}

void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  size_t rs_length = _curr->rem_set()->occupied();

  _sampled_rs_lengths += rs_length;

  // The current region may not yet have been added to the
  // incremental collection set (it gets added when it is
  // retired as the current allocation region).
  if (_curr->in_collection_set()) {
    // Update the collection set policy information for this region
    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
  }

  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
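
// A sketch of the intended sampling protocol (the driver lives in the
// concurrent refinement / policy code, not in this file):
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();
//   }
//
// init resets the accumulator, more/next walk the list one region at a
// time, and the total is published into _last_sampled_rs_lengths once
// the walk falls off the end of the list.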

void
YoungList::reset_auxilary_lists() {
  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  _head   = _survivor_head;
  _length = _survivor_length;
  if (_survivor_head != NULL) {
    assert(_survivor_tail != NULL, "cause it shouldn't be");
    assert(_survivor_length > 0, "invariant");
    _survivor_tail->set_next_young_region(NULL);
  }

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}

void YoungList::print() {
  HeapRegion* lists[] = {_head,   _survivor_head};
  const char* names[] = {"YOUNG", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}

void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}
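
// Reading aid for the lock-free dirty cards region list (an
// interpretation of push_dirty_cards_region() above and
// pop_dirty_cards_region() below, not new behavior): a region's next
// field doubles as the "am I on the list?" flag. A thread claims a
// region by CASing a self-pointer into the NULL next field, then CASes
// the region onto the list head; the last element points to itself so
// that NULL can keep meaning "not on any list".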

HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _cmThread->stop();
}

void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

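// A note on the two free lists (an interpretation of the code below,
// not new behavior): the "master" _free_list is what regions are
// actually allocated from, while the _secondary_free_list receives
// regions freed concurrently (e.g. by the cleanup phase). Under the
// SecondaryFreeList_lock, append_secondary_free_list() migrates
// entries from the secondary list onto the master list before they are
// handed out.
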
HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list() {
  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "secondary_free_list has "SIZE_FORMAT" entries",
                               _secondary_free_list.length());
      }
      // It looks as if there are free regions available on the
      // secondary_free_list. Let's move them to the free_list and try
      // again to allocate from it.
      append_secondary_free_list();

      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
             "empty we should have moved at least one entry to the free_list");
      HeapRegion* res = _free_list.remove_head();
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "allocated "HR_FORMAT" from secondary_free_list",
                               HR_FORMAT_PARAMS(res));
      }
      return res;
    }

    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved onto
    // the secondary_free_list.
    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                           "could not allocate from secondary_free_list");
  }
  return NULL;
}

HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
  assert(!isHumongous(word_size) ||
         word_size <= (size_t) HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res;
  if (G1StressConcRegionFreeing) {
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "forced to look at the secondary_free_list");
      }
      res = new_region_try_secondary_free_list();
      if (res != NULL) {
        return res;
      }
    }
  }
  res = _free_list.remove_head_or_null();
  if (res == NULL) {
    if (G1ConcRegionFreeingVerbose) {
      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                             "res == NULL, trying the secondary_free_list");
    }
    res = new_region_try_secondary_free_list();
  }
  if (res == NULL && do_expand) {
    if (expand(word_size * HeapWordSize)) {
      // The expansion succeeded and so we should have at least one
      // region on the free list.
      res = _free_list.remove_head();
    }
  }
  if (res != NULL) {
    if (G1PrintHeapRegions) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
                             "top "PTR_FORMAT, res->hrs_index(),
                             res->bottom(), res->end(), res->top());
    }
  }
  return res;
}

HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
                                                 size_t word_size) {
  HeapRegion* alloc_region = NULL;
  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    alloc_region = new_region(word_size, true /* do_expand */);
    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
      alloc_region->set_survivor();
    }
    ++_gc_alloc_region_counts[purpose];
  } else {
    g1_policy()->note_alloc_region_limit_reached(purpose);
  }
  return alloc_region;
}

int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
                                                       size_t word_size) {
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  int first = -1;
  if (num_regions == 1) {
    // Only one region to allocate, no need to go through the slower
    // path. The caller will attempt the expansion if this fails, so
    // let's not try to expand here too.
    HeapRegion* hr = new_region(word_size, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrs_index();
    } else {
      first = -1;
    }
  } else {
    // We can't allocate humongous regions while cleanupComplete() is
    // running, since some of the regions we find to be empty might not
    // yet be added to the free list and it is not straightforward to
    // know which list they are on so that we can remove them. Note
    // that we only need to do this if we need to allocate more than
    // one region to satisfy the current humongous allocation
    // request. If we are only allocating one region we use the common
    // region allocation code (see above).
    wait_while_free_regions_coming();
    append_secondary_free_list_if_not_empty_with_lock();

    if (free_regions() >= num_regions) {
      first = _hrs->find_contiguous(num_regions);
      if (first != -1) {
        for (int i = first; i < first + (int) num_regions; ++i) {
          HeapRegion* hr = _hrs->at(i);
          assert(hr->is_empty(), "sanity");
          assert(is_on_master_free_list(hr), "sanity");
          hr->set_pending_removal(true);
        }
        _free_list.remove_all_pending(num_regions);
      }
    }
  }
  return first;
}

HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
                                                           size_t num_regions,
                                                           size_t word_size) {
  assert(first != -1, "pre-condition");
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series + 1.
  int last = first + (int) num_regions;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = _hrs->at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new end of the first region in the series that
  // should also match the end of the last region in the series.
  HeapWord* new_end = new_obj + word_size_sum;
  // This will be the new top of the first region that will reflect
  // this allocation.
  HeapWord* new_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_startsHumongous(new_top, new_end);

  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (int i = first + 1; i < last; ++i) {
    hr = _hrs->at(i);
    hr->set_continuesHumongous(first_hr);
  }
  // If we have "continues humongous" regions (hr != NULL), then the
  // end of the last one should match new_end.
  assert(hr == NULL || hr->end() == new_end, "sanity");

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now that the BOT and the object header have been initialized,
  // we can update top of the "starts humongous" region.
  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
         "new_top should be in this region");
  first_hr->set_top(new_top);

  // Now, we will update the top fields of the "continues humongous"
  // regions. The reason we need to do this is that, otherwise,
  // these regions would look empty and this will confuse parts of
  // G1. For example, the code that looks for a consecutive number
  // of empty regions will consider them empty and try to
  // re-allocate them. We can extend is_empty() to also include
  // !continuesHumongous(), but it is easier to just update the top
  // fields here. The way we set top for all regions (i.e., top ==
  // end for all regions but the last one, top == new_top for the
  // last one) is actually used when we will free up the humongous
  // region in free_humongous_region().
  hr = NULL;
  for (int i = first + 1; i < last; ++i) {
    hr = _hrs->at(i);
    if ((i + 1) == last) {
      // last continues humongous region
      assert(hr->bottom() < new_top && new_top <= hr->end(),
             "new_top should fall on this region");
      hr->set_top(new_top);
    } else {
      // not last one
      assert(new_top > hr->end(), "new_top should be above this region");
      hr->set_top(hr->end());
    }
  }
  // If we have continues humongous regions (hr != NULL), then the
  // end of the last one should match new_end and its top should
  // match new_top.
  assert(hr == NULL ||
         (hr->end() == new_end && hr->top() == new_top), "sanity");

  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
  _summary_bytes_used += first_hr->used();
  _humongous_set.add(first_hr);

  return new_obj;
}

// If the allocation could fit into the free regions without expansion,
// try that. Otherwise, if we can expand the heap, do so. Otherwise, if
// using expansion regions might help, try with the expansion regions
// given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  size_t num_regions =
         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
  size_t x_size = expansion_regions();
  size_t fs = _hrs->free_suffix();
  int first = humongous_obj_allocate_find_first(num_regions, word_size);
  if (first == -1) {
    // The only thing we can do now is attempt expansion.
    if (fs + x_size >= num_regions) {
      // If the number of regions we're trying to allocate for this
      // object is at most the number of regions in the free suffix,
      // then the call to humongous_obj_allocate_find_first() above
      // should have succeeded and we wouldn't be here.
      //
      // We should only be trying to expand when the free suffix is
      // not sufficient for the object _and_ we have some expansion
      // room available.
      assert(num_regions > fs, "earlier allocation should have succeeded");

      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
        first = humongous_obj_allocate_find_first(num_regions, word_size);
        // If the expansion was successful then the allocation
        // should have been successful.
        assert(first != -1, "this should have worked");
      }
    }
  }

  HeapWord* result = NULL;
  if (first != -1) {
    result =
      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
    assert(result != NULL, "it should always return a valid result");
  }

  verify_region_sets_optional();

  return result;
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "we do not allow humongous TLABs");

  unsigned int dummy_gc_count_before;
  return attempt_allocation(word_size, &dummy_gc_count_before);
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool   is_noref,
                              bool   is_tlab,
                              bool*  gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_tlab, "mem_allocate() should not be called directly "
         "to allocate TLABs");

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    unsigned int gc_count_before;

    HeapWord* result = NULL;
    if (!isHumongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before);
    } else {
      result = attempt_allocation_humongous(word_size, &gc_count_before);
    }
    if (result != NULL) {
      return result;
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(gc_count_before, word_size);
    // ...and get the VM thread to execute it.
    VMThread::execute(&op);

    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !isHumongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        dirty_young_block(result, word_size);
      }
      return result;
    } else {
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                           unsigned int *gc_count_before_ret) {
  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    unsigned int gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
      if (result != NULL) {
        return result;
      }

      // If we reach here, attempt_allocation_locked() above failed to
      // allocate a new region. So the mutator alloc region should be NULL.
      assert(_mutator_alloc_region.get() == NULL, "only way to get here");

      if (GC_locker::is_active_and_needs_gc()) {
        if (g1_policy()->can_expand_young_list()) {
          result = _mutator_alloc_region.attempt_allocation_force(word_size,
                                                      false /* bot_updates */);
          if (result != NULL) {
            return result;
          }
        }
        should_try_gc = false;
      } else {
        // Read the GC count while still holding the Heap_lock.
        gc_count_before = SharedHeap::heap()->total_collections();
        should_try_gc = true;
      }
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = SharedHeap::heap()->total_collections();
        return NULL;
      }
    } else {
      GC_locker::stall_until_clear();
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_slow() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                          unsigned int *gc_count_before_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(isHumongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    unsigned int gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
      result = humongous_obj_allocate(word_size);
      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        should_try_gc = false;
      } else {
        // Read the GC count while still holding the Heap_lock.
        gc_count_before = SharedHeap::heap()->total_collections();
        should_try_gc = true;
      }
    }

    if (should_try_gc) {
      // If we failed to allocate the humongous object, we should try to
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.

      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = SharedHeap::heap()->total_collections();
        return NULL;
      }
    } else {
      GC_locker::stall_until_clear();
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. Give a
    // warning if we seem to be looping forever.

    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_humongous() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                       bool expect_null_mutator_alloc_region) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_mutator_alloc_region.get() == NULL ||
         !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!isHumongous(word_size)) {
    return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  } else {
    return humongous_obj_allocate(word_size);
  }

  ShouldNotReachHere();
}
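
// Hypothetical caller sketch (hedged; this mirrors how the VM
// operations elsewhere in this file invoke the method, from within a
// pause at a safepoint):
//
//   HeapWord* result =
//     attempt_allocation_at_safepoint(word_size,
//                           false /* expect_null_mutator_alloc_region */);
//
// Note also that the trailing ShouldNotReachHere() above is
// unreachable as written (both branches return); it reads as a
// defensive marker only.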

void G1CollectedHeap::abandon_gc_alloc_regions() {
  // first, make sure that the GC alloc region list is empty (it should!)
  assert(_gc_alloc_region_list == NULL, "invariant");
  release_gc_alloc_regions(true /* totally */);
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    r->reset_gc_time_stamp();
    if (r->continuesHumongous())
      return false;
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs != NULL) hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
    return false;
  }
};

class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->used_region().word_size() != 0) {
      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
    }
    return false;
  }
};

class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap*   _g1h;
  UpdateRSOopClosure _cl;
  int                _worker_i;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};

tonyp@2315 | 1130 | bool G1CollectedHeap::do_collection(bool explicit_gc, |
tonyp@2011 | 1131 | bool clear_all_soft_refs, |
ysr@777 | 1132 | size_t word_size) { |
tonyp@2472 | 1133 | assert_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2472 | 1134 | |
tonyp@1794 | 1135 | if (GC_locker::check_active_before_gc()) { |
tonyp@2315 | 1136 | return false; |
tonyp@1794 | 1137 | } |
tonyp@1794 | 1138 | |
kamg@2445 | 1139 | SvcGCMarker sgcm(SvcGCMarker::FULL); |
ysr@777 | 1140 | ResourceMark rm; |
ysr@777 | 1141 | |
tonyp@1273 | 1142 | if (PrintHeapAtGC) { |
tonyp@1273 | 1143 | Universe::print_heap_before_gc(); |
tonyp@1273 | 1144 | } |
tonyp@1273 | 1145 | |
tonyp@2472 | 1146 | verify_region_sets_optional(); |
ysr@777 | 1147 | |
jmasa@1822 | 1148 | const bool do_clear_all_soft_refs = clear_all_soft_refs || |
jmasa@1822 | 1149 | collector_policy()->should_clear_all_soft_refs(); |
jmasa@1822 | 1150 | |
jmasa@1822 | 1151 | ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); |
jmasa@1822 | 1152 | |
ysr@777 | 1153 | { |
ysr@777 | 1154 | IsGCActiveMark x; |
ysr@777 | 1155 | |
ysr@777 | 1156 | // Timing |
tonyp@2011 | 1157 | bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); |
tonyp@2011 | 1158 | assert(!system_gc || explicit_gc, "invariant"); |
ysr@777 | 1159 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
ysr@777 | 1160 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
tonyp@2011 | 1161 | TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", |
jmasa@1822 | 1162 | PrintGC, true, gclog_or_tty); |
ysr@777 | 1163 | |
tonyp@1524 | 1164 | TraceMemoryManagerStats tms(true /* fullGC */); |
tonyp@1524 | 1165 | |
ysr@777 | 1166 | double start = os::elapsedTime(); |
ysr@777 | 1167 | g1_policy()->record_full_collection_start(); |
ysr@777 | 1168 | |
tonyp@2472 | 1169 | wait_while_free_regions_coming(); |
tonyp@2643 | 1170 | append_secondary_free_list_if_not_empty_with_lock(); |
tonyp@2472 | 1171 | |
ysr@777 | 1172 | gc_prologue(true); |
tonyp@1273 | 1173 | increment_total_collections(true /* full gc */); |
ysr@777 | 1174 | |
ysr@777 | 1175 | size_t g1h_prev_used = used(); |
ysr@777 | 1176 | assert(used() == recalculate_used(), "Should be equal"); |
ysr@777 | 1177 | |
ysr@777 | 1178 | if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 1179 | HandleMark hm; // Discard invalid handles created during verification |
tonyp@2715 | 1180 | gclog_or_tty->print(" VerifyBeforeGC:"); |
ysr@777 | 1181 | prepare_for_verify(); |
ysr@777 | 1182 | Universe::verify(true); |
ysr@777 | 1183 | } |
ysr@777 | 1184 | |
ysr@777 | 1185 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
ysr@777 | 1186 | |
ysr@777 | 1187 | // We want to discover references, but not process them yet. |
ysr@777 | 1188 | // This mode is disabled in |
ysr@777 | 1189 | // instanceRefKlass::process_discovered_references if the |
ysr@777 | 1190 | // generation does some collection work, or |
ysr@777 | 1191 | // instanceRefKlass::enqueue_discovered_references if the |
ysr@777 | 1192 | // generation returns without doing any work. |
ysr@777 | 1193 | ref_processor()->disable_discovery(); |
ysr@777 | 1194 | ref_processor()->abandon_partial_discovery(); |
ysr@777 | 1195 | ref_processor()->verify_no_references_recorded(); |
ysr@777 | 1196 | |
ysr@777 | 1197 | // Abandon current iterations of concurrent marking and concurrent |
ysr@777 | 1198 | // refinement, if any are in progress. |
ysr@777 | 1199 | concurrent_mark()->abort(); |
ysr@777 | 1200 | |
ysr@777 | 1201 | // Make sure we'll choose a new allocation region afterwards. |
tonyp@2715 | 1202 | release_mutator_alloc_region(); |
tonyp@1071 | 1203 | abandon_gc_alloc_regions(); |
johnc@2216 | 1204 | g1_rem_set()->cleanupHRRS(); |
ysr@777 | 1205 | tear_down_region_lists(); |
johnc@1829 | 1206 | |
johnc@1829 | 1207 | // We may have added regions to the current incremental collection |
johnc@1829 | 1208 | // set between the last GC or pause and now. We need to clear the |
johnc@1829 | 1209 | // incremental collection set and then start rebuilding it afresh |
johnc@1829 | 1210 | // after this full GC. |
johnc@1829 | 1211 | abandon_collection_set(g1_policy()->inc_cset_head()); |
johnc@1829 | 1212 | g1_policy()->clear_incremental_cset(); |
johnc@1829 | 1213 | g1_policy()->stop_incremental_cset_building(); |
johnc@1829 | 1214 | |
ysr@777 | 1215 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 1216 | empty_young_list(); |
ysr@777 | 1217 | g1_policy()->set_full_young_gcs(true); |
ysr@777 | 1218 | } |
ysr@777 | 1219 | |
johnc@2316 | 1220 | // See the comment in G1CollectedHeap::ref_processing_init() about |
johnc@2316 | 1221 | // how reference processing currently works in G1. |
johnc@2316 | 1222 | |
ysr@777 | 1223 | // Temporarily make reference _discovery_ single threaded (non-MT). |
ysr@2651 | 1224 | ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); |
ysr@777 | 1225 | |
ysr@777 | 1226 | // Temporarily make refs discovery atomic |
ysr@777 | 1227 | ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); |
ysr@777 | 1228 | |
ysr@777 | 1229 | // Temporarily clear _is_alive_non_header |
ysr@777 | 1230 | ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); |
ysr@777 | 1231 | |
ysr@777 | 1232 | ref_processor()->enable_discovery(); |
jmasa@1822 | 1233 | ref_processor()->setup_policy(do_clear_all_soft_refs); |
ysr@777 | 1234 | |
ysr@777 | 1235 | // Do collection work |
ysr@777 | 1236 | { |
ysr@777 | 1237 | HandleMark hm; // Discard invalid handles created during gc |
jmasa@1822 | 1238 | G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); |
ysr@777 | 1239 | } |
tonyp@2472 | 1240 | assert(free_regions() == 0, "we should not have added any free regions"); |
ysr@777 | 1241 | rebuild_region_lists(); |
ysr@777 | 1242 | |
ysr@777 | 1243 | _summary_bytes_used = recalculate_used(); |
ysr@777 | 1244 | |
ysr@777 | 1245 | ref_processor()->enqueue_discovered_references(); |
ysr@777 | 1246 | |
ysr@777 | 1247 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 1248 | |
tonyp@1524 | 1249 | MemoryService::track_memory_usage(); |
tonyp@1524 | 1250 | |
ysr@777 | 1251 | if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 1252 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 1253 | gclog_or_tty->print(" VerifyAfterGC:"); |
iveresov@1072 | 1254 | prepare_for_verify(); |
ysr@777 | 1255 | Universe::verify(false); |
ysr@777 | 1256 | } |
ysr@777 | 1257 | NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); |
ysr@777 | 1258 | |
ysr@777 | 1259 | reset_gc_time_stamp(); |
ysr@777 | 1260 | // Since everything potentially moved, we will clear all remembered |
apetrusenko@1061 | 1261 | // sets, and clear all cards. Later we will rebuild remembered
apetrusenko@1061 | 1262 | // sets. We will also reset the GC time stamps of the regions. |
ysr@777 | 1263 | PostMCRemSetClearClosure rs_clear(mr_bs()); |
ysr@777 | 1264 | heap_region_iterate(&rs_clear); |
ysr@777 | 1265 | |
ysr@777 | 1266 | // Resize the heap if necessary. |
tonyp@2011 | 1267 | resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); |
ysr@777 | 1268 | |
ysr@777 | 1269 | if (_cg1r->use_cache()) { |
ysr@777 | 1270 | _cg1r->clear_and_record_card_counts(); |
ysr@777 | 1271 | _cg1r->clear_hot_cache(); |
ysr@777 | 1272 | } |
ysr@777 | 1273 | |
apetrusenko@1061 | 1274 | // Rebuild remembered sets of all regions. |
jmasa@2188 | 1275 | |
jmasa@2188 | 1276 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
apetrusenko@1061 | 1277 | ParRebuildRSTask rebuild_rs_task(this); |
apetrusenko@1061 | 1278 | assert(check_heap_region_claim_values( |
apetrusenko@1061 | 1279 | HeapRegion::InitialClaimValue), "sanity check"); |
apetrusenko@1061 | 1280 | set_par_threads(workers()->total_workers()); |
apetrusenko@1061 | 1281 | workers()->run_task(&rebuild_rs_task); |
apetrusenko@1061 | 1282 | set_par_threads(0); |
apetrusenko@1061 | 1283 | assert(check_heap_region_claim_values( |
apetrusenko@1061 | 1284 | HeapRegion::RebuildRSClaimValue), "sanity check"); |
apetrusenko@1061 | 1285 | reset_heap_region_claim_values(); |
apetrusenko@1061 | 1286 | } else { |
apetrusenko@1061 | 1287 | RebuildRSOutOfRegionClosure rebuild_rs(this); |
apetrusenko@1061 | 1288 | heap_region_iterate(&rebuild_rs); |
apetrusenko@1061 | 1289 | } |
apetrusenko@1061 | 1290 | |
ysr@777 | 1291 | if (PrintGC) { |
ysr@777 | 1292 | print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); |
ysr@777 | 1293 | } |
ysr@777 | 1294 | |
ysr@777 | 1295 | if (true) { // FIXME |
ysr@777 | 1296 | // Ask the permanent generation to adjust size for full collections |
ysr@777 | 1297 | perm()->compute_new_size(); |
ysr@777 | 1298 | } |
ysr@777 | 1299 | |
johnc@1829 | 1300 | // Start a new incremental collection set for the next pause |
johnc@1829 | 1301 | assert(g1_policy()->collection_set() == NULL, "must be"); |
johnc@1829 | 1302 | g1_policy()->start_incremental_cset_building(); |
johnc@1829 | 1303 | |
johnc@1829 | 1304 | // Clear the _cset_fast_test bitmap in anticipation of adding |
johnc@1829 | 1305 | // regions to the incremental collection set for the next |
johnc@1829 | 1306 | // evacuation pause. |
johnc@1829 | 1307 | clear_cset_fast_test(); |
johnc@1829 | 1308 | |
tonyp@2715 | 1309 | init_mutator_alloc_region(); |
tonyp@2715 | 1310 | |
ysr@777 | 1311 | double end = os::elapsedTime(); |
ysr@777 | 1312 | g1_policy()->record_full_collection_end(); |
ysr@777 | 1313 | |
jmasa@981 | 1314 | #ifdef TRACESPINNING |
jmasa@981 | 1315 | ParallelTaskTerminator::print_termination_counts(); |
jmasa@981 | 1316 | #endif |
jmasa@981 | 1317 | |
ysr@777 | 1318 | gc_epilogue(true); |
ysr@777 | 1319 | |
iveresov@1229 | 1320 | // Discard all rset updates |
iveresov@1229 | 1321 | JavaThread::dirty_card_queue_set().abandon_logs(); |
iveresov@1051 | 1322 | assert(!G1DeferredRSUpdate
iveresov@1051 | 1323 | || (dirty_card_queue_set().completed_buffers_num() == 0), "Should not be any");
ysr@777 | 1324 | } |
ysr@777 | 1325 | |
ysr@777 | 1326 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 1327 | _young_list->reset_sampled_info(); |
johnc@1829 | 1328 | // At this point there should be no regions in the |
johnc@1829 | 1329 | // entire heap tagged as young. |
johnc@1829 | 1330 | assert( check_young_list_empty(true /* check_heap */), |
ysr@777 | 1331 | "young list should be empty at this point"); |
ysr@777 | 1332 | } |
tonyp@1273 | 1333 | |
tonyp@2011 | 1334 | // Update the number of full collections that have been completed. |
tonyp@2372 | 1335 | increment_full_collections_completed(false /* concurrent */); |
tonyp@2011 | 1336 | |
tonyp@2472 | 1337 | verify_region_sets_optional(); |
tonyp@2472 | 1338 | |
tonyp@1273 | 1339 | if (PrintHeapAtGC) { |
tonyp@1273 | 1340 | Universe::print_heap_after_gc(); |
tonyp@1273 | 1341 | } |
tonyp@2315 | 1342 | |
tonyp@2315 | 1343 | return true; |
ysr@777 | 1344 | } |
ysr@777 | 1345 | |
ysr@777 | 1346 | void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { |
tonyp@2315 | 1347 | // do_collection() will return whether it succeeded in performing |
tonyp@2315 | 1348 | // the GC. Currently, there is no facility on the |
tonyp@2315 | 1349 | // do_full_collection() API to notify the caller that the collection
tonyp@2315 | 1350 | // did not succeed (e.g., because it was locked out by the GC |
tonyp@2315 | 1351 | // locker). So, right now, we'll ignore the return value. |
tonyp@2315 | 1352 | bool dummy = do_collection(true, /* explicit_gc */ |
tonyp@2315 | 1353 | clear_all_soft_refs, |
tonyp@2315 | 1354 | 0 /* word_size */); |
ysr@777 | 1355 | } |
ysr@777 | 1356 | |
ysr@777 | 1357 | // This code is mostly copied from TenuredGeneration. |
ysr@777 | 1358 | void |
ysr@777 | 1359 | G1CollectedHeap:: |
ysr@777 | 1360 | resize_if_necessary_after_full_collection(size_t word_size) { |
ysr@777 | 1361 | assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); |
ysr@777 | 1362 | |
ysr@777 | 1363 | // Include the current allocation, if any, and bytes that will be |
ysr@777 | 1364 | // pre-allocated to support collections, as "used". |
ysr@777 | 1365 | const size_t used_after_gc = used(); |
ysr@777 | 1366 | const size_t capacity_after_gc = capacity(); |
ysr@777 | 1367 | const size_t free_after_gc = capacity_after_gc - used_after_gc; |
ysr@777 | 1368 | |
tonyp@2072 | 1369 | // This is enforced in arguments.cpp. |
tonyp@2072 | 1370 | assert(MinHeapFreeRatio <= MaxHeapFreeRatio, |
tonyp@2072 | 1371 | "otherwise the code below doesn't make sense"); |
tonyp@2072 | 1372 | |
ysr@777 | 1373 | // We don't have floating point command-line arguments |
tonyp@2072 | 1374 | const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; |
ysr@777 | 1375 | const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
tonyp@2072 | 1376 | const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; |
ysr@777 | 1377 | const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
ysr@777 | 1378 | |
tonyp@2072 | 1379 | const size_t min_heap_size = collector_policy()->min_heap_byte_size(); |
tonyp@2072 | 1380 | const size_t max_heap_size = collector_policy()->max_heap_byte_size(); |
tonyp@2072 | 1381 | |
tonyp@2072 | 1382 | // We have to be careful here as these two calculations can overflow |
tonyp@2072 | 1383 | // 32-bit size_t's. |
tonyp@2072 | 1384 | double used_after_gc_d = (double) used_after_gc; |
tonyp@2072 | 1385 | double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; |
tonyp@2072 | 1386 | double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; |
tonyp@2072 | 1387 | |
tonyp@2072 | 1388 | // Let's make sure that they are both under the max heap size, which |
tonyp@2072 | 1389 | // by default will make them fit into a size_t. |
tonyp@2072 | 1390 | double desired_capacity_upper_bound = (double) max_heap_size; |
tonyp@2072 | 1391 | minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, |
tonyp@2072 | 1392 | desired_capacity_upper_bound); |
tonyp@2072 | 1393 | maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, |
tonyp@2072 | 1394 | desired_capacity_upper_bound); |
tonyp@2072 | 1395 | |
tonyp@2072 | 1396 | // We can now safely turn them into size_t's. |
tonyp@2072 | 1397 | size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; |
tonyp@2072 | 1398 | size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; |
tonyp@2072 | 1399 | |
tonyp@2072 | 1400 | // This assert only makes sense here, before we adjust them |
tonyp@2072 | 1401 | // with respect to the min and max heap size. |
tonyp@2072 | 1402 | assert(minimum_desired_capacity <= maximum_desired_capacity, |
tonyp@2072 | 1403 | err_msg("minimum_desired_capacity = "SIZE_FORMAT", " |
tonyp@2072 | 1404 | "maximum_desired_capacity = "SIZE_FORMAT, |
tonyp@2072 | 1405 | minimum_desired_capacity, maximum_desired_capacity)); |
tonyp@2072 | 1406 | |
tonyp@2072 | 1407 | // Should not be greater than the heap max size. No need to adjust |
tonyp@2072 | 1408 | // it with respect to the heap min size as it's a lower bound (i.e., |
tonyp@2072 | 1409 | // we'll try to make the capacity larger than it, not smaller). |
tonyp@2072 | 1410 | minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); |
tonyp@2072 | 1411 | // Should not be less than the heap min size. No need to adjust it |
tonyp@2072 | 1412 | // with respect to the heap max size as it's an upper bound (i.e., |
tonyp@2072 | 1413 | // we'll try to make the capacity smaller than it, not greater). |
tonyp@2072 | 1414 | maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); |
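  // Worked example (illustrative numbers only, not values we rely on):
  // with used_after_gc = 600M, MinHeapFreeRatio = 40 and
  // MaxHeapFreeRatio = 70, the computation above yields
  //   minimum_desired_capacity = 600M / (1 - 0.40) = 1000M
  //   maximum_desired_capacity = 600M / (1 - 0.70) = 2000M
  // so we expand if the committed capacity is below 1000M, shrink if it
  // is above 2000M, and leave the heap alone anywhere in between.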
ysr@777 | 1415 | |
ysr@777 | 1416 | if (PrintGC && Verbose) { |
tonyp@2072 | 1417 | const double free_percentage = |
tonyp@2072 | 1418 | (double) free_after_gc / (double) capacity_after_gc; |
ysr@777 | 1419 | gclog_or_tty->print_cr("Computing new size after full GC "); |
ysr@777 | 1420 | gclog_or_tty->print_cr(" " |
ysr@777 | 1421 | " minimum_free_percentage: %6.2f", |
ysr@777 | 1422 | minimum_free_percentage); |
ysr@777 | 1423 | gclog_or_tty->print_cr(" " |
ysr@777 | 1424 | " maximum_free_percentage: %6.2f", |
ysr@777 | 1425 | maximum_free_percentage); |
ysr@777 | 1426 | gclog_or_tty->print_cr(" " |
ysr@777 | 1427 | " capacity: %6.1fK" |
ysr@777 | 1428 | " minimum_desired_capacity: %6.1fK" |
ysr@777 | 1429 | " maximum_desired_capacity: %6.1fK", |
tonyp@2072 | 1430 | (double) capacity_after_gc / (double) K, |
tonyp@2072 | 1431 | (double) minimum_desired_capacity / (double) K, |
tonyp@2072 | 1432 | (double) maximum_desired_capacity / (double) K); |
ysr@777 | 1433 | gclog_or_tty->print_cr(" " |
tonyp@2072 | 1434 | " free_after_gc: %6.1fK" |
tonyp@2072 | 1435 | " used_after_gc: %6.1fK", |
tonyp@2072 | 1436 | (double) free_after_gc / (double) K, |
tonyp@2072 | 1437 | (double) used_after_gc / (double) K); |
ysr@777 | 1438 | gclog_or_tty->print_cr(" " |
ysr@777 | 1439 | " free_percentage: %6.2f", |
ysr@777 | 1440 | free_percentage); |
ysr@777 | 1441 | } |
tonyp@2072 | 1442 | if (capacity_after_gc < minimum_desired_capacity) { |
ysr@777 | 1443 | // Don't expand unless it's significant |
ysr@777 | 1444 | size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; |
johnc@2504 | 1445 | if (expand(expand_bytes)) { |
johnc@2504 | 1446 | if (PrintGC && Verbose) { |
johnc@2504 | 1447 | gclog_or_tty->print_cr(" " |
johnc@2504 | 1448 | " expanding:" |
johnc@2504 | 1449 | " max_heap_size: %6.1fK" |
johnc@2504 | 1450 | " minimum_desired_capacity: %6.1fK" |
johnc@2504 | 1451 | " expand_bytes: %6.1fK", |
johnc@2504 | 1452 | (double) max_heap_size / (double) K, |
johnc@2504 | 1453 | (double) minimum_desired_capacity / (double) K, |
johnc@2504 | 1454 | (double) expand_bytes / (double) K); |
johnc@2504 | 1455 | } |
ysr@777 | 1456 | } |
ysr@777 | 1457 | |
ysr@777 | 1458 | // No expansion, now see if we want to shrink |
tonyp@2072 | 1459 | } else if (capacity_after_gc > maximum_desired_capacity) { |
ysr@777 | 1460 | // Capacity too large, compute shrinking size |
ysr@777 | 1461 | size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; |
ysr@777 | 1462 | shrink(shrink_bytes); |
ysr@777 | 1463 | if (PrintGC && Verbose) { |
ysr@777 | 1464 | gclog_or_tty->print_cr(" " |
ysr@777 | 1465 | " shrinking:" |
tonyp@2072 | 1466 | " min_heap_size: %6.1fK" |
tonyp@2072 | 1467 | " maximum_desired_capacity: %6.1fK" |
tonyp@2072 | 1468 | " shrink_bytes: %6.1fK", |
tonyp@2072 | 1469 | (double) min_heap_size / (double) K, |
tonyp@2072 | 1470 | (double) maximum_desired_capacity / (double) K, |
tonyp@2072 | 1471 | (double) shrink_bytes / (double) K); |
ysr@777 | 1472 | } |
ysr@777 | 1473 | } |
ysr@777 | 1474 | } |
ysr@777 | 1475 | |
ysr@777 | 1476 | |
ysr@777 | 1477 | HeapWord* |
tonyp@2315 | 1478 | G1CollectedHeap::satisfy_failed_allocation(size_t word_size, |
tonyp@2315 | 1479 | bool* succeeded) { |
tonyp@2472 | 1480 | assert_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2315 | 1481 | |
tonyp@2315 | 1482 | *succeeded = true; |
tonyp@2315 | 1483 | // Let's attempt the allocation first. |
tonyp@2715 | 1484 | HeapWord* result = |
tonyp@2715 | 1485 | attempt_allocation_at_safepoint(word_size, |
tonyp@2715 | 1486 | false /* expect_null_mutator_alloc_region */); |
tonyp@2315 | 1487 | if (result != NULL) { |
tonyp@2315 | 1488 | assert(*succeeded, "sanity"); |
tonyp@2315 | 1489 | return result; |
tonyp@2315 | 1490 | } |
ysr@777 | 1491 | |
ysr@777 | 1492 | // In a G1 heap, we're supposed to keep allocation from failing by |
ysr@777 | 1493 | // incremental pauses. Therefore, at least for now, we'll favor |
ysr@777 | 1494 | // expansion over collection. (This might change in the future if we can |
ysr@777 | 1495 | // do something smarter than full collection to satisfy a failed alloc.) |
ysr@777 | 1496 | result = expand_and_allocate(word_size); |
ysr@777 | 1497 | if (result != NULL) { |
tonyp@2315 | 1498 | assert(*succeeded, "sanity"); |
ysr@777 | 1499 | return result; |
ysr@777 | 1500 | } |
ysr@777 | 1501 | |
tonyp@2315 | 1502 | // Expansion didn't work, we'll try to do a Full GC. |
tonyp@2315 | 1503 | bool gc_succeeded = do_collection(false, /* explicit_gc */ |
tonyp@2315 | 1504 | false, /* clear_all_soft_refs */ |
tonyp@2315 | 1505 | word_size); |
tonyp@2315 | 1506 | if (!gc_succeeded) { |
tonyp@2315 | 1507 | *succeeded = false; |
tonyp@2315 | 1508 | return NULL; |
tonyp@2315 | 1509 | } |
tonyp@2315 | 1510 | |
tonyp@2315 | 1511 | // Retry the allocation |
tonyp@2315 | 1512 | result = attempt_allocation_at_safepoint(word_size, |
tonyp@2715 | 1513 | true /* expect_null_mutator_alloc_region */); |
ysr@777 | 1514 | if (result != NULL) { |
tonyp@2315 | 1515 | assert(*succeeded, "sanity"); |
ysr@777 | 1516 | return result; |
ysr@777 | 1517 | } |
ysr@777 | 1518 | |
tonyp@2315 | 1519 | // Then, try a Full GC that will collect all soft references. |
tonyp@2315 | 1520 | gc_succeeded = do_collection(false, /* explicit_gc */ |
tonyp@2315 | 1521 | true, /* clear_all_soft_refs */ |
tonyp@2315 | 1522 | word_size); |
tonyp@2315 | 1523 | if (!gc_succeeded) { |
tonyp@2315 | 1524 | *succeeded = false; |
tonyp@2315 | 1525 | return NULL; |
tonyp@2315 | 1526 | } |
tonyp@2315 | 1527 | |
tonyp@2315 | 1528 | // Retry the allocation once more |
tonyp@2315 | 1529 | result = attempt_allocation_at_safepoint(word_size, |
tonyp@2715 | 1530 | true /* expect_null_mutator_alloc_region */); |
ysr@777 | 1531 | if (result != NULL) { |
tonyp@2315 | 1532 | assert(*succeeded, "sanity"); |
ysr@777 | 1533 | return result; |
ysr@777 | 1534 | } |
ysr@777 | 1535 | |
jmasa@1822 | 1536 | assert(!collector_policy()->should_clear_all_soft_refs(), |
tonyp@2315 | 1537 | "Flag should have been handled and cleared prior to this point"); |
jmasa@1822 | 1538 | |
ysr@777 | 1539 | // What else? We might try synchronous finalization later. If the total |
ysr@777 | 1540 | // space available is large enough for the allocation, then a more |
ysr@777 | 1541 | // complete compaction phase than we've tried so far might be |
ysr@777 | 1542 | // appropriate. |
tonyp@2315 | 1543 | assert(*succeeded, "sanity"); |
ysr@777 | 1544 | return NULL; |
ysr@777 | 1545 | } |
ysr@777 | 1546 | |
ysr@777 | 1547 | // Attempts to expand the heap sufficiently to support an
ysr@777 | 1548 | // allocation of the given "word_size". If successful, performs
ysr@777 | 1549 | // the allocation and returns the address of the allocated
ysr@777 | 1550 | // block; otherwise returns NULL.
ysr@777 | 1551 | |
ysr@777 | 1552 | HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { |
tonyp@2472 | 1553 | assert_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2472 | 1554 | |
tonyp@2472 | 1555 | verify_region_sets_optional(); |
tonyp@2315 | 1556 | |
johnc@2504 | 1557 | size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); |
johnc@2504 | 1558 | if (expand(expand_bytes)) { |
johnc@2504 | 1559 | verify_region_sets_optional(); |
johnc@2504 | 1560 | return attempt_allocation_at_safepoint(word_size, |
tonyp@2715 | 1561 | false /* expect_null_mutator_alloc_region */); |
johnc@2504 | 1562 | } |
johnc@2504 | 1563 | return NULL; |
ysr@777 | 1564 | } |
ysr@777 | 1565 | |
johnc@2504 | 1566 | bool G1CollectedHeap::expand(size_t expand_bytes) { |
ysr@777 | 1567 | size_t old_mem_size = _g1_storage.committed_size(); |
johnc@2504 | 1568 | size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); |
ysr@777 | 1569 | aligned_expand_bytes = align_size_up(aligned_expand_bytes, |
ysr@777 | 1570 | HeapRegion::GrainBytes); |
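  // Note that after the two alignment steps above the expansion is
  // always a whole number of regions; e.g., a request for a single
  // extra word still commits one full region (HeapRegion::GrainBytes).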
johnc@2504 | 1571 | |
johnc@2504 | 1572 | if (Verbose && PrintGC) { |
johnc@2504 | 1573 | gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK", |
johnc@2504 | 1574 | old_mem_size/K, aligned_expand_bytes/K); |
johnc@2504 | 1575 | } |
johnc@2504 | 1576 | |
johnc@2504 | 1577 | HeapWord* old_end = (HeapWord*)_g1_storage.high(); |
johnc@2504 | 1578 | bool successful = _g1_storage.expand_by(aligned_expand_bytes); |
johnc@2504 | 1579 | if (successful) { |
johnc@2504 | 1580 | HeapWord* new_end = (HeapWord*)_g1_storage.high(); |
johnc@2504 | 1581 | |
johnc@2504 | 1582 | // Expand the committed region. |
johnc@2504 | 1583 | _g1_committed.set_end(new_end); |
johnc@2504 | 1584 | |
johnc@2504 | 1585 | // Tell the cardtable about the expansion. |
johnc@2504 | 1586 | Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
johnc@2504 | 1587 | |
johnc@2504 | 1588 | // And the offset table as well. |
johnc@2504 | 1589 | _bot_shared->resize(_g1_committed.word_size()); |
johnc@2504 | 1590 | |
johnc@2504 | 1591 | expand_bytes = aligned_expand_bytes; |
johnc@2504 | 1592 | HeapWord* base = old_end; |
johnc@2504 | 1593 | |
johnc@2504 | 1594 | // Create the heap regions for [old_end, new_end) |
johnc@2504 | 1595 | while (expand_bytes > 0) { |
johnc@2504 | 1596 | HeapWord* high = base + HeapRegion::GrainWords; |
johnc@2504 | 1597 | |
ysr@777 | 1598 | // Create a new HeapRegion. |
ysr@777 | 1599 | MemRegion mr(base, high); |
ysr@777 | 1600 | bool is_zeroed = !_g1_max_committed.contains(base); |
ysr@777 | 1601 | HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); |
ysr@777 | 1602 | |
ysr@777 | 1603 | // Add it to the HeapRegionSeq. |
ysr@777 | 1604 | _hrs->insert(hr); |
tonyp@2472 | 1605 | _free_list.add_as_tail(hr); |
johnc@2504 | 1606 | |
ysr@777 | 1607 | // And we used up an expansion region to create it. |
ysr@777 | 1608 | _expansion_regions--; |
johnc@2504 | 1609 | |
johnc@2504 | 1610 | expand_bytes -= HeapRegion::GrainBytes; |
johnc@2504 | 1611 | base += HeapRegion::GrainWords; |
johnc@2504 | 1612 | } |
johnc@2504 | 1613 | assert(base == new_end, "sanity"); |
johnc@2504 | 1614 | |
johnc@2504 | 1615 | // Now update max_committed if necessary. |
johnc@2504 | 1616 | _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end)); |
johnc@2504 | 1617 | |
johnc@2504 | 1618 | } else { |
johnc@2504 | 1619 | // The expansion of the virtual storage space was unsuccessful. |
johnc@2504 | 1620 | // Let's see if it was because we ran out of swap. |
johnc@2504 | 1621 | if (G1ExitOnExpansionFailure && |
johnc@2504 | 1622 | _g1_storage.uncommitted_size() >= aligned_expand_bytes) { |
johnc@2504 | 1623 | // We had headroom...
johnc@2504 | 1624 | vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); |
ysr@777 | 1625 | } |
ysr@777 | 1626 | } |
tonyp@2472 | 1627 | |
ysr@777 | 1628 | if (Verbose && PrintGC) { |
ysr@777 | 1629 | size_t new_mem_size = _g1_storage.committed_size(); |
johnc@2504 | 1630 | gclog_or_tty->print_cr("...%s, expanded to %ldK", |
johnc@2504 | 1631 | (successful ? "Successful" : "Failed"), |
ysr@777 | 1632 | new_mem_size/K); |
ysr@777 | 1633 | } |
johnc@2504 | 1634 | return successful; |
ysr@777 | 1635 | } |
ysr@777 | 1636 | |
ysr@777 | 1637 | void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
ysr@777 | 1639 | size_t old_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1640 | size_t aligned_shrink_bytes = |
ysr@777 | 1641 | ReservedSpace::page_align_size_down(shrink_bytes); |
ysr@777 | 1642 | aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, |
ysr@777 | 1643 | HeapRegion::GrainBytes); |
ysr@777 | 1644 | size_t num_regions_deleted = 0; |
ysr@777 | 1645 | MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); |
ysr@777 | 1646 | |
ysr@777 | 1647 | assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); |
ysr@777 | 1648 | if (mr.byte_size() > 0) |
ysr@777 | 1649 | _g1_storage.shrink_by(mr.byte_size()); |
ysr@777 | 1650 | assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); |
ysr@777 | 1651 | |
ysr@777 | 1652 | _g1_committed.set_end(mr.start()); |
ysr@777 | 1653 | _expansion_regions += num_regions_deleted; |
ysr@777 | 1654 | |
ysr@777 | 1655 | // Tell the cardtable about it. |
ysr@777 | 1656 | Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
ysr@777 | 1657 | |
ysr@777 | 1658 | // And the offset table as well. |
ysr@777 | 1659 | _bot_shared->resize(_g1_committed.word_size()); |
ysr@777 | 1660 | |
ysr@777 | 1661 | HeapRegionRemSet::shrink_heap(n_regions()); |
ysr@777 | 1662 | |
ysr@777 | 1663 | if (Verbose && PrintGC) { |
ysr@777 | 1664 | size_t new_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1665 | gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", |
ysr@777 | 1666 | old_mem_size/K, aligned_shrink_bytes/K, |
ysr@777 | 1667 | new_mem_size/K); |
ysr@777 | 1668 | } |
ysr@777 | 1669 | } |
ysr@777 | 1670 | |
ysr@777 | 1671 | void G1CollectedHeap::shrink(size_t shrink_bytes) { |
tonyp@2472 | 1672 | verify_region_sets_optional(); |
tonyp@2472 | 1673 | |
tonyp@1071 | 1674 | release_gc_alloc_regions(true /* totally */); |
tonyp@2472 | 1675 | // Instead of tearing down / rebuilding the free lists here, we |
tonyp@2472 | 1676 | // could instead use the remove_all_pending() method on free_list to |
tonyp@2472 | 1677 | // remove only the ones that we need to remove. |
ysr@777 | 1678 | tear_down_region_lists(); // We will rebuild them in a moment. |
ysr@777 | 1679 | shrink_helper(shrink_bytes); |
ysr@777 | 1680 | rebuild_region_lists(); |
tonyp@2472 | 1681 | |
tonyp@2472 | 1682 | verify_region_sets_optional(); |
ysr@777 | 1683 | } |
ysr@777 | 1684 | |
ysr@777 | 1685 | // Public methods. |
ysr@777 | 1686 | |
ysr@777 | 1687 | #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away |
ysr@777 | 1688 | #pragma warning( disable:4355 ) // 'this' : used in base member initializer list |
ysr@777 | 1689 | #endif // _MSC_VER |
ysr@777 | 1690 | |
ysr@777 | 1691 | |
ysr@777 | 1692 | G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : |
ysr@777 | 1693 | SharedHeap(policy_), |
ysr@777 | 1694 | _g1_policy(policy_), |
iveresov@1546 | 1695 | _dirty_card_queue_set(false), |
johnc@2060 | 1696 | _into_cset_dirty_card_queue_set(false), |
johnc@2379 | 1697 | _is_alive_closure(this), |
ysr@777 | 1698 | _ref_processor(NULL), |
ysr@777 | 1699 | _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), |
ysr@777 | 1700 | _bot_shared(NULL), |
ysr@777 | 1701 | _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), |
ysr@777 | 1702 | _evac_failure_scan_stack(NULL) , |
ysr@777 | 1703 | _mark_in_progress(false), |
tonyp@2472 | 1704 | _cg1r(NULL), _summary_bytes_used(0), |
ysr@777 | 1705 | _refine_cte_cl(NULL), |
ysr@777 | 1706 | _full_collection(false), |
tonyp@2472 | 1707 | _free_list("Master Free List"), |
tonyp@2472 | 1708 | _secondary_free_list("Secondary Free List"), |
tonyp@2472 | 1709 | _humongous_set("Master Humongous Set"), |
tonyp@2472 | 1710 | _free_regions_coming(false), |
ysr@777 | 1711 | _young_list(new YoungList(this)), |
ysr@777 | 1712 | _gc_time_stamp(0), |
tonyp@961 | 1713 | _surviving_young_words(NULL), |
tonyp@2011 | 1714 | _full_collections_completed(0), |
tonyp@961 | 1715 | _in_cset_fast_test(NULL), |
apetrusenko@1231 | 1716 | _in_cset_fast_test_base(NULL), |
apetrusenko@1231 | 1717 | _dirty_cards_region_list(NULL) { |
ysr@777 | 1718 | _g1h = this; // To catch bugs. |
ysr@777 | 1719 | if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { |
ysr@777 | 1720 | vm_exit_during_initialization("Failed necessary allocation."); |
ysr@777 | 1721 | } |
tonyp@1377 | 1722 | |
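  // Any allocation of at least half a region is treated as humongous;
  // e.g., with a 1MB region size, requests of 512KB and up take the
  // humongous allocation path rather than a regular alloc region.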
tonyp@1377 | 1723 | _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
tonyp@1377 | 1724 | |
ysr@777 | 1725 | int n_queues = MAX2((int)ParallelGCThreads, 1); |
ysr@777 | 1726 | _task_queues = new RefToScanQueueSet(n_queues); |
ysr@777 | 1727 | |
ysr@777 | 1728 | int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); |
ysr@777 | 1729 | assert(n_rem_sets > 0, "Invariant."); |
ysr@777 | 1730 | |
ysr@777 | 1731 | HeapRegionRemSetIterator** iter_arr = |
ysr@777 | 1732 | NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); |
ysr@777 | 1733 | for (int i = 0; i < n_queues; i++) { |
ysr@777 | 1734 | iter_arr[i] = new HeapRegionRemSetIterator(); |
ysr@777 | 1735 | } |
ysr@777 | 1736 | _rem_set_iterator = iter_arr; |
ysr@777 | 1737 | |
ysr@777 | 1738 | for (int i = 0; i < n_queues; i++) { |
ysr@777 | 1739 | RefToScanQueue* q = new RefToScanQueue(); |
ysr@777 | 1740 | q->initialize(); |
ysr@777 | 1741 | _task_queues->register_queue(i, q); |
ysr@777 | 1742 | } |
ysr@777 | 1743 | |
ysr@777 | 1744 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
tonyp@1071 | 1745 | _gc_alloc_regions[ap] = NULL; |
tonyp@1071 | 1746 | _gc_alloc_region_counts[ap] = 0; |
tonyp@1071 | 1747 | _retained_gc_alloc_regions[ap] = NULL; |
tonyp@1071 | 1748 | // by default, we do not retain a GC alloc region for each ap; |
tonyp@1071 | 1749 | // we'll override this, when appropriate, below |
tonyp@1071 | 1750 | _retain_gc_alloc_region[ap] = false; |
tonyp@1071 | 1751 | } |
tonyp@1071 | 1752 | |
tonyp@1071 | 1753 | // We will try to remember the last half-full tenured region we |
tonyp@1071 | 1754 | // allocated to at the end of a collection so that we can re-use it |
tonyp@1071 | 1755 | // during the next collection. |
tonyp@1071 | 1756 | _retain_gc_alloc_region[GCAllocForTenured] = true; |
tonyp@1071 | 1757 | |
ysr@777 | 1758 | guarantee(_task_queues != NULL, "task_queues allocation failure."); |
ysr@777 | 1759 | } |
ysr@777 | 1760 | |
ysr@777 | 1761 | jint G1CollectedHeap::initialize() { |
ysr@1601 | 1762 | CollectedHeap::pre_initialize(); |
ysr@777 | 1763 | os::enable_vtime(); |
ysr@777 | 1764 | |
ysr@777 | 1765 | // Necessary to satisfy locking discipline assertions. |
ysr@777 | 1766 | |
ysr@777 | 1767 | MutexLocker x(Heap_lock); |
ysr@777 | 1768 | |
ysr@777 | 1769 | // While there are no constraints in the GC code that HeapWordSize |
ysr@777 | 1770 | // be any particular value, there are multiple other areas in the |
ysr@777 | 1771 | // system which believe this to be true (e.g. oop->object_size in some |
ysr@777 | 1772 | // cases incorrectly returns the size in wordSize units rather than |
ysr@777 | 1773 | // HeapWordSize). |
ysr@777 | 1774 | guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); |
ysr@777 | 1775 | |
ysr@777 | 1776 | size_t init_byte_size = collector_policy()->initial_heap_byte_size(); |
ysr@777 | 1777 | size_t max_byte_size = collector_policy()->max_heap_byte_size(); |
ysr@777 | 1778 | |
ysr@777 | 1779 | // Ensure that the sizes are properly aligned. |
ysr@777 | 1780 | Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
ysr@777 | 1781 | Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
ysr@777 | 1782 | |
ysr@777 | 1783 | _cg1r = new ConcurrentG1Refine(); |
ysr@777 | 1784 | |
ysr@777 | 1785 | // Reserve the maximum. |
ysr@777 | 1786 | PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); |
ysr@777 | 1787 | // Includes the perm-gen. |
kvn@1077 | 1788 | |
kvn@1077 | 1789 | const size_t total_reserved = max_byte_size + pgs->max_size(); |
kvn@1077 | 1790 | char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
kvn@1077 | 1791 | |
ysr@777 | 1792 | ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
ysr@777 | 1793 | HeapRegion::GrainBytes, |
brutisso@2455 | 1794 | UseLargePages, addr); |
kvn@1077 | 1795 | |
kvn@1077 | 1796 | if (UseCompressedOops) { |
kvn@1077 | 1797 | if (addr != NULL && !heap_rs.is_reserved()) { |
kvn@1077 | 1798 | // Failed to reserve at specified address - the requested memory |
kvn@1077 | 1799 | // region is taken already, for example, by 'java' launcher. |
kvn@1077 | 1800 | // Try again to reserve the heap higher.
kvn@1077 | 1801 | addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
kvn@1077 | 1802 | ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
brutisso@2455 | 1803 | UseLargePages, addr); |
kvn@1077 | 1804 | if (addr != NULL && !heap_rs0.is_reserved()) { |
kvn@1077 | 1805 | // Failed to reserve at specified address again - give up. |
kvn@1077 | 1806 | addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
kvn@1077 | 1807 | assert(addr == NULL, ""); |
kvn@1077 | 1808 | ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
brutisso@2455 | 1809 | UseLargePages, addr); |
kvn@1077 | 1810 | heap_rs = heap_rs1; |
kvn@1077 | 1811 | } else { |
kvn@1077 | 1812 | heap_rs = heap_rs0; |
kvn@1077 | 1813 | } |
kvn@1077 | 1814 | } |
kvn@1077 | 1815 | } |
ysr@777 | 1816 | |
ysr@777 | 1817 | if (!heap_rs.is_reserved()) { |
ysr@777 | 1818 | vm_exit_during_initialization("Could not reserve enough space for object heap"); |
ysr@777 | 1819 | return JNI_ENOMEM; |
ysr@777 | 1820 | } |
ysr@777 | 1821 | |
ysr@777 | 1822 | // It is important to do this in a way such that concurrent readers can't |
ysr@777 | 1823 | // temporarily think something is in the heap. (I've actually seen this
ysr@777 | 1824 | // happen in asserts: DLD.) |
ysr@777 | 1825 | _reserved.set_word_size(0); |
ysr@777 | 1826 | _reserved.set_start((HeapWord*)heap_rs.base()); |
ysr@777 | 1827 | _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); |
ysr@777 | 1828 | |
ysr@777 | 1829 | _expansion_regions = max_byte_size/HeapRegion::GrainBytes; |
ysr@777 | 1830 | |
ysr@777 | 1831 | // Create the gen rem set (and barrier set) for the entire reserved region. |
ysr@777 | 1832 | _rem_set = collector_policy()->create_rem_set(_reserved, 2); |
ysr@777 | 1833 | set_barrier_set(rem_set()->bs()); |
ysr@777 | 1834 | if (barrier_set()->is_a(BarrierSet::ModRef)) { |
ysr@777 | 1835 | _mr_bs = (ModRefBarrierSet*)_barrier_set; |
ysr@777 | 1836 | } else { |
ysr@777 | 1837 | vm_exit_during_initialization("G1 requires a mod ref bs."); |
ysr@777 | 1838 | return JNI_ENOMEM; |
ysr@777 | 1839 | } |
ysr@777 | 1840 | |
ysr@777 | 1841 | // Also create a G1 rem set. |
johnc@2216 | 1842 | if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
johnc@2216 | 1843 | _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); |
ysr@777 | 1844 | } else { |
johnc@2216 | 1845 | vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
johnc@2216 | 1846 | return JNI_ENOMEM; |
ysr@777 | 1847 | } |
ysr@777 | 1848 | |
ysr@777 | 1849 | // Carve out the G1 part of the heap. |
ysr@777 | 1850 | |
ysr@777 | 1851 | ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); |
ysr@777 | 1852 | _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), |
ysr@777 | 1853 | g1_rs.size()/HeapWordSize); |
ysr@777 | 1854 | ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); |
ysr@777 | 1855 | |
ysr@777 | 1856 | _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); |
ysr@777 | 1857 | |
ysr@777 | 1858 | _g1_storage.initialize(g1_rs, 0); |
ysr@777 | 1859 | _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); |
ysr@777 | 1860 | _g1_max_committed = _g1_committed; |
iveresov@828 | 1861 | _hrs = new HeapRegionSeq(_expansion_regions); |
ysr@777 | 1862 | guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
ysr@777 | 1863 | |
johnc@1242 | 1864 | // 6843694 - ensure that the maximum region index can fit |
johnc@1242 | 1865 | // in the remembered set structures. |
johnc@1242 | 1866 | const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
johnc@1242 | 1867 | guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
johnc@1242 | 1868 | |
johnc@1242 | 1869 | size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
tonyp@1377 | 1870 | guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
tonyp@1377 | 1871 | guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
tonyp@1377 | 1872 | "too many cards per region"); |
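  // For illustration, assuming 2-byte RegionIdx_t and CardIdx_t types:
  // both bounds come out as 2^15 - 1 = 32767, i.e. at most 32768
  // regions in the heap and fewer than 32767 cards per region (with
  // 512-byte cards, regions of up to roughly 16MB).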
johnc@1242 | 1873 | |
tonyp@2472 | 1874 | HeapRegionSet::set_unrealistically_long_length(max_regions() + 1); |
tonyp@2472 | 1875 | |
ysr@777 | 1876 | _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
ysr@777 | 1877 | heap_word_size(init_byte_size)); |
ysr@777 | 1878 | |
ysr@777 | 1879 | _g1h = this; |
ysr@777 | 1880 | |
johnc@1829 | 1881 | _in_cset_fast_test_length = max_regions(); |
johnc@1829 | 1882 | _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
johnc@1829 | 1883 | |
johnc@1829 | 1884 | // We're biasing _in_cset_fast_test to avoid subtracting the |
johnc@1829 | 1885 | // beginning of the heap every time we want to index; basically |
johnc@1829 | 1886 | // it's the same as what we do with the card table.
johnc@1829 | 1887 | _in_cset_fast_test = _in_cset_fast_test_base - |
johnc@1829 | 1888 | ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
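  // A sketch of the resulting lookup (not the actual accessor): the
  // bias lets us index the array directly with
  //   _in_cset_fast_test[(size_t) obj_addr >> HeapRegion::LogOfHRGrainBytes]
  // instead of first subtracting _g1_reserved.start() from the address.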
johnc@1829 | 1889 | |
johnc@1829 | 1890 | // Clear the _cset_fast_test bitmap in anticipation of adding |
johnc@1829 | 1891 | // regions to the incremental collection set for the first |
johnc@1829 | 1892 | // evacuation pause. |
johnc@1829 | 1893 | clear_cset_fast_test(); |
johnc@1829 | 1894 | |
ysr@777 | 1895 | // Create the ConcurrentMark data structure and thread. |
ysr@777 | 1896 | // (Must do this late, so that "max_regions" is defined.) |
ysr@777 | 1897 | _cm = new ConcurrentMark(heap_rs, (int) max_regions()); |
ysr@777 | 1898 | _cmThread = _cm->cmThread(); |
ysr@777 | 1899 | |
ysr@777 | 1900 | // Initialize the from_card cache structure of HeapRegionRemSet. |
ysr@777 | 1901 | HeapRegionRemSet::init_heap(max_regions()); |
ysr@777 | 1902 | |
apetrusenko@1112 | 1903 | // Now expand into the initial heap size. |
johnc@2504 | 1904 | if (!expand(init_byte_size)) { |
johnc@2504 | 1905 | vm_exit_during_initialization("Failed to allocate initial heap."); |
johnc@2504 | 1906 | return JNI_ENOMEM; |
johnc@2504 | 1907 | } |
ysr@777 | 1908 | |
ysr@777 | 1909 | // Perform any initialization actions delegated to the policy. |
ysr@777 | 1910 | g1_policy()->init(); |
ysr@777 | 1911 | |
ysr@777 | 1912 | g1_policy()->note_start_of_mark_thread(); |
ysr@777 | 1913 | |
ysr@777 | 1914 | _refine_cte_cl = |
ysr@777 | 1915 | new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), |
ysr@777 | 1916 | g1_rem_set(), |
ysr@777 | 1917 | concurrent_g1_refine()); |
ysr@777 | 1918 | JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); |
ysr@777 | 1919 | |
ysr@777 | 1920 | JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, |
ysr@777 | 1921 | SATB_Q_FL_lock, |
iveresov@1546 | 1922 | G1SATBProcessCompletedThreshold, |
ysr@777 | 1923 | Shared_SATB_Q_lock); |
iveresov@1229 | 1924 | |
iveresov@1229 | 1925 | JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
iveresov@1229 | 1926 | DirtyCardQ_FL_lock, |
iveresov@1546 | 1927 | concurrent_g1_refine()->yellow_zone(), |
iveresov@1546 | 1928 | concurrent_g1_refine()->red_zone(), |
iveresov@1229 | 1929 | Shared_DirtyCardQ_lock); |
iveresov@1229 | 1930 | |
iveresov@1051 | 1931 | if (G1DeferredRSUpdate) { |
iveresov@1051 | 1932 | dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
iveresov@1051 | 1933 | DirtyCardQ_FL_lock, |
iveresov@1546 | 1934 | -1, // never trigger processing |
iveresov@1546 | 1935 | -1, // no limit on length |
iveresov@1051 | 1936 | Shared_DirtyCardQ_lock, |
iveresov@1051 | 1937 | &JavaThread::dirty_card_queue_set()); |
iveresov@1051 | 1938 | } |
johnc@2060 | 1939 | |
johnc@2060 | 1940 | // Initialize the card queue set used to hold cards containing |
johnc@2060 | 1941 | // references into the collection set. |
johnc@2060 | 1942 | _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, |
johnc@2060 | 1943 | DirtyCardQ_FL_lock, |
johnc@2060 | 1944 | -1, // never trigger processing |
johnc@2060 | 1945 | -1, // no limit on length |
johnc@2060 | 1946 | Shared_DirtyCardQ_lock, |
johnc@2060 | 1947 | &JavaThread::dirty_card_queue_set()); |
johnc@2060 | 1948 | |
ysr@777 | 1949 | // In case we're keeping closure specialization stats, initialize those |
ysr@777 | 1950 | // counts and that mechanism. |
ysr@777 | 1951 | SpecializationStats::clear(); |
ysr@777 | 1952 | |
ysr@777 | 1953 | _gc_alloc_region_list = NULL; |
ysr@777 | 1954 | |
ysr@777 | 1955 | // Do later initialization work for concurrent refinement. |
ysr@777 | 1956 | _cg1r->init(); |
ysr@777 | 1957 | |
tonyp@2715 | 1958 | // Here we allocate the dummy full region that is required by the |
tonyp@2715 | 1959 | // G1AllocRegion class. If we don't pass an address in the reserved |
tonyp@2715 | 1960 | // space here, lots of asserts fire. |
tonyp@2715 | 1961 | MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); |
tonyp@2715 | 1962 | HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); |
tonyp@2715 | 1963 | // We'll re-use the same region whether the alloc region will |
tonyp@2715 | 1964 | // require BOT updates or not and, if it doesn't, then a non-young |
tonyp@2715 | 1965 | // region will complain that it cannot support allocations without |
tonyp@2715 | 1966 | // BOT updates. So we'll tag the dummy region as young to avoid that. |
tonyp@2715 | 1967 | dummy_region->set_young(); |
tonyp@2715 | 1968 | // Make sure it's full. |
tonyp@2715 | 1969 | dummy_region->set_top(dummy_region->end()); |
tonyp@2715 | 1970 | G1AllocRegion::setup(this, dummy_region); |
tonyp@2715 | 1971 | |
tonyp@2715 | 1972 | init_mutator_alloc_region(); |
tonyp@2715 | 1973 | |
ysr@777 | 1974 | return JNI_OK; |
ysr@777 | 1975 | } |
ysr@777 | 1976 | |
ysr@777 | 1977 | void G1CollectedHeap::ref_processing_init() { |
johnc@2316 | 1978 | // Reference processing in G1 currently works as follows: |
johnc@2316 | 1979 | // |
johnc@2316 | 1980 | // * There is only one reference processor instance that |
johnc@2316 | 1981 | // 'spans' the entire heap. It is created by the code |
johnc@2316 | 1982 | // below. |
johnc@2316 | 1983 | // * Reference discovery is not enabled during an incremental |
johnc@2316 | 1984 | // pause (see 6484982). |
johnc@2316 | 1985 | // * Discovered refs are not enqueued nor are they processed
johnc@2316 | 1986 | // during an incremental pause (see 6484982). |
johnc@2316 | 1987 | // * Reference discovery is enabled at initial marking. |
johnc@2316 | 1988 | // * Reference discovery is disabled and the discovered |
johnc@2316 | 1989 | // references processed etc during remarking. |
johnc@2316 | 1990 | // * Reference discovery is MT (see below). |
johnc@2316 | 1991 | // * Reference discovery requires a barrier (see below). |
johnc@2316 | 1992 | // * Reference processing is currently not MT (see 6608385). |
johnc@2316 | 1993 | // * A full GC enables (non-MT) reference discovery and |
johnc@2316 | 1994 | // processes any discovered references. |
johnc@2316 | 1995 | |
ysr@777 | 1996 | SharedHeap::ref_processing_init(); |
ysr@777 | 1997 | MemRegion mr = reserved_region(); |
ysr@2651 | 1998 | _ref_processor = |
ysr@2651 | 1999 | new ReferenceProcessor(mr, // span |
ysr@2651 | 2000 | ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing |
ysr@2651 | 2001 | (int) ParallelGCThreads, // degree of mt processing |
ysr@2651 | 2002 | ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery |
ysr@2651 | 2003 | (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery |
ysr@2651 | 2004 | false, // Reference discovery is not atomic |
ysr@2651 | 2005 | &_is_alive_closure, // is alive closure for efficiency |
ysr@2651 | 2006 | true); // Setting next fields of discovered |
ysr@2651 | 2007 | // lists requires a barrier. |
ysr@777 | 2008 | } |
ysr@777 | 2009 | |
ysr@777 | 2010 | size_t G1CollectedHeap::capacity() const { |
ysr@777 | 2011 | return _g1_committed.byte_size(); |
ysr@777 | 2012 | } |
ysr@777 | 2013 | |
johnc@2060 | 2014 | void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, |
johnc@2060 | 2015 | DirtyCardQueue* into_cset_dcq, |
johnc@2060 | 2016 | bool concurrent, |
ysr@777 | 2017 | int worker_i) { |
johnc@1324 | 2018 | // Clean cards in the hot card cache |
johnc@2060 | 2019 | concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); |
johnc@1324 | 2020 | |
ysr@777 | 2021 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 2022 | int n_completed_buffers = 0; |
johnc@2060 | 2023 | while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { |
ysr@777 | 2024 | n_completed_buffers++; |
ysr@777 | 2025 | } |
ysr@777 | 2026 | g1_policy()->record_update_rs_processed_buffers(worker_i, |
ysr@777 | 2027 | (double) n_completed_buffers); |
ysr@777 | 2028 | dcqs.clear_n_completed_buffers(); |
ysr@777 | 2029 | assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); |
ysr@777 | 2030 | } |
ysr@777 | 2031 | |
ysr@777 | 2032 | |
ysr@777 | 2033 | // Computes the sum of the storage used by the various regions. |
ysr@777 | 2034 | |
ysr@777 | 2035 | size_t G1CollectedHeap::used() const { |
ysr@1297 | 2036 | assert(Heap_lock->owner() != NULL, |
ysr@1297 | 2037 | "Should be owned on this thread's behalf."); |
ysr@777 | 2038 | size_t result = _summary_bytes_used; |
ysr@1280 | 2039 | // Read only once in case it is set to NULL concurrently |
tonyp@2715 | 2040 | HeapRegion* hr = _mutator_alloc_region.get(); |
ysr@1280 | 2041 | if (hr != NULL) |
ysr@1280 | 2042 | result += hr->used(); |
ysr@777 | 2043 | return result; |
ysr@777 | 2044 | } |
ysr@777 | 2045 | |
tonyp@1281 | 2046 | size_t G1CollectedHeap::used_unlocked() const { |
tonyp@1281 | 2047 | size_t result = _summary_bytes_used; |
tonyp@1281 | 2048 | return result; |
tonyp@1281 | 2049 | } |
tonyp@1281 | 2050 | |
ysr@777 | 2051 | class SumUsedClosure: public HeapRegionClosure { |
ysr@777 | 2052 | size_t _used; |
ysr@777 | 2053 | public: |
ysr@777 | 2054 | SumUsedClosure() : _used(0) {} |
ysr@777 | 2055 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2056 | if (!r->continuesHumongous()) { |
ysr@777 | 2057 | _used += r->used(); |
ysr@777 | 2058 | } |
ysr@777 | 2059 | return false; |
ysr@777 | 2060 | } |
ysr@777 | 2061 | size_t result() { return _used; } |
ysr@777 | 2062 | }; |
ysr@777 | 2063 | |
ysr@777 | 2064 | size_t G1CollectedHeap::recalculate_used() const { |
ysr@777 | 2065 | SumUsedClosure blk; |
ysr@777 | 2066 | _hrs->iterate(&blk); |
ysr@777 | 2067 | return blk.result(); |
ysr@777 | 2068 | } |
ysr@777 | 2069 | |
ysr@777 | 2070 | #ifndef PRODUCT |
ysr@777 | 2071 | class SumUsedRegionsClosure: public HeapRegionClosure { |
ysr@777 | 2072 | size_t _num; |
ysr@777 | 2073 | public: |
apetrusenko@1112 | 2074 | SumUsedRegionsClosure() : _num(0) {} |
ysr@777 | 2075 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2076 | if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { |
ysr@777 | 2077 | _num += 1; |
ysr@777 | 2078 | } |
ysr@777 | 2079 | return false; |
ysr@777 | 2080 | } |
ysr@777 | 2081 | size_t result() { return _num; } |
ysr@777 | 2082 | }; |
ysr@777 | 2083 | |
ysr@777 | 2084 | size_t G1CollectedHeap::recalculate_used_regions() const { |
ysr@777 | 2085 | SumUsedRegionsClosure blk; |
ysr@777 | 2086 | _hrs->iterate(&blk); |
ysr@777 | 2087 | return blk.result(); |
ysr@777 | 2088 | } |
ysr@777 | 2089 | #endif // PRODUCT |
ysr@777 | 2090 | |
ysr@777 | 2091 | size_t G1CollectedHeap::unsafe_max_alloc() { |
tonyp@2472 | 2092 | if (free_regions() > 0) return HeapRegion::GrainBytes; |
ysr@777 | 2093 | // otherwise, is there space in the current allocation region? |
ysr@777 | 2094 | |
ysr@777 | 2095 | // We need to store the current allocation region in a local variable |
ysr@777 | 2096 | // here. The problem is that this method doesn't take any locks and |
ysr@777 | 2097 | // there may be other threads which overwrite the current allocation |
ysr@777 | 2098 | // region field. attempt_allocation(), for example, sets it to NULL |
ysr@777 | 2099 | // and this can happen *after* the NULL check here but before the call |
ysr@777 | 2100 | // to free(), resulting in a SIGSEGV. Note that this doesn't appear |
ysr@777 | 2101 | // to be a problem in the optimized build, since the two loads of the |
ysr@777 | 2102 | // current allocation region field are optimized away. |
tonyp@2715 | 2103 | HeapRegion* hr = _mutator_alloc_region.get(); |
tonyp@2715 | 2104 | if (hr == NULL) { |
ysr@777 | 2105 | return 0; |
ysr@777 | 2106 | } |
tonyp@2715 | 2107 | return hr->free(); |
ysr@777 | 2108 | } |
ysr@777 | 2109 | |
tonyp@2011 | 2110 | bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { |
tonyp@2011 | 2111 | return |
tonyp@2011 | 2112 | ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || |
tonyp@2011 | 2113 | (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); |
tonyp@2011 | 2114 | } |
tonyp@2011 | 2115 | |
tonyp@2372 | 2116 | void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { |
tonyp@2011 | 2117 | MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2011 | 2118 | |
tonyp@2372 | 2119 | // We assume that if concurrent == true, then the caller is a |
tonyp@2372 | 2120 | // concurrent thread that has joined the Suspendible Thread
tonyp@2372 | 2121 | // Set. If there's ever a cheap way to check this, we should add an |
tonyp@2372 | 2122 | // assert here. |
tonyp@2372 | 2123 | |
tonyp@2011 | 2124 | // We have already incremented _total_full_collections at the start |
tonyp@2011 | 2125 | // of the GC, so total_full_collections() represents how many full |
tonyp@2011 | 2126 | // collections have been started. |
tonyp@2011 | 2127 | unsigned int full_collections_started = total_full_collections(); |
tonyp@2011 | 2128 | |
tonyp@2011 | 2129 | // Given that this method is called at the end of a Full GC or of a |
tonyp@2011 | 2130 | // concurrent cycle, and those can be nested (i.e., a Full GC can |
tonyp@2011 | 2131 | // interrupt a concurrent cycle), the number of full collections |
tonyp@2011 | 2132 | // completed should be either one (in the case where there was no |
tonyp@2011 | 2133 | // nesting) or two (when a Full GC interrupted a concurrent cycle) |
tonyp@2011 | 2134 | // behind the number of full collections started. |
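  // For example, starting from completed == C: a concurrent cycle
  // begins (started == C + 1) and a Full GC interrupts it
  // (started == C + 2). When that Full GC finishes first, as the
  // inner caller, completed is still C, so started == completed + 2.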
tonyp@2011 | 2135 | |
tonyp@2011 | 2136 | // This is the case for the inner caller, i.e. a Full GC. |
tonyp@2372 | 2137 | assert(concurrent || |
tonyp@2011 | 2138 | (full_collections_started == _full_collections_completed + 1) || |
tonyp@2011 | 2139 | (full_collections_started == _full_collections_completed + 2), |
tonyp@2372 | 2140 | err_msg("for inner caller (Full GC): full_collections_started = %u " |
tonyp@2011 | 2141 | "is inconsistent with _full_collections_completed = %u", |
tonyp@2011 | 2142 | full_collections_started, _full_collections_completed)); |
tonyp@2011 | 2143 | |
tonyp@2011 | 2144 | // This is the case for the outer caller, i.e. the concurrent cycle. |
tonyp@2372 | 2145 | assert(!concurrent || |
tonyp@2011 | 2146 | (full_collections_started == _full_collections_completed + 1), |
tonyp@2372 | 2147 | err_msg("for outer caller (concurrent cycle): " |
tonyp@2372 | 2148 | "full_collections_started = %u " |
tonyp@2011 | 2149 | "is inconsistent with _full_collections_completed = %u", |
tonyp@2011 | 2150 | full_collections_started, _full_collections_completed)); |
tonyp@2011 | 2151 | |
tonyp@2011 | 2152 | _full_collections_completed += 1; |
tonyp@2011 | 2153 | |
johnc@2195 | 2154 | // We need to clear the "in_progress" flag in the CM thread before |
johnc@2195 | 2155 | // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
johnc@2195 | 2156 | // is set) so that if a waiter requests another System.gc() it doesn't
johnc@2195 | 2157 | // incorrectly see that a marking cycle is still in progress.
tonyp@2372 | 2158 | if (concurrent) { |
johnc@2195 | 2159 | _cmThread->clear_in_progress(); |
johnc@2195 | 2160 | } |
johnc@2195 | 2161 | |
tonyp@2011 | 2162 | // This notify_all() will ensure that a thread that called |
tonyp@2011 | 2163 | // System.gc() (with ExplicitGCInvokesConcurrent set or not) and
tonyp@2011 | 2164 | // is waiting for a full GC to finish will be woken up. It is
tonyp@2011 | 2165 | // waiting in VM_G1IncCollectionPause::doit_epilogue(). |
tonyp@2011 | 2166 | FullGCCount_lock->notify_all(); |
tonyp@2011 | 2167 | } |
tonyp@2011 | 2168 | |
ysr@777 | 2169 | void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
tonyp@2472 | 2170 | assert_at_safepoint(true /* should_be_vm_thread */); |
ysr@777 | 2171 | GCCauseSetter gcs(this, cause); |
ysr@777 | 2172 | switch (cause) { |
ysr@777 | 2173 | case GCCause::_heap_inspection: |
ysr@777 | 2174 | case GCCause::_heap_dump: { |
ysr@777 | 2175 | HandleMark hm; |
ysr@777 | 2176 | do_full_collection(false); // don't clear all soft refs |
ysr@777 | 2177 | break; |
ysr@777 | 2178 | } |
ysr@777 | 2179 | default: // XXX FIX ME |
ysr@777 | 2180 | ShouldNotReachHere(); // Unexpected use of this function |
ysr@777 | 2181 | } |
ysr@777 | 2182 | } |
ysr@777 | 2183 | |
ysr@1523 | 2184 | void G1CollectedHeap::collect(GCCause::Cause cause) { |
ysr@1523 | 2185 | // The caller doesn't have the Heap_lock |
ysr@1523 | 2186 | assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
ysr@1523 | 2187 | |
tonyp@2011 | 2188 | unsigned int gc_count_before; |
tonyp@2011 | 2189 | unsigned int full_gc_count_before; |
ysr@777 | 2190 | { |
ysr@1523 | 2191 | MutexLocker ml(Heap_lock); |
tonyp@2315 | 2192 | |
ysr@1523 | 2193 | // Read the GC count while holding the Heap_lock |
ysr@1523 | 2194 | gc_count_before = SharedHeap::heap()->total_collections(); |
tonyp@2011 | 2195 | full_gc_count_before = SharedHeap::heap()->total_full_collections(); |
tonyp@2011 | 2196 | } |
tonyp@2011 | 2197 | |
tonyp@2011 | 2198 | if (should_do_concurrent_full_gc(cause)) { |
tonyp@2011 | 2199 | // Schedule an initial-mark evacuation pause that will start a |
tonyp@2315 | 2200 | // concurrent cycle. We're setting word_size to 0 which means that |
tonyp@2315 | 2201 | // we are not requesting a post-GC allocation. |
tonyp@2011 | 2202 | VM_G1IncCollectionPause op(gc_count_before, |
tonyp@2315 | 2203 | 0, /* word_size */ |
tonyp@2315 | 2204 | true, /* should_initiate_conc_mark */ |
tonyp@2011 | 2205 | g1_policy()->max_pause_time_ms(), |
tonyp@2011 | 2206 | cause); |
tonyp@2011 | 2207 | VMThread::execute(&op); |
tonyp@2011 | 2208 | } else { |
tonyp@2011 | 2209 | if (cause == GCCause::_gc_locker |
tonyp@2011 | 2210 | DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { |
tonyp@2011 | 2211 | |
tonyp@2315 | 2212 | // Schedule a standard evacuation pause. We're setting word_size |
tonyp@2315 | 2213 | // to 0 which means that we are not requesting a post-GC allocation. |
tonyp@2011 | 2214 | VM_G1IncCollectionPause op(gc_count_before, |
tonyp@2315 | 2215 | 0, /* word_size */ |
tonyp@2011 | 2216 | false, /* should_initiate_conc_mark */ |
tonyp@2011 | 2217 | g1_policy()->max_pause_time_ms(), |
tonyp@2011 | 2218 | cause); |
ysr@1523 | 2219 | VMThread::execute(&op); |
tonyp@2011 | 2220 | } else { |
tonyp@2011 | 2221 | // Schedule a Full GC. |
tonyp@2011 | 2222 | VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); |
ysr@1523 | 2223 | VMThread::execute(&op); |
ysr@1523 | 2224 | } |
ysr@777 | 2225 | } |
ysr@777 | 2226 | } |
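           |      | // Illustrative path through the code above: a System.gc() call
           |      | // with -XX:+ExplicitGCInvokesConcurrent arrives here with
           |      | // cause == _java_lang_system_gc, so should_do_concurrent_full_gc()
           |      | // returns true and we schedule an initial-mark pause that starts a
           |      | // concurrent cycle rather than a stop-the-world Full GC.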
ysr@777 | 2227 | |
ysr@777 | 2228 | bool G1CollectedHeap::is_in(const void* p) const { |
ysr@777 | 2229 | if (_g1_committed.contains(p)) { |
ysr@777 | 2230 | HeapRegion* hr = _hrs->addr_to_region(p); |
ysr@777 | 2231 | return hr->is_in(p); |
ysr@777 | 2232 | } else { |
ysr@777 | 2233 | return _perm_gen->as_gen()->is_in(p); |
ysr@777 | 2234 | } |
ysr@777 | 2235 | } |
ysr@777 | 2236 | |
ysr@777 | 2237 | // Iteration functions. |
ysr@777 | 2238 | |
ysr@777 | 2239 | // Iterates an OopClosure over all ref-containing fields of objects |
ysr@777 | 2240 | // within a HeapRegion. |
ysr@777 | 2241 | |
ysr@777 | 2242 | class IterateOopClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 2243 | MemRegion _mr; |
ysr@777 | 2244 | OopClosure* _cl; |
ysr@777 | 2245 | public: |
ysr@777 | 2246 | IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) |
ysr@777 | 2247 | : _mr(mr), _cl(cl) {} |
ysr@777 | 2248 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2249 | if (! r->continuesHumongous()) { |
ysr@777 | 2250 | r->oop_iterate(_cl); |
ysr@777 | 2251 | } |
ysr@777 | 2252 | return false; |
ysr@777 | 2253 | } |
ysr@777 | 2254 | }; |
ysr@777 | 2255 | |
iveresov@1113 | 2256 | void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
ysr@777 | 2257 | IterateOopClosureRegionClosure blk(_g1_committed, cl); |
ysr@777 | 2258 | _hrs->iterate(&blk); |
iveresov@1113 | 2259 | if (do_perm) { |
iveresov@1113 | 2260 | perm_gen()->oop_iterate(cl); |
iveresov@1113 | 2261 | } |
ysr@777 | 2262 | } |
ysr@777 | 2263 | |
iveresov@1113 | 2264 | void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
ysr@777 | 2265 | IterateOopClosureRegionClosure blk(mr, cl); |
ysr@777 | 2266 | _hrs->iterate(&blk); |
iveresov@1113 | 2267 | if (do_perm) { |
iveresov@1113 | 2268 | perm_gen()->oop_iterate(cl); |
iveresov@1113 | 2269 | } |
ysr@777 | 2270 | } |
ysr@777 | 2271 | |
ysr@777 | 2272 | // Iterates an ObjectClosure over all objects within a HeapRegion. |
ysr@777 | 2273 | |
ysr@777 | 2274 | class IterateObjectClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 2275 | ObjectClosure* _cl; |
ysr@777 | 2276 | public: |
ysr@777 | 2277 | IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} |
ysr@777 | 2278 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2279 | if (! r->continuesHumongous()) { |
ysr@777 | 2280 | r->object_iterate(_cl); |
ysr@777 | 2281 | } |
ysr@777 | 2282 | return false; |
ysr@777 | 2283 | } |
ysr@777 | 2284 | }; |
ysr@777 | 2285 | |
iveresov@1113 | 2286 | void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
ysr@777 | 2287 | IterateObjectClosureRegionClosure blk(cl); |
ysr@777 | 2288 | _hrs->iterate(&blk); |
iveresov@1113 | 2289 | if (do_perm) { |
iveresov@1113 | 2290 | perm_gen()->object_iterate(cl); |
iveresov@1113 | 2291 | } |
ysr@777 | 2292 | } |
ysr@777 | 2293 | |
ysr@777 | 2294 | void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { |
ysr@777 | 2295 | // FIXME: is this right? |
ysr@777 | 2296 | guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); |
ysr@777 | 2297 | } |
ysr@777 | 2298 | |
ysr@777 | 2299 | // Calls a SpaceClosure on a HeapRegion. |
ysr@777 | 2300 | |
ysr@777 | 2301 | class SpaceClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 2302 | SpaceClosure* _cl; |
ysr@777 | 2303 | public: |
ysr@777 | 2304 | SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} |
ysr@777 | 2305 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2306 | _cl->do_space(r); |
ysr@777 | 2307 | return false; |
ysr@777 | 2308 | } |
ysr@777 | 2309 | }; |
ysr@777 | 2310 | |
ysr@777 | 2311 | void G1CollectedHeap::space_iterate(SpaceClosure* cl) { |
ysr@777 | 2312 | SpaceClosureRegionClosure blk(cl); |
ysr@777 | 2313 | _hrs->iterate(&blk); |
ysr@777 | 2314 | } |
ysr@777 | 2315 | |
ysr@777 | 2316 | void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { |
ysr@777 | 2317 | _hrs->iterate(cl); |
ysr@777 | 2318 | } |
ysr@777 | 2319 | |
ysr@777 | 2320 | void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, |
ysr@777 | 2321 | HeapRegionClosure* cl) { |
ysr@777 | 2322 | _hrs->iterate_from(r, cl); |
ysr@777 | 2323 | } |
ysr@777 | 2324 | |
ysr@777 | 2325 | void |
ysr@777 | 2326 | G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { |
ysr@777 | 2327 | _hrs->iterate_from(idx, cl); |
ysr@777 | 2328 | } |
ysr@777 | 2329 | |
ysr@777 | 2330 | HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } |
ysr@777 | 2331 | |
ysr@777 | 2332 | void |
ysr@777 | 2333 | G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
ysr@777 | 2334 | int worker, |
ysr@777 | 2335 | jint claim_value) { |
tonyp@790 | 2336 | const size_t regions = n_regions(); |
jmasa@2188 | 2337 | const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1); |
tonyp@790 | 2338 | // try to spread out the starting points of the workers |
tonyp@790 | 2339 | const size_t start_index = regions / worker_num * (size_t) worker; |
tonyp@790 | 2340 | |
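           |      | // For instance (illustrative numbers): with 1000 regions and 4
           |      | // workers, workers 0..3 start at indices 0, 250, 500 and 750;
           |      | // the modulo in the loop below wraps each worker around so it
           |      | // still visits all 1000 regions.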
tonyp@790 | 2341 | // each worker will actually look at all regions |
tonyp@790 | 2342 | for (size_t count = 0; count < regions; ++count) { |
tonyp@790 | 2343 | const size_t index = (start_index + count) % regions; |
tonyp@790 | 2344 | assert(0 <= index && index < regions, "sanity"); |
tonyp@790 | 2345 | HeapRegion* r = region_at(index); |
tonyp@790 | 2346 | // we'll ignore "continues humongous" regions (we'll process them |
tonyp@790 | 2347 | // when we come across their corresponding "starts humongous"
tonyp@790 | 2348 | // region) and regions already claimed |
tonyp@790 | 2349 | if (r->claim_value() == claim_value || r->continuesHumongous()) { |
tonyp@790 | 2350 | continue; |
tonyp@790 | 2351 | } |
tonyp@790 | 2352 | // OK, try to claim it |
ysr@777 | 2353 | if (r->claimHeapRegion(claim_value)) { |
tonyp@790 | 2354 | // success! |
tonyp@790 | 2355 | assert(!r->continuesHumongous(), "sanity"); |
tonyp@790 | 2356 | if (r->startsHumongous()) { |
tonyp@790 | 2357 | // If the region is "starts humongous" we'll iterate over its |
tonyp@790 | 2358 | // "continues humongous" regions first; in fact we'll do them
tonyp@790 | 2359 | // first. The order is important: in one case, calling the
tonyp@790 | 2360 | // closure on the "starts humongous" region might de-allocate |
tonyp@790 | 2361 | // and clear all its "continues humongous" regions and, as a |
tonyp@790 | 2362 | // result, we might end up processing them twice. So, we'll do |
tonyp@790 | 2363 | // them first (notice: most closures will ignore them anyway) and |
tonyp@790 | 2364 | // then we'll do the "starts humongous" region. |
tonyp@790 | 2365 | for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { |
tonyp@790 | 2366 | HeapRegion* chr = region_at(ch_index); |
tonyp@790 | 2367 | |
tonyp@790 | 2368 | // if the region has already been claimed or it's not |
tonyp@790 | 2369 | // "continues humongous" we're done |
tonyp@790 | 2370 | if (chr->claim_value() == claim_value || |
tonyp@790 | 2371 | !chr->continuesHumongous()) { |
tonyp@790 | 2372 | break; |
tonyp@790 | 2373 | } |
tonyp@790 | 2374 | |
tonyp@790 | 2375 | // No one should have claimed it directly; we can assert this
tonyp@790 | 2376 | // given that we claimed its "starts humongous" region.
tonyp@790 | 2377 | assert(chr->claim_value() != claim_value, "sanity"); |
tonyp@790 | 2378 | assert(chr->humongous_start_region() == r, "sanity"); |
tonyp@790 | 2379 | |
tonyp@790 | 2380 | if (chr->claimHeapRegion(claim_value)) { |
tonyp@790 | 2381 | // we should always be able to claim it; no one else should
tonyp@790 | 2382 | // be trying to claim this region |
tonyp@790 | 2383 | |
tonyp@790 | 2384 | bool res2 = cl->doHeapRegion(chr); |
tonyp@790 | 2385 | assert(!res2, "Should not abort"); |
tonyp@790 | 2386 | |
tonyp@790 | 2387 | // Right now, this holds (i.e., no closure that actually |
tonyp@790 | 2388 | // does something with "continues humongous" regions |
tonyp@790 | 2389 | // clears them). We might have to weaken it in the future, |
tonyp@790 | 2390 | // but let's leave these two asserts here for extra safety. |
tonyp@790 | 2391 | assert(chr->continuesHumongous(), "should still be the case"); |
tonyp@790 | 2392 | assert(chr->humongous_start_region() == r, "sanity"); |
tonyp@790 | 2393 | } else { |
tonyp@790 | 2394 | guarantee(false, "we should not reach here"); |
tonyp@790 | 2395 | } |
tonyp@790 | 2396 | } |
tonyp@790 | 2397 | } |
tonyp@790 | 2398 | |
tonyp@790 | 2399 | assert(!r->continuesHumongous(), "sanity"); |
tonyp@790 | 2400 | bool res = cl->doHeapRegion(r); |
tonyp@790 | 2401 | assert(!res, "Should not abort"); |
tonyp@790 | 2402 | } |
tonyp@790 | 2403 | } |
tonyp@790 | 2404 | } |
tonyp@790 | 2405 | |
tonyp@825 | 2406 | class ResetClaimValuesClosure: public HeapRegionClosure { |
tonyp@825 | 2407 | public: |
tonyp@825 | 2408 | bool doHeapRegion(HeapRegion* r) { |
tonyp@825 | 2409 | r->set_claim_value(HeapRegion::InitialClaimValue); |
tonyp@825 | 2410 | return false; |
tonyp@825 | 2411 | } |
tonyp@825 | 2412 | }; |
tonyp@825 | 2413 | |
tonyp@825 | 2414 | void |
tonyp@825 | 2415 | G1CollectedHeap::reset_heap_region_claim_values() { |
tonyp@825 | 2416 | ResetClaimValuesClosure blk; |
tonyp@825 | 2417 | heap_region_iterate(&blk); |
tonyp@825 | 2418 | } |
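           |      | // Typical sequence (illustrative): a parallel phase claims regions
           |      | // with a fresh claim value via heap_region_par_iterate_chunked(),
           |      | // verifies the outcome with check_heap_region_claim_values() in
           |      | // debug builds, and then calls reset_heap_region_claim_values() so
           |      | // the next phase starts from HeapRegion::InitialClaimValue again.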
tonyp@825 | 2419 | |
tonyp@790 | 2420 | #ifdef ASSERT |
tonyp@790 | 2421 | // This checks whether all regions in the heap have the correct claim |
tonyp@790 | 2422 | // value. I also piggy-backed on this a check to ensure that the |
tonyp@790 | 2423 | // humongous_start_region() information on "continues humongous" |
tonyp@790 | 2424 | // regions is correct. |
tonyp@790 | 2425 | |
tonyp@790 | 2426 | class CheckClaimValuesClosure : public HeapRegionClosure { |
tonyp@790 | 2427 | private: |
tonyp@790 | 2428 | jint _claim_value; |
tonyp@790 | 2429 | size_t _failures; |
tonyp@790 | 2430 | HeapRegion* _sh_region; |
tonyp@790 | 2431 | public: |
tonyp@790 | 2432 | CheckClaimValuesClosure(jint claim_value) : |
tonyp@790 | 2433 | _claim_value(claim_value), _failures(0), _sh_region(NULL) { } |
tonyp@790 | 2434 | bool doHeapRegion(HeapRegion* r) { |
tonyp@790 | 2435 | if (r->claim_value() != _claim_value) { |
tonyp@790 | 2436 | gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " |
tonyp@790 | 2437 | "claim value = %d, should be %d", |
tonyp@790 | 2438 | r->bottom(), r->end(), r->claim_value(), |
tonyp@790 | 2439 | _claim_value); |
tonyp@790 | 2440 | ++_failures; |
tonyp@790 | 2441 | } |
tonyp@790 | 2442 | if (!r->isHumongous()) { |
tonyp@790 | 2443 | _sh_region = NULL; |
tonyp@790 | 2444 | } else if (r->startsHumongous()) { |
tonyp@790 | 2445 | _sh_region = r; |
tonyp@790 | 2446 | } else if (r->continuesHumongous()) { |
tonyp@790 | 2447 | if (r->humongous_start_region() != _sh_region) { |
tonyp@790 | 2448 | gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " |
tonyp@790 | 2449 | "HS = "PTR_FORMAT", should be "PTR_FORMAT, |
tonyp@790 | 2450 | r->bottom(), r->end(), |
tonyp@790 | 2451 | r->humongous_start_region(), |
tonyp@790 | 2452 | _sh_region); |
tonyp@790 | 2453 | ++_failures; |
ysr@777 | 2454 | } |
ysr@777 | 2455 | } |
tonyp@790 | 2456 | return false; |
tonyp@790 | 2457 | } |
tonyp@790 | 2458 | size_t failures() { |
tonyp@790 | 2459 | return _failures; |
tonyp@790 | 2460 | } |
tonyp@790 | 2461 | }; |
tonyp@790 | 2462 | |
tonyp@790 | 2463 | bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { |
tonyp@790 | 2464 | CheckClaimValuesClosure cl(claim_value); |
tonyp@790 | 2465 | heap_region_iterate(&cl); |
tonyp@790 | 2466 | return cl.failures() == 0; |
tonyp@790 | 2467 | } |
tonyp@790 | 2468 | #endif // ASSERT |
ysr@777 | 2469 | |
ysr@777 | 2470 | void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { |
ysr@777 | 2471 | HeapRegion* r = g1_policy()->collection_set(); |
ysr@777 | 2472 | while (r != NULL) { |
ysr@777 | 2473 | HeapRegion* next = r->next_in_collection_set(); |
ysr@777 | 2474 | if (cl->doHeapRegion(r)) { |
ysr@777 | 2475 | cl->incomplete(); |
ysr@777 | 2476 | return; |
ysr@777 | 2477 | } |
ysr@777 | 2478 | r = next; |
ysr@777 | 2479 | } |
ysr@777 | 2480 | } |
ysr@777 | 2481 | |
ysr@777 | 2482 | void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, |
ysr@777 | 2483 | HeapRegionClosure *cl) { |
tonyp@2011 | 2484 | if (r == NULL) { |
tonyp@2011 | 2485 | // The CSet is empty so there's nothing to do. |
tonyp@2011 | 2486 | return; |
tonyp@2011 | 2487 | } |
tonyp@2011 | 2488 | |
ysr@777 | 2489 | assert(r->in_collection_set(), |
ysr@777 | 2490 | "Start region must be a member of the collection set."); |
ysr@777 | 2491 | HeapRegion* cur = r; |
ysr@777 | 2492 | while (cur != NULL) { |
ysr@777 | 2493 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 2494 | if (cl->doHeapRegion(cur) && false) { |
ysr@777 | 2495 | cl->incomplete(); |
ysr@777 | 2496 | return; |
ysr@777 | 2497 | } |
ysr@777 | 2498 | cur = next; |
ysr@777 | 2499 | } |
ysr@777 | 2500 | cur = g1_policy()->collection_set(); |
ysr@777 | 2501 | while (cur != r) { |
ysr@777 | 2502 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 2503 | if (cl->doHeapRegion(cur) && false) { |
ysr@777 | 2504 | cl->incomplete(); |
ysr@777 | 2505 | return; |
ysr@777 | 2506 | } |
ysr@777 | 2507 | cur = next; |
ysr@777 | 2508 | } |
ysr@777 | 2509 | } |
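           |      | // Illustrative walk of the two loops above: for a collection set
           |      | // A -> B -> C -> D and r == C, we visit C, D in the first loop and
           |      | // then wrap around to visit A, B in the second, so every CSet
           |      | // region is visited exactly once regardless of the start region.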
ysr@777 | 2510 | |
ysr@777 | 2511 | CompactibleSpace* G1CollectedHeap::first_compactible_space() { |
ysr@777 | 2512 | return _hrs->length() > 0 ? _hrs->at(0) : NULL; |
ysr@777 | 2513 | } |
ysr@777 | 2514 | |
ysr@777 | 2515 | |
ysr@777 | 2516 | Space* G1CollectedHeap::space_containing(const void* addr) const { |
ysr@777 | 2517 | Space* res = heap_region_containing(addr); |
ysr@777 | 2518 | if (res == NULL) |
ysr@777 | 2519 | res = perm_gen()->space_containing(addr); |
ysr@777 | 2520 | return res; |
ysr@777 | 2521 | } |
ysr@777 | 2522 | |
ysr@777 | 2523 | HeapWord* G1CollectedHeap::block_start(const void* addr) const { |
ysr@777 | 2524 | Space* sp = space_containing(addr); |
ysr@777 | 2525 | if (sp != NULL) { |
ysr@777 | 2526 | return sp->block_start(addr); |
ysr@777 | 2527 | } |
ysr@777 | 2528 | return NULL; |
ysr@777 | 2529 | } |
ysr@777 | 2530 | |
ysr@777 | 2531 | size_t G1CollectedHeap::block_size(const HeapWord* addr) const { |
ysr@777 | 2532 | Space* sp = space_containing(addr); |
ysr@777 | 2533 | assert(sp != NULL, "block_size of address outside of heap"); |
ysr@777 | 2534 | return sp->block_size(addr); |
ysr@777 | 2535 | } |
ysr@777 | 2536 | |
ysr@777 | 2537 | bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { |
ysr@777 | 2538 | Space* sp = space_containing(addr); |
ysr@777 | 2539 | return sp->block_is_obj(addr); |
ysr@777 | 2540 | } |
ysr@777 | 2541 | |
ysr@777 | 2542 | bool G1CollectedHeap::supports_tlab_allocation() const { |
ysr@777 | 2543 | return true; |
ysr@777 | 2544 | } |
ysr@777 | 2545 | |
ysr@777 | 2546 | size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { |
ysr@777 | 2547 | return HeapRegion::GrainBytes; |
ysr@777 | 2548 | } |
ysr@777 | 2549 | |
ysr@777 | 2550 | size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { |
ysr@777 | 2551 | // Return the remaining space in the current alloc region, but not
ysr@777 | 2552 | // less than the min TLAB size.
johnc@1748 | 2553 | |
johnc@1748 | 2554 | // Also, this value can be at most the humongous object threshold, |
johnc@1748 | 2555 | // since we can't allow tlabs to grow big enough to accommodate
johnc@1748 | 2556 | // humongous objects. |
johnc@1748 | 2557 | |
tonyp@2715 | 2558 | HeapRegion* hr = _mutator_alloc_region.get(); |
johnc@1748 | 2559 | size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; |
tonyp@2715 | 2560 | if (hr == NULL) { |
johnc@1748 | 2561 | return max_tlab_size; |
ysr@777 | 2562 | } else { |
tonyp@2715 | 2563 | return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); |
ysr@777 | 2564 | } |
ysr@777 | 2565 | } |
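           |      | // Worked example (illustrative numbers, assuming the humongous
           |      | // threshold is half a region): with 1M regions, max_tlab_size is
           |      | // 512K. A current alloc region with 300K free yields 300K; one
           |      | // that is nearly full yields MinTLABSize; no alloc region at all
           |      | // yields the 512K cap.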
ysr@777 | 2566 | |
ysr@777 | 2567 | size_t G1CollectedHeap::large_typearray_limit() { |
ysr@777 | 2568 | // FIXME |
ysr@777 | 2569 | return HeapRegion::GrainBytes/HeapWordSize; |
ysr@777 | 2570 | } |
ysr@777 | 2571 | |
ysr@777 | 2572 | size_t G1CollectedHeap::max_capacity() const { |
johnc@2504 | 2573 | return _g1_reserved.byte_size(); |
ysr@777 | 2574 | } |
ysr@777 | 2575 | |
ysr@777 | 2576 | jlong G1CollectedHeap::millis_since_last_gc() { |
ysr@777 | 2577 | // assert(false, "NYI"); |
ysr@777 | 2578 | return 0; |
ysr@777 | 2579 | } |
ysr@777 | 2580 | |
ysr@777 | 2581 | void G1CollectedHeap::prepare_for_verify() { |
ysr@777 | 2582 | if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
ysr@777 | 2583 | ensure_parsability(false); |
ysr@777 | 2584 | } |
ysr@777 | 2585 | g1_rem_set()->prepare_for_verify(); |
ysr@777 | 2586 | } |
ysr@777 | 2587 | |
ysr@777 | 2588 | class VerifyLivenessOopClosure: public OopClosure { |
ysr@777 | 2589 | G1CollectedHeap* g1h; |
ysr@777 | 2590 | public: |
ysr@777 | 2591 | VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { |
ysr@777 | 2592 | g1h = _g1h; |
ysr@777 | 2593 | } |
ysr@1280 | 2594 | void do_oop(narrowOop *p) { do_oop_work(p); } |
ysr@1280 | 2595 | void do_oop( oop *p) { do_oop_work(p); } |
ysr@1280 | 2596 | |
ysr@1280 | 2597 | template <class T> void do_oop_work(T *p) { |
ysr@1280 | 2598 | oop obj = oopDesc::load_decode_heap_oop(p); |
ysr@1280 | 2599 | guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
ysr@1280 | 2600 | "Dead object referenced by a not dead object"); |
ysr@777 | 2601 | } |
ysr@777 | 2602 | }; |
ysr@777 | 2603 | |
ysr@777 | 2604 | class VerifyObjsInRegionClosure: public ObjectClosure { |
tonyp@1246 | 2605 | private: |
ysr@777 | 2606 | G1CollectedHeap* _g1h; |
ysr@777 | 2607 | size_t _live_bytes; |
ysr@777 | 2608 | HeapRegion *_hr; |
tonyp@1246 | 2609 | bool _use_prev_marking; |
ysr@777 | 2610 | public: |
tonyp@1246 | 2611 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2612 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2613 | VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) |
tonyp@1246 | 2614 | : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { |
ysr@777 | 2615 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 2616 | } |
ysr@777 | 2617 | void do_object(oop o) { |
ysr@777 | 2618 | VerifyLivenessOopClosure isLive(_g1h); |
ysr@777 | 2619 | assert(o != NULL, "Huh?"); |
tonyp@1246 | 2620 | if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
ysr@777 | 2621 | o->oop_iterate(&isLive); |
johnc@1824 | 2622 | if (!_hr->obj_allocated_since_prev_marking(o)) { |
johnc@1824 | 2623 | size_t obj_size = o->size(); // Make sure we don't overflow |
johnc@1824 | 2624 | _live_bytes += (obj_size * HeapWordSize); |
johnc@1824 | 2625 | } |
ysr@777 | 2626 | } |
ysr@777 | 2627 | } |
ysr@777 | 2628 | size_t live_bytes() { return _live_bytes; } |
ysr@777 | 2629 | }; |
ysr@777 | 2630 | |
ysr@777 | 2631 | class PrintObjsInRegionClosure : public ObjectClosure { |
ysr@777 | 2632 | HeapRegion *_hr; |
ysr@777 | 2633 | G1CollectedHeap *_g1; |
ysr@777 | 2634 | public: |
ysr@777 | 2635 | PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { |
ysr@777 | 2636 | _g1 = G1CollectedHeap::heap(); |
ysr@777 | 2637 | }; |
ysr@777 | 2638 | |
ysr@777 | 2639 | void do_object(oop o) { |
ysr@777 | 2640 | if (o != NULL) { |
ysr@777 | 2641 | HeapWord *start = (HeapWord *) o; |
ysr@777 | 2642 | size_t word_sz = o->size(); |
ysr@777 | 2643 | gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT |
ysr@777 | 2644 | " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", |
ysr@777 | 2645 | (void*) o, word_sz, |
ysr@777 | 2646 | _g1->isMarkedPrev(o), |
ysr@777 | 2647 | _g1->isMarkedNext(o), |
ysr@777 | 2648 | _hr->obj_allocated_since_prev_marking(o)); |
ysr@777 | 2649 | HeapWord *end = start + word_sz; |
ysr@777 | 2650 | HeapWord *cur; |
ysr@777 | 2651 | int *val; |
ysr@777 | 2652 | for (cur = start; cur < end; cur++) { |
ysr@777 | 2653 | val = (int *) cur; |
ysr@777 | 2654 | gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); |
ysr@777 | 2655 | } |
ysr@777 | 2656 | } |
ysr@777 | 2657 | } |
ysr@777 | 2658 | }; |
ysr@777 | 2659 | |
ysr@777 | 2660 | class VerifyRegionClosure: public HeapRegionClosure { |
tonyp@1246 | 2661 | private: |
ysr@777 | 2662 | bool _allow_dirty; |
tonyp@825 | 2663 | bool _par; |
tonyp@1246 | 2664 | bool _use_prev_marking; |
tonyp@1455 | 2665 | bool _failures; |
tonyp@1246 | 2666 | public: |
tonyp@1246 | 2667 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2668 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2669 | VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) |
ysr@1280 | 2670 | : _allow_dirty(allow_dirty), |
ysr@1280 | 2671 | _par(par), |
tonyp@1455 | 2672 | _use_prev_marking(use_prev_marking), |
tonyp@1455 | 2673 | _failures(false) {} |
tonyp@1455 | 2674 | |
tonyp@1455 | 2675 | bool failures() { |
tonyp@1455 | 2676 | return _failures; |
tonyp@1455 | 2677 | } |
ysr@1280 | 2678 | |
ysr@777 | 2679 | bool doHeapRegion(HeapRegion* r) { |
tonyp@825 | 2680 | guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
tonyp@825 | 2681 | "Should be unclaimed at verify points."); |
iveresov@1072 | 2682 | if (!r->continuesHumongous()) { |
tonyp@1455 | 2683 | bool failures = false; |
tonyp@1455 | 2684 | r->verify(_allow_dirty, _use_prev_marking, &failures); |
tonyp@1455 | 2685 | if (failures) { |
tonyp@1455 | 2686 | _failures = true; |
tonyp@1455 | 2687 | } else { |
tonyp@1455 | 2688 | VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
tonyp@1455 | 2689 | r->object_iterate(¬_dead_yet_cl); |
tonyp@1455 | 2690 | if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
tonyp@1455 | 2691 | gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
tonyp@1455 | 2692 | "max_live_bytes "SIZE_FORMAT" " |
tonyp@1455 | 2693 | "< calculated "SIZE_FORMAT, |
tonyp@1455 | 2694 | r->bottom(), r->end(), |
tonyp@1455 | 2695 | r->max_live_bytes(), |
tonyp@1455 | 2696 | not_dead_yet_cl.live_bytes()); |
tonyp@1455 | 2697 | _failures = true; |
tonyp@1455 | 2698 | } |
tonyp@1455 | 2699 | } |
ysr@777 | 2700 | } |
tonyp@1455 | 2701 | return false; // continue the iteration; any failure is recorded in _failures
ysr@777 | 2702 | } |
ysr@777 | 2703 | }; |
ysr@777 | 2704 | |
ysr@777 | 2705 | class VerifyRootsClosure: public OopsInGenClosure { |
ysr@777 | 2706 | private: |
ysr@777 | 2707 | G1CollectedHeap* _g1h; |
tonyp@1455 | 2708 | bool _use_prev_marking; |
ysr@777 | 2709 | bool _failures; |
ysr@777 | 2710 | public: |
tonyp@1246 | 2711 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2712 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2713 | VerifyRootsClosure(bool use_prev_marking) : |
ysr@1280 | 2714 | _g1h(G1CollectedHeap::heap()), |
tonyp@1455 | 2715 | _use_prev_marking(use_prev_marking), |
tonyp@1455 | 2716 | _failures(false) { } |
ysr@777 | 2717 | |
ysr@777 | 2718 | bool failures() { return _failures; } |
ysr@777 | 2719 | |
ysr@1280 | 2720 | template <class T> void do_oop_nv(T* p) { |
ysr@1280 | 2721 | T heap_oop = oopDesc::load_heap_oop(p); |
ysr@1280 | 2722 | if (!oopDesc::is_null(heap_oop)) { |
ysr@1280 | 2723 | oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
tonyp@1246 | 2724 | if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
ysr@777 | 2725 | gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
tonyp@1455 | 2726 | "points to dead obj "PTR_FORMAT, p, (void*) obj); |
ysr@777 | 2727 | obj->print_on(gclog_or_tty); |
ysr@777 | 2728 | _failures = true; |
ysr@777 | 2729 | } |
ysr@777 | 2730 | } |
ysr@777 | 2731 | } |
ysr@1280 | 2732 | |
ysr@1280 | 2733 | void do_oop(oop* p) { do_oop_nv(p); } |
ysr@1280 | 2734 | void do_oop(narrowOop* p) { do_oop_nv(p); } |
ysr@777 | 2735 | }; |
ysr@777 | 2736 | |
tonyp@825 | 2737 | // This is the task used for parallel heap verification. |
tonyp@825 | 2738 | |
tonyp@825 | 2739 | class G1ParVerifyTask: public AbstractGangTask { |
tonyp@825 | 2740 | private: |
tonyp@825 | 2741 | G1CollectedHeap* _g1h; |
tonyp@825 | 2742 | bool _allow_dirty; |
tonyp@1246 | 2743 | bool _use_prev_marking; |
tonyp@1455 | 2744 | bool _failures; |
tonyp@825 | 2745 | |
tonyp@825 | 2746 | public: |
tonyp@1246 | 2747 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2748 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2749 | G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, |
tonyp@1246 | 2750 | bool use_prev_marking) : |
tonyp@825 | 2751 | AbstractGangTask("Parallel verify task"), |
ysr@1280 | 2752 | _g1h(g1h), |
ysr@1280 | 2753 | _allow_dirty(allow_dirty), |
tonyp@1455 | 2754 | _use_prev_marking(use_prev_marking), |
tonyp@1455 | 2755 | _failures(false) { } |
tonyp@1455 | 2756 | |
tonyp@1455 | 2757 | bool failures() { |
tonyp@1455 | 2758 | return _failures; |
tonyp@1455 | 2759 | } |
tonyp@825 | 2760 | |
tonyp@825 | 2761 | void work(int worker_i) { |
iveresov@1072 | 2762 | HandleMark hm; |
tonyp@1246 | 2763 | VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
tonyp@825 | 2764 | _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
tonyp@825 | 2765 | HeapRegion::ParVerifyClaimValue); |
tonyp@1455 | 2766 | if (blk.failures()) { |
tonyp@1455 | 2767 | _failures = true; |
tonyp@1455 | 2768 | } |
tonyp@825 | 2769 | } |
tonyp@825 | 2770 | }; |
tonyp@825 | 2771 | |
ysr@777 | 2772 | void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
tonyp@1246 | 2773 | verify(allow_dirty, silent, /* use_prev_marking */ true); |
tonyp@1246 | 2774 | } |
tonyp@1246 | 2775 | |
tonyp@1246 | 2776 | void G1CollectedHeap::verify(bool allow_dirty, |
tonyp@1246 | 2777 | bool silent, |
tonyp@1246 | 2778 | bool use_prev_marking) { |
ysr@777 | 2779 | if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
ysr@777 | 2780 | if (!silent) { gclog_or_tty->print("roots "); } |
tonyp@1246 | 2781 | VerifyRootsClosure rootsCl(use_prev_marking); |
jrose@1424 | 2782 | CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
jrose@1424 | 2783 | process_strong_roots(true, // activate StrongRootsScope |
jrose@1424 | 2784 | false, |
ysr@777 | 2785 | SharedHeap::SO_AllClasses, |
ysr@777 | 2786 | &rootsCl, |
jrose@1424 | 2787 | &blobsCl, |
ysr@777 | 2788 | &rootsCl); |
tonyp@1455 | 2789 | bool failures = rootsCl.failures(); |
ysr@777 | 2790 | rem_set()->invalidate(perm_gen()->used_region(), false); |
tonyp@2472 | 2791 | if (!silent) { gclog_or_tty->print("HeapRegionSets "); } |
tonyp@2472 | 2792 | verify_region_sets(); |
tonyp@2472 | 2793 | if (!silent) { gclog_or_tty->print("HeapRegions "); } |
tonyp@825 | 2794 | if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
tonyp@825 | 2795 | assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@825 | 2796 | "sanity check"); |
tonyp@825 | 2797 | |
tonyp@1246 | 2798 | G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
tonyp@825 | 2799 | int n_workers = workers()->total_workers(); |
tonyp@825 | 2800 | set_par_threads(n_workers); |
tonyp@825 | 2801 | workers()->run_task(&task); |
tonyp@825 | 2802 | set_par_threads(0); |
tonyp@1455 | 2803 | if (task.failures()) { |
tonyp@1455 | 2804 | failures = true; |
tonyp@1455 | 2805 | } |
tonyp@825 | 2806 | |
tonyp@825 | 2807 | assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), |
tonyp@825 | 2808 | "sanity check"); |
tonyp@825 | 2809 | |
tonyp@825 | 2810 | reset_heap_region_claim_values(); |
tonyp@825 | 2811 | |
tonyp@825 | 2812 | assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@825 | 2813 | "sanity check"); |
tonyp@825 | 2814 | } else { |
tonyp@1246 | 2815 | VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
tonyp@825 | 2816 | _hrs->iterate(&blk); |
tonyp@1455 | 2817 | if (blk.failures()) { |
tonyp@1455 | 2818 | failures = true; |
tonyp@1455 | 2819 | } |
tonyp@825 | 2820 | } |
tonyp@2472 | 2821 | if (!silent) gclog_or_tty->print("RemSet "); |
ysr@777 | 2822 | rem_set()->verify(); |
tonyp@1455 | 2823 | |
tonyp@1455 | 2824 | if (failures) { |
tonyp@1455 | 2825 | gclog_or_tty->print_cr("Heap:"); |
tonyp@1455 | 2826 | print_on(gclog_or_tty, true /* extended */); |
tonyp@1455 | 2827 | gclog_or_tty->print_cr(""); |
jcoomes@1902 | 2828 | #ifndef PRODUCT |
tonyp@1479 | 2829 | if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
tonyp@1823 | 2830 | concurrent_mark()->print_reachable("at-verification-failure", |
tonyp@1823 | 2831 | use_prev_marking, false /* all */); |
tonyp@1455 | 2832 | } |
jcoomes@1902 | 2833 | #endif |
tonyp@1455 | 2834 | gclog_or_tty->flush(); |
tonyp@1455 | 2835 | } |
tonyp@1455 | 2836 | guarantee(!failures, "there should not have been any failures"); |
ysr@777 | 2837 | } else { |
ysr@777 | 2838 | if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); |
ysr@777 | 2839 | } |
ysr@777 | 2840 | } |
ysr@777 | 2841 | |
ysr@777 | 2842 | class PrintRegionClosure: public HeapRegionClosure { |
ysr@777 | 2843 | outputStream* _st; |
ysr@777 | 2844 | public: |
ysr@777 | 2845 | PrintRegionClosure(outputStream* st) : _st(st) {} |
ysr@777 | 2846 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2847 | r->print_on(_st); |
ysr@777 | 2848 | return false; |
ysr@777 | 2849 | } |
ysr@777 | 2850 | }; |
ysr@777 | 2851 | |
tonyp@1273 | 2852 | void G1CollectedHeap::print() const { print_on(tty); } |
ysr@777 | 2853 | |
ysr@777 | 2854 | void G1CollectedHeap::print_on(outputStream* st) const { |
tonyp@1273 | 2855 | print_on(st, PrintHeapAtGCExtended); |
tonyp@1273 | 2856 | } |
tonyp@1273 | 2857 | |
tonyp@1273 | 2858 | void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
tonyp@1273 | 2859 | st->print(" %-20s", "garbage-first heap"); |
tonyp@1273 | 2860 | st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
tonyp@1281 | 2861 | capacity()/K, used_unlocked()/K); |
tonyp@1273 | 2862 | st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
tonyp@1273 | 2863 | _g1_storage.low_boundary(), |
tonyp@1273 | 2864 | _g1_storage.high(), |
tonyp@1273 | 2865 | _g1_storage.high_boundary()); |
tonyp@1273 | 2866 | st->cr(); |
tonyp@1273 | 2867 | st->print(" region size " SIZE_FORMAT "K, ", |
tonyp@1273 | 2868 | HeapRegion::GrainBytes/K); |
tonyp@1273 | 2869 | size_t young_regions = _young_list->length(); |
tonyp@1273 | 2870 | st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
tonyp@1273 | 2871 | young_regions, young_regions * HeapRegion::GrainBytes / K); |
tonyp@1273 | 2872 | size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
tonyp@1273 | 2873 | st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
tonyp@1273 | 2874 | survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
tonyp@1273 | 2875 | st->cr(); |
tonyp@1273 | 2876 | perm()->as_gen()->print_on(st); |
tonyp@1273 | 2877 | if (extended) { |
tonyp@1455 | 2878 | st->cr(); |
tonyp@1273 | 2879 | print_on_extended(st); |
tonyp@1273 | 2880 | } |
tonyp@1273 | 2881 | } |
tonyp@1273 | 2882 | |
tonyp@1273 | 2883 | void G1CollectedHeap::print_on_extended(outputStream* st) const { |
ysr@777 | 2884 | PrintRegionClosure blk(st); |
ysr@777 | 2885 | _hrs->iterate(&blk); |
ysr@777 | 2886 | } |
ysr@777 | 2887 | |
ysr@777 | 2888 | void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { |
jmasa@2188 | 2889 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
tonyp@1454 | 2890 | workers()->print_worker_threads_on(st); |
tonyp@1454 | 2891 | } |
tonyp@1454 | 2892 | _cmThread->print_on(st); |
ysr@777 | 2893 | st->cr(); |
tonyp@1454 | 2894 | _cm->print_worker_threads_on(st); |
tonyp@1454 | 2895 | _cg1r->print_worker_threads_on(st); |
ysr@777 | 2896 | st->cr(); |
ysr@777 | 2897 | } |
ysr@777 | 2898 | |
ysr@777 | 2899 | void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
jmasa@2188 | 2900 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 2901 | workers()->threads_do(tc); |
ysr@777 | 2902 | } |
ysr@777 | 2903 | tc->do_thread(_cmThread); |
iveresov@1229 | 2904 | _cg1r->threads_do(tc); |
ysr@777 | 2905 | } |
ysr@777 | 2906 | |
ysr@777 | 2907 | void G1CollectedHeap::print_tracing_info() const { |
ysr@777 | 2908 | // We'll overload this to mean "trace GC pause statistics." |
ysr@777 | 2909 | if (TraceGen0Time || TraceGen1Time) { |
ysr@777 | 2910 | // The "G1CollectorPolicy" is keeping track of these stats, so delegate |
ysr@777 | 2911 | // to that. |
ysr@777 | 2912 | g1_policy()->print_tracing_info(); |
ysr@777 | 2913 | } |
johnc@1186 | 2914 | if (G1SummarizeRSetStats) { |
ysr@777 | 2915 | g1_rem_set()->print_summary_info(); |
ysr@777 | 2916 | } |
tonyp@1717 | 2917 | if (G1SummarizeConcMark) { |
ysr@777 | 2918 | concurrent_mark()->print_summary_info(); |
ysr@777 | 2919 | } |
ysr@777 | 2920 | g1_policy()->print_yg_surv_rate_info(); |
ysr@777 | 2921 | SpecializationStats::print(); |
ysr@777 | 2922 | } |
ysr@777 | 2923 | |
ysr@777 | 2924 | int G1CollectedHeap::addr_to_arena_id(void* addr) const { |
ysr@777 | 2925 | HeapRegion* hr = heap_region_containing(addr); |
ysr@777 | 2926 | if (hr == NULL) { |
ysr@777 | 2927 | return 0; |
ysr@777 | 2928 | } else { |
ysr@777 | 2929 | return 1; |
ysr@777 | 2930 | } |
ysr@777 | 2931 | } |
ysr@777 | 2932 | |
ysr@777 | 2933 | G1CollectedHeap* G1CollectedHeap::heap() { |
ysr@777 | 2934 | assert(_sh->kind() == CollectedHeap::G1CollectedHeap, |
ysr@777 | 2935 | "not a garbage-first heap"); |
ysr@777 | 2936 | return _g1h; |
ysr@777 | 2937 | } |
ysr@777 | 2938 | |
ysr@777 | 2939 | void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { |
ysr@1680 | 2940 | // always_do_update_barrier = false; |
ysr@777 | 2941 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
ysr@777 | 2942 | // Call allocation profiler |
ysr@777 | 2943 | AllocationProfiler::iterate_since_last_gc(); |
ysr@777 | 2944 | // Fill TLAB's and such |
ysr@777 | 2945 | ensure_parsability(true); |
ysr@777 | 2946 | } |
ysr@777 | 2947 | |
ysr@777 | 2948 | void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { |
ysr@777 | 2949 | // FIXME: what is this about? |
ysr@777 | 2950 | // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" |
ysr@777 | 2951 | // is set. |
ysr@777 | 2952 | COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), |
ysr@777 | 2953 | "derived pointer present")); |
ysr@1680 | 2954 | // always_do_update_barrier = true; |
ysr@777 | 2955 | } |
ysr@777 | 2956 | |
tonyp@2315 | 2957 | HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size, |
tonyp@2315 | 2958 | unsigned int gc_count_before, |
tonyp@2315 | 2959 | bool* succeeded) { |
tonyp@2315 | 2960 | assert_heap_not_locked_and_not_at_safepoint(); |
ysr@777 | 2961 | g1_policy()->record_stop_world_start(); |
tonyp@2315 | 2962 | VM_G1IncCollectionPause op(gc_count_before, |
tonyp@2315 | 2963 | word_size, |
tonyp@2315 | 2964 | false, /* should_initiate_conc_mark */ |
tonyp@2315 | 2965 | g1_policy()->max_pause_time_ms(), |
tonyp@2315 | 2966 | GCCause::_g1_inc_collection_pause); |
tonyp@2315 | 2967 | VMThread::execute(&op); |
tonyp@2315 | 2968 | |
tonyp@2315 | 2969 | HeapWord* result = op.result(); |
tonyp@2315 | 2970 | bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded(); |
tonyp@2315 | 2971 | assert(result == NULL || ret_succeeded, |
tonyp@2315 | 2972 | "the result should be NULL if the VM did not succeed"); |
tonyp@2315 | 2973 | *succeeded = ret_succeeded; |
tonyp@2315 | 2974 | |
tonyp@2315 | 2975 | assert_heap_not_locked(); |
tonyp@2315 | 2976 | return result; |
ysr@777 | 2977 | } |
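           |      | // Note on the contract above (illustrative): a NULL result with
           |      | // *succeeded == false means the pause either never ran (its
           |      | // prologue failed) or ran without satisfying the allocation, so
           |      | // callers must be prepared to retry or fall back.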
ysr@777 | 2978 | |
ysr@777 | 2979 | void |
ysr@777 | 2980 | G1CollectedHeap::doConcurrentMark() { |
ysr@1280 | 2981 | MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
ysr@1280 | 2982 | if (!_cmThread->in_progress()) { |
ysr@1280 | 2983 | _cmThread->set_started(); |
ysr@1280 | 2984 | CGC_lock->notify(); |
ysr@777 | 2985 | } |
ysr@777 | 2986 | } |
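           |      | // Effect of the above (illustrative): if the concurrent mark
           |      | // thread is idle we mark it started and notify CGC_lock, so
           |      | // marking begins once the safepoint is released; if a cycle is
           |      | // already in progress the call is a no-op.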
ysr@777 | 2987 | |
ysr@777 | 2988 | class VerifyMarkedObjsClosure: public ObjectClosure { |
ysr@777 | 2989 | G1CollectedHeap* _g1h; |
ysr@777 | 2990 | public: |
ysr@777 | 2991 | VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} |
ysr@777 | 2992 | void do_object(oop obj) { |
ysr@777 | 2993 | assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, |
ysr@777 | 2994 | "markandsweep mark should agree with concurrent deadness"); |
ysr@777 | 2995 | } |
ysr@777 | 2996 | }; |
ysr@777 | 2997 | |
ysr@777 | 2998 | void |
ysr@777 | 2999 | G1CollectedHeap::checkConcurrentMark() { |
ysr@777 | 3000 | VerifyMarkedObjsClosure verifycl(this); |
ysr@777 | 3001 | // MutexLockerEx x(getMarkBitMapLock(), |
ysr@777 | 3002 | // Mutex::_no_safepoint_check_flag); |
iveresov@1113 | 3003 | object_iterate(&verifycl, false); |
ysr@777 | 3004 | } |
ysr@777 | 3005 | |
ysr@777 | 3006 | void G1CollectedHeap::do_sync_mark() { |
ysr@777 | 3007 | _cm->checkpointRootsInitial(); |
ysr@777 | 3008 | _cm->markFromRoots(); |
ysr@777 | 3009 | _cm->checkpointRootsFinal(false); |
ysr@777 | 3010 | } |
ysr@777 | 3011 | |
ysr@777 | 3012 | // <NEW PREDICTION> |
ysr@777 | 3013 | |
ysr@777 | 3014 | double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, |
ysr@777 | 3015 | bool young) { |
ysr@777 | 3016 | return _g1_policy->predict_region_elapsed_time_ms(hr, young); |
ysr@777 | 3017 | } |
ysr@777 | 3018 | |
ysr@777 | 3019 | void G1CollectedHeap::check_if_region_is_too_expensive(double |
ysr@777 | 3020 | predicted_time_ms) { |
ysr@777 | 3021 | _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); |
ysr@777 | 3022 | } |
ysr@777 | 3023 | |
ysr@777 | 3024 | size_t G1CollectedHeap::pending_card_num() { |
ysr@777 | 3025 | size_t extra_cards = 0; |
ysr@777 | 3026 | JavaThread *curr = Threads::first(); |
ysr@777 | 3027 | while (curr != NULL) { |
ysr@777 | 3028 | DirtyCardQueue& dcq = curr->dirty_card_queue(); |
ysr@777 | 3029 | extra_cards += dcq.size(); |
ysr@777 | 3030 | curr = curr->next(); |
ysr@777 | 3031 | } |
ysr@777 | 3032 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 3033 | size_t buffer_size = dcqs.buffer_size(); |
ysr@777 | 3034 | size_t buffer_num = dcqs.completed_buffers_num(); |
ysr@777 | 3035 | return buffer_size * buffer_num + extra_cards; |
ysr@777 | 3036 | } |
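           |      | // Sketch of the accounting above (illustrative numbers): 12
           |      | // completed buffers of 256 cards each plus 40 cards still sitting
           |      | // in per-thread queues gives 12 * 256 + 40 = 3112 pending cards.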
ysr@777 | 3037 | |
ysr@777 | 3038 | size_t G1CollectedHeap::max_pending_card_num() { |
ysr@777 | 3039 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 3040 | size_t buffer_size = dcqs.buffer_size(); |
ysr@777 | 3041 | size_t buffer_num = dcqs.completed_buffers_num(); |
ysr@777 | 3042 | int thread_num = Threads::number_of_threads(); |
ysr@777 | 3043 | return (buffer_num + thread_num) * buffer_size; |
ysr@777 | 3044 | } |
ysr@777 | 3045 | |
ysr@777 | 3046 | size_t G1CollectedHeap::cards_scanned() { |
johnc@2216 | 3047 | return g1_rem_set()->cardsScanned(); |
ysr@777 | 3048 | } |
ysr@777 | 3049 | |
ysr@777 | 3050 | void |
ysr@777 | 3051 | G1CollectedHeap::setup_surviving_young_words() { |
ysr@777 | 3052 | guarantee( _surviving_young_words == NULL, "pre-condition" ); |
ysr@777 | 3053 | size_t array_length = g1_policy()->young_cset_length(); |
ysr@777 | 3054 | _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); |
ysr@777 | 3055 | if (_surviving_young_words == NULL) { |
ysr@777 | 3056 | vm_exit_out_of_memory(sizeof(size_t) * array_length, |
ysr@777 | 3057 | "Not enough space for young surv words summary."); |
ysr@777 | 3058 | } |
ysr@777 | 3059 | memset(_surviving_young_words, 0, array_length * sizeof(size_t)); |
ysr@1280 | 3060 | #ifdef ASSERT |
ysr@777 | 3061 | for (size_t i = 0; i < array_length; ++i) { |
ysr@1280 | 3062 | assert( _surviving_young_words[i] == 0, "memset above" ); |
ysr@1280 | 3063 | } |
ysr@1280 | 3064 | #endif // ASSERT
ysr@777 | 3065 | } |
ysr@777 | 3066 | |
ysr@777 | 3067 | void |
ysr@777 | 3068 | G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { |
ysr@777 | 3069 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 3070 | size_t array_length = g1_policy()->young_cset_length(); |
ysr@777 | 3071 | for (size_t i = 0; i < array_length; ++i) |
ysr@777 | 3072 | _surviving_young_words[i] += surv_young_words[i]; |
ysr@777 | 3073 | } |
ysr@777 | 3074 | |
ysr@777 | 3075 | void |
ysr@777 | 3076 | G1CollectedHeap::cleanup_surviving_young_words() { |
ysr@777 | 3077 | guarantee( _surviving_young_words != NULL, "pre-condition" ); |
ysr@777 | 3078 | FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); |
ysr@777 | 3079 | _surviving_young_words = NULL; |
ysr@777 | 3080 | } |
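           |      | // Lifecycle of the array (illustrative): setup_surviving_young_words()
           |      | // runs once per pause before evacuation, workers merge their private
           |      | // counts via update_surviving_young_words() under ParGCRareEvent_lock,
           |      | // and cleanup_surviving_young_words() frees the array once the policy
           |      | // has consumed the totals.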
ysr@777 | 3081 | |
ysr@777 | 3082 | // </NEW PREDICTION> |
ysr@777 | 3083 | |
iveresov@1696 | 3084 | struct PrepareForRSScanningClosure : public HeapRegionClosure { |
iveresov@1696 | 3085 | bool doHeapRegion(HeapRegion *r) { |
iveresov@1696 | 3086 | r->rem_set()->set_iter_claimed(0); |
iveresov@1696 | 3087 | return false; |
iveresov@1696 | 3088 | } |
iveresov@1696 | 3089 | }; |
iveresov@1696 | 3090 | |
jcoomes@2064 | 3091 | #if TASKQUEUE_STATS |
jcoomes@2064 | 3092 | void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { |
jcoomes@2064 | 3093 | st->print_raw_cr("GC Task Stats"); |
jcoomes@2064 | 3094 | st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); |
jcoomes@2064 | 3095 | st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); |
jcoomes@2064 | 3096 | } |
jcoomes@2064 | 3097 | |
jcoomes@2064 | 3098 | void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { |
jcoomes@2064 | 3099 | print_taskqueue_stats_hdr(st); |
jcoomes@2064 | 3100 | |
jcoomes@2064 | 3101 | TaskQueueStats totals; |
jcoomes@2110 | 3102 | const int n = workers() != NULL ? workers()->total_workers() : 1; |
jcoomes@2064 | 3103 | for (int i = 0; i < n; ++i) { |
jcoomes@2064 | 3104 | st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); |
jcoomes@2064 | 3105 | totals += task_queue(i)->stats; |
jcoomes@2064 | 3106 | } |
jcoomes@2064 | 3107 | st->print_raw("tot "); totals.print(st); st->cr(); |
jcoomes@2064 | 3108 | |
jcoomes@2064 | 3109 | DEBUG_ONLY(totals.verify()); |
jcoomes@2064 | 3110 | } |
jcoomes@2064 | 3111 | |
jcoomes@2064 | 3112 | void G1CollectedHeap::reset_taskqueue_stats() { |
jcoomes@2110 | 3113 | const int n = workers() != NULL ? workers()->total_workers() : 1; |
jcoomes@2064 | 3114 | for (int i = 0; i < n; ++i) { |
jcoomes@2064 | 3115 | task_queue(i)->stats.reset(); |
jcoomes@2064 | 3116 | } |
jcoomes@2064 | 3117 | } |
jcoomes@2064 | 3118 | #endif // TASKQUEUE_STATS |
jcoomes@2064 | 3119 | |
tonyp@2315 | 3120 | bool |
tonyp@2011 | 3121 | G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { |
tonyp@2472 | 3122 | assert_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2472 | 3123 | guarantee(!is_gc_active(), "collection is not reentrant"); |
tonyp@2472 | 3124 | |
tonyp@1794 | 3125 | if (GC_locker::check_active_before_gc()) { |
tonyp@2315 | 3126 | return false; |
tonyp@1794 | 3127 | } |
tonyp@1794 | 3128 | |
kamg@2445 | 3129 | SvcGCMarker sgcm(SvcGCMarker::MINOR); |
tonyp@2381 | 3130 | ResourceMark rm; |
tonyp@2381 | 3131 | |
tonyp@1273 | 3132 | if (PrintHeapAtGC) { |
tonyp@1273 | 3133 | Universe::print_heap_before_gc(); |
tonyp@1273 | 3134 | } |
tonyp@1273 | 3135 | |
tonyp@2472 | 3136 | verify_region_sets_optional(); |
tonyp@2715 | 3137 | verify_dirty_young_regions(); |
tonyp@2472 | 3138 | |
tonyp@1273 | 3139 | { |
tonyp@1794 | 3140 | // This call will decide whether this pause is an initial-mark |
tonyp@1794 | 3141 | // pause. If it is, during_initial_mark_pause() will return true |
tonyp@1794 | 3142 | // for the duration of this pause. |
tonyp@1794 | 3143 | g1_policy()->decide_on_conc_mark_initiation(); |
tonyp@1794 | 3144 | |
tonyp@1273 | 3145 | char verbose_str[128]; |
tonyp@1273 | 3146 | sprintf(verbose_str, "GC pause "); |
tonyp@1273 | 3147 | if (g1_policy()->in_young_gc_mode()) { |
tonyp@1273 | 3148 | if (g1_policy()->full_young_gcs()) |
tonyp@1273 | 3149 | strcat(verbose_str, "(young)"); |
tonyp@1273 | 3150 | else |
tonyp@1273 | 3151 | strcat(verbose_str, "(partial)"); |
tonyp@1273 | 3152 | } |
tonyp@2011 | 3153 | if (g1_policy()->during_initial_mark_pause()) { |
tonyp@1273 | 3154 | strcat(verbose_str, " (initial-mark)"); |
tonyp@2011 | 3155 | // We are about to start a marking cycle, so we increment the |
tonyp@2011 | 3156 | // full collection counter. |
tonyp@2011 | 3157 | increment_total_full_collections(); |
tonyp@2011 | 3158 | } |
tonyp@1273 | 3159 | |
tonyp@1273 | 3160 | // if PrintGCDetails is on, we'll print long statistics information |
tonyp@1273 | 3161 | // in the collector policy code, so let's not print this as the output |
tonyp@1273 | 3162 | // is messy if we do. |
tonyp@1273 | 3163 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
tonyp@1273 | 3164 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
tonyp@1273 | 3165 | TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
tonyp@1273 | 3166 | |
tonyp@1524 | 3167 | TraceMemoryManagerStats tms(false /* fullGC */); |
tonyp@1524 | 3168 | |
tonyp@2643 | 3169 | // If the secondary_free_list is not empty, append it to the |
tonyp@2643 | 3170 | // free_list. No need to wait for the cleanup operation to finish; |
tonyp@2643 | 3171 | // the region allocation code will check the secondary_free_list |
tonyp@2643 | 3172 | // and wait if necessary. If the G1StressConcRegionFreeing flag is |
tonyp@2643 | 3173 | // set, skip this step so that the region allocation code has to |
tonyp@2643 | 3174 | // get entries from the secondary_free_list. |
tonyp@2472 | 3175 | if (!G1StressConcRegionFreeing) { |
tonyp@2643 | 3176 | append_secondary_free_list_if_not_empty_with_lock(); |
tonyp@2472 | 3177 | } |
tonyp@1273 | 3178 | |
tonyp@1273 | 3179 | increment_gc_time_stamp(); |
tonyp@1273 | 3180 | |
tonyp@1273 | 3181 | if (g1_policy()->in_young_gc_mode()) { |
tonyp@1273 | 3182 | assert(check_young_list_well_formed(), |
tonyp@1273 | 3183 | "young list should be well formed"); |
tonyp@1273 | 3184 | } |
tonyp@1273 | 3185 | |
tonyp@1273 | 3186 | { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
tonyp@1273 | 3187 | IsGCActiveMark x; |
tonyp@1273 | 3188 | |
tonyp@1273 | 3189 | gc_prologue(false); |
tonyp@1273 | 3190 | increment_total_collections(false /* full gc */); |
ysr@777 | 3191 | |
ysr@777 | 3192 | #if G1_REM_SET_LOGGING |
tonyp@1273 | 3193 | gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
ysr@777 | 3194 | print(); |
ysr@777 | 3195 | #endif |
ysr@777 | 3196 | |
tonyp@1273 | 3197 | if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
tonyp@1273 | 3198 | HandleMark hm; // Discard invalid handles created during verification |
tonyp@2715 | 3199 | gclog_or_tty->print(" VerifyBeforeGC:"); |
tonyp@1273 | 3200 | prepare_for_verify(); |
tonyp@1273 | 3201 | Universe::verify(false); |
tonyp@1273 | 3202 | } |
tonyp@1273 | 3203 | |
tonyp@1273 | 3204 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
tonyp@1273 | 3205 | |
johnc@2316 | 3206 | // Please see comment in G1CollectedHeap::ref_processing_init() |
johnc@2316 | 3207 | // to see how reference processing currently works in G1. |
johnc@2316 | 3208 | // |
tonyp@1273 | 3209 | // We want to turn off ref discovery, if necessary, and turn it back
ysr@1280 | 3210 | // on again later if we do. XXX Dubious: why is discovery disabled? |
tonyp@1273 | 3211 | bool was_enabled = ref_processor()->discovery_enabled(); |
tonyp@1273 | 3212 | if (was_enabled) ref_processor()->disable_discovery(); |
tonyp@1273 | 3213 | |
tonyp@1273 | 3214 | // Forget the current alloc region (we might even choose it to be part |
tonyp@1273 | 3215 | // of the collection set!). |
tonyp@2715 | 3216 | release_mutator_alloc_region(); |
tonyp@1273 | 3217 | |
tonyp@1273 | 3218 | // The elapsed time induced by the start time below deliberately elides |
tonyp@1273 | 3219 | // the possible verification above. |
tonyp@1273 | 3220 | double start_time_sec = os::elapsedTime(); |
tonyp@1273 | 3221 | size_t start_used_bytes = used(); |
tonyp@1273 | 3222 | |
johnc@1829 | 3223 | #if YOUNG_LIST_VERBOSE |
johnc@1829 | 3224 | gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); |
johnc@1829 | 3225 | _young_list->print(); |
johnc@1829 | 3226 | g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
johnc@1829 | 3227 | #endif // YOUNG_LIST_VERBOSE |
johnc@1829 | 3228 | |
tonyp@1273 | 3229 | g1_policy()->record_collection_pause_start(start_time_sec, |
tonyp@1273 | 3230 | start_used_bytes); |
tonyp@1273 | 3231 | |
johnc@1829 | 3232 | #if YOUNG_LIST_VERBOSE |
johnc@1829 | 3233 | gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); |
tonyp@1273 | 3234 | _young_list->print(); |
johnc@1829 | 3235 | #endif // YOUNG_LIST_VERBOSE |
ysr@777 | 3236 | |
tonyp@1794 | 3237 | if (g1_policy()->during_initial_mark_pause()) { |
tonyp@1273 | 3238 | concurrent_mark()->checkpointRootsInitialPre(); |
ysr@777 | 3239 | } |
tonyp@1273 | 3240 | save_marks(); |
tonyp@1273 | 3241 | |
tonyp@1273 | 3242 | // We must do this before any possible evacuation that should propagate |
tonyp@1273 | 3243 | // marks. |
tonyp@1273 | 3244 | if (mark_in_progress()) { |
tonyp@1273 | 3245 | double start_time_sec = os::elapsedTime(); |
tonyp@1273 | 3246 | |
tonyp@1273 | 3247 | _cm->drainAllSATBBuffers(); |
tonyp@1273 | 3248 | double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
tonyp@1273 | 3249 | g1_policy()->record_satb_drain_time(finish_mark_ms); |
tonyp@1273 | 3250 | } |
tonyp@1273 | 3251 | // Record the number of elements currently on the mark stack, so we |
tonyp@1273 | 3252 | // only iterate over these. (Since evacuation may add to the mark |
tonyp@1273 | 3253 | // stack, doing more exposes race conditions.) If no mark is in |
tonyp@1273 | 3254 | // progress, this will be zero. |
tonyp@1273 | 3255 | _cm->set_oops_do_bound(); |
tonyp@1273 | 3256 | |
tonyp@1273 | 3257 | if (mark_in_progress()) |
tonyp@1273 | 3258 | concurrent_mark()->newCSet(); |
tonyp@1273 | 3259 | |
johnc@1829 | 3260 | #if YOUNG_LIST_VERBOSE |
johnc@1829 | 3261 | gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); |
johnc@1829 | 3262 | _young_list->print(); |
johnc@1829 | 3263 | g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
johnc@1829 | 3264 | #endif // YOUNG_LIST_VERBOSE |
johnc@1829 | 3265 | |
tonyp@2062 | 3266 | g1_policy()->choose_collection_set(target_pause_time_ms); |
tonyp@1273 | 3267 | |
tonyp@1273 | 3268 | // Nothing to do if we were unable to choose a collection set. |
tonyp@1273 | 3269 | #if G1_REM_SET_LOGGING |
tonyp@2062 | 3270 | gclog_or_tty->print_cr("\nAfter pause, heap:"); |
tonyp@2062 | 3271 | print(); |
tonyp@1273 | 3272 | #endif |
tonyp@2062 | 3273 | PrepareForRSScanningClosure prepare_for_rs_scan; |
tonyp@2062 | 3274 | collection_set_iterate(&prepare_for_rs_scan); |
tonyp@2062 | 3275 | |
tonyp@2062 | 3276 | setup_surviving_young_words(); |
tonyp@2062 | 3277 | |
tonyp@2062 | 3278 | // Set up the gc allocation regions. |
tonyp@2062 | 3279 | get_gc_alloc_regions(); |
tonyp@2062 | 3280 | |
tonyp@2062 | 3281 | // Actually do the work... |
tonyp@2062 | 3282 | evacuate_collection_set(); |
tonyp@2062 | 3283 | |
tonyp@2062 | 3284 | free_collection_set(g1_policy()->collection_set()); |
tonyp@2062 | 3285 | g1_policy()->clear_collection_set(); |
tonyp@2062 | 3286 | |
tonyp@2062 | 3287 | cleanup_surviving_young_words(); |
tonyp@2062 | 3288 | |
tonyp@2062 | 3289 | // Start a new incremental collection set for the next pause. |
tonyp@2062 | 3290 | g1_policy()->start_incremental_cset_building(); |
tonyp@2062 | 3291 | |
tonyp@2062 | 3292 | // Clear the _cset_fast_test bitmap in anticipation of adding |
tonyp@2062 | 3293 | // regions to the incremental collection set for the next |
tonyp@2062 | 3294 | // evacuation pause. |
tonyp@2062 | 3295 | clear_cset_fast_test(); |
tonyp@2062 | 3296 | |
tonyp@2062 | 3297 | if (g1_policy()->in_young_gc_mode()) { |
tonyp@2062 | 3298 | _young_list->reset_sampled_info(); |
tonyp@2062 | 3299 | |
tonyp@2062 | 3300 | // Don't check the whole heap at this point as the |
tonyp@2062 | 3301 | // GC alloc regions from this pause have been tagged |
tonyp@2062 | 3302 | // as survivors and moved on to the survivor list. |
tonyp@2062 | 3303 | // Survivor regions will fail the !is_young() check. |
tonyp@2062 | 3304 | assert(check_young_list_empty(false /* check_heap */), |
tonyp@2062 | 3305 | "young list should be empty"); |
johnc@1829 | 3306 | |
johnc@1829 | 3307 | #if YOUNG_LIST_VERBOSE |
tonyp@2062 | 3308 | gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); |
tonyp@2062 | 3309 | _young_list->print(); |
johnc@1829 | 3310 | #endif // YOUNG_LIST_VERBOSE |
tonyp@1273 | 3311 | |
tonyp@2062 | 3312 | g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
tonyp@1273 | 3313 | _young_list->first_survivor_region(), |
tonyp@1273 | 3314 | _young_list->last_survivor_region()); |
johnc@1829 | 3315 | |
tonyp@2062 | 3316 | _young_list->reset_auxilary_lists(); |
tonyp@1273 | 3317 | } |
tonyp@1273 | 3318 | |
tonyp@1273 | 3319 | if (evacuation_failed()) { |
tonyp@1273 | 3320 | _summary_bytes_used = recalculate_used(); |
tonyp@1273 | 3321 | } else { |
tonyp@1273 | 3322 | // The "used" of the regions in the collection set has already been subtracted |
tonyp@1273 | 3323 | // when they were freed. Add in the bytes evacuated. |
tonyp@1273 | 3324 | _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
tonyp@1273 | 3325 | } |
tonyp@1273 | 3326 | |
tonyp@1273 | 3327 | if (g1_policy()->in_young_gc_mode() && |
tonyp@1794 | 3328 | g1_policy()->during_initial_mark_pause()) { |
tonyp@1273 | 3329 | concurrent_mark()->checkpointRootsInitialPost(); |
tonyp@1273 | 3330 | set_marking_started(); |
ysr@1280 | 3331 | // CAUTION: after the doConcurrentMark() call below, |
ysr@1280 | 3332 | // the concurrent marking thread(s) could be running |
ysr@1280 | 3333 | // concurrently with us. Make sure that anything after |
ysr@1280 | 3334 | // this point does not assume that we are the only GC thread |
ysr@1280 | 3335 | // running. Note: of course, the actual marking work will |
ysr@1280 | 3336 | // not start until the safepoint itself is released in |
ysr@1280 | 3337 | // ConcurrentGCThread::safepoint_desynchronize(). |
tonyp@1273 | 3338 | doConcurrentMark(); |
tonyp@1273 | 3339 | } |
tonyp@1273 | 3340 | |
johnc@1829 | 3341 | #if YOUNG_LIST_VERBOSE |
johnc@1829 | 3342 | gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); |
tonyp@1273 | 3343 | _young_list->print(); |
johnc@1829 | 3344 | g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); |
johnc@1829 | 3345 | #endif // YOUNG_LIST_VERBOSE |
tonyp@1273 | 3346 | |
tonyp@2715 | 3347 | init_mutator_alloc_region(); |
tonyp@2715 | 3348 | |
tonyp@1273 | 3349 | double end_time_sec = os::elapsedTime(); |
tonyp@1273 | 3350 | double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
tonyp@1273 | 3351 | g1_policy()->record_pause_time_ms(pause_time_ms); |
tonyp@2062 | 3352 | g1_policy()->record_collection_pause_end(); |
tonyp@1273 | 3353 | |
tonyp@1524 | 3354 | MemoryService::track_memory_usage(); |
tonyp@1524 | 3355 | |
tonyp@1273 | 3356 | if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
tonyp@1273 | 3357 | HandleMark hm; // Discard invalid handles created during verification |
tonyp@1273 | 3358 | gclog_or_tty->print(" VerifyAfterGC:"); |
tonyp@1273 | 3359 | prepare_for_verify(); |
tonyp@1273 | 3360 | Universe::verify(false); |
tonyp@1273 | 3361 | } |
tonyp@1273 | 3362 | |
tonyp@1273 | 3363 | if (was_enabled) ref_processor()->enable_discovery(); |
tonyp@1273 | 3364 | |
tonyp@1273 | 3365 | { |
tonyp@1273 | 3366 | size_t expand_bytes = g1_policy()->expansion_amount(); |
tonyp@1273 | 3367 | if (expand_bytes > 0) { |
tonyp@1273 | 3368 | size_t bytes_before = capacity(); |
johnc@2504 | 3369 | if (!expand(expand_bytes)) { |
johnc@2504 | 3370 | // We failed to expand the heap so let's verify that |
johnc@2504 | 3371 | // committed/uncommitted amounts match the backing store |
johnc@2504 | 3372 | assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); |
johnc@2504 | 3373 | assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); |
johnc@2504 | 3374 | } |
tonyp@1273 | 3375 | } |
tonyp@1273 | 3376 | } |
tonyp@1273 | 3377 | |
tonyp@1273 | 3378 | if (mark_in_progress()) { |
tonyp@1273 | 3379 | concurrent_mark()->update_g1_committed(); |
tonyp@1273 | 3380 | } |
tonyp@1273 | 3381 | |
tonyp@1273 | 3382 | #ifdef TRACESPINNING |
tonyp@1273 | 3383 | ParallelTaskTerminator::print_termination_counts(); |
tonyp@1273 | 3384 | #endif |
tonyp@1273 | 3385 | |
tonyp@1273 | 3386 | gc_epilogue(false); |
ysr@777 | 3387 | } |
ysr@777 | 3388 | |
tonyp@1273 | 3389 | if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
tonyp@1273 | 3390 | gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
tonyp@1273 | 3391 | print_tracing_info(); |
tonyp@1273 | 3392 | vm_exit(-1); |
ysr@777 | 3393 | } |
tonyp@1273 | 3394 | } |
tonyp@1273 | 3395 | |
tonyp@2472 | 3396 | verify_region_sets_optional(); |
tonyp@2472 | 3397 | |
jcoomes@2064 | 3398 | TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); |
jcoomes@2064 | 3399 | TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); |
jcoomes@2064 | 3400 | |
tonyp@1273 | 3401 | if (PrintHeapAtGC) { |
tonyp@1273 | 3402 | Universe::print_heap_after_gc(); |
ysr@777 | 3403 | } |
tonyp@1319 | 3404 | if (G1SummarizeRSetStats && |
tonyp@1319 | 3405 | (G1SummarizeRSetStatsPeriod > 0) && |
tonyp@1319 | 3406 | (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
tonyp@1319 | 3407 | g1_rem_set()->print_summary_info(); |
tonyp@1319 | 3408 | } |
tonyp@2315 | 3409 | |
tonyp@2315 | 3410 | return true; |
ysr@777 | 3411 | } |
ysr@777 | 3412 | |
apetrusenko@1826 | 3413 | size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) |
apetrusenko@1826 | 3414 | { |
apetrusenko@1826 | 3415 | size_t gclab_word_size; |
apetrusenko@1826 | 3416 | switch (purpose) { |
apetrusenko@1826 | 3417 | case GCAllocForSurvived: |
apetrusenko@1826 | 3418 | gclab_word_size = YoungPLABSize; |
apetrusenko@1826 | 3419 | break; |
apetrusenko@1826 | 3420 | case GCAllocForTenured: |
apetrusenko@1826 | 3421 | gclab_word_size = OldPLABSize; |
apetrusenko@1826 | 3422 | break; |
apetrusenko@1826 | 3423 | default: |
apetrusenko@1826 | 3424 | assert(false, "unknown GCAllocPurpose"); |
apetrusenko@1826 | 3425 | gclab_word_size = OldPLABSize; |
apetrusenko@1826 | 3426 | break; |
apetrusenko@1826 | 3427 | } |
apetrusenko@1826 | 3428 | return gclab_word_size; |
apetrusenko@1826 | 3429 | } |
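// Illustrative sketch (not HotSpot API): desired_plab_sz() above sizes the
// per-worker PLABs (parallel local allocation buffers). Each GC worker
// bump-allocates evacuated copies out of a private buffer, so only buffer
// refills need to synchronize on the shared region. A minimal stand-alone
// version of such a buffer, with illustrative names, might look like:

#include <cstddef>
typedef unsigned long Word;               // stand-in for HotSpot's HeapWord

class SimplePlab {
  Word* _top;
  Word* _end;
public:
  SimplePlab() : _top(NULL), _end(NULL) {}
  // Hand the buffer a fresh [buf, buf + word_sz) chunk of shared space.
  void refill(Word* buf, size_t word_sz) { _top = buf; _end = buf + word_sz; }
  // Thread-private bump allocation; no locking or CAS needed.
  Word* allocate(size_t word_sz) {
    if (_top == NULL || _top + word_sz > _end) return NULL; // refill needed
    Word* res = _top;
    _top += word_sz;
    return res;
  }
};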
apetrusenko@1826 | 3430 | |
tonyp@2715 | 3431 | void G1CollectedHeap::init_mutator_alloc_region() { |
tonyp@2715 | 3432 | assert(_mutator_alloc_region.get() == NULL, "pre-condition"); |
tonyp@2715 | 3433 | _mutator_alloc_region.init(); |
tonyp@2715 | 3434 | } |
tonyp@2715 | 3435 | |
tonyp@2715 | 3436 | void G1CollectedHeap::release_mutator_alloc_region() { |
tonyp@2715 | 3437 | _mutator_alloc_region.release(); |
tonyp@2715 | 3438 | assert(_mutator_alloc_region.get() == NULL, "post-condition"); |
tonyp@2715 | 3439 | } |
apetrusenko@1826 | 3440 | |
ysr@777 | 3441 | void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
ysr@777 | 3442 | assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); |
tonyp@1071 | 3443 | // make sure we don't call set_gc_alloc_region() multiple times on |
tonyp@1071 | 3444 | // the same region |
tonyp@1071 | 3445 | assert(r == NULL || !r->is_gc_alloc_region(), |
tonyp@1071 | 3446 | "shouldn't already be a GC alloc region"); |
johnc@1795 | 3447 | assert(r == NULL || !r->isHumongous(), |
johnc@1795 | 3448 | "humongous regions shouldn't be used as GC alloc regions"); |
johnc@1795 | 3449 | |
ysr@777 | 3450 | HeapWord* original_top = NULL; |
ysr@777 | 3451 | if (r != NULL) |
ysr@777 | 3452 | original_top = r->top(); |
ysr@777 | 3453 | |
ysr@777 | 3454 | // We will want to record the used space in r as being there before gc. |
ysr@777 | 3455 | // Once we install it as a GC alloc region it's eligible for allocation. |
ysr@777 | 3456 | // So record it now and use it later. |
ysr@777 | 3457 | size_t r_used = 0; |
ysr@777 | 3458 | if (r != NULL) { |
ysr@777 | 3459 | r_used = r->used(); |
ysr@777 | 3460 | |
jmasa@2188 | 3461 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 3462 | // need to take the lock to guard against two threads calling |
ysr@777 | 3463 | // get_gc_alloc_region concurrently (very unlikely but...) |
ysr@777 | 3464 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 3465 | r->save_marks(); |
ysr@777 | 3466 | } |
ysr@777 | 3467 | } |
ysr@777 | 3468 | HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 3469 | _gc_alloc_regions[purpose] = r; |
ysr@777 | 3470 | if (old_alloc_region != NULL) { |
ysr@777 | 3471 | // Replace aliases too. |
ysr@777 | 3472 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 3473 | if (_gc_alloc_regions[ap] == old_alloc_region) { |
ysr@777 | 3474 | _gc_alloc_regions[ap] = r; |
ysr@777 | 3475 | } |
ysr@777 | 3476 | } |
ysr@777 | 3477 | } |
ysr@777 | 3478 | if (r != NULL) { |
ysr@777 | 3479 | push_gc_alloc_region(r); |
ysr@777 | 3480 | if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { |
ysr@777 | 3481 | // We are using a region as a GC alloc region after it has been used |
ysr@777 | 3482 | // as a mutator allocation region during the current marking cycle. |
ysr@777 | 3483 | // The mutator-allocated objects are currently implicitly marked, but |
ysr@777 | 3484 | // when we move hr->next_top_at_mark_start() forward at the end |
ysr@777 | 3485 | // of the GC pause, they won't be. We therefore mark all objects in |
ysr@777 | 3486 | // the "gap". We do this object-by-object, since marking densely |
ysr@777 | 3487 | // does not currently work right with marking bitmap iteration. This |
ysr@777 | 3488 | // means we rely on TLAB filling at the start of pauses, and no |
ysr@777 | 3489 | // "resuscitation" of filled TLABs. If we want to do this, we need |
ysr@777 | 3490 | // to fix the marking bitmap iteration. |
ysr@777 | 3491 | HeapWord* curhw = r->next_top_at_mark_start(); |
ysr@777 | 3492 | HeapWord* t = original_top; |
ysr@777 | 3493 | |
ysr@777 | 3494 | while (curhw < t) { |
ysr@777 | 3495 | oop cur = (oop)curhw; |
ysr@777 | 3496 | // We'll assume parallel for generality. This is rare code. |
ysr@777 | 3497 | concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? |
ysr@777 | 3498 | curhw = curhw + cur->size(); |
ysr@777 | 3499 | } |
ysr@777 | 3500 | assert(curhw == t, "Should have parsed correctly."); |
ysr@777 | 3501 | } |
ysr@777 | 3502 | if (G1PolicyVerbose > 1) { |
ysr@777 | 3503 | gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " |
ysr@777 | 3504 | "for survivors:", r->bottom(), original_top, r->end()); |
ysr@777 | 3505 | r->print(); |
ysr@777 | 3506 | } |
ysr@777 | 3507 | g1_policy()->record_before_bytes(r_used); |
ysr@777 | 3508 | } |
ysr@777 | 3509 | } |
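// Illustrative sketch (toy object layout, not HotSpot code): the marking
// loop in set_gc_alloc_region() above depends on the region being parsable
// -- from any object boundary, the object's own size gives the next
// boundary. With a toy layout where word 0 of each object holds its size
// in words, the same walk looks like:

typedef unsigned long Word;

// Visit every object in [from, to), assuming 'from' is an object boundary.
static void mark_objects_in_range(Word* from, Word* to,
                                  void (*mark)(Word* obj)) {
  Word* cur = from;
  while (cur < to) {
    mark(cur);         // e.g. set the object's bit in a mark bitmap
    cur += cur[0];     // toy layout: first word is the object size in words
  }
  // Like the assert above: a parsable range must end exactly at 'to'.
}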
ysr@777 | 3510 | |
ysr@777 | 3511 | void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { |
ysr@777 | 3512 | assert(Thread::current()->is_VM_thread() || |
tonyp@2472 | 3513 | FreeList_lock->owned_by_self(), "Precondition"); |
ysr@777 | 3514 | assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
ysr@777 | 3515 | "Precondition."); |
ysr@777 | 3516 | hr->set_is_gc_alloc_region(true); |
ysr@777 | 3517 | hr->set_next_gc_alloc_region(_gc_alloc_region_list); |
ysr@777 | 3518 | _gc_alloc_region_list = hr; |
ysr@777 | 3519 | } |
ysr@777 | 3520 | |
ysr@777 | 3521 | #ifdef G1_DEBUG |
ysr@777 | 3522 | class FindGCAllocRegion: public HeapRegionClosure { |
ysr@777 | 3523 | public: |
ysr@777 | 3524 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 3525 | if (r->is_gc_alloc_region()) { |
ysr@777 | 3526 | gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", |
ysr@777 | 3527 | r->hrs_index(), r->bottom()); |
ysr@777 | 3528 | } |
ysr@777 | 3529 | return false; |
ysr@777 | 3530 | } |
ysr@777 | 3531 | }; |
ysr@777 | 3532 | #endif // G1_DEBUG |
ysr@777 | 3533 | |
ysr@777 | 3534 | void G1CollectedHeap::forget_alloc_region_list() { |
tonyp@2472 | 3535 | assert_at_safepoint(true /* should_be_vm_thread */); |
ysr@777 | 3536 | while (_gc_alloc_region_list != NULL) { |
ysr@777 | 3537 | HeapRegion* r = _gc_alloc_region_list; |
ysr@777 | 3538 | assert(r->is_gc_alloc_region(), "Invariant."); |
iveresov@1072 | 3539 | // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
iveresov@1072 | 3540 | // newly allocated data in order to be able to apply deferred updates |
iveresov@1072 | 3541 | // before the GC is done for verification purposes (i.e. to allow |
iveresov@1072 | 3542 | // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the |
iveresov@1072 | 3543 | // collection. |
iveresov@1072 | 3544 | r->ContiguousSpace::set_saved_mark(); |
ysr@777 | 3545 | _gc_alloc_region_list = r->next_gc_alloc_region(); |
ysr@777 | 3546 | r->set_next_gc_alloc_region(NULL); |
ysr@777 | 3547 | r->set_is_gc_alloc_region(false); |
apetrusenko@980 | 3548 | if (r->is_survivor()) { |
apetrusenko@980 | 3549 | if (r->is_empty()) { |
apetrusenko@980 | 3550 | r->set_not_young(); |
apetrusenko@980 | 3551 | } else { |
apetrusenko@980 | 3552 | _young_list->add_survivor_region(r); |
apetrusenko@980 | 3553 | } |
apetrusenko@980 | 3554 | } |
ysr@777 | 3555 | } |
ysr@777 | 3556 | #ifdef G1_DEBUG |
ysr@777 | 3557 | FindGCAllocRegion fa; |
ysr@777 | 3558 | heap_region_iterate(&fa); |
ysr@777 | 3559 | #endif // G1_DEBUG |
ysr@777 | 3560 | } |
ysr@777 | 3561 | |
ysr@777 | 3562 | |
ysr@777 | 3563 | bool G1CollectedHeap::check_gc_alloc_regions() { |
ysr@777 | 3564 | // TODO: allocation regions check |
ysr@777 | 3565 | return true; |
ysr@777 | 3566 | } |
ysr@777 | 3567 | |
ysr@777 | 3568 | void G1CollectedHeap::get_gc_alloc_regions() { |
tonyp@1071 | 3569 | // First, let's check that the GC alloc region list is empty (it should be) |
tonyp@1071 | 3570 | assert(_gc_alloc_region_list == NULL, "invariant"); |
tonyp@1071 | 3571 | |
ysr@777 | 3572 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
tonyp@1071 | 3573 | assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
apetrusenko@1296 | 3574 | assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
tonyp@1071 | 3575 | |
ysr@777 | 3576 | // Create new GC alloc regions. |
tonyp@1071 | 3577 | HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
tonyp@1071 | 3578 | _retained_gc_alloc_regions[ap] = NULL; |
tonyp@1071 | 3579 | |
tonyp@1071 | 3580 | if (alloc_region != NULL) { |
tonyp@1071 | 3581 | assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); |
tonyp@1071 | 3582 | |
tonyp@1071 | 3583 | // let's make sure that the GC alloc region is not tagged as such |
tonyp@1071 | 3584 | // outside a GC operation |
tonyp@1071 | 3585 | assert(!alloc_region->is_gc_alloc_region(), "sanity"); |
tonyp@1071 | 3586 | |
tonyp@1071 | 3587 | if (alloc_region->in_collection_set() || |
tonyp@1071 | 3588 | alloc_region->top() == alloc_region->end() || |
johnc@1795 | 3589 | alloc_region->top() == alloc_region->bottom() || |
johnc@1795 | 3590 | alloc_region->isHumongous()) { |
johnc@1795 | 3591 | // we will discard the current GC alloc region if |
johnc@1795 | 3592 | // * it's in the collection set (it can happen!), |
johnc@1795 | 3593 | // * it's already full (no point in using it), |
johnc@1795 | 3594 | // * it's empty (this means that it was emptied during |
johnc@1795 | 3595 | // a cleanup and it should be on the free list now), or |
johnc@1795 | 3596 | // * it's humongous (this means that it was emptied |
johnc@1795 | 3597 | // during a cleanup and was added to the free list, but |
johnc@1795 | 3598 | // has been subsequently used to allocate a humongous |
johnc@1795 | 3599 | // object that may be less than the region size). |
tonyp@1071 | 3600 | |
tonyp@1071 | 3601 | alloc_region = NULL; |
tonyp@1071 | 3602 | } |
tonyp@1071 | 3603 | } |
tonyp@1071 | 3604 | |
tonyp@1071 | 3605 | if (alloc_region == NULL) { |
tonyp@1071 | 3606 | // we will get a new GC alloc region |
johnc@2504 | 3607 | alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); |
apetrusenko@1296 | 3608 | } else { |
apetrusenko@1296 | 3609 | // the region was retained from the last collection |
apetrusenko@1296 | 3610 | ++_gc_alloc_region_counts[ap]; |
tonyp@1823 | 3611 | if (G1PrintHeapRegions) { |
tonyp@1823 | 3612 | gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
tonyp@1823 | 3613 | "top "PTR_FORMAT, |
tonyp@1823 | 3614 | alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); |
tonyp@1823 | 3615 | } |
ysr@777 | 3616 | } |
tonyp@1071 | 3617 | |
ysr@777 | 3618 | if (alloc_region != NULL) { |
tonyp@1071 | 3619 | assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
ysr@777 | 3620 | set_gc_alloc_region(ap, alloc_region); |
ysr@777 | 3621 | } |
tonyp@1071 | 3622 | |
tonyp@1071 | 3623 | assert(_gc_alloc_regions[ap] == NULL || |
tonyp@1071 | 3624 | _gc_alloc_regions[ap]->is_gc_alloc_region(), |
tonyp@1071 | 3625 | "the GC alloc region should be tagged as such"); |
tonyp@1071 | 3626 | assert(_gc_alloc_regions[ap] == NULL || |
tonyp@1071 | 3627 | _gc_alloc_regions[ap] == _gc_alloc_region_list, |
tonyp@1071 | 3628 | "the GC alloc region should be the same as the GC alloc list head"); |
ysr@777 | 3629 | } |
ysr@777 | 3630 | // Set alternative regions for allocation purposes that have reached |
tonyp@1071 | 3631 | // their limit. |
ysr@777 | 3632 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 3633 | GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); |
ysr@777 | 3634 | if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { |
ysr@777 | 3635 | _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; |
ysr@777 | 3636 | } |
ysr@777 | 3637 | } |
ysr@777 | 3638 | assert(check_gc_alloc_regions(), "alloc regions messed up"); |
ysr@777 | 3639 | } |
ysr@777 | 3640 | |
tonyp@1071 | 3641 | void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
ysr@777 | 3642 | // We keep a separate list of all regions that have been alloc regions in |
tonyp@1071 | 3643 | // the current collection pause. Forget that now. This method will |
tonyp@1071 | 3644 | // untag the GC alloc regions and tear down the GC alloc region |
tonyp@1071 | 3645 | // list. It's desirable that no regions are tagged as GC alloc |
tonyp@1071 | 3646 | // outside GCs. |
johnc@2316 | 3647 | |
ysr@777 | 3648 | forget_alloc_region_list(); |
ysr@777 | 3649 | |
ysr@777 | 3650 | // The current alloc regions contain objs that have survived |
ysr@777 | 3651 | // collection. Make them no longer GC alloc regions. |
ysr@777 | 3652 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 3653 | HeapRegion* r = _gc_alloc_regions[ap]; |
tonyp@1071 | 3654 | _retained_gc_alloc_regions[ap] = NULL; |
apetrusenko@1296 | 3655 | _gc_alloc_region_counts[ap] = 0; |
tonyp@1071 | 3656 | |
tonyp@1071 | 3657 | if (r != NULL) { |
tonyp@1071 | 3658 | // we retain nothing on _gc_alloc_regions between GCs |
tonyp@1071 | 3659 | set_gc_alloc_region(ap, NULL); |
tonyp@1071 | 3660 | |
tonyp@1071 | 3661 | if (r->is_empty()) { |
tonyp@2472 | 3662 | // We didn't actually allocate anything in it; let's just put |
tonyp@2472 | 3663 | // it back on the free list. |
tonyp@2714 | 3664 | _free_list.add_as_head(r); |
tonyp@1071 | 3665 | } else if (_retain_gc_alloc_region[ap] && !totally) { |
tonyp@1071 | 3666 | // retain it so that we can use it at the beginning of the next GC |
tonyp@1071 | 3667 | _retained_gc_alloc_regions[ap] = r; |
ysr@777 | 3668 | } |
ysr@777 | 3669 | } |
tonyp@1071 | 3670 | } |
tonyp@1071 | 3671 | } |
tonyp@1071 | 3672 | |
tonyp@1071 | 3673 | #ifndef PRODUCT |
tonyp@1071 | 3674 | // Useful for debugging |
tonyp@1071 | 3675 | |
tonyp@1071 | 3676 | void G1CollectedHeap::print_gc_alloc_regions() { |
tonyp@1071 | 3677 | gclog_or_tty->print_cr("GC alloc regions"); |
tonyp@1071 | 3678 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
tonyp@1071 | 3679 | HeapRegion* r = _gc_alloc_regions[ap]; |
tonyp@1071 | 3680 | if (r == NULL) { |
tonyp@1071 | 3681 | gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); |
tonyp@1071 | 3682 | } else { |
tonyp@1071 | 3683 | gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, |
tonyp@1071 | 3684 | ap, r->bottom(), r->used()); |
tonyp@1071 | 3685 | } |
tonyp@1071 | 3686 | } |
tonyp@1071 | 3687 | } |
tonyp@1071 | 3688 | #endif // PRODUCT |
ysr@777 | 3689 | |
ysr@777 | 3690 | void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { |
ysr@777 | 3691 | _drain_in_progress = false; |
ysr@777 | 3692 | set_evac_failure_closure(cl); |
ysr@777 | 3693 | _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); |
ysr@777 | 3694 | } |
ysr@777 | 3695 | |
ysr@777 | 3696 | void G1CollectedHeap::finalize_for_evac_failure() { |
ysr@777 | 3697 | assert(_evac_failure_scan_stack != NULL && |
ysr@777 | 3698 | _evac_failure_scan_stack->length() == 0, |
ysr@777 | 3699 | "Postcondition"); |
ysr@777 | 3700 | assert(!_drain_in_progress, "Postcondition"); |
apetrusenko@1480 | 3701 | delete _evac_failure_scan_stack; |
ysr@777 | 3702 | _evac_failure_scan_stack = NULL; |
ysr@777 | 3703 | } |
ysr@777 | 3704 | |
ysr@777 | 3705 | |
ysr@777 | 3706 | |
ysr@777 | 3707 | // *** Sequential G1 Evacuation |
ysr@777 | 3708 | |
ysr@777 | 3709 | class G1IsAliveClosure: public BoolObjectClosure { |
ysr@777 | 3710 | G1CollectedHeap* _g1; |
ysr@777 | 3711 | public: |
ysr@777 | 3712 | G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
ysr@777 | 3713 | void do_object(oop p) { assert(false, "Do not call."); } |
ysr@777 | 3714 | bool do_object_b(oop p) { |
ysr@777 | 3715 | // It is reachable if it is outside the collection set, or is inside |
ysr@777 | 3716 | // and forwarded. |
ysr@777 | 3717 | |
ysr@777 | 3718 | #ifdef G1_DEBUG |
ysr@777 | 3719 | gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", |
ysr@777 | 3720 | (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), |
ysr@777 | 3721 | !_g1->obj_in_cs(p) || p->is_forwarded()); |
ysr@777 | 3722 | #endif // G1_DEBUG |
ysr@777 | 3723 | |
ysr@777 | 3724 | return !_g1->obj_in_cs(p) || p->is_forwarded(); |
ysr@777 | 3725 | } |
ysr@777 | 3726 | }; |
ysr@777 | 3727 | |
ysr@777 | 3728 | class G1KeepAliveClosure: public OopClosure { |
ysr@777 | 3729 | G1CollectedHeap* _g1; |
ysr@777 | 3730 | public: |
ysr@777 | 3731 | G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
ysr@1280 | 3732 | void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
ysr@1280 | 3733 | void do_oop( oop* p) { |
ysr@777 | 3734 | oop obj = *p; |
ysr@777 | 3735 | #ifdef G1_DEBUG |
ysr@777 | 3736 | if (PrintGC && Verbose) { |
ysr@777 | 3737 | gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, |
ysr@777 | 3738 | p, (void*) obj, (void*) *p); |
ysr@777 | 3739 | } |
ysr@777 | 3740 | #endif // G1_DEBUG |
ysr@777 | 3741 | |
ysr@777 | 3742 | if (_g1->obj_in_cs(obj)) { |
ysr@777 | 3743 | assert( obj->is_forwarded(), "invariant" ); |
ysr@777 | 3744 | *p = obj->forwardee(); |
ysr@777 | 3745 | #ifdef G1_DEBUG |
ysr@777 | 3746 | gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, |
ysr@777 | 3747 | (void*) obj, (void*) *p); |
ysr@777 | 3748 | #endif // G1_DEBUG |
ysr@777 | 3749 | } |
ysr@777 | 3750 | } |
ysr@777 | 3751 | }; |
ysr@777 | 3752 | |
iveresov@1051 | 3753 | class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
iveresov@1051 | 3754 | private: |
iveresov@1051 | 3755 | G1CollectedHeap* _g1; |
iveresov@1051 | 3756 | DirtyCardQueue *_dcq; |
iveresov@1051 | 3757 | CardTableModRefBS* _ct_bs; |
iveresov@1051 | 3758 | |
iveresov@1051 | 3759 | public: |
iveresov@1051 | 3760 | UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
iveresov@1051 | 3761 | _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
iveresov@1051 | 3762 | |
ysr@1280 | 3763 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 3764 | virtual void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 3765 | template <class T> void do_oop_work(T* p) { |
iveresov@1051 | 3766 | assert(_from->is_in_reserved(p), "paranoia"); |
ysr@1280 | 3767 | if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
ysr@1280 | 3768 | !_from->is_survivor()) { |
iveresov@1051 | 3769 | size_t card_index = _ct_bs->index_for(p); |
iveresov@1051 | 3770 | if (_ct_bs->mark_card_deferred(card_index)) { |
iveresov@1051 | 3771 | _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
iveresov@1051 | 3772 | } |
iveresov@1051 | 3773 | } |
iveresov@1051 | 3774 | } |
iveresov@1051 | 3775 | }; |
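// Illustrative sketch (toy, single-threaded, fixed-size tables): the
// deferred-update pattern UpdateRSetDeferred implements above -- instead of
// updating a remembered set immediately, mark the 512-byte card covering
// the updated field and queue the card's address so it can be processed
// after the pause, cf. mark_card_deferred() and the DirtyCardQueue:

#include <vector>
#include <cstddef>

static const int      CARD_SHIFT = 9;          // 512-byte cards, as in G1
static unsigned char  card_table[1 << 20];     // toy card table
static std::vector<unsigned char*> deferred_cards;

static void defer_rset_update(char* field_addr, char* heap_base) {
  size_t card_index = (size_t)(field_addr - heap_base) >> CARD_SHIFT;
  unsigned char* card = &card_table[card_index];
  if (*card == 0) {      // first writer to the card enqueues it, once
    *card = 1;
    deferred_cards.push_back(card);
  }
}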
iveresov@1051 | 3776 | |
ysr@777 | 3777 | class RemoveSelfPointerClosure: public ObjectClosure { |
ysr@777 | 3778 | private: |
ysr@777 | 3779 | G1CollectedHeap* _g1; |
ysr@777 | 3780 | ConcurrentMark* _cm; |
ysr@777 | 3781 | HeapRegion* _hr; |
ysr@777 | 3782 | size_t _prev_marked_bytes; |
ysr@777 | 3783 | size_t _next_marked_bytes; |
iveresov@1051 | 3784 | OopsInHeapRegionClosure *_cl; |
ysr@777 | 3785 | public: |
tonyp@2453 | 3786 | RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr, |
tonyp@2453 | 3787 | OopsInHeapRegionClosure* cl) : |
tonyp@2453 | 3788 | _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
iveresov@1051 | 3789 | _next_marked_bytes(0), _cl(cl) {} |
ysr@777 | 3790 | |
ysr@777 | 3791 | size_t prev_marked_bytes() { return _prev_marked_bytes; } |
ysr@777 | 3792 | size_t next_marked_bytes() { return _next_marked_bytes; } |
ysr@777 | 3793 | |
tonyp@2453 | 3794 | // <original comment> |
iveresov@787 | 3795 | // The original idea here was to coalesce evacuated and dead objects. |
iveresov@787 | 3796 | // However that caused complications with the block offset table (BOT). |
iveresov@787 | 3797 | // In particular if there were two TLABs, one of them partially refined. |
iveresov@787 | 3798 | // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
iveresov@787 | 3799 | // The BOT entries of the unrefined part of TLAB_2 point to the start |
iveresov@787 | 3800 | // of TLAB_2. If the last object of the TLAB_1 and the first object |
iveresov@787 | 3801 | // of TLAB_2 are coalesced, then the cards of the unrefined part |
iveresov@787 | 3802 | // would point into the middle of the filler object. |
tonyp@2453 | 3803 | // The current approach is to not coalesce and leave the BOT contents intact. |
tonyp@2453 | 3804 | // </original comment> |
iveresov@787 | 3805 | // |
tonyp@2453 | 3806 | // We now reset the BOT when we start the object iteration over the |
tonyp@2453 | 3807 | // region and refine its entries for every object we come across. So |
tonyp@2453 | 3808 | // the above comment is not really relevant and we should be able |
tonyp@2453 | 3809 | // to coalesce dead objects if we want to. |
iveresov@787 | 3810 | void do_object(oop obj) { |
tonyp@2453 | 3811 | HeapWord* obj_addr = (HeapWord*) obj; |
tonyp@2453 | 3812 | assert(_hr->is_in(obj_addr), "sanity"); |
tonyp@2453 | 3813 | size_t obj_size = obj->size(); |
tonyp@2453 | 3814 | _hr->update_bot_for_object(obj_addr, obj_size); |
iveresov@787 | 3815 | if (obj->is_forwarded() && obj->forwardee() == obj) { |
iveresov@787 | 3816 | // The object failed to move. |
iveresov@787 | 3817 | assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
iveresov@787 | 3818 | _cm->markPrev(obj); |
iveresov@787 | 3819 | assert(_cm->isPrevMarked(obj), "Should be marked!"); |
tonyp@2453 | 3820 | _prev_marked_bytes += (obj_size * HeapWordSize); |
iveresov@787 | 3821 | if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
iveresov@787 | 3822 | _cm->markAndGrayObjectIfNecessary(obj); |
iveresov@787 | 3823 | } |
iveresov@787 | 3824 | obj->set_mark(markOopDesc::prototype()); |
iveresov@787 | 3825 | // While we were processing RSet buffers during the |
iveresov@787 | 3826 | // collection, we actually didn't scan any cards on the |
iveresov@787 | 3827 | // collection set, since we didn't want to update remembered |
iveresov@787 | 3828 | // sets with entries that point into the collection set, given |
iveresov@787 | 3829 | // that live objects from the collection set are about to move |
iveresov@787 | 3830 | // and such entries will be stale very soon. This change also |
iveresov@787 | 3831 | // dealt with a reliability issue which involved scanning a |
iveresov@787 | 3832 | // card in the collection set and coming across an array that |
iveresov@787 | 3833 | // was being chunked and looking malformed. The problem is |
iveresov@787 | 3834 | // that, if evacuation fails, we might have remembered set |
iveresov@787 | 3835 | // entries missing given that we skipped cards on the |
iveresov@787 | 3836 | // collection set. So, we'll recreate such entries now. |
iveresov@1051 | 3837 | obj->oop_iterate(_cl); |
iveresov@787 | 3838 | assert(_cm->isPrevMarked(obj), "Should be marked!"); |
iveresov@787 | 3839 | } else { |
iveresov@787 | 3840 | // The object has been either evacuated or is dead. Fill it with a |
iveresov@787 | 3841 | // dummy object. |
tonyp@2453 | 3842 | MemRegion mr((HeapWord*)obj, obj_size); |
jcoomes@916 | 3843 | CollectedHeap::fill_with_object(mr); |
ysr@777 | 3844 | _cm->clearRangeBothMaps(mr); |
ysr@777 | 3845 | } |
ysr@777 | 3846 | } |
ysr@777 | 3847 | }; |
ysr@777 | 3848 | |
ysr@777 | 3849 | void G1CollectedHeap::remove_self_forwarding_pointers() { |
johnc@2060 | 3850 | UpdateRSetImmediate immediate_update(_g1h->g1_rem_set()); |
iveresov@1051 | 3851 | DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
iveresov@1051 | 3852 | UpdateRSetDeferred deferred_update(_g1h, &dcq); |
iveresov@1051 | 3853 | OopsInHeapRegionClosure *cl; |
iveresov@1051 | 3854 | if (G1DeferredRSUpdate) { |
iveresov@1051 | 3855 | cl = &deferred_update; |
iveresov@1051 | 3856 | } else { |
iveresov@1051 | 3857 | cl = &immediate_update; |
iveresov@1051 | 3858 | } |
ysr@777 | 3859 | HeapRegion* cur = g1_policy()->collection_set(); |
ysr@777 | 3860 | while (cur != NULL) { |
ysr@777 | 3861 | assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
tonyp@2453 | 3862 | assert(!cur->isHumongous(), "sanity"); |
tonyp@2453 | 3863 | |
ysr@777 | 3864 | if (cur->evacuation_failed()) { |
ysr@777 | 3865 | assert(cur->in_collection_set(), "bad CS"); |
tonyp@2453 | 3866 | RemoveSelfPointerClosure rspc(_g1h, cur, cl); |
tonyp@2453 | 3867 | |
tonyp@2453 | 3868 | cur->reset_bot(); |
iveresov@1051 | 3869 | cl->set_region(cur); |
ysr@777 | 3870 | cur->object_iterate(&rspc); |
ysr@777 | 3871 | |
ysr@777 | 3872 | // A number of manipulations to make the TAMS be the current top, |
ysr@777 | 3873 | // and the marked bytes be the ones observed in the iteration. |
ysr@777 | 3874 | if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { |
ysr@777 | 3875 | // The comments below are the postconditions achieved by the |
ysr@777 | 3876 | // calls. Note especially the last such condition, which says that |
ysr@777 | 3877 | // the count of marked bytes has been properly restored. |
ysr@777 | 3878 | cur->note_start_of_marking(false); |
ysr@777 | 3879 | // _next_top_at_mark_start == top, _next_marked_bytes == 0 |
ysr@777 | 3880 | cur->add_to_marked_bytes(rspc.prev_marked_bytes()); |
ysr@777 | 3881 | // _next_marked_bytes == prev_marked_bytes. |
ysr@777 | 3882 | cur->note_end_of_marking(); |
ysr@777 | 3883 | // _prev_top_at_mark_start == top(), |
ysr@777 | 3884 | // _prev_marked_bytes == prev_marked_bytes |
ysr@777 | 3885 | } |
ysr@777 | 3886 | // If there is no mark in progress, we modified the _next variables |
ysr@777 | 3887 | // above needlessly, but harmlessly. |
ysr@777 | 3888 | if (_g1h->mark_in_progress()) { |
ysr@777 | 3889 | cur->note_start_of_marking(false); |
ysr@777 | 3890 | // _next_top_at_mark_start == top, _next_marked_bytes == 0 |
ysr@777 | 3891 | // _next_marked_bytes == next_marked_bytes. |
ysr@777 | 3892 | } |
ysr@777 | 3893 | |
ysr@777 | 3894 | // Now make sure the region has the right index in the sorted array. |
ysr@777 | 3895 | g1_policy()->note_change_in_marked_bytes(cur); |
ysr@777 | 3896 | } |
ysr@777 | 3897 | cur = cur->next_in_collection_set(); |
ysr@777 | 3898 | } |
ysr@777 | 3899 | assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
ysr@777 | 3900 | |
ysr@777 | 3901 | // Now restore saved marks, if any. |
ysr@777 | 3902 | if (_objs_with_preserved_marks != NULL) { |
ysr@777 | 3903 | assert(_preserved_marks_of_objs != NULL, "Both or none."); |
ysr@777 | 3904 | guarantee(_objs_with_preserved_marks->length() == |
ysr@777 | 3905 | _preserved_marks_of_objs->length(), "Both or none."); |
ysr@777 | 3906 | for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { |
ysr@777 | 3907 | oop obj = _objs_with_preserved_marks->at(i); |
ysr@777 | 3908 | markOop m = _preserved_marks_of_objs->at(i); |
ysr@777 | 3909 | obj->set_mark(m); |
ysr@777 | 3910 | } |
ysr@777 | 3911 | // Delete the preserved marks growable arrays (allocated on the C heap). |
ysr@777 | 3912 | delete _objs_with_preserved_marks; |
ysr@777 | 3913 | delete _preserved_marks_of_objs; |
ysr@777 | 3914 | _objs_with_preserved_marks = NULL; |
ysr@777 | 3915 | _preserved_marks_of_objs = NULL; |
ysr@777 | 3916 | } |
ysr@777 | 3917 | } |
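// Illustrative sketch (toy types): the preserved-marks machinery above in
// miniature. Only headers that cannot be recreated (cf.
// must_be_preserved_for_promotion_failure()) are saved, in two parallel
// arrays, before a forwarding pointer overwrites them; after the pause
// they are written back and the arrays discarded.

#include <vector>

struct ToyObj { unsigned long mark; };

static std::vector<ToyObj*>       objs_with_saved_marks;
static std::vector<unsigned long> saved_marks;

static void save_mark_if_necessary(ToyObj* obj, unsigned long m,
                                   bool must_preserve) {
  if (must_preserve) {
    objs_with_saved_marks.push_back(obj);
    saved_marks.push_back(m);
  }
}

static void restore_saved_marks() {
  for (size_t i = 0; i < objs_with_saved_marks.size(); ++i) {
    objs_with_saved_marks[i]->mark = saved_marks[i];  // undo forwarding ptr
  }
  objs_with_saved_marks.clear();
  saved_marks.clear();
}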
ysr@777 | 3918 | |
ysr@777 | 3919 | void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { |
ysr@777 | 3920 | _evac_failure_scan_stack->push(obj); |
ysr@777 | 3921 | } |
ysr@777 | 3922 | |
ysr@777 | 3923 | void G1CollectedHeap::drain_evac_failure_scan_stack() { |
ysr@777 | 3924 | assert(_evac_failure_scan_stack != NULL, "precondition"); |
ysr@777 | 3925 | |
ysr@777 | 3926 | while (_evac_failure_scan_stack->length() > 0) { |
ysr@777 | 3927 | oop obj = _evac_failure_scan_stack->pop(); |
ysr@777 | 3928 | _evac_failure_closure->set_region(heap_region_containing(obj)); |
ysr@777 | 3929 | obj->oop_iterate_backwards(_evac_failure_closure); |
ysr@777 | 3930 | } |
ysr@777 | 3931 | } |
ysr@777 | 3932 | |
ysr@777 | 3933 | oop |
ysr@777 | 3934 | G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, |
ysr@777 | 3935 | oop old) { |
ysr@777 | 3936 | markOop m = old->mark(); |
ysr@777 | 3937 | oop forward_ptr = old->forward_to_atomic(old); |
ysr@777 | 3938 | if (forward_ptr == NULL) { |
ysr@777 | 3939 | // Forward-to-self succeeded. |
ysr@777 | 3940 | if (_evac_failure_closure != cl) { |
ysr@777 | 3941 | MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 3942 | assert(!_drain_in_progress, |
ysr@777 | 3943 | "Should only be true while someone holds the lock."); |
ysr@777 | 3944 | // Set the global evac-failure closure to the current thread's. |
ysr@777 | 3945 | assert(_evac_failure_closure == NULL, "Or locking has failed."); |
ysr@777 | 3946 | set_evac_failure_closure(cl); |
ysr@777 | 3947 | // Now do the common part. |
ysr@777 | 3948 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3949 | // Reset to NULL. |
ysr@777 | 3950 | set_evac_failure_closure(NULL); |
ysr@777 | 3951 | } else { |
ysr@777 | 3952 | // The lock is already held, and this is recursive. |
ysr@777 | 3953 | assert(_drain_in_progress, "This should only be the recursive case."); |
ysr@777 | 3954 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3955 | } |
ysr@777 | 3956 | return old; |
ysr@777 | 3957 | } else { |
ysr@777 | 3958 | // Someone else had a place to copy it. |
ysr@777 | 3959 | return forward_ptr; |
ysr@777 | 3960 | } |
ysr@777 | 3961 | } |
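// Illustrative sketch (C++11 atomics for brevity; toy type):
// forward_to_atomic(old) above is a compare-and-swap on the header. Every
// worker that fails to copy an object races to install "forwarded to
// itself"; exactly one wins and runs the evacuation-failure path, while
// the others simply observe the winner's forwardee.

#include <atomic>
#include <cstddef>

struct FwdObj { std::atomic<FwdObj*> forwardee; };   // NULL == not forwarded

// Returns NULL if this thread won the race, else the competing forwardee.
static FwdObj* forward_to_self_atomic(FwdObj* old_obj) {
  FwdObj* expected = NULL;
  if (old_obj->forwardee.compare_exchange_strong(expected, old_obj)) {
    return NULL;       // we won: handle the failure exactly once
  }
  return expected;     // someone else forwarded it first
}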
ysr@777 | 3962 | |
ysr@777 | 3963 | void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { |
ysr@777 | 3964 | set_evacuation_failed(true); |
ysr@777 | 3965 | |
ysr@777 | 3966 | preserve_mark_if_necessary(old, m); |
ysr@777 | 3967 | |
ysr@777 | 3968 | HeapRegion* r = heap_region_containing(old); |
ysr@777 | 3969 | if (!r->evacuation_failed()) { |
ysr@777 | 3970 | r->set_evacuation_failed(true); |
tonyp@1717 | 3971 | if (G1PrintHeapRegions) { |
tonyp@2074 | 3972 | gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " |
ysr@777 | 3973 | "["PTR_FORMAT","PTR_FORMAT")\n", |
ysr@777 | 3974 | r, r->bottom(), r->end()); |
ysr@777 | 3975 | } |
ysr@777 | 3976 | } |
ysr@777 | 3977 | |
ysr@777 | 3978 | push_on_evac_failure_scan_stack(old); |
ysr@777 | 3979 | |
ysr@777 | 3980 | if (!_drain_in_progress) { |
ysr@777 | 3981 | // prevent recursion in copy_to_survivor_space() |
ysr@777 | 3982 | _drain_in_progress = true; |
ysr@777 | 3983 | drain_evac_failure_scan_stack(); |
ysr@777 | 3984 | _drain_in_progress = false; |
ysr@777 | 3985 | } |
ysr@777 | 3986 | } |
ysr@777 | 3987 | |
ysr@777 | 3988 | void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { |
ysr@2380 | 3989 | assert(evacuation_failed(), "Oversaving!"); |
ysr@2380 | 3990 | // We want to call the "for_promotion_failure" version only in the |
ysr@2380 | 3991 | // case of a promotion failure. |
ysr@2380 | 3992 | if (m->must_be_preserved_for_promotion_failure(obj)) { |
ysr@777 | 3993 | if (_objs_with_preserved_marks == NULL) { |
ysr@777 | 3994 | assert(_preserved_marks_of_objs == NULL, "Both or none."); |
ysr@777 | 3995 | _objs_with_preserved_marks = |
ysr@777 | 3996 | new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); |
ysr@777 | 3997 | _preserved_marks_of_objs = |
ysr@777 | 3998 | new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); |
ysr@777 | 3999 | } |
ysr@777 | 4000 | _objs_with_preserved_marks->push(obj); |
ysr@777 | 4001 | _preserved_marks_of_objs->push(m); |
ysr@777 | 4002 | } |
ysr@777 | 4003 | } |
ysr@777 | 4004 | |
ysr@777 | 4005 | // *** Parallel G1 Evacuation |
ysr@777 | 4006 | |
ysr@777 | 4007 | HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, |
ysr@777 | 4008 | size_t word_size) { |
tonyp@2073 | 4009 | assert(!isHumongous(word_size), |
tonyp@2073 | 4010 | err_msg("we should not be seeing humongous allocation requests " |
tonyp@2073 | 4011 | "during GC, word_size = "SIZE_FORMAT, word_size)); |
tonyp@2073 | 4012 | |
ysr@777 | 4013 | HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 4014 | // let the caller handle alloc failure |
ysr@777 | 4015 | if (alloc_region == NULL) return NULL; |
ysr@777 | 4016 | |
ysr@777 | 4017 | HeapWord* block = alloc_region->par_allocate(word_size); |
ysr@777 | 4018 | if (block == NULL) { |
ysr@777 | 4019 | block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); |
ysr@777 | 4020 | } |
ysr@777 | 4021 | return block; |
ysr@777 | 4022 | } |
ysr@777 | 4023 | |
apetrusenko@980 | 4024 | void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
apetrusenko@980 | 4025 | bool par) { |
apetrusenko@980 | 4026 | // Another thread might have obtained alloc_region for the given |
apetrusenko@980 | 4027 | // purpose, and might be attempting to allocate in it, and might |
apetrusenko@980 | 4028 | // succeed. Therefore, we can't do the "finalization" stuff on the |
apetrusenko@980 | 4029 | // region below until we're sure the last allocation has happened. |
apetrusenko@980 | 4030 | // We ensure this by allocating the remaining space with a garbage |
apetrusenko@980 | 4031 | // object. |
apetrusenko@980 | 4032 | if (par) par_allocate_remaining_space(alloc_region); |
apetrusenko@980 | 4033 | // Now we can do the post-GC stuff on the region. |
apetrusenko@980 | 4034 | alloc_region->note_end_of_copying(); |
apetrusenko@980 | 4035 | g1_policy()->record_after_bytes(alloc_region->used()); |
apetrusenko@980 | 4036 | } |
apetrusenko@980 | 4037 | |
ysr@777 | 4038 | HeapWord* |
ysr@777 | 4039 | G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, |
ysr@777 | 4040 | HeapRegion* alloc_region, |
ysr@777 | 4041 | bool par, |
ysr@777 | 4042 | size_t word_size) { |
tonyp@2073 | 4043 | assert(!isHumongous(word_size), |
tonyp@2073 | 4044 | err_msg("we should not be seeing humongous allocation requests " |
tonyp@2073 | 4045 | "during GC, word_size = "SIZE_FORMAT, word_size)); |
tonyp@2073 | 4046 | |
tonyp@2472 | 4047 | // We need to make sure we serialize calls to this method. Given |
tonyp@2472 | 4048 | // that the FreeList_lock guards accesses to the free_list anyway, |
tonyp@2472 | 4049 | // and we need to potentially remove a region from it, we'll use it |
tonyp@2472 | 4050 | // to protect the whole call. |
tonyp@2472 | 4051 | MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 4052 | |
ysr@777 | 4053 | HeapWord* block = NULL; |
ysr@777 | 4054 | // In the parallel case, another thread that obtained the lock first may |
ysr@777 | 4055 | // have already assigned a new gc_alloc_region. |
ysr@777 | 4056 | if (alloc_region != _gc_alloc_regions[purpose]) { |
ysr@777 | 4057 | assert(par, "But should only happen in parallel case."); |
ysr@777 | 4058 | alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 4059 | if (alloc_region == NULL) return NULL; |
ysr@777 | 4060 | block = alloc_region->par_allocate(word_size); |
ysr@777 | 4061 | if (block != NULL) return block; |
ysr@777 | 4062 | // Otherwise, continue; this new region is full, too. |
ysr@777 | 4063 | } |
ysr@777 | 4064 | assert(alloc_region != NULL, "We better have an allocation region"); |
apetrusenko@980 | 4065 | retire_alloc_region(alloc_region, par); |
ysr@777 | 4066 | |
ysr@777 | 4067 | if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { |
ysr@777 | 4068 | // Cannot allocate more regions for the given purpose. |
ysr@777 | 4069 | GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); |
ysr@777 | 4070 | // Is there an alternative? |
ysr@777 | 4071 | // Has the alternative region not been aliased yet? |
ysr@777 | 4072 | HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; |
ysr@777 | 4073 | // Has not the alternative region been aliased? |
apetrusenko@980 | 4074 | if (alloc_region != alt_region && alt_region != NULL) { |
ysr@777 | 4075 | // Try to allocate in the alternative region. |
ysr@777 | 4076 | if (par) { |
ysr@777 | 4077 | block = alt_region->par_allocate(word_size); |
ysr@777 | 4078 | } else { |
ysr@777 | 4079 | block = alt_region->allocate(word_size); |
ysr@777 | 4080 | } |
ysr@777 | 4081 | // Make an alias. |
ysr@777 | 4082 | _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; |
apetrusenko@980 | 4083 | if (block != NULL) { |
apetrusenko@980 | 4084 | return block; |
apetrusenko@980 | 4085 | } |
apetrusenko@980 | 4086 | retire_alloc_region(alt_region, par); |
ysr@777 | 4087 | } |
ysr@777 | 4088 | // Both the allocation region and the alternative one are full |
ysr@777 | 4089 | // and aliased, replace them with a new allocation region. |
ysr@777 | 4090 | purpose = alt_purpose; |
ysr@777 | 4091 | } else { |
ysr@777 | 4092 | set_gc_alloc_region(purpose, NULL); |
ysr@777 | 4093 | return NULL; |
ysr@777 | 4094 | } |
ysr@777 | 4095 | } |
ysr@777 | 4096 | |
ysr@777 | 4097 | // Now allocate a new region for allocation. |
tonyp@2472 | 4098 | alloc_region = new_gc_alloc_region(purpose, word_size); |
ysr@777 | 4099 | |
ysr@777 | 4100 | // let the caller handle alloc failure |
ysr@777 | 4101 | if (alloc_region != NULL) { |
ysr@777 | 4102 | |
ysr@777 | 4103 | assert(check_gc_alloc_regions(), "alloc regions messed up"); |
ysr@777 | 4104 | assert(alloc_region->saved_mark_at_top(), |
ysr@777 | 4105 | "Mark should have been saved already."); |
ysr@777 | 4106 | // This must be done last: once it's installed, other regions may |
ysr@777 | 4107 | // allocate in it (without holding the lock.) |
ysr@777 | 4108 | set_gc_alloc_region(purpose, alloc_region); |
ysr@777 | 4109 | |
ysr@777 | 4110 | if (par) { |
ysr@777 | 4111 | block = alloc_region->par_allocate(word_size); |
ysr@777 | 4112 | } else { |
ysr@777 | 4113 | block = alloc_region->allocate(word_size); |
ysr@777 | 4114 | } |
ysr@777 | 4115 | // Caller handles alloc failure. |
ysr@777 | 4116 | } else { |
ysr@777 | 4117 | // This also sets any aliases that use the same old alloc region to NULL. |
ysr@777 | 4118 | set_gc_alloc_region(purpose, NULL); |
ysr@777 | 4119 | } |
ysr@777 | 4120 | return block; // May be NULL. |
ysr@777 | 4121 | } |
ysr@777 | 4122 | |
ysr@777 | 4123 | void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { |
ysr@777 | 4124 | HeapWord* block = NULL; |
ysr@777 | 4125 | size_t free_words; |
ysr@777 | 4126 | do { |
ysr@777 | 4127 | free_words = r->free()/HeapWordSize; |
ysr@777 | 4128 | // If there's too little space, no one can allocate, so we're done. |
kvn@1926 | 4129 | if (free_words < CollectedHeap::min_fill_size()) return; |
ysr@777 | 4130 | // Otherwise, try to claim it. |
ysr@777 | 4131 | block = r->par_allocate(free_words); |
ysr@777 | 4132 | } while (block == NULL); |
jcoomes@916 | 4133 | fill_with_object(block, free_words); |
ysr@777 | 4134 | } |
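// Illustrative sketch (C++11 atomics for brevity; toy type): the do/while
// above is needed because par_allocate() is a CAS-bump allocator --
// between reading the free space and claiming it, another thread may
// allocate. The shape of such an allocator might be:

#include <atomic>
#include <cstddef>

struct ToyRegion {
  std::atomic<char*> top;
  char*              end;

  char* par_allocate(size_t bytes) {
    char* old_top = top.load();
    for (;;) {
      if (old_top + bytes > end) return NULL;    // no room left
      // compare_exchange refreshes old_top with the current value on failure.
      if (top.compare_exchange_weak(old_top, old_top + bytes)) {
        return old_top;        // we own [old_top, old_top + bytes)
      }
    }
  }
};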
ysr@777 | 4135 | |
ysr@777 | 4136 | #ifndef PRODUCT |
ysr@777 | 4137 | bool GCLabBitMapClosure::do_bit(size_t offset) { |
ysr@777 | 4138 | HeapWord* addr = _bitmap->offsetToHeapWord(offset); |
ysr@777 | 4139 | guarantee(_cm->isMarked(oop(addr)), "it should be!"); |
ysr@777 | 4140 | return true; |
ysr@777 | 4141 | } |
ysr@777 | 4142 | #endif // PRODUCT |
ysr@777 | 4143 | |
ysr@1280 | 4144 | G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
ysr@1280 | 4145 | : _g1h(g1h), |
ysr@1280 | 4146 | _refs(g1h->task_queue(queue_num)), |
ysr@1280 | 4147 | _dcq(&g1h->dirty_card_queue_set()), |
ysr@1280 | 4148 | _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
ysr@1280 | 4149 | _g1_rem(g1h->g1_rem_set()), |
ysr@1280 | 4150 | _hash_seed(17), _queue_num(queue_num), |
ysr@1280 | 4151 | _term_attempts(0), |
apetrusenko@1826 | 4152 | _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), |
apetrusenko@1826 | 4153 | _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), |
ysr@1280 | 4154 | _age_table(false), |
ysr@1280 | 4155 | _strong_roots_time(0), _term_time(0), |
ysr@1280 | 4156 | _alloc_buffer_waste(0), _undo_waste(0) |
ysr@1280 | 4157 | { |
ysr@1280 | 4158 | // we allocate G1YoungSurvRateNumRegions plus one entries, since |
ysr@1280 | 4159 | // we "sacrifice" entry 0 to keep track of surviving bytes for |
ysr@1280 | 4160 | // non-young regions (where the age is -1) |
ysr@1280 | 4161 | // We also add a few elements at the beginning and at the end in |
ysr@1280 | 4162 | // an attempt to eliminate cache contention |
ysr@1280 | 4163 | size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
ysr@1280 | 4164 | size_t array_length = PADDING_ELEM_NUM + |
ysr@1280 | 4165 | real_length + |
ysr@1280 | 4166 | PADDING_ELEM_NUM; |
ysr@1280 | 4167 | _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
ysr@1280 | 4168 | if (_surviving_young_words_base == NULL) |
ysr@1280 | 4169 | vm_exit_out_of_memory(array_length * sizeof(size_t), |
ysr@1280 | 4170 | "Not enough space for young surv histo."); |
ysr@1280 | 4171 | _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
ysr@1280 | 4172 | memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
ysr@1280 | 4173 | |
apetrusenko@1826 | 4174 | _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; |
apetrusenko@1826 | 4175 | _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; |
apetrusenko@1826 | 4176 | |
ysr@1280 | 4177 | _start = os::elapsedTime(); |
ysr@1280 | 4178 | } |
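// Illustrative sketch: the PADDING_ELEM_NUM trick used in the constructor
// above, in isolation. A hot per-thread counter array gets a cache line of
// unused slots on each side so its writes never share a line with a
// neighboring allocation (false sharing). 64-byte lines are assumed here.

#include <cstring>
#include <cstddef>

static const size_t PAD = 64 / sizeof(size_t);  // one cache line of size_t

static size_t* alloc_padded_counters(size_t n) {
  size_t* base = new size_t[PAD + n + PAD];
  memset(base, 0, (PAD + n + PAD) * sizeof(size_t));
  return base + PAD;   // callers use [0, n); free with delete[] (p - PAD)
}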
ysr@777 | 4179 | |
jcoomes@2064 | 4180 | void |
jcoomes@2064 | 4181 | G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) |
jcoomes@2064 | 4182 | { |
jcoomes@2064 | 4183 | st->print_raw_cr("GC Termination Stats"); |
jcoomes@2064 | 4184 | st->print_raw_cr(" elapsed --strong roots-- -------termination-------" |
jcoomes@2064 | 4185 | " ------waste (KiB)------"); |
jcoomes@2064 | 4186 | st->print_raw_cr("thr ms ms % ms % attempts" |
jcoomes@2064 | 4187 | " total alloc undo"); |
jcoomes@2064 | 4188 | st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" |
jcoomes@2064 | 4189 | " ------- ------- -------"); |
jcoomes@2064 | 4190 | } |
jcoomes@2064 | 4191 | |
jcoomes@2064 | 4192 | void |
jcoomes@2064 | 4193 | G1ParScanThreadState::print_termination_stats(int i, |
jcoomes@2064 | 4194 | outputStream* const st) const |
jcoomes@2064 | 4195 | { |
jcoomes@2064 | 4196 | const double elapsed_ms = elapsed_time() * 1000.0; |
jcoomes@2064 | 4197 | const double s_roots_ms = strong_roots_time() * 1000.0; |
jcoomes@2064 | 4198 | const double term_ms = term_time() * 1000.0; |
jcoomes@2064 | 4199 | st->print_cr("%3d %9.2f %9.2f %6.2f " |
jcoomes@2064 | 4200 | "%9.2f %6.2f " SIZE_FORMAT_W(8) " " |
jcoomes@2064 | 4201 | SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), |
jcoomes@2064 | 4202 | i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, |
jcoomes@2064 | 4203 | term_ms, term_ms * 100 / elapsed_ms, term_attempts(), |
jcoomes@2064 | 4204 | (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, |
jcoomes@2064 | 4205 | alloc_buffer_waste() * HeapWordSize / K, |
jcoomes@2064 | 4206 | undo_waste() * HeapWordSize / K); |
jcoomes@2064 | 4207 | } |
jcoomes@2064 | 4208 | |
jcoomes@2217 | 4209 | #ifdef ASSERT |
jcoomes@2217 | 4210 | bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { |
jcoomes@2217 | 4211 | assert(ref != NULL, "invariant"); |
jcoomes@2217 | 4212 | assert(UseCompressedOops, "sanity"); |
jcoomes@2217 | 4213 | assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); |
jcoomes@2217 | 4214 | oop p = oopDesc::load_decode_heap_oop(ref); |
jcoomes@2217 | 4215 | assert(_g1h->is_in_g1_reserved(p), |
jcoomes@2217 | 4216 | err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
jcoomes@2217 | 4217 | return true; |
jcoomes@2217 | 4218 | } |
jcoomes@2217 | 4219 | |
jcoomes@2217 | 4220 | bool G1ParScanThreadState::verify_ref(oop* ref) const { |
jcoomes@2217 | 4221 | assert(ref != NULL, "invariant"); |
jcoomes@2217 | 4222 | if (has_partial_array_mask(ref)) { |
jcoomes@2217 | 4223 | // Must be in the collection set--it's already been copied. |
jcoomes@2217 | 4224 | oop p = clear_partial_array_mask(ref); |
jcoomes@2217 | 4225 | assert(_g1h->obj_in_cs(p), |
jcoomes@2217 | 4226 | err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
jcoomes@2217 | 4227 | } else { |
jcoomes@2217 | 4228 | oop p = oopDesc::load_decode_heap_oop(ref); |
jcoomes@2217 | 4229 | assert(_g1h->is_in_g1_reserved(p), |
jcoomes@2217 | 4230 | err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); |
jcoomes@2217 | 4231 | } |
jcoomes@2217 | 4232 | return true; |
jcoomes@2217 | 4233 | } |
jcoomes@2217 | 4234 | |
jcoomes@2217 | 4235 | bool G1ParScanThreadState::verify_task(StarTask ref) const { |
jcoomes@2217 | 4236 | if (ref.is_narrow()) { |
jcoomes@2217 | 4237 | return verify_ref((narrowOop*) ref); |
jcoomes@2217 | 4238 | } else { |
jcoomes@2217 | 4239 | return verify_ref((oop*) ref); |
jcoomes@2217 | 4240 | } |
jcoomes@2217 | 4241 | } |
jcoomes@2217 | 4242 | #endif // ASSERT |
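// Illustrative sketch: has_partial_array_mask() / clear_partial_array_mask()
// used in the verifiers above are low-bit pointer tagging. Object pointers
// are at least word-aligned, so bit 0 is free to flag "this queue entry is
// a partially-scanned array continuation, not a plain reference":

#include <stdint.h>

static const uintptr_t PARTIAL_MASK = 1;

static inline void* set_partial_mask(void* p) {
  return (void*)((uintptr_t)p | PARTIAL_MASK);
}
static inline bool has_partial_mask(void* p) {
  return ((uintptr_t)p & PARTIAL_MASK) != 0;
}
static inline void* clear_partial_mask(void* p) {
  return (void*)((uintptr_t)p & ~PARTIAL_MASK);
}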
jcoomes@2217 | 4243 | |
jcoomes@2217 | 4244 | void G1ParScanThreadState::trim_queue() { |
jcoomes@2217 | 4245 | StarTask ref; |
jcoomes@2217 | 4246 | do { |
jcoomes@2217 | 4247 | // Drain the overflow stack first, so other threads can steal. |
jcoomes@2217 | 4248 | while (refs()->pop_overflow(ref)) { |
jcoomes@2217 | 4249 | deal_with_reference(ref); |
jcoomes@2217 | 4250 | } |
jcoomes@2217 | 4251 | while (refs()->pop_local(ref)) { |
jcoomes@2217 | 4252 | deal_with_reference(ref); |
jcoomes@2217 | 4253 | } |
jcoomes@2217 | 4254 | } while (!refs()->is_empty()); |
jcoomes@2217 | 4255 | } |
jcoomes@2217 | 4256 | |
ysr@777 | 4257 | G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
ysr@777 | 4258 | _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), |
ysr@777 | 4259 | _par_scan_state(par_scan_state) { } |
ysr@777 | 4260 | |
ysr@1280 | 4261 | template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
ysr@777 | 4262 | // This is called _after_ do_oop_work has been called, hence after |
ysr@777 | 4263 | // the object has been relocated to its new location and *p points |
ysr@777 | 4264 | // to its new location. |
ysr@777 | 4265 | |
ysr@1280 | 4266 | T heap_oop = oopDesc::load_heap_oop(p); |
ysr@1280 | 4267 | if (!oopDesc::is_null(heap_oop)) { |
ysr@1280 | 4268 | oop obj = oopDesc::decode_heap_oop(heap_oop); |
ysr@1280 | 4269 | assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
ysr@777 | 4270 | "shouldn't still be in the CSet if evacuation didn't fail."); |
ysr@1280 | 4271 | HeapWord* addr = (HeapWord*)obj; |
ysr@777 | 4272 | if (_g1->is_in_g1_reserved(addr)) |
ysr@777 | 4273 | _cm->grayRoot(oop(addr)); |
ysr@777 | 4274 | } |
ysr@777 | 4275 | } |
ysr@777 | 4276 | |
ysr@777 | 4277 | oop G1ParCopyHelper::copy_to_survivor_space(oop old) { |
ysr@777 | 4278 | size_t word_sz = old->size(); |
ysr@777 | 4279 | HeapRegion* from_region = _g1->heap_region_containing_raw(old); |
ysr@777 | 4280 | // +1 to make the -1 indexes valid... |
ysr@777 | 4281 | int young_index = from_region->young_index_in_cset()+1; |
ysr@777 | 4282 | assert( (from_region->is_young() && young_index > 0) || |
ysr@777 | 4283 | (!from_region->is_young() && young_index == 0), "invariant" ); |
ysr@777 | 4284 | G1CollectorPolicy* g1p = _g1->g1_policy(); |
ysr@777 | 4285 | markOop m = old->mark(); |
apetrusenko@980 | 4286 | int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
apetrusenko@980 | 4287 | : m->age(); |
apetrusenko@980 | 4288 | GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, |
ysr@777 | 4289 | word_sz); |
ysr@777 | 4290 | HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); |
ysr@777 | 4291 | oop obj = oop(obj_ptr); |
ysr@777 | 4292 | |
ysr@777 | 4293 | if (obj_ptr == NULL) { |
ysr@777 | 4294 | // This will either forward-to-self, or detect that someone else has |
ysr@777 | 4295 | // installed a forwarding pointer. |
ysr@777 | 4296 | OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); |
ysr@777 | 4297 | return _g1->handle_evacuation_failure_par(cl, old); |
ysr@777 | 4298 | } |
ysr@777 | 4299 | |
tonyp@961 | 4300 | // We're going to allocate linearly, so might as well prefetch ahead. |
tonyp@961 | 4301 | Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); |
tonyp@961 | 4302 | |
ysr@777 | 4303 | oop forward_ptr = old->forward_to_atomic(obj); |
ysr@777 | 4304 | if (forward_ptr == NULL) { |
ysr@777 | 4305 | Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); |
tonyp@961 | 4306 | if (g1p->track_object_age(alloc_purpose)) { |
tonyp@961 | 4307 | // We could simply do obj->incr_age(). However, this causes a |
tonyp@961 | 4308 | // performance issue. obj->incr_age() will first check whether |
tonyp@961 | 4309 | // the object has a displaced mark by checking its mark word; |
tonyp@961 | 4310 | // getting the mark word from the new location of the object |
tonyp@961 | 4311 | // stalls. So, given that we already have the mark word and we |
tonyp@961 | 4312 | // are about to install it anyway, it's better to increase the |
tonyp@961 | 4313 | // age on the mark word, when the object does not have a |
tonyp@961 | 4314 | // displaced mark word. We're not expecting many objects to have |
tonyp@961 | 4315 | // a displaced marked word, so that case is not optimized |
tonyp@961 | 4316 | // a displaced mark word, so that case is not optimized |
tonyp@961 | 4317 | |
tonyp@961 | 4318 | if (m->has_displaced_mark_helper()) { |
tonyp@961 | 4319 | // in this case, we have to install the mark word first, |
tonyp@961 | 4320 | // otherwise obj looks to be forwarded (the old mark word, |
tonyp@961 | 4321 | // which contains the forward pointer, was copied) |
tonyp@961 | 4322 | obj->set_mark(m); |
tonyp@961 | 4323 | obj->incr_age(); |
tonyp@961 | 4324 | } else { |
tonyp@961 | 4325 | m = m->incr_age(); |
apetrusenko@980 | 4326 | obj->set_mark(m); |
tonyp@961 | 4327 | } |
apetrusenko@980 | 4328 | _par_scan_state->age_table()->add(obj, word_sz); |
apetrusenko@980 | 4329 | } else { |
apetrusenko@980 | 4330 | obj->set_mark(m); |
tonyp@961 | 4331 | } |
tonyp@961 | 4332 | |
ysr@777 | 4333 | // preserve "next" mark bit |
ysr@777 | 4334 | if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { |
ysr@777 | 4335 | if (!use_local_bitmaps || |
ysr@777 | 4336 | !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { |
ysr@777 | 4337 | // if we couldn't mark it on the local bitmap (this happens when |
ysr@777 | 4338 | // the object was not allocated in the GCLab), we have to bite |
ysr@777 | 4339 | // the bullet and do the standard parallel mark |
ysr@777 | 4340 | _cm->markAndGrayObjectIfNecessary(obj); |
ysr@777 | 4341 | } |
ysr@777 | 4342 | #if 1 |
ysr@777 | 4343 | if (_g1->isMarkedNext(old)) { |
ysr@777 | 4344 | _cm->nextMarkBitMap()->parClear((HeapWord*)old); |
ysr@777 | 4345 | } |
ysr@777 | 4346 | #endif |
ysr@777 | 4347 | } |
ysr@777 | 4348 | |
ysr@777 | 4349 | size_t* surv_young_words = _par_scan_state->surviving_young_words(); |
ysr@777 | 4350 | surv_young_words[young_index] += word_sz; |
ysr@777 | 4351 | |
ysr@777 | 4352 | if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { |
ysr@777 | 4353 | arrayOop(old)->set_length(0); |
ysr@1280 | 4354 | oop* old_p = set_partial_array_mask(old); |
ysr@1280 | 4355 | _par_scan_state->push_on_queue(old_p); |
ysr@777 | 4356 | } else { |
tonyp@961 | 4357 | // No point in using the slower heap_region_containing() method, |
tonyp@961 | 4358 | // given that we know obj is in the heap. |
tonyp@961 | 4359 | _scanner->set_region(_g1->heap_region_containing_raw(obj)); |
ysr@777 | 4360 | obj->oop_iterate_backwards(_scanner); |
ysr@777 | 4361 | } |
ysr@777 | 4362 | } else { |
ysr@777 | 4363 | _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); |
ysr@777 | 4364 | obj = forward_ptr; |
ysr@777 | 4365 | } |
ysr@777 | 4366 | return obj; |
ysr@777 | 4367 | } |
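// A sketch of the race that the forward_to_atomic() call above resolves
// (thread names are illustrative):
//
//   GC thread A                          GC thread B
//   -----------                          -----------
//   obj_ptr = allocate(word_sz);         obj_ptr = allocate(word_sz);
//   forward_to_atomic(obj) == NULL       forward_to_atomic(its obj) != NULL
//   copy old into obj_ptr, scan it       undo_allocation(...), adopt A's copy
//
// Only the CAS winner copies and scans the object; the loser gives its
// PLAB space back and uses the winner's forwardee, so each object is
// evacuated exactly once.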
ysr@777 | 4368 | |
iveresov@1696 | 4369 | template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> |
ysr@1280 | 4370 | template <class T> |
iveresov@1696 | 4371 | void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> |
ysr@1280 | 4372 | ::do_oop_work(T* p) { |
ysr@1280 | 4373 | oop obj = oopDesc::load_decode_heap_oop(p); |
ysr@777 | 4374 | assert(barrier != G1BarrierRS || obj != NULL, |
ysr@777 | 4375 | "Precondition: G1BarrierRS implies obj is nonNull"); |
ysr@777 | 4376 | |
tonyp@961 | 4377 | // Here the NULL check is implicit in the in_cset_fast_test() call.
iveresov@1696 | 4378 | if (_g1->in_cset_fast_test(obj)) { |
ysr@777 | 4379 | #if G1_REM_SET_LOGGING |
tonyp@961 | 4380 | gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
tonyp@961 | 4381 | "into CS.", p, (void*) obj); |
ysr@777 | 4382 | #endif |
tonyp@961 | 4383 | if (obj->is_forwarded()) { |
ysr@1280 | 4384 | oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
tonyp@961 | 4385 | } else { |
ysr@1280 | 4386 | oop copy_oop = copy_to_survivor_space(obj); |
ysr@1280 | 4387 | oopDesc::encode_store_heap_oop(p, copy_oop); |
ysr@777 | 4388 | } |
tonyp@961 | 4389 | // When scanning the RS, we only care about objs in CS. |
tonyp@961 | 4390 | if (barrier == G1BarrierRS) { |
iveresov@1051 | 4391 | _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
ysr@777 | 4392 | } |
tonyp@961 | 4393 | } |
tonyp@961 | 4394 | |
tonyp@961 | 4395 | if (barrier == G1BarrierEvac && obj != NULL) { |
iveresov@1051 | 4396 | _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
tonyp@961 | 4397 | } |
tonyp@961 | 4398 | |
tonyp@961 | 4399 | if (do_gen_barrier && obj != NULL) { |
tonyp@961 | 4400 | par_do_barrier(p); |
tonyp@961 | 4401 | } |
tonyp@961 | 4402 | } |
tonyp@961 | 4403 | |
iveresov@1696 | 4404 | template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); |
iveresov@1696 | 4405 | template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); |
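// The template parameters select the per-reference barrier work (a
// reading of the code above, not a specification): do_gen_barrier adds
// the generational barrier via par_do_barrier(); 'barrier' chooses
// between no remembered-set work, updates while scanning the RS
// (G1BarrierRS) and updates at evacuation time (G1BarrierEvac); and
// do_mark_forwardee appears to control marking of the copied object
// during initial-mark pauses. The explicit instantiations above are
// likely the ones behind G1ParScanHeapEvacClosure, the evacuation
// closure installed on each G1ParScanThreadState further down.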
ysr@1280 | 4406 | |
ysr@1280 | 4407 | template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
tonyp@961 | 4408 | assert(has_partial_array_mask(p), "invariant"); |
tonyp@961 | 4409 | oop old = clear_partial_array_mask(p); |
ysr@777 | 4410 | assert(old->is_objArray(), "must be obj array"); |
ysr@777 | 4411 | assert(old->is_forwarded(), "must be forwarded"); |
ysr@777 | 4412 | assert(Universe::heap()->is_in_reserved(old), "must be in heap."); |
ysr@777 | 4413 | |
ysr@777 | 4414 | objArrayOop obj = objArrayOop(old->forwardee()); |
ysr@777 | 4415 | assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); |
ysr@777 | 4416 | // Process ParGCArrayScanChunk elements now
ysr@777 | 4417 | // and push the remainder back onto the queue.
ysr@777 | 4418 | int start = arrayOop(old)->length(); |
ysr@777 | 4419 | int end = obj->length(); |
ysr@777 | 4420 | int remainder = end - start; |
ysr@777 | 4421 | assert(start <= end, "just checking"); |
ysr@777 | 4422 | if (remainder > 2 * ParGCArrayScanChunk) { |
ysr@777 | 4423 | // The test above combines the last partial chunk with a full chunk.
ysr@777 | 4424 | end = start + ParGCArrayScanChunk; |
ysr@777 | 4425 | arrayOop(old)->set_length(end); |
ysr@777 | 4426 | // Push remainder. |
ysr@1280 | 4427 | oop* old_p = set_partial_array_mask(old); |
ysr@1280 | 4428 | assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
ysr@1280 | 4429 | _par_scan_state->push_on_queue(old_p); |
ysr@777 | 4430 | } else { |
ysr@777 | 4431 | // Restore length so that the heap remains parsable in |
ysr@777 | 4432 | // case of evacuation failure. |
ysr@777 | 4433 | arrayOop(old)->set_length(end); |
ysr@777 | 4434 | } |
ysr@1280 | 4435 | _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
ysr@777 | 4436 | // process our set of indices (include header in first chunk) |
ysr@1280 | 4437 | obj->oop_iterate_range(&_scanner, start, end); |
ysr@777 | 4438 | } |
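// Worked example of the chunking protocol above (a sketch, assuming
// ParGCArrayScanChunk is at its default value of 50): for a 10,000
// element array, copy_to_survivor_space() sets the from-space copy's
// length to 0 and queues the masked from-space pointer, while the
// to-space copy keeps the real length. Each pop then scans one chunk
// and re-queues the rest:
//
//   pop: start =    0, end = 10000, remainder = 10000 -> scan [0, 50), set length 50, re-push
//   pop: start =   50, end = 10000, remainder =  9950 -> scan [50, 100), re-push
//   ...
//   pop: start = 9900, end = 10000, remainder =   100 -> scan [9900, 10000), done
//
// The final chunk may be up to 2 * ParGCArrayScanChunk elements so a
// tiny tail is not split off on its own, and the from-space copy's
// length is restored at the end so the heap stays parsable if
// evacuation fails.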
ysr@777 | 4439 | |
ysr@777 | 4440 | class G1ParEvacuateFollowersClosure : public VoidClosure { |
ysr@777 | 4441 | protected: |
ysr@777 | 4442 | G1CollectedHeap* _g1h; |
ysr@777 | 4443 | G1ParScanThreadState* _par_scan_state; |
ysr@777 | 4444 | RefToScanQueueSet* _queues; |
ysr@777 | 4445 | ParallelTaskTerminator* _terminator; |
ysr@777 | 4446 | |
ysr@777 | 4447 | G1ParScanThreadState* par_scan_state() { return _par_scan_state; } |
ysr@777 | 4448 | RefToScanQueueSet* queues() { return _queues; } |
ysr@777 | 4449 | ParallelTaskTerminator* terminator() { return _terminator; } |
ysr@777 | 4450 | |
ysr@777 | 4451 | public: |
ysr@777 | 4452 | G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, |
ysr@777 | 4453 | G1ParScanThreadState* par_scan_state, |
ysr@777 | 4454 | RefToScanQueueSet* queues, |
ysr@777 | 4455 | ParallelTaskTerminator* terminator) |
ysr@777 | 4456 | : _g1h(g1h), _par_scan_state(par_scan_state), |
ysr@777 | 4457 | _queues(queues), _terminator(terminator) {} |
ysr@777 | 4458 | |
jcoomes@2217 | 4459 | void do_void(); |
jcoomes@2217 | 4460 | |
jcoomes@2217 | 4461 | private: |
jcoomes@2217 | 4462 | inline bool offer_termination(); |
jcoomes@2217 | 4463 | }; |
jcoomes@2217 | 4464 | |
jcoomes@2217 | 4465 | bool G1ParEvacuateFollowersClosure::offer_termination() { |
jcoomes@2217 | 4466 | G1ParScanThreadState* const pss = par_scan_state(); |
jcoomes@2217 | 4467 | pss->start_term_time(); |
jcoomes@2217 | 4468 | const bool res = terminator()->offer_termination(); |
jcoomes@2217 | 4469 | pss->end_term_time(); |
jcoomes@2217 | 4470 | return res; |
jcoomes@2217 | 4471 | } |
jcoomes@2217 | 4472 | |
jcoomes@2217 | 4473 | void G1ParEvacuateFollowersClosure::do_void() { |
jcoomes@2217 | 4474 | StarTask stolen_task; |
jcoomes@2217 | 4475 | G1ParScanThreadState* const pss = par_scan_state(); |
jcoomes@2217 | 4476 | pss->trim_queue(); |
jcoomes@2217 | 4477 | |
jcoomes@2217 | 4478 | do { |
jcoomes@2217 | 4479 | while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
jcoomes@2217 | 4480 | assert(pss->verify_task(stolen_task), "sanity"); |
jcoomes@2217 | 4481 | if (stolen_task.is_narrow()) { |
tonyp@2238 | 4482 | pss->deal_with_reference((narrowOop*) stolen_task); |
jcoomes@2217 | 4483 | } else { |
tonyp@2238 | 4484 | pss->deal_with_reference((oop*) stolen_task); |
jcoomes@2217 | 4485 | } |
tonyp@2238 | 4486 | |
tonyp@2238 | 4487 | // We've just processed a reference and we might have made |
tonyp@2238 | 4488 | // available new entries on the queues. So we have to make sure |
tonyp@2238 | 4489 | // we drain the queues as necessary. |
ysr@777 | 4490 | pss->trim_queue(); |
ysr@777 | 4491 | } |
jcoomes@2217 | 4492 | } while (!offer_termination()); |
jcoomes@2217 | 4493 | |
jcoomes@2217 | 4494 | pss->retire_alloc_buffers(); |
jcoomes@2217 | 4495 | } |
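// In outline, do_void() above is the standard work-stealing termination
// protocol: a worker first drains its own queue, then repeatedly tries
// to steal from the other workers' queues, re-draining its own queue
// after every successful steal (processing a stolen reference may push
// new entries). Only when it finds nothing to steal does it offer
// termination, and offer_termination() returns true only once all
// workers have done so, i.e. once the queues are globally empty.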
ysr@777 | 4496 | |
ysr@777 | 4497 | class G1ParTask : public AbstractGangTask { |
ysr@777 | 4498 | protected: |
ysr@777 | 4499 | G1CollectedHeap* _g1h; |
ysr@777 | 4500 | RefToScanQueueSet *_queues; |
ysr@777 | 4501 | ParallelTaskTerminator _terminator; |
ysr@1280 | 4502 | int _n_workers; |
ysr@777 | 4503 | |
ysr@777 | 4504 | Mutex _stats_lock; |
ysr@777 | 4505 | Mutex* stats_lock() { return &_stats_lock; } |
ysr@777 | 4506 | |
ysr@777 | 4507 | size_t getNCards() { |
ysr@777 | 4508 | return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) |
ysr@777 | 4509 | / G1BlockOffsetSharedArray::N_bytes; |
ysr@777 | 4510 | } |
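// For example, assuming G1BlockOffsetSharedArray::N_bytes is 512 (the
// card size), a 1 GB heap capacity gives
//
//   getNCards() == (2^30 + 511) / 512 == 2^21 == 2097152 cards
//
// The rounding-up in the expression above covers capacities that are
// not a multiple of N_bytes.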
ysr@777 | 4511 | |
ysr@777 | 4512 | public: |
ysr@777 | 4513 | G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) |
ysr@777 | 4514 | : AbstractGangTask("G1 collection"), |
ysr@777 | 4515 | _g1h(g1h), |
ysr@777 | 4516 | _queues(task_queues), |
ysr@777 | 4517 | _terminator(workers, _queues), |
ysr@1280 | 4518 | _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
ysr@1280 | 4519 | _n_workers(workers) |
ysr@777 | 4520 | {} |
ysr@777 | 4521 | |
ysr@777 | 4522 | RefToScanQueueSet* queues() { return _queues; } |
ysr@777 | 4523 | |
ysr@777 | 4524 | RefToScanQueue *work_queue(int i) { |
ysr@777 | 4525 | return queues()->queue(i); |
ysr@777 | 4526 | } |
ysr@777 | 4527 | |
ysr@777 | 4528 | void work(int i) { |
ysr@1280 | 4529 | if (i >= _n_workers) return; // no work needed this round |
tonyp@1966 | 4530 | |
tonyp@1966 | 4531 | double start_time_ms = os::elapsedTime() * 1000.0; |
tonyp@1966 | 4532 | _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); |
tonyp@1966 | 4533 | |
ysr@777 | 4534 | ResourceMark rm; |
ysr@777 | 4535 | HandleMark hm; |
ysr@777 | 4536 | |
tonyp@961 | 4537 | G1ParScanThreadState pss(_g1h, i); |
tonyp@961 | 4538 | G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); |
tonyp@961 | 4539 | G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); |
tonyp@961 | 4540 | G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); |
ysr@777 | 4541 | |
ysr@777 | 4542 | pss.set_evac_closure(&scan_evac_cl); |
ysr@777 | 4543 | pss.set_evac_failure_closure(&evac_failure_cl); |
ysr@777 | 4544 | pss.set_partial_scan_closure(&partial_scan_cl); |
ysr@777 | 4545 | |
ysr@777 | 4546 | G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); |
ysr@777 | 4547 | G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); |
ysr@777 | 4548 | G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); |
iveresov@1696 | 4549 | G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); |
iveresov@1051 | 4550 | |
ysr@777 | 4551 | G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
ysr@777 | 4552 | G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); |
ysr@777 | 4553 | G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); |
ysr@777 | 4554 | |
ysr@777 | 4555 | OopsInHeapRegionClosure *scan_root_cl; |
ysr@777 | 4556 | OopsInHeapRegionClosure *scan_perm_cl; |
ysr@777 | 4557 | |
tonyp@1794 | 4558 | if (_g1h->g1_policy()->during_initial_mark_pause()) { |
ysr@777 | 4559 | scan_root_cl = &scan_mark_root_cl; |
ysr@777 | 4560 | scan_perm_cl = &scan_mark_perm_cl; |
ysr@777 | 4561 | } else { |
ysr@777 | 4562 | scan_root_cl = &only_scan_root_cl; |
ysr@777 | 4563 | scan_perm_cl = &only_scan_perm_cl; |
ysr@777 | 4564 | } |
ysr@777 | 4565 | |
ysr@777 | 4566 | pss.start_strong_roots(); |
ysr@777 | 4567 | _g1h->g1_process_strong_roots(/* not collecting perm */ false, |
ysr@777 | 4568 | SharedHeap::SO_AllClasses, |
ysr@777 | 4569 | scan_root_cl, |
iveresov@1696 | 4570 | &push_heap_rs_cl, |
ysr@777 | 4571 | scan_perm_cl, |
ysr@777 | 4572 | i); |
ysr@777 | 4573 | pss.end_strong_roots(); |
ysr@777 | 4574 | { |
ysr@777 | 4575 | double start = os::elapsedTime(); |
ysr@777 | 4576 | G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); |
ysr@777 | 4577 | evac.do_void(); |
ysr@777 | 4578 | double elapsed_ms = (os::elapsedTime()-start)*1000.0; |
ysr@777 | 4579 | double term_ms = pss.term_time()*1000.0; |
ysr@777 | 4580 | _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); |
tonyp@1966 | 4581 | _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); |
ysr@777 | 4582 | } |
tonyp@1717 | 4583 | _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
ysr@777 | 4584 | _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
ysr@777 | 4585 | |
ysr@777 | 4586 | // Clean up any par-expanded rem sets. |
ysr@777 | 4587 | HeapRegionRemSet::par_cleanup(); |
ysr@777 | 4588 | |
ysr@777 | 4589 | if (ParallelGCVerbose) { |
jcoomes@2064 | 4590 | MutexLocker x(stats_lock()); |
jcoomes@2064 | 4591 | pss.print_termination_stats(i); |
ysr@777 | 4592 | } |
ysr@777 | 4593 | |
jcoomes@2217 | 4594 | assert(pss.refs()->is_empty(), "should be empty"); |
tonyp@1966 | 4595 | double end_time_ms = os::elapsedTime() * 1000.0; |
tonyp@1966 | 4596 | _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); |
ysr@777 | 4597 | } |
ysr@777 | 4598 | }; |
ysr@777 | 4599 | |
ysr@777 | 4600 | // *** Common G1 Evacuation Stuff |
ysr@777 | 4601 | |
jmasa@2188 | 4602 | // This method is run in a GC worker. |
jmasa@2188 | 4603 | |
ysr@777 | 4604 | void |
ysr@777 | 4605 | G1CollectedHeap:: |
ysr@777 | 4606 | g1_process_strong_roots(bool collecting_perm_gen, |
ysr@777 | 4607 | SharedHeap::ScanningOption so, |
ysr@777 | 4608 | OopClosure* scan_non_heap_roots, |
ysr@777 | 4609 | OopsInHeapRegionClosure* scan_rs, |
ysr@777 | 4610 | OopsInGenClosure* scan_perm, |
ysr@777 | 4611 | int worker_i) { |
ysr@777 | 4612 | // First scan the strong roots, including the perm gen. |
ysr@777 | 4613 | double ext_roots_start = os::elapsedTime(); |
ysr@777 | 4614 | double closure_app_time_sec = 0.0; |
ysr@777 | 4615 | |
ysr@777 | 4616 | BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); |
ysr@777 | 4617 | BufferingOopsInGenClosure buf_scan_perm(scan_perm); |
ysr@777 | 4618 | buf_scan_perm.set_generation(perm_gen()); |
ysr@777 | 4619 | |
jrose@1424 | 4620 | // Walk the code cache without buffering, because StarTask cannot handle
jrose@1424 | 4621 | // unaligned oop locations. |
jrose@1424 | 4622 | CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
jrose@1424 | 4623 | |
jrose@1424 | 4624 | process_strong_roots(false, // no scoping; this is parallel code |
jrose@1424 | 4625 | collecting_perm_gen, so, |
ysr@777 | 4626 | &buf_scan_non_heap_roots, |
jrose@1424 | 4627 | &eager_scan_code_roots, |
ysr@777 | 4628 | &buf_scan_perm); |
johnc@1829 | 4629 | |
ysr@777 | 4630 | // Finish up any enqueued closure apps. |
ysr@777 | 4631 | buf_scan_non_heap_roots.done(); |
ysr@777 | 4632 | buf_scan_perm.done(); |
ysr@777 | 4633 | double ext_roots_end = os::elapsedTime(); |
ysr@777 | 4634 | g1_policy()->reset_obj_copy_time(worker_i); |
ysr@777 | 4635 | double obj_copy_time_sec = |
ysr@777 | 4636 | buf_scan_non_heap_roots.closure_app_seconds() + |
ysr@777 | 4637 | buf_scan_perm.closure_app_seconds(); |
ysr@777 | 4638 | g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); |
ysr@777 | 4639 | double ext_root_time_ms = |
ysr@777 | 4640 | ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; |
ysr@777 | 4641 | g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); |
ysr@777 | 4642 | |
ysr@777 | 4643 | // Scan strong roots in mark stack. |
ysr@777 | 4644 | if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { |
ysr@777 | 4645 | concurrent_mark()->oops_do(scan_non_heap_roots); |
ysr@777 | 4646 | } |
ysr@777 | 4647 | double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; |
ysr@777 | 4648 | g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); |
ysr@777 | 4649 | |
ysr@777 | 4650 | // XXX What should this be doing in the parallel case? |
ysr@777 | 4651 | g1_policy()->record_collection_pause_end_CH_strong_roots(); |
ysr@777 | 4652 | // Now scan the complement of the collection set. |
ysr@777 | 4653 | if (scan_rs != NULL) { |
ysr@777 | 4654 | g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); |
ysr@777 | 4655 | } |
ysr@777 | 4656 | // Finish with the ref_processor roots. |
ysr@777 | 4657 | if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { |
johnc@2316 | 4658 | // We need to treat the discovered reference lists as roots and |
johnc@2316 | 4659 | // keep entries (which are added by the marking threads) on them |
johnc@2316 | 4660 | // live until they can be processed at the end of marking. |
johnc@2316 | 4661 | ref_processor()->weak_oops_do(scan_non_heap_roots); |
ysr@777 | 4662 | ref_processor()->oops_do(scan_non_heap_roots); |
ysr@777 | 4663 | } |
ysr@777 | 4664 | g1_policy()->record_collection_pause_end_G1_strong_roots(); |
ysr@777 | 4665 | _process_strong_tasks->all_tasks_completed(); |
ysr@777 | 4666 | } |
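// A note on the accounting above (a worked reading of the code, not a
// specification): the buffering closures record the time they spend
// applying the wrapped copying closures, so the external root scan
// time is
//
//   ext_root_time_ms = ((ext_roots_end - ext_roots_start)
//                       - obj_copy_time_sec) * 1000.0
//
// e.g. if strong root processing took 12 ms of wall time of which the
// buffered closures spent 9 ms copying objects, 3 ms is recorded as
// external root scanning and 9 ms as object copy time.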
ysr@777 | 4667 | |
ysr@777 | 4668 | void |
ysr@777 | 4669 | G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, |
ysr@777 | 4670 | OopClosure* non_root_closure) { |
jrose@1424 | 4671 | CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
jrose@1424 | 4672 | SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
ysr@777 | 4673 | } |
ysr@777 | 4674 | |
ysr@777 | 4675 | |
ysr@777 | 4676 | class SaveMarksClosure: public HeapRegionClosure { |
ysr@777 | 4677 | public: |
ysr@777 | 4678 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 4679 | r->save_marks(); |
ysr@777 | 4680 | return false; |
ysr@777 | 4681 | } |
ysr@777 | 4682 | }; |
ysr@777 | 4683 | |
ysr@777 | 4684 | void G1CollectedHeap::save_marks() { |
jmasa@2188 | 4685 | if (!CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 4686 | SaveMarksClosure sm; |
ysr@777 | 4687 | heap_region_iterate(&sm); |
ysr@777 | 4688 | } |
ysr@777 | 4689 | // We do this even in the parallel case |
ysr@777 | 4690 | perm_gen()->save_marks(); |
ysr@777 | 4691 | } |
ysr@777 | 4692 | |
ysr@777 | 4693 | void G1CollectedHeap::evacuate_collection_set() { |
ysr@777 | 4694 | set_evacuation_failed(false); |
ysr@777 | 4695 | |
ysr@777 | 4696 | g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
ysr@777 | 4697 | concurrent_g1_refine()->set_use_cache(false); |
johnc@1324 | 4698 | concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
johnc@1324 | 4699 | |
ysr@777 | 4700 | int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
ysr@777 | 4701 | set_par_threads(n_workers); |
ysr@777 | 4702 | G1ParTask g1_par_task(this, n_workers, _task_queues); |
ysr@777 | 4703 | |
ysr@777 | 4704 | init_for_evac_failure(NULL); |
ysr@777 | 4705 | |
ysr@777 | 4706 | rem_set()->prepare_for_younger_refs_iterate(true); |
iveresov@1051 | 4707 | |
iveresov@1051 | 4708 | assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
ysr@777 | 4709 | double start_par = os::elapsedTime(); |
jmasa@2188 | 4710 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 4711 | // The individual threads will set their evac-failure closures. |
jrose@1424 | 4712 | StrongRootsScope srs(this); |
jcoomes@2064 | 4713 | if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); |
ysr@777 | 4714 | workers()->run_task(&g1_par_task); |
ysr@777 | 4715 | } else { |
jrose@1424 | 4716 | StrongRootsScope srs(this); |
ysr@777 | 4717 | g1_par_task.work(0); |
ysr@777 | 4718 | } |
ysr@777 | 4719 | |
ysr@777 | 4720 | double par_time = (os::elapsedTime() - start_par) * 1000.0; |
ysr@777 | 4721 | g1_policy()->record_par_time(par_time); |
ysr@777 | 4722 | set_par_threads(0); |
ysr@777 | 4723 | // Is this the right thing to do here? We don't save marks |
ysr@777 | 4724 | // on individual heap regions when we allocate from |
ysr@777 | 4725 | // them in parallel, so this seems like the correct place for this. |
apetrusenko@980 | 4726 | retire_all_alloc_regions(); |
johnc@2316 | 4727 | |
johnc@2316 | 4728 | // Weak root processing. |
johnc@2316 | 4729 | // Note: when JSR 292 is enabled and code blobs can contain |
johnc@2316 | 4730 | // non-perm oops then we will need to process the code blobs |
johnc@2316 | 4731 | // here too. |
ysr@777 | 4732 | { |
ysr@777 | 4733 | G1IsAliveClosure is_alive(this); |
ysr@777 | 4734 | G1KeepAliveClosure keep_alive(this); |
ysr@777 | 4735 | JNIHandles::weak_oops_do(&is_alive, &keep_alive); |
ysr@777 | 4736 | } |
apetrusenko@1375 | 4737 | release_gc_alloc_regions(false /* totally */); |
ysr@777 | 4738 | g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
iveresov@1051 | 4739 | |
johnc@1324 | 4740 | concurrent_g1_refine()->clear_hot_cache(); |
ysr@777 | 4741 | concurrent_g1_refine()->set_use_cache(true); |
ysr@777 | 4742 | |
ysr@777 | 4743 | finalize_for_evac_failure(); |
ysr@777 | 4744 | |
ysr@777 | 4745 | // Must do this before removing self-forwarding pointers, which clears |
ysr@777 | 4746 | // the per-region evac-failure flags. |
ysr@777 | 4747 | concurrent_mark()->complete_marking_in_collection_set(); |
ysr@777 | 4748 | |
ysr@777 | 4749 | if (evacuation_failed()) { |
ysr@777 | 4750 | remove_self_forwarding_pointers(); |
ysr@777 | 4751 | if (PrintGCDetails) { |
tonyp@2074 | 4752 | gclog_or_tty->print(" (to-space overflow)"); |
ysr@777 | 4753 | } else if (PrintGC) { |
ysr@777 | 4754 | gclog_or_tty->print("--"); |
ysr@777 | 4755 | } |
ysr@777 | 4756 | } |
ysr@777 | 4757 | |
iveresov@1051 | 4758 | if (G1DeferredRSUpdate) { |
iveresov@1051 | 4759 | RedirtyLoggedCardTableEntryFastClosure redirty; |
iveresov@1051 | 4760 | dirty_card_queue_set().set_closure(&redirty); |
iveresov@1051 | 4761 | dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
iveresov@1546 | 4762 | |
iveresov@1546 | 4763 | DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); |
iveresov@1546 | 4764 | dcq.merge_bufferlists(&dirty_card_queue_set()); |
iveresov@1051 | 4765 | assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
iveresov@1051 | 4766 | } |
ysr@777 | 4767 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 4768 | } |
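// In outline, evacuate_collection_set() above drives the copying phase
// of a pause end-to-end: prepare the remembered sets and disable the
// refinement hot-card cache; run G1ParTask across the workers (root
// scanning, RS scanning, object copying, termination); retire the
// alloc regions; process JNI weak roots; release the GC alloc regions
// and re-enable the cache; if any evacuation failed, remove the
// self-forwarding pointers; and, with G1DeferredRSUpdate, redirty the
// logged cards and merge them back into the global dirty card queue
// set. (A reading of the code above, not a specification.)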
ysr@777 | 4769 | |
tonyp@2493 | 4770 | void G1CollectedHeap::free_region_if_empty(HeapRegion* hr, |
tonyp@2472 | 4771 | size_t* pre_used, |
tonyp@2472 | 4772 | FreeRegionList* free_list, |
tonyp@2472 | 4773 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2493 | 4774 | HRRSCleanupTask* hrrs_cleanup_task, |
tonyp@2472 | 4775 | bool par) { |
tonyp@2472 | 4776 | if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { |
tonyp@2472 | 4777 | if (hr->isHumongous()) { |
tonyp@2472 | 4778 | assert(hr->startsHumongous(), "we should only see starts humongous"); |
tonyp@2472 | 4779 | free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par); |
tonyp@2472 | 4780 | } else { |
tonyp@2472 | 4781 | free_region(hr, pre_used, free_list, par); |
tonyp@2472 | 4782 | } |
tonyp@2493 | 4783 | } else { |
tonyp@2493 | 4784 | hr->rem_set()->do_cleanup_work(hrrs_cleanup_task); |
tonyp@2472 | 4785 | } |
ysr@777 | 4786 | } |
ysr@777 | 4787 | |
tonyp@2472 | 4788 | void G1CollectedHeap::free_region(HeapRegion* hr, |
tonyp@2472 | 4789 | size_t* pre_used, |
tonyp@2472 | 4790 | FreeRegionList* free_list, |
ysr@777 | 4791 | bool par) { |
tonyp@2472 | 4792 | assert(!hr->isHumongous(), "this is only for non-humongous regions"); |
tonyp@2472 | 4793 | assert(!hr->is_empty(), "the region should not be empty"); |
tonyp@2472 | 4794 | assert(free_list != NULL, "pre-condition"); |
tonyp@2472 | 4795 | |
tonyp@2472 | 4796 | *pre_used += hr->used(); |
tonyp@2472 | 4797 | hr->hr_clear(par, true /* clear_space */); |
tonyp@2714 | 4798 | free_list->add_as_head(hr); |
tonyp@2472 | 4799 | } |
tonyp@2472 | 4800 | |
tonyp@2472 | 4801 | void G1CollectedHeap::free_humongous_region(HeapRegion* hr, |
tonyp@2472 | 4802 | size_t* pre_used, |
tonyp@2472 | 4803 | FreeRegionList* free_list, |
tonyp@2472 | 4804 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2472 | 4805 | bool par) { |
tonyp@2472 | 4806 | assert(hr->startsHumongous(), "this is only for starts humongous regions"); |
tonyp@2472 | 4807 | assert(free_list != NULL, "pre-condition"); |
tonyp@2472 | 4808 | assert(humongous_proxy_set != NULL, "pre-condition"); |
tonyp@2472 | 4809 | |
tonyp@2472 | 4810 | size_t hr_used = hr->used(); |
tonyp@2472 | 4811 | size_t hr_capacity = hr->capacity(); |
tonyp@2472 | 4812 | size_t hr_pre_used = 0; |
tonyp@2472 | 4813 | _humongous_set.remove_with_proxy(hr, humongous_proxy_set); |
tonyp@2472 | 4814 | hr->set_notHumongous(); |
tonyp@2472 | 4815 | free_region(hr, &hr_pre_used, free_list, par); |
tonyp@2472 | 4816 | |
tonyp@2472 | 4817 | int i = hr->hrs_index() + 1; |
tonyp@2472 | 4818 | size_t num = 1; |
tonyp@2472 | 4819 | while ((size_t) i < n_regions()) { |
tonyp@2472 | 4820 | HeapRegion* curr_hr = _hrs->at(i); |
tonyp@2472 | 4821 | if (!curr_hr->continuesHumongous()) { |
tonyp@2472 | 4822 | break; |
ysr@777 | 4823 | } |
tonyp@2472 | 4824 | curr_hr->set_notHumongous(); |
tonyp@2472 | 4825 | free_region(curr_hr, &hr_pre_used, free_list, par); |
tonyp@2472 | 4826 | num += 1; |
tonyp@2472 | 4827 | i += 1; |
tonyp@2472 | 4828 | } |
tonyp@2472 | 4829 | assert(hr_pre_used == hr_used, |
tonyp@2472 | 4830 | err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" " |
tonyp@2472 | 4831 | "should be the same", hr_pre_used, hr_used)); |
tonyp@2472 | 4832 | *pre_used += hr_pre_used; |
ysr@777 | 4833 | } |
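// Worked example for the loop above (a sketch, assuming 1 MB regions;
// the actual region size is chosen ergonomically): a 3.5 MB humongous
// object occupies four regions, one "starts humongous" plus three
// "continues humongous". Freeing starts with the first region, then
// the loop walks hrs_index() + 1, + 2, + 3, clearing the humongous tag
// on and freeing each "continues" region, stopping at the first region
// that is not "continues humongous". The assert then cross-checks that
// the bytes accounted while freeing the series match the usage
// recorded for it up front.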
ysr@777 | 4834 | |
tonyp@2472 | 4835 | void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used, |
tonyp@2472 | 4836 | FreeRegionList* free_list, |
tonyp@2472 | 4837 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2472 | 4838 | bool par) { |
tonyp@2472 | 4839 | if (pre_used > 0) { |
tonyp@2472 | 4840 | Mutex* lock = (par) ? ParGCRareEvent_lock : NULL; |
ysr@777 | 4841 | MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 4842 | assert(_summary_bytes_used >= pre_used, |
tonyp@2472 | 4843 | err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" " |
tonyp@2472 | 4844 | "should be >= pre_used: "SIZE_FORMAT, |
tonyp@2472 | 4845 | _summary_bytes_used, pre_used)); |
ysr@777 | 4846 | _summary_bytes_used -= pre_used; |
tonyp@2472 | 4847 | } |
tonyp@2472 | 4848 | if (free_list != NULL && !free_list->is_empty()) { |
tonyp@2472 | 4849 | MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2714 | 4850 | _free_list.add_as_head(free_list); |
tonyp@2472 | 4851 | } |
tonyp@2472 | 4852 | if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { |
tonyp@2472 | 4853 | MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 4854 | _humongous_set.update_from_proxy(humongous_proxy_set); |
ysr@777 | 4855 | } |
ysr@777 | 4856 | } |
ysr@777 | 4857 | |
ysr@777 | 4858 | void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { |
ysr@777 | 4859 | while (list != NULL) { |
ysr@777 | 4860 | guarantee( list->is_young(), "invariant" ); |
ysr@777 | 4861 | |
ysr@777 | 4862 | HeapWord* bottom = list->bottom(); |
ysr@777 | 4863 | HeapWord* end = list->end(); |
ysr@777 | 4864 | MemRegion mr(bottom, end); |
ysr@777 | 4865 | ct_bs->dirty(mr); |
ysr@777 | 4866 | |
ysr@777 | 4867 | list = list->get_next_young_region(); |
ysr@777 | 4868 | } |
ysr@777 | 4869 | } |
ysr@777 | 4870 | |
apetrusenko@1231 | 4871 | |
apetrusenko@1231 | 4872 | class G1ParCleanupCTTask : public AbstractGangTask { |
apetrusenko@1231 | 4873 | CardTableModRefBS* _ct_bs; |
apetrusenko@1231 | 4874 | G1CollectedHeap* _g1h; |
apetrusenko@1375 | 4875 | HeapRegion* volatile _su_head; |
apetrusenko@1231 | 4876 | public: |
apetrusenko@1231 | 4877 | G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
apetrusenko@1375 | 4878 | G1CollectedHeap* g1h, |
apetrusenko@1375 | 4879 | HeapRegion* survivor_list) : |
apetrusenko@1231 | 4880 | AbstractGangTask("G1 Par Cleanup CT Task"), |
apetrusenko@1231 | 4881 | _ct_bs(ct_bs), |
apetrusenko@1375 | 4882 | _g1h(g1h), |
apetrusenko@1375 | 4883 | _su_head(survivor_list) |
apetrusenko@1231 | 4884 | { } |
apetrusenko@1231 | 4885 | |
apetrusenko@1231 | 4886 | void work(int i) { |
apetrusenko@1231 | 4887 | HeapRegion* r; |
apetrusenko@1231 | 4888 | while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
apetrusenko@1231 | 4889 | clear_cards(r); |
apetrusenko@1231 | 4890 | } |
johnc@1829 | 4891 | // Redirty the cards of the survivor regions. |
apetrusenko@1375 | 4892 | dirty_list(&this->_su_head); |
apetrusenko@1375 | 4893 | } |
apetrusenko@1375 | 4894 | |
apetrusenko@1231 | 4895 | void clear_cards(HeapRegion* r) { |
johnc@1829 | 4896 | // Cards for Survivor regions will be dirtied later. |
johnc@1829 | 4897 | if (!r->is_survivor()) { |
apetrusenko@1231 | 4898 | _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
apetrusenko@1231 | 4899 | } |
apetrusenko@1231 | 4900 | } |
apetrusenko@1375 | 4901 | |
apetrusenko@1375 | 4902 | void dirty_list(HeapRegion* volatile * head_ptr) { |
apetrusenko@1375 | 4903 | HeapRegion* head; |
apetrusenko@1375 | 4904 | do { |
apetrusenko@1375 | 4905 | // Pop region off the list. |
apetrusenko@1375 | 4906 | head = *head_ptr; |
apetrusenko@1375 | 4907 | if (head != NULL) { |
apetrusenko@1375 | 4908 | HeapRegion* r = (HeapRegion*) |
apetrusenko@1375 | 4909 | Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
apetrusenko@1375 | 4910 | if (r == head) { |
apetrusenko@1375 | 4911 | assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
apetrusenko@1375 | 4912 | _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
apetrusenko@1375 | 4913 | } |
apetrusenko@1375 | 4914 | } |
apetrusenko@1375 | 4915 | } while (*head_ptr != NULL); |
apetrusenko@1375 | 4916 | } |
apetrusenko@1231 | 4917 | }; |
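// dirty_list() above is a lock-free drain of the survivor list: each
// worker reads the head and uses Atomic::cmpxchg_ptr() to swing the
// head pointer to head->get_next_young_region(). The worker whose CAS
// returns the head it read owns that region and dirties its cards; a
// worker that loses the race simply retries with the new head. The
// outer loop re-reads *head_ptr so workers keep helping until the
// list is completely drained.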
apetrusenko@1231 | 4918 | |
apetrusenko@1231 | 4919 | |
apetrusenko@1375 | 4920 | #ifndef PRODUCT |
apetrusenko@1375 | 4921 | class G1VerifyCardTableCleanup: public HeapRegionClosure { |
apetrusenko@1375 | 4922 | CardTableModRefBS* _ct_bs; |
apetrusenko@1375 | 4923 | public: |
apetrusenko@1375 | 4924 | G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
tonyp@2715 | 4925 | : _ct_bs(ct_bs) { } |
tonyp@2715 | 4926 | virtual bool doHeapRegion(HeapRegion* r) { |
apetrusenko@1375 | 4927 | MemRegion mr(r->bottom(), r->end()); |
johnc@1829 | 4928 | if (r->is_survivor()) { |
apetrusenko@1375 | 4929 | _ct_bs->verify_dirty_region(mr); |
apetrusenko@1375 | 4930 | } else { |
apetrusenko@1375 | 4931 | _ct_bs->verify_clean_region(mr); |
apetrusenko@1375 | 4932 | } |
apetrusenko@1375 | 4933 | return false; |
apetrusenko@1375 | 4934 | } |
apetrusenko@1375 | 4935 | }; |
tonyp@2715 | 4936 | |
tonyp@2715 | 4937 | void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { |
tonyp@2715 | 4938 | CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); |
tonyp@2715 | 4939 | for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { |
tonyp@2715 | 4940 | // We cannot guarantee that [bottom(),end()] is dirty. Threads |
tonyp@2715 | 4941 | // dirty allocated blocks as they allocate them. The thread that |
tonyp@2715 | 4942 | // retires each region and replaces it with a new one will do a |
tonyp@2715 | 4943 | // maximal allocation to fill in [pre_dummy_top(),end()] but will |
tonyp@2715 | 4944 | // not dirty that area (one less thing to have to do while holding |
tonyp@2715 | 4945 | // a lock). So we can only verify that [bottom(),pre_dummy_top()] |
tonyp@2715 | 4946 | // is dirty. Also note that verify_dirty_region() requires |
tonyp@2715 | 4947 | // mr.start() and mr.end() to be card aligned and pre_dummy_top() |
tonyp@2715 | 4948 | // is not guaranteed to be. |
tonyp@2715 | 4949 | MemRegion mr(hr->bottom(), |
tonyp@2715 | 4950 | ct_bs->align_to_card_boundary(hr->pre_dummy_top())); |
tonyp@2715 | 4951 | ct_bs->verify_dirty_region(mr); |
tonyp@2715 | 4952 | } |
tonyp@2715 | 4953 | } |
tonyp@2715 | 4954 | |
tonyp@2715 | 4955 | void G1CollectedHeap::verify_dirty_young_regions() { |
tonyp@2715 | 4956 | verify_dirty_young_list(_young_list->first_region()); |
tonyp@2715 | 4957 | verify_dirty_young_list(_young_list->first_survivor_region()); |
tonyp@2715 | 4958 | } |
apetrusenko@1375 | 4959 | #endif |
apetrusenko@1375 | 4960 | |
ysr@777 | 4961 | void G1CollectedHeap::cleanUpCardTable() { |
ysr@777 | 4962 | CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); |
ysr@777 | 4963 | double start = os::elapsedTime(); |
ysr@777 | 4964 | |
apetrusenko@1231 | 4965 | // Iterate over the dirty cards region list. |
apetrusenko@1375 | 4966 | G1ParCleanupCTTask cleanup_task(ct_bs, this, |
apetrusenko@1375 | 4967 | _young_list->first_survivor_region()); |
johnc@1829 | 4968 | |
apetrusenko@1231 | 4969 | if (ParallelGCThreads > 0) { |
apetrusenko@1231 | 4970 | set_par_threads(workers()->total_workers()); |
apetrusenko@1231 | 4971 | workers()->run_task(&cleanup_task); |
apetrusenko@1231 | 4972 | set_par_threads(0); |
apetrusenko@1231 | 4973 | } else { |
apetrusenko@1231 | 4974 | while (_dirty_cards_region_list) { |
apetrusenko@1231 | 4975 | HeapRegion* r = _dirty_cards_region_list; |
apetrusenko@1231 | 4976 | cleanup_task.clear_cards(r); |
apetrusenko@1231 | 4977 | _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
apetrusenko@1231 | 4978 | if (_dirty_cards_region_list == r) { |
apetrusenko@1231 | 4979 | // The last region. |
apetrusenko@1231 | 4980 | _dirty_cards_region_list = NULL; |
apetrusenko@1231 | 4981 | } |
apetrusenko@1231 | 4982 | r->set_next_dirty_cards_region(NULL); |
apetrusenko@1231 | 4983 | } |
johnc@1829 | 4984 | // now, redirty the cards of the survivor regions |
apetrusenko@1375 | 4985 | // (it seemed faster to do it this way, instead of iterating over |
apetrusenko@1375 | 4986 | // all regions and then clearing / dirtying as appropriate) |
apetrusenko@1375 | 4987 | dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
apetrusenko@1375 | 4988 | } |
johnc@1829 | 4989 | |
ysr@777 | 4990 | double elapsed = os::elapsedTime() - start; |
ysr@777 | 4991 | g1_policy()->record_clear_ct_time( elapsed * 1000.0); |
apetrusenko@1375 | 4992 | #ifndef PRODUCT |
apetrusenko@1375 | 4993 | if (G1VerifyCTCleanup || VerifyAfterGC) { |
apetrusenko@1375 | 4994 | G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
apetrusenko@1375 | 4995 | heap_region_iterate(&cleanup_verifier); |
apetrusenko@1375 | 4996 | } |
apetrusenko@1375 | 4997 | #endif |
ysr@777 | 4998 | } |
ysr@777 | 4999 | |
ysr@777 | 5000 | void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { |
tonyp@2472 | 5001 | size_t pre_used = 0; |
tonyp@2472 | 5002 | FreeRegionList local_free_list("Local List for CSet Freeing"); |
tonyp@2472 | 5003 | |
ysr@777 | 5004 | double young_time_ms = 0.0; |
ysr@777 | 5005 | double non_young_time_ms = 0.0; |
ysr@777 | 5006 | |
johnc@1829 | 5007 | // Since the collection set is a superset of the young list,
johnc@1829 | 5008 | // all we need to do to clear the young list is clear its |
johnc@1829 | 5009 | // head and length, and unlink any young regions in the code below |
johnc@1829 | 5010 | _young_list->clear(); |
johnc@1829 | 5011 | |
ysr@777 | 5012 | G1CollectorPolicy* policy = g1_policy(); |
ysr@777 | 5013 | |
ysr@777 | 5014 | double start_sec = os::elapsedTime(); |
ysr@777 | 5015 | bool non_young = true; |
ysr@777 | 5016 | |
ysr@777 | 5017 | HeapRegion* cur = cs_head; |
ysr@777 | 5018 | int age_bound = -1; |
ysr@777 | 5019 | size_t rs_lengths = 0; |
ysr@777 | 5020 | |
ysr@777 | 5021 | while (cur != NULL) { |
tonyp@2643 | 5022 | assert(!is_on_master_free_list(cur), "sanity"); |
tonyp@2472 | 5023 | |
ysr@777 | 5024 | if (non_young) { |
ysr@777 | 5025 | if (cur->is_young()) { |
ysr@777 | 5026 | double end_sec = os::elapsedTime(); |
ysr@777 | 5027 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 5028 | non_young_time_ms += elapsed_ms; |
ysr@777 | 5029 | |
ysr@777 | 5030 | start_sec = os::elapsedTime(); |
ysr@777 | 5031 | non_young = false; |
ysr@777 | 5032 | } |
ysr@777 | 5033 | } else {
tonyp@2472 |  | // Mirror the young-region check above: only close the young
tonyp@2472 |  | // interval when we actually transition back to non-young regions,
tonyp@2472 |  | // otherwise consecutive young regions would flip the timer on
tonyp@2472 |  | // every iteration and misattribute the elapsed time.
tonyp@2472 |  | if (!cur->is_young()) {
tonyp@2472 | 5034 | double end_sec = os::elapsedTime();
tonyp@2472 | 5035 | double elapsed_ms = (end_sec - start_sec) * 1000.0;
tonyp@2472 | 5036 | young_time_ms += elapsed_ms;
tonyp@2472 | 5037 | 
tonyp@2472 | 5038 | start_sec = os::elapsedTime();
tonyp@2472 | 5039 | non_young = true;
tonyp@2472 |  | }
ysr@777 | 5040 | }
ysr@777 | 5041 | |
ysr@777 | 5042 | rs_lengths += cur->rem_set()->occupied(); |
ysr@777 | 5043 | |
ysr@777 | 5044 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 5045 | assert(cur->in_collection_set(), "bad CS"); |
ysr@777 | 5046 | cur->set_next_in_collection_set(NULL); |
ysr@777 | 5047 | cur->set_in_collection_set(false); |
ysr@777 | 5048 | |
ysr@777 | 5049 | if (cur->is_young()) { |
ysr@777 | 5050 | int index = cur->young_index_in_cset(); |
ysr@777 | 5051 | guarantee( index != -1, "invariant" ); |
ysr@777 | 5052 | guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); |
ysr@777 | 5053 | size_t words_survived = _surviving_young_words[index]; |
ysr@777 | 5054 | cur->record_surv_words_in_group(words_survived); |
johnc@1829 | 5055 | |
johnc@1829 | 5056 | // At this point we have 'popped' cur from the collection set
johnc@1829 | 5057 | // (linked via next_in_collection_set()) but it is still in the |
johnc@1829 | 5058 | // young list (linked via next_young_region()). Clear the |
johnc@1829 | 5059 | // _next_young_region field. |
johnc@1829 | 5060 | cur->set_next_young_region(NULL); |
ysr@777 | 5061 | } else { |
ysr@777 | 5062 | int index = cur->young_index_in_cset(); |
ysr@777 | 5063 | guarantee( index == -1, "invariant" ); |
ysr@777 | 5064 | } |
ysr@777 | 5065 | |
ysr@777 | 5066 | assert( (cur->is_young() && cur->young_index_in_cset() > -1) || |
ysr@777 | 5067 | (!cur->is_young() && cur->young_index_in_cset() == -1), |
ysr@777 | 5068 | "invariant" ); |
ysr@777 | 5069 | |
ysr@777 | 5070 | if (!cur->evacuation_failed()) { |
ysr@777 | 5071 | // The region's live objects were all successfully evacuated, so we
ysr@777 |  | // can free it. Note that it is not tagged empty yet; free_region()
ysr@777 |  | // clears it, and asserts that it is not empty on entry.
tonyp@2472 | 5072 | assert(!cur->is_empty(), "Should not have empty regions in a CS."); |
tonyp@2472 | 5073 | free_region(cur, &pre_used, &local_free_list, false /* par */); |
ysr@777 | 5074 | } else { |
ysr@777 | 5075 | cur->uninstall_surv_rate_group(); |
ysr@777 | 5076 | if (cur->is_young()) |
ysr@777 | 5077 | cur->set_young_index_in_cset(-1); |
ysr@777 | 5078 | cur->set_not_young(); |
ysr@777 | 5079 | cur->set_evacuation_failed(false); |
ysr@777 | 5080 | } |
ysr@777 | 5081 | cur = next; |
ysr@777 | 5082 | } |
ysr@777 | 5083 | |
ysr@777 | 5084 | policy->record_max_rs_lengths(rs_lengths); |
ysr@777 | 5085 | policy->cset_regions_freed(); |
ysr@777 | 5086 | |
ysr@777 | 5087 | double end_sec = os::elapsedTime(); |
ysr@777 | 5088 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 5089 | if (non_young) |
ysr@777 | 5090 | non_young_time_ms += elapsed_ms; |
ysr@777 | 5091 | else |
ysr@777 | 5092 | young_time_ms += elapsed_ms; |
ysr@777 | 5093 | |
tonyp@2472 | 5094 | update_sets_after_freeing_regions(pre_used, &local_free_list, |
tonyp@2472 | 5095 | NULL /* humongous_proxy_set */, |
tonyp@2472 | 5096 | false /* par */); |
ysr@777 | 5097 | policy->record_young_free_cset_time_ms(young_time_ms); |
ysr@777 | 5098 | policy->record_non_young_free_cset_time_ms(non_young_time_ms); |
ysr@777 | 5099 | } |
ysr@777 | 5100 | |
johnc@1829 | 5101 | // This routine is similar to the above but does not record |
johnc@1829 | 5102 | // any policy statistics or update free lists; we are abandoning |
johnc@1829 | 5103 | // the current incremental collection set in preparation for a
johnc@1829 | 5104 | // full collection. After the full GC we will start to build up |
johnc@1829 | 5105 | // the incremental collection set again. |
johnc@1829 | 5106 | // This is only called when we're doing a full collection |
johnc@1829 | 5107 | // and is immediately followed by the tearing down of the young list. |
johnc@1829 | 5108 | |
johnc@1829 | 5109 | void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { |
johnc@1829 | 5110 | HeapRegion* cur = cs_head; |
johnc@1829 | 5111 | |
johnc@1829 | 5112 | while (cur != NULL) { |
johnc@1829 | 5113 | HeapRegion* next = cur->next_in_collection_set(); |
johnc@1829 | 5114 | assert(cur->in_collection_set(), "bad CS"); |
johnc@1829 | 5115 | cur->set_next_in_collection_set(NULL); |
johnc@1829 | 5116 | cur->set_in_collection_set(false); |
johnc@1829 | 5117 | cur->set_young_index_in_cset(-1); |
johnc@1829 | 5118 | cur = next; |
johnc@1829 | 5119 | } |
johnc@1829 | 5120 | } |
johnc@1829 | 5121 | |
tonyp@2472 | 5122 | void G1CollectedHeap::set_free_regions_coming() { |
tonyp@2472 | 5123 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 5124 | gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " |
tonyp@2472 | 5125 | "setting free regions coming"); |
tonyp@2472 | 5126 | } |
tonyp@2472 | 5127 | |
tonyp@2472 | 5128 | assert(!free_regions_coming(), "pre-condition"); |
tonyp@2472 | 5129 | _free_regions_coming = true; |
tonyp@2472 | 5130 | } |
tonyp@2472 | 5131 | |
tonyp@2472 | 5132 | void G1CollectedHeap::reset_free_regions_coming() { |
tonyp@2472 | 5133 | { |
tonyp@2472 | 5134 | assert(free_regions_coming(), "pre-condition"); |
tonyp@2472 | 5135 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 5136 | _free_regions_coming = false; |
tonyp@2472 | 5137 | SecondaryFreeList_lock->notify_all(); |
tonyp@2472 | 5138 | } |
tonyp@2472 | 5139 | |
tonyp@2472 | 5140 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 5141 | gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : " |
tonyp@2472 | 5142 | "reset free regions coming"); |
tonyp@2472 | 5143 | } |
tonyp@2472 | 5144 | } |
tonyp@2472 | 5145 | |
tonyp@2472 | 5146 | void G1CollectedHeap::wait_while_free_regions_coming() { |
tonyp@2472 | 5147 | // Most of the time we won't have to wait, so let's do a quick test |
tonyp@2472 | 5148 | // first before we take the lock. |
tonyp@2472 | 5149 | if (!free_regions_coming()) { |
tonyp@2472 | 5150 | return; |
tonyp@2472 | 5151 | } |
tonyp@2472 | 5152 | |
tonyp@2472 | 5153 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 5154 | gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " |
tonyp@2472 | 5155 | "waiting for free regions"); |
tonyp@2472 | 5156 | } |
tonyp@2472 | 5157 | |
tonyp@2472 | 5158 | { |
tonyp@2472 | 5159 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 5160 | while (free_regions_coming()) { |
tonyp@2472 | 5161 | SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); |
ysr@777 | 5162 | } |
ysr@777 | 5163 | } |
tonyp@2472 | 5164 | |
tonyp@2472 | 5165 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 5166 | gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " |
tonyp@2472 | 5167 | "done waiting for free regions"); |
tonyp@2472 | 5168 | } |
ysr@777 | 5169 | } |
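// Taken together, set_free_regions_coming(), reset_free_regions_coming()
// and wait_while_free_regions_coming() form a small handshake around
// concurrent region freeing (a sketch of the intended use, based on the
// log tags above):
//
//   concurrent marking / cleanup thread:
//     set_free_regions_coming();
//     ... move freed regions onto the secondary free list ...
//     reset_free_regions_coming();  // clears the flag, notifies waiters
//
//   any other thread that needs the region sets to be stable:
//     wait_while_free_regions_coming();
//
// The unsynchronized quick test in wait_while_free_regions_coming() is
// only an optimization; the flag is always cleared and waited on under
// SecondaryFreeList_lock.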
ysr@777 | 5170 | |
ysr@777 | 5171 | size_t G1CollectedHeap::n_regions() { |
ysr@777 | 5172 | return _hrs->length(); |
ysr@777 | 5173 | } |
ysr@777 | 5174 | |
ysr@777 | 5175 | size_t G1CollectedHeap::max_regions() { |
ysr@777 | 5176 | return |
johnc@2504 | 5177 | (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) / |
ysr@777 | 5178 | HeapRegion::GrainBytes; |
ysr@777 | 5179 | } |
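// For example, assuming 1 MB regions (HeapRegion::GrainBytes is chosen
// ergonomically, so this is an illustration only), a 4 GB maximum
// capacity yields max_regions() == 4096; align_size_up() rounds the
// capacity up to a whole number of regions first.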
ysr@777 | 5180 | |
ysr@777 | 5181 | void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { |
ysr@777 | 5182 | assert(heap_lock_held_for_gc(), |
ysr@777 | 5183 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 5184 | _young_list->push_region(hr); |
ysr@777 | 5185 | g1_policy()->set_region_short_lived(hr); |
ysr@777 | 5186 | } |
ysr@777 | 5187 | |
ysr@777 | 5188 | class NoYoungRegionsClosure: public HeapRegionClosure { |
ysr@777 | 5189 | private: |
ysr@777 | 5190 | bool _success; |
ysr@777 | 5191 | public: |
ysr@777 | 5192 | NoYoungRegionsClosure() : _success(true) { } |
ysr@777 | 5193 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5194 | if (r->is_young()) { |
ysr@777 | 5195 | gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", |
ysr@777 | 5196 | r->bottom(), r->end()); |
ysr@777 | 5197 | _success = false; |
ysr@777 | 5198 | } |
ysr@777 | 5199 | return false; |
ysr@777 | 5200 | } |
ysr@777 | 5201 | bool success() { return _success; } |
ysr@777 | 5202 | }; |
ysr@777 | 5203 | |
johnc@1829 | 5204 | bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { |
johnc@1829 | 5205 | bool ret = _young_list->check_list_empty(check_sample); |
johnc@1829 | 5206 | |
johnc@1829 | 5207 | if (check_heap) { |
ysr@777 | 5208 | NoYoungRegionsClosure closure; |
ysr@777 | 5209 | heap_region_iterate(&closure); |
ysr@777 | 5210 | ret = ret && closure.success(); |
ysr@777 | 5211 | } |
ysr@777 | 5212 | |
ysr@777 | 5213 | return ret; |
ysr@777 | 5214 | } |
ysr@777 | 5215 | |
ysr@777 | 5216 | void G1CollectedHeap::empty_young_list() { |
ysr@777 | 5217 | assert(heap_lock_held_for_gc(), |
ysr@777 | 5218 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 5219 | assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); |
ysr@777 | 5220 | |
ysr@777 | 5221 | _young_list->empty_list(); |
ysr@777 | 5222 | } |
ysr@777 | 5223 | |
ysr@777 | 5224 | bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { |
ysr@777 | 5225 | bool no_allocs = true; |
ysr@777 | 5226 | for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { |
ysr@777 | 5227 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 5228 | no_allocs = r == NULL || r->saved_mark_at_top(); |
ysr@777 | 5229 | } |
ysr@777 | 5230 | return no_allocs; |
ysr@777 | 5231 | } |
ysr@777 | 5232 | |
apetrusenko@980 | 5233 | void G1CollectedHeap::retire_all_alloc_regions() { |
ysr@777 | 5234 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 5235 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 5236 | if (r != NULL) { |
ysr@777 | 5237 | // Check for aliases. |
ysr@777 | 5238 | bool has_processed_alias = false; |
ysr@777 | 5239 | for (int i = 0; i < ap; ++i) { |
ysr@777 | 5240 | if (_gc_alloc_regions[i] == r) { |
ysr@777 | 5241 | has_processed_alias = true; |
ysr@777 | 5242 | break; |
ysr@777 | 5243 | } |
ysr@777 | 5244 | } |
ysr@777 | 5245 | if (!has_processed_alias) { |
apetrusenko@980 | 5246 | retire_alloc_region(r, false /* par */); |
ysr@777 | 5247 | } |
ysr@777 | 5248 | } |
ysr@777 | 5249 | } |
ysr@777 | 5250 | } |
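// The alias check above exists because two entries of _gc_alloc_regions
// (one per GCAllocPurpose) may refer to the same region, e.g. when one
// purpose falls back to another purpose's region; retiring such a
// region once per alias would double-count its usage, so only its
// first occurrence is retired.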
ysr@777 | 5251 | |
ysr@777 | 5252 | // Done at the start of full GC. |
ysr@777 | 5253 | void G1CollectedHeap::tear_down_region_lists() { |
tonyp@2472 | 5254 | _free_list.remove_all(); |
ysr@777 | 5255 | } |
ysr@777 | 5256 | |
ysr@777 | 5257 | class RegionResetter: public HeapRegionClosure { |
tonyp@2472 | 5258 | G1CollectedHeap* _g1h; |
tonyp@2472 | 5259 | FreeRegionList _local_free_list; |
tonyp@2472 | 5260 | |
ysr@777 | 5261 | public: |
tonyp@2472 | 5262 | RegionResetter() : _g1h(G1CollectedHeap::heap()), |
tonyp@2472 | 5263 | _local_free_list("Local Free List for RegionResetter") { } |
tonyp@2472 | 5264 | |
ysr@777 | 5265 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5266 | if (r->continuesHumongous()) return false; |
ysr@777 | 5267 | if (r->top() > r->bottom()) { |
ysr@777 | 5268 | if (r->top() < r->end()) { |
ysr@777 | 5269 | Copy::fill_to_words(r->top(), |
ysr@777 | 5270 | pointer_delta(r->end(), r->top())); |
ysr@777 | 5271 | } |
ysr@777 | 5272 | } else { |
ysr@777 | 5273 | assert(r->is_empty(), "tautology"); |
tonyp@2472 | 5274 | _local_free_list.add_as_tail(r); |
ysr@777 | 5275 | } |
ysr@777 | 5276 | return false; |
ysr@777 | 5277 | } |
ysr@777 | 5278 | |
tonyp@2472 | 5279 | void update_free_lists() { |
tonyp@2472 | 5280 | _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, |
tonyp@2472 | 5281 | false /* par */); |
tonyp@2472 | 5282 | } |
ysr@777 | 5283 | }; |
ysr@777 | 5284 | |
ysr@777 | 5285 | // Done at the end of full GC. |
ysr@777 | 5286 | void G1CollectedHeap::rebuild_region_lists() { |
ysr@777 | 5287 | // This needs to go at the end of the full GC. |
ysr@777 | 5288 | RegionResetter rs; |
ysr@777 | 5289 | heap_region_iterate(&rs); |
tonyp@2472 | 5290 | rs.update_free_lists(); |
ysr@777 | 5291 | } |
ysr@777 | 5292 | |
ysr@777 | 5293 | void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { |
ysr@777 | 5294 | _refine_cte_cl->set_concurrent(concurrent); |
ysr@777 | 5295 | } |
ysr@777 | 5296 | |
ysr@777 | 5297 | bool G1CollectedHeap::is_in_closed_subset(const void* p) const { |
ysr@777 | 5298 | HeapRegion* hr = heap_region_containing(p); |
ysr@777 | 5299 | if (hr == NULL) { |
ysr@777 | 5300 | return is_in_permanent(p); |
ysr@777 | 5301 | } else { |
ysr@777 | 5302 | return hr->is_in(p); |
ysr@777 | 5303 | } |
ysr@777 | 5304 | } |
tonyp@2472 | 5305 | |
tonyp@2715 | 5306 | HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, |
tonyp@2715 | 5307 | bool force) { |
tonyp@2715 | 5308 | assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2715 | 5309 | assert(!force || g1_policy()->can_expand_young_list(), |
tonyp@2715 | 5310 | "if force is true we should be able to expand the young list"); |
tonyp@2715 | 5311 | if (force || !g1_policy()->is_young_list_full()) { |
tonyp@2715 | 5312 | HeapRegion* new_alloc_region = new_region(word_size, |
tonyp@2715 | 5313 | false /* do_expand */); |
tonyp@2715 | 5314 | if (new_alloc_region != NULL) { |
tonyp@2715 | 5315 | g1_policy()->update_region_num(true /* next_is_young */); |
tonyp@2715 | 5316 | set_region_short_lived_locked(new_alloc_region); |
tonyp@2715 | 5317 | return new_alloc_region; |
tonyp@2715 | 5318 | } |
tonyp@2715 | 5319 | } |
tonyp@2715 | 5320 | return NULL; |
tonyp@2715 | 5321 | } |
tonyp@2715 | 5322 | |
tonyp@2715 | 5323 | void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, |
tonyp@2715 | 5324 | size_t allocated_bytes) { |
tonyp@2715 | 5325 | assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2715 | 5326 | assert(alloc_region->is_young(), "all mutator alloc regions should be young"); |
tonyp@2715 | 5327 | |
tonyp@2715 | 5328 | g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); |
tonyp@2715 | 5329 | _summary_bytes_used += allocated_bytes; |
tonyp@2715 | 5330 | } |
tonyp@2715 | 5331 | |
tonyp@2715 | 5332 | HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, |
tonyp@2715 | 5333 | bool force) { |
tonyp@2715 | 5334 | return _g1h->new_mutator_alloc_region(word_size, force); |
tonyp@2715 | 5335 | } |
tonyp@2715 | 5336 | |
tonyp@2715 | 5337 | void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, |
tonyp@2715 | 5338 | size_t allocated_bytes) { |
tonyp@2715 | 5339 | _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); |
tonyp@2715 | 5340 | } |
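// The two methods above are MutatorAllocRegion's G1AllocRegion
// callbacks. A sketch of the flow when a mutator allocation request
// does not fit in the current alloc region (attempt_allocation() is an
// assumption based on the G1AllocRegion interface, not shown in this
// file):
//
//   attempt_allocation(word_size) fails
//     -> retire_region(current, allocated_bytes)
//          -> retire_mutator_alloc_region(): add the region to the
//             incremental CSet, fold allocated_bytes into
//             _summary_bytes_used
//     -> allocate_new_region(word_size, force)
//          -> new_mutator_alloc_region(): allocate a fresh region, tag
//             it young and push it on the young list, or return NULL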
tonyp@2715 | 5341 | |
tonyp@2715 | 5342 | // Heap region set verification |
tonyp@2715 | 5343 | |
tonyp@2472 | 5344 | class VerifyRegionListsClosure : public HeapRegionClosure { |
tonyp@2472 | 5345 | private: |
tonyp@2472 | 5346 | HumongousRegionSet* _humongous_set; |
tonyp@2472 | 5347 | FreeRegionList* _free_list; |
tonyp@2472 | 5348 | size_t _region_count; |
tonyp@2472 | 5349 | |
tonyp@2472 | 5350 | public: |
tonyp@2472 | 5351 | VerifyRegionListsClosure(HumongousRegionSet* humongous_set, |
tonyp@2472 | 5352 | FreeRegionList* free_list) : |
tonyp@2472 | 5353 | _humongous_set(humongous_set), _free_list(free_list), |
tonyp@2472 | 5354 | _region_count(0) { } |
tonyp@2472 | 5355 | |
tonyp@2472 | 5356 | size_t region_count() { return _region_count; } |
tonyp@2472 | 5357 | |
tonyp@2472 | 5358 | bool doHeapRegion(HeapRegion* hr) { |
tonyp@2472 | 5359 | _region_count += 1; |
tonyp@2472 | 5360 | |
tonyp@2472 | 5361 | if (hr->continuesHumongous()) { |
tonyp@2472 | 5362 | return false; |
tonyp@2472 | 5363 | } |
tonyp@2472 | 5364 | |
tonyp@2472 | 5365 | if (hr->is_young()) { |
tonyp@2472 | 5366 | // TODO |
tonyp@2472 | 5367 | } else if (hr->startsHumongous()) { |
tonyp@2472 | 5368 | _humongous_set->verify_next_region(hr); |
tonyp@2472 | 5369 | } else if (hr->is_empty()) { |
tonyp@2472 | 5370 | _free_list->verify_next_region(hr); |
tonyp@2472 | 5371 | } |
tonyp@2472 | 5372 | return false; |
tonyp@2472 | 5373 | } |
tonyp@2472 | 5374 | }; |
tonyp@2472 | 5375 | |
tonyp@2472 | 5376 | void G1CollectedHeap::verify_region_sets() { |
tonyp@2472 | 5377 | assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
tonyp@2472 | 5378 | |
tonyp@2472 | 5379 | // First, check the explicit lists. |
tonyp@2472 | 5380 | _free_list.verify(); |
tonyp@2472 | 5381 | { |
tonyp@2472 | 5382 | // Given that a concurrent operation might be adding regions to |
tonyp@2472 | 5383 | // the secondary free list we have to take the lock before |
tonyp@2472 | 5384 | // verifying it. |
tonyp@2472 | 5385 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 5386 | _secondary_free_list.verify(); |
tonyp@2472 | 5387 | } |
tonyp@2472 | 5388 | _humongous_set.verify(); |
tonyp@2472 | 5389 | |
tonyp@2472 | 5390 | // If a concurrent region freeing operation is in progress it will |
tonyp@2472 | 5391 | // be difficult to correctly attribute any free regions we come
tonyp@2472 | 5392 | // across to the correct free list given that they might belong to |
tonyp@2472 | 5393 | // one of several (free_list, secondary_free_list, any local lists, |
tonyp@2472 | 5394 | // etc.). So, if that's the case we will skip the rest of the
tonyp@2472 | 5395 | // verification operation. Skipping is safe given that no concurrent
tonyp@2472 | 5396 | // operation will last longer than the interval between two calls to
tonyp@2472 | 5397 | // verification, so we will not skip verification indefinitely. Waiting
tonyp@2472 | 5398 | // for the concurrent operation to complete, on the other hand, would
tonyp@2472 | 5399 | // have a non-trivial effect on the GC's operation and might hide any
tonyp@2472 |  | // issues that we would like to catch during testing.
tonyp@2472 | 5400 | if (free_regions_coming()) { |
tonyp@2472 | 5401 | return; |
tonyp@2472 | 5402 | } |
tonyp@2472 | 5403 | |
tonyp@2643 | 5404 | // Make sure we append the secondary_free_list to the free_list so
tonyp@2643 | 5405 | // that all free regions we will come across can be safely |
tonyp@2643 | 5406 | // attributed to the free_list. |
tonyp@2643 | 5407 | append_secondary_free_list_if_not_empty_with_lock(); |
tonyp@2472 | 5408 | |
tonyp@2472 | 5409 | // Finally, make sure that the region accounting in the lists is |
tonyp@2472 | 5410 | // consistent with what we see in the heap. |
tonyp@2472 | 5411 | _humongous_set.verify_start(); |
tonyp@2472 | 5412 | _free_list.verify_start(); |
tonyp@2472 | 5413 | |
tonyp@2472 | 5414 | VerifyRegionListsClosure cl(&_humongous_set, &_free_list); |
tonyp@2472 | 5415 | heap_region_iterate(&cl); |
tonyp@2472 | 5416 | |
tonyp@2472 | 5417 | _humongous_set.verify_end(); |
tonyp@2472 | 5418 | _free_list.verify_end(); |
ysr@777 | 5419 | } |