Fri, 27 Feb 2009 13:27:09 -0800
6810672: Comment typos
Summary: I have collected some typos I have found while looking at the code.
Reviewed-by: kvn, never
ysr@777 | 1 | /* |
xdono@905 | 2 | * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | #include "incls/_precompiled.incl" |
ysr@777 | 26 | #include "incls/_g1CollectedHeap.cpp.incl" |
ysr@777 | 27 | |
ysr@777 | 28 | // Turn this on so that the contents of the young list (scan-only / |
ysr@777 | 29 | // to-be-collected) are printed at "strategic" points before / during |
ysr@777 | 30 | // / after the collection --- this is useful for debugging. |
ysr@777 | 31 | #define SCAN_ONLY_VERBOSE 0 |
ysr@777 | 32 | // CURRENT STATUS |
ysr@777 | 33 | // This file is under construction. Search for "FIXME". |
ysr@777 | 34 | |
ysr@777 | 35 | // INVARIANTS/NOTES |
ysr@777 | 36 | // |
ysr@777 | 37 | // All allocation activity covered by the G1CollectedHeap interface is |
ysr@777 | 38 | // serialized by acquiring the HeapLock. This happens in |
ysr@777 | 39 | // mem_allocate_work, which all such allocation functions call. |
ysr@777 | 40 | // (Note that this does not apply to TLAB allocation, which is not part |
ysr@777 | 41 | // of this interface: it is done by clients of this interface.) |
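// (For the canonical locking pattern, see mem_allocate below: the caller
// takes the Heap_lock and calls attempt_allocation, which unlocks it on
// success; on failure the caller unlocks before scheduling a GC.)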
ysr@777 | 42 | |
ysr@777 | 43 | // Local to this file. |
ysr@777 | 44 | |
ysr@777 | 45 | // Finds the first HeapRegion. |
ysr@777 | 46 | // No longer used, but might be handy someday. |
ysr@777 | 47 | |
ysr@777 | 48 | class FindFirstRegionClosure: public HeapRegionClosure { |
ysr@777 | 49 | HeapRegion* _a_region; |
ysr@777 | 50 | public: |
ysr@777 | 51 | FindFirstRegionClosure() : _a_region(NULL) {} |
ysr@777 | 52 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 53 | _a_region = r; |
ysr@777 | 54 | return true; |
ysr@777 | 55 | } |
ysr@777 | 56 | HeapRegion* result() { return _a_region; } |
ysr@777 | 57 | }; |
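// Typical use would be (a sketch; the closure is currently unused):
//   FindFirstRegionClosure cl;
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   HeapRegion* first = cl.result();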
ysr@777 | 58 | |
ysr@777 | 59 | |
ysr@777 | 60 | class RefineCardTableEntryClosure: public CardTableEntryClosure { |
ysr@777 | 61 | SuspendibleThreadSet* _sts; |
ysr@777 | 62 | G1RemSet* _g1rs; |
ysr@777 | 63 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 64 | bool _concurrent; |
ysr@777 | 65 | public: |
ysr@777 | 66 | RefineCardTableEntryClosure(SuspendibleThreadSet* sts, |
ysr@777 | 67 | G1RemSet* g1rs, |
ysr@777 | 68 | ConcurrentG1Refine* cg1r) : |
ysr@777 | 69 | _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) |
ysr@777 | 70 | {} |
ysr@777 | 71 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
ysr@777 | 72 | _g1rs->concurrentRefineOneCard(card_ptr, worker_i); |
ysr@777 | 73 | if (_concurrent && _sts->should_yield()) { |
ysr@777 | 74 | // Caller will actually yield. |
ysr@777 | 75 | return false; |
ysr@777 | 76 | } |
ysr@777 | 77 | // Otherwise, we finished successfully; return true. |
ysr@777 | 78 | return true; |
ysr@777 | 79 | } |
ysr@777 | 80 | void set_concurrent(bool b) { _concurrent = b; } |
ysr@777 | 81 | }; |
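// NOTE: set_concurrent(false) presumably suppresses the yield check above
// for when the buffers are drained at a safepoint, where yielding to the
// suspendible thread set would be meaningless.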
ysr@777 | 82 | |
ysr@777 | 83 | |
ysr@777 | 84 | class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { |
ysr@777 | 85 | int _calls; |
ysr@777 | 86 | G1CollectedHeap* _g1h; |
ysr@777 | 87 | CardTableModRefBS* _ctbs; |
ysr@777 | 88 | int _histo[256]; |
ysr@777 | 89 | public: |
ysr@777 | 90 | ClearLoggedCardTableEntryClosure() : |
ysr@777 | 91 | _calls(0) |
ysr@777 | 92 | { |
ysr@777 | 93 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 94 | _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); |
ysr@777 | 95 | for (int i = 0; i < 256; i++) _histo[i] = 0; |
ysr@777 | 96 | } |
ysr@777 | 97 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
ysr@777 | 98 | if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { |
ysr@777 | 99 | _calls++; |
ysr@777 | 100 | unsigned char* ujb = (unsigned char*)card_ptr; |
ysr@777 | 101 | int ind = (int)(*ujb); |
ysr@777 | 102 | _histo[ind]++; |
ysr@777 | 103 | *card_ptr = -1; |
ysr@777 | 104 | } |
ysr@777 | 105 | return true; |
ysr@777 | 106 | } |
ysr@777 | 107 | int calls() { return _calls; } |
ysr@777 | 108 | void print_histo() { |
ysr@777 | 109 | gclog_or_tty->print_cr("Card table value histogram:"); |
ysr@777 | 110 | for (int i = 0; i < 256; i++) { |
ysr@777 | 111 | if (_histo[i] != 0) { |
ysr@777 | 112 | gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); |
ysr@777 | 113 | } |
ysr@777 | 114 | } |
ysr@777 | 115 | } |
ysr@777 | 116 | }; |
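// Note: the -1 written above is the card table's "clean" value; the
// redirtying closure below restores 0, the "dirty" value.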
ysr@777 | 117 | |
ysr@777 | 118 | class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { |
ysr@777 | 119 | int _calls; |
ysr@777 | 120 | G1CollectedHeap* _g1h; |
ysr@777 | 121 | CardTableModRefBS* _ctbs; |
ysr@777 | 122 | public: |
ysr@777 | 123 | RedirtyLoggedCardTableEntryClosure() : |
ysr@777 | 124 | _calls(0) |
ysr@777 | 125 | { |
ysr@777 | 126 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 127 | _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); |
ysr@777 | 128 | } |
ysr@777 | 129 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
ysr@777 | 130 | if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { |
ysr@777 | 131 | _calls++; |
ysr@777 | 132 | *card_ptr = 0; |
ysr@777 | 133 | } |
ysr@777 | 134 | return true; |
ysr@777 | 135 | } |
ysr@777 | 136 | int calls() { return _calls; } |
ysr@777 | 137 | }; |
ysr@777 | 138 | |
ysr@777 | 139 | YoungList::YoungList(G1CollectedHeap* g1h) |
ysr@777 | 140 | : _g1h(g1h), _head(NULL), |
ysr@777 | 141 | _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), |
ysr@777 | 142 | _length(0), _scan_only_length(0), |
ysr@777 | 143 | _last_sampled_rs_lengths(0), |
apetrusenko@980 | 144 | _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
ysr@777 | 145 | { |
ysr@777 | 146 | guarantee( check_list_empty(false), "just making sure..." ); |
ysr@777 | 147 | } |
ysr@777 | 148 | |
ysr@777 | 149 | void YoungList::push_region(HeapRegion *hr) { |
ysr@777 | 150 | assert(!hr->is_young(), "should not already be young"); |
ysr@777 | 151 | assert(hr->get_next_young_region() == NULL, "cause it should!"); |
ysr@777 | 152 | |
ysr@777 | 153 | hr->set_next_young_region(_head); |
ysr@777 | 154 | _head = hr; |
ysr@777 | 155 | |
ysr@777 | 156 | hr->set_young(); |
ysr@777 | 157 | double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); |
ysr@777 | 158 | ++_length; |
ysr@777 | 159 | } |
ysr@777 | 160 | |
ysr@777 | 161 | void YoungList::add_survivor_region(HeapRegion* hr) { |
apetrusenko@980 | 162 | assert(hr->is_survivor(), "should be flagged as survivor region"); |
ysr@777 | 163 | assert(hr->get_next_young_region() == NULL, "cause it should!"); |
ysr@777 | 164 | |
ysr@777 | 165 | hr->set_next_young_region(_survivor_head); |
ysr@777 | 166 | if (_survivor_head == NULL) { |
apetrusenko@980 | 167 | _survivor_tail = hr; |
ysr@777 | 168 | } |
ysr@777 | 169 | _survivor_head = hr; |
ysr@777 | 170 | |
ysr@777 | 171 | ++_survivor_length; |
ysr@777 | 172 | } |
ysr@777 | 173 | |
ysr@777 | 174 | HeapRegion* YoungList::pop_region() { |
ysr@777 | 175 | while (_head != NULL) { |
ysr@777 | 176 | assert( length() > 0, "list should not be empty" ); |
ysr@777 | 177 | HeapRegion* ret = _head; |
ysr@777 | 178 | _head = ret->get_next_young_region(); |
ysr@777 | 179 | ret->set_next_young_region(NULL); |
ysr@777 | 180 | --_length; |
ysr@777 | 181 | assert(ret->is_young(), "region should be very young"); |
ysr@777 | 182 | |
ysr@777 | 183 | // Replace the 'Survivor' region type with 'Young' so that the region |
ysr@777 | 184 | // will be treated as a young region and will not be 'confused' with |
ysr@777 | 185 | // newly created survivor regions. |
ysr@777 | 186 | if (ret->is_survivor()) { |
ysr@777 | 187 | ret->set_young(); |
ysr@777 | 188 | } |
ysr@777 | 189 | |
ysr@777 | 190 | if (!ret->is_scan_only()) { |
ysr@777 | 191 | return ret; |
ysr@777 | 192 | } |
ysr@777 | 193 | |
ysr@777 | 194 | // scan-only, we'll add it to the scan-only list |
ysr@777 | 195 | if (_scan_only_tail == NULL) { |
ysr@777 | 196 | guarantee( _scan_only_head == NULL, "invariant" ); |
ysr@777 | 197 | |
ysr@777 | 198 | _scan_only_head = ret; |
ysr@777 | 199 | _curr_scan_only = ret; |
ysr@777 | 200 | } else { |
ysr@777 | 201 | guarantee( _scan_only_head != NULL, "invariant" ); |
ysr@777 | 202 | _scan_only_tail->set_next_young_region(ret); |
ysr@777 | 203 | } |
ysr@777 | 204 | guarantee( ret->get_next_young_region() == NULL, "invariant" ); |
ysr@777 | 205 | _scan_only_tail = ret; |
ysr@777 | 206 | |
ysr@777 | 207 | // no need to be tagged as scan-only any more |
ysr@777 | 208 | ret->set_young(); |
ysr@777 | 209 | |
ysr@777 | 210 | ++_scan_only_length; |
ysr@777 | 211 | } |
ysr@777 | 212 | assert( length() == 0, "list should be empty" ); |
ysr@777 | 213 | return NULL; |
ysr@777 | 214 | } |
ysr@777 | 215 | |
ysr@777 | 216 | void YoungList::empty_list(HeapRegion* list) { |
ysr@777 | 217 | while (list != NULL) { |
ysr@777 | 218 | HeapRegion* next = list->get_next_young_region(); |
ysr@777 | 219 | list->set_next_young_region(NULL); |
ysr@777 | 220 | list->uninstall_surv_rate_group(); |
ysr@777 | 221 | list->set_not_young(); |
ysr@777 | 222 | list = next; |
ysr@777 | 223 | } |
ysr@777 | 224 | } |
ysr@777 | 225 | |
ysr@777 | 226 | void YoungList::empty_list() { |
ysr@777 | 227 | assert(check_list_well_formed(), "young list should be well formed"); |
ysr@777 | 228 | |
ysr@777 | 229 | empty_list(_head); |
ysr@777 | 230 | _head = NULL; |
ysr@777 | 231 | _length = 0; |
ysr@777 | 232 | |
ysr@777 | 233 | empty_list(_scan_only_head); |
ysr@777 | 234 | _scan_only_head = NULL; |
ysr@777 | 235 | _scan_only_tail = NULL; |
ysr@777 | 236 | _scan_only_length = 0; |
ysr@777 | 237 | _curr_scan_only = NULL; |
ysr@777 | 238 | |
ysr@777 | 239 | empty_list(_survivor_head); |
ysr@777 | 240 | _survivor_head = NULL; |
apetrusenko@980 | 241 | _survivor_tail = NULL; |
ysr@777 | 242 | _survivor_length = 0; |
ysr@777 | 243 | |
ysr@777 | 244 | _last_sampled_rs_lengths = 0; |
ysr@777 | 245 | |
ysr@777 | 246 | assert(check_list_empty(false), "just making sure..."); |
ysr@777 | 247 | } |
ysr@777 | 248 | |
ysr@777 | 249 | bool YoungList::check_list_well_formed() { |
ysr@777 | 250 | bool ret = true; |
ysr@777 | 251 | |
ysr@777 | 252 | size_t length = 0; |
ysr@777 | 253 | HeapRegion* curr = _head; |
ysr@777 | 254 | HeapRegion* last = NULL; |
ysr@777 | 255 | while (curr != NULL) { |
ysr@777 | 256 | if (!curr->is_young() || curr->is_scan_only()) { |
ysr@777 | 257 | gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
ysr@777 | 258 | "incorrectly tagged (%d, %d)", |
ysr@777 | 259 | curr->bottom(), curr->end(), |
ysr@777 | 260 | curr->is_young(), curr->is_scan_only()); |
ysr@777 | 261 | ret = false; |
ysr@777 | 262 | } |
ysr@777 | 263 | ++length; |
ysr@777 | 264 | last = curr; |
ysr@777 | 265 | curr = curr->get_next_young_region(); |
ysr@777 | 266 | } |
ysr@777 | 267 | ret = ret && (length == _length); |
ysr@777 | 268 | |
ysr@777 | 269 | if (!ret) { |
ysr@777 | 270 | gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); |
ysr@777 | 271 | gclog_or_tty->print_cr("### list has %d entries, _length is %d", |
ysr@777 | 272 | length, _length); |
ysr@777 | 273 | } |
ysr@777 | 274 | |
ysr@777 | 275 | bool scan_only_ret = true; |
ysr@777 | 276 | length = 0; |
ysr@777 | 277 | curr = _scan_only_head; |
ysr@777 | 278 | last = NULL; |
ysr@777 | 279 | while (curr != NULL) { |
ysr@777 | 280 | if (!curr->is_young() || curr->is_scan_only()) { |
ysr@777 | 281 | gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " |
ysr@777 | 282 | "incorrectly tagged (%d, %d)", |
ysr@777 | 283 | curr->bottom(), curr->end(), |
ysr@777 | 284 | curr->is_young(), curr->is_scan_only()); |
ysr@777 | 285 | scan_only_ret = false; |
ysr@777 | 286 | } |
ysr@777 | 287 | ++length; |
ysr@777 | 288 | last = curr; |
ysr@777 | 289 | curr = curr->get_next_young_region(); |
ysr@777 | 290 | } |
ysr@777 | 291 | scan_only_ret = scan_only_ret && (length == _scan_only_length); |
ysr@777 | 292 | |
ysr@777 | 293 | if ( (last != _scan_only_tail) || |
ysr@777 | 294 | (_scan_only_head == NULL && _scan_only_tail != NULL) || |
ysr@777 | 295 | (_scan_only_head != NULL && _scan_only_tail == NULL) ) { |
ysr@777 | 296 | gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); |
ysr@777 | 297 | scan_only_ret = false; |
ysr@777 | 298 | } |
ysr@777 | 299 | |
ysr@777 | 300 | if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { |
ysr@777 | 301 | gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); |
ysr@777 | 302 | scan_only_ret = false; |
ysr@777 | 303 | } |
ysr@777 | 304 | |
ysr@777 | 305 | if (!scan_only_ret) { |
ysr@777 | 306 | gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); |
ysr@777 | 307 | gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", |
ysr@777 | 308 | length, _scan_only_length); |
ysr@777 | 309 | } |
ysr@777 | 310 | |
ysr@777 | 311 | return ret && scan_only_ret; |
ysr@777 | 312 | } |
ysr@777 | 313 | |
ysr@777 | 314 | bool YoungList::check_list_empty(bool ignore_scan_only_list, |
ysr@777 | 315 | bool check_sample) { |
ysr@777 | 316 | bool ret = true; |
ysr@777 | 317 | |
ysr@777 | 318 | if (_length != 0) { |
ysr@777 | 319 | gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", |
ysr@777 | 320 | _length); |
ysr@777 | 321 | ret = false; |
ysr@777 | 322 | } |
ysr@777 | 323 | if (check_sample && _last_sampled_rs_lengths != 0) { |
ysr@777 | 324 | gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); |
ysr@777 | 325 | ret = false; |
ysr@777 | 326 | } |
ysr@777 | 327 | if (_head != NULL) { |
ysr@777 | 328 | gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); |
ysr@777 | 329 | ret = false; |
ysr@777 | 330 | } |
ysr@777 | 331 | if (!ret) { |
ysr@777 | 332 | gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); |
ysr@777 | 333 | } |
ysr@777 | 334 | |
ysr@777 | 335 | if (ignore_scan_only_list) |
ysr@777 | 336 | return ret; |
ysr@777 | 337 | |
ysr@777 | 338 | bool scan_only_ret = true; |
ysr@777 | 339 | if (_scan_only_length != 0) { |
ysr@777 | 340 | gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", |
ysr@777 | 341 | _scan_only_length); |
ysr@777 | 342 | scan_only_ret = false; |
ysr@777 | 343 | } |
ysr@777 | 344 | if (_scan_only_head != NULL) { |
ysr@777 | 345 | gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); |
ysr@777 | 346 | scan_only_ret = false; |
ysr@777 | 347 | } |
ysr@777 | 348 | if (_scan_only_tail != NULL) { |
ysr@777 | 349 | gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); |
ysr@777 | 350 | scan_only_ret = false; |
ysr@777 | 351 | } |
ysr@777 | 352 | if (!scan_only_ret) { |
ysr@777 | 353 | gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); |
ysr@777 | 354 | } |
ysr@777 | 355 | |
ysr@777 | 356 | return ret && scan_only_ret; |
ysr@777 | 357 | } |
ysr@777 | 358 | |
ysr@777 | 359 | void |
ysr@777 | 360 | YoungList::rs_length_sampling_init() { |
ysr@777 | 361 | _sampled_rs_lengths = 0; |
ysr@777 | 362 | _curr = _head; |
ysr@777 | 363 | } |
ysr@777 | 364 | |
ysr@777 | 365 | bool |
ysr@777 | 366 | YoungList::rs_length_sampling_more() { |
ysr@777 | 367 | return _curr != NULL; |
ysr@777 | 368 | } |
ysr@777 | 369 | |
ysr@777 | 370 | void |
ysr@777 | 371 | YoungList::rs_length_sampling_next() { |
ysr@777 | 372 | assert( _curr != NULL, "invariant" ); |
ysr@777 | 373 | _sampled_rs_lengths += _curr->rem_set()->occupied(); |
ysr@777 | 374 | _curr = _curr->get_next_young_region(); |
ysr@777 | 375 | if (_curr == NULL) { |
ysr@777 | 376 | _last_sampled_rs_lengths = _sampled_rs_lengths; |
ysr@777 | 377 | // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); |
ysr@777 | 378 | } |
ysr@777 | 379 | } |
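// A full sampling pass is expected to look like this (a sketch):
//   rs_length_sampling_init();
//   while (rs_length_sampling_more()) {
//     rs_length_sampling_next();
//   }
// after which _last_sampled_rs_lengths holds the summed RS occupancies.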
ysr@777 | 380 | |
ysr@777 | 381 | void |
ysr@777 | 382 | YoungList::reset_auxilary_lists() { |
ysr@777 | 383 | // We could have just "moved" the scan-only list to the young list. |
ysr@777 | 384 | // However, the scan-only list is ordered by region age in descending |
ysr@777 | 385 | // order, so by moving one entry at a time we ensure that it is |
ysr@777 | 386 | // recreated in ascending order. |
ysr@777 | 387 | |
ysr@777 | 388 | guarantee( is_empty(), "young list should be empty" ); |
ysr@777 | 389 | assert(check_list_well_formed(), "young list should be well formed"); |
ysr@777 | 390 | |
ysr@777 | 391 | // Add survivor regions to SurvRateGroup. |
ysr@777 | 392 | _g1h->g1_policy()->note_start_adding_survivor_regions(); |
apetrusenko@980 | 393 | _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
ysr@777 | 394 | for (HeapRegion* curr = _survivor_head; |
ysr@777 | 395 | curr != NULL; |
ysr@777 | 396 | curr = curr->get_next_young_region()) { |
ysr@777 | 397 | _g1h->g1_policy()->set_region_survivors(curr); |
ysr@777 | 398 | } |
ysr@777 | 399 | _g1h->g1_policy()->note_stop_adding_survivor_regions(); |
ysr@777 | 400 | |
ysr@777 | 401 | if (_survivor_head != NULL) { |
ysr@777 | 402 | _head = _survivor_head; |
ysr@777 | 403 | _length = _survivor_length + _scan_only_length; |
apetrusenko@980 | 404 | _survivor_tail->set_next_young_region(_scan_only_head); |
ysr@777 | 405 | } else { |
ysr@777 | 406 | _head = _scan_only_head; |
ysr@777 | 407 | _length = _scan_only_length; |
ysr@777 | 408 | } |
ysr@777 | 409 | |
ysr@777 | 410 | for (HeapRegion* curr = _scan_only_head; |
ysr@777 | 411 | curr != NULL; |
ysr@777 | 412 | curr = curr->get_next_young_region()) { |
ysr@777 | 413 | curr->recalculate_age_in_surv_rate_group(); |
ysr@777 | 414 | } |
ysr@777 | 415 | _scan_only_head = NULL; |
ysr@777 | 416 | _scan_only_tail = NULL; |
ysr@777 | 417 | _scan_only_length = 0; |
ysr@777 | 418 | _curr_scan_only = NULL; |
ysr@777 | 419 | |
ysr@777 | 420 | _survivor_head = NULL; |
apetrusenko@980 | 421 | _survivor_tail = NULL; |
ysr@777 | 422 | _survivor_length = 0; |
apetrusenko@980 | 423 | _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
ysr@777 | 424 | |
ysr@777 | 425 | assert(check_list_well_formed(), "young list should be well formed"); |
ysr@777 | 426 | } |
ysr@777 | 427 | |
ysr@777 | 428 | void YoungList::print() { |
ysr@777 | 429 | HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; |
ysr@777 | 430 | const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; |
ysr@777 | 431 | |
ysr@777 | 432 | for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { |
ysr@777 | 433 | gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); |
ysr@777 | 434 | HeapRegion *curr = lists[list]; |
ysr@777 | 435 | if (curr == NULL) |
ysr@777 | 436 | gclog_or_tty->print_cr(" empty"); |
ysr@777 | 437 | while (curr != NULL) { |
ysr@777 | 438 | gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " |
ysr@777 | 439 | "age: %4d, y: %d, s-o: %d, surv: %d", |
ysr@777 | 440 | curr->bottom(), curr->end(), |
ysr@777 | 441 | curr->top(), |
ysr@777 | 442 | curr->prev_top_at_mark_start(), |
ysr@777 | 443 | curr->next_top_at_mark_start(), |
ysr@777 | 444 | curr->top_at_conc_mark_count(), |
ysr@777 | 445 | curr->age_in_surv_rate_group_cond(), |
ysr@777 | 446 | curr->is_young(), |
ysr@777 | 447 | curr->is_scan_only(), |
ysr@777 | 448 | curr->is_survivor()); |
ysr@777 | 449 | curr = curr->get_next_young_region(); |
ysr@777 | 450 | } |
ysr@777 | 451 | } |
ysr@777 | 452 | |
ysr@777 | 453 | gclog_or_tty->print_cr(""); |
ysr@777 | 454 | } |
ysr@777 | 455 | |
ysr@777 | 456 | void G1CollectedHeap::stop_conc_gc_threads() { |
ysr@777 | 457 | _cg1r->cg1rThread()->stop(); |
ysr@777 | 458 | _czft->stop(); |
ysr@777 | 459 | _cmThread->stop(); |
ysr@777 | 460 | } |
ysr@777 | 461 | |
ysr@777 | 462 | |
ysr@777 | 463 | void G1CollectedHeap::check_ct_logs_at_safepoint() { |
ysr@777 | 464 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 465 | CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); |
ysr@777 | 466 | |
ysr@777 | 467 | // Count the dirty cards at the start. |
ysr@777 | 468 | CountNonCleanMemRegionClosure count1(this); |
ysr@777 | 469 | ct_bs->mod_card_iterate(&count1); |
ysr@777 | 470 | int orig_count = count1.n(); |
ysr@777 | 471 | |
ysr@777 | 472 | // First clear the logged cards. |
ysr@777 | 473 | ClearLoggedCardTableEntryClosure clear; |
ysr@777 | 474 | dcqs.set_closure(&clear); |
ysr@777 | 475 | dcqs.apply_closure_to_all_completed_buffers(); |
ysr@777 | 476 | dcqs.iterate_closure_all_threads(false); |
ysr@777 | 477 | clear.print_histo(); |
ysr@777 | 478 | |
ysr@777 | 479 | // Now ensure that there are no dirty cards. |
ysr@777 | 480 | CountNonCleanMemRegionClosure count2(this); |
ysr@777 | 481 | ct_bs->mod_card_iterate(&count2); |
ysr@777 | 482 | if (count2.n() != 0) { |
ysr@777 | 483 | gclog_or_tty->print_cr("Card table has %d entries; %d originally", |
ysr@777 | 484 | count2.n(), orig_count); |
ysr@777 | 485 | } |
ysr@777 | 486 | guarantee(count2.n() == 0, "Card table should be clean."); |
ysr@777 | 487 | |
ysr@777 | 488 | RedirtyLoggedCardTableEntryClosure redirty; |
ysr@777 | 489 | JavaThread::dirty_card_queue_set().set_closure(&redirty); |
ysr@777 | 490 | dcqs.apply_closure_to_all_completed_buffers(); |
ysr@777 | 491 | dcqs.iterate_closure_all_threads(false); |
ysr@777 | 492 | gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", |
ysr@777 | 493 | clear.calls(), orig_count); |
ysr@777 | 494 | guarantee(redirty.calls() == clear.calls(), |
ysr@777 | 495 | "Or else mechanism is broken."); |
ysr@777 | 496 | |
ysr@777 | 497 | CountNonCleanMemRegionClosure count3(this); |
ysr@777 | 498 | ct_bs->mod_card_iterate(&count3); |
ysr@777 | 499 | if (count3.n() != orig_count) { |
ysr@777 | 500 | gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", |
ysr@777 | 501 | orig_count, count3.n()); |
ysr@777 | 502 | guarantee(count3.n() >= orig_count, "Should have restored them all."); |
ysr@777 | 503 | } |
ysr@777 | 504 | |
ysr@777 | 505 | JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); |
ysr@777 | 506 | } |
ysr@777 | 507 | |
ysr@777 | 508 | // Private class members. |
ysr@777 | 509 | |
ysr@777 | 510 | G1CollectedHeap* G1CollectedHeap::_g1h; |
ysr@777 | 511 | |
ysr@777 | 512 | // Private methods. |
ysr@777 | 513 | |
ysr@777 | 514 | // Finds a HeapRegion that can be used to allocate a block of the given size. |
ysr@777 | 515 | |
ysr@777 | 516 | |
ysr@777 | 517 | HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, |
ysr@777 | 518 | bool do_expand, |
ysr@777 | 519 | bool zero_filled) { |
ysr@777 | 520 | ConcurrentZFThread::note_region_alloc(); |
ysr@777 | 521 | HeapRegion* res = alloc_free_region_from_lists(zero_filled); |
ysr@777 | 522 | if (res == NULL && do_expand) { |
ysr@777 | 523 | expand(word_size * HeapWordSize); |
ysr@777 | 524 | res = alloc_free_region_from_lists(zero_filled); |
ysr@777 | 525 | assert(res == NULL || |
ysr@777 | 526 | (!res->isHumongous() && |
ysr@777 | 527 | (!zero_filled || |
ysr@777 | 528 | res->zero_fill_state() == HeapRegion::Allocated)), |
ysr@777 | 529 | "Alloc Regions must be zero filled (and non-H)"); |
ysr@777 | 530 | } |
ysr@777 | 531 | if (res != NULL && res->is_empty()) _free_regions--; |
ysr@777 | 532 | assert(res == NULL || |
ysr@777 | 533 | (!res->isHumongous() && |
ysr@777 | 534 | (!zero_filled || |
ysr@777 | 535 | res->zero_fill_state() == HeapRegion::Allocated)), |
ysr@777 | 536 | "Non-young alloc Regions must be zero filled (and non-H)"); |
ysr@777 | 537 | |
ysr@777 | 538 | if (G1TraceRegions) { |
ysr@777 | 539 | if (res != NULL) { |
ysr@777 | 540 | gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
ysr@777 | 541 | "top "PTR_FORMAT, |
ysr@777 | 542 | res->hrs_index(), res->bottom(), res->end(), res->top()); |
ysr@777 | 543 | } |
ysr@777 | 544 | } |
ysr@777 | 545 | |
ysr@777 | 546 | return res; |
ysr@777 | 547 | } |
ysr@777 | 548 | |
ysr@777 | 549 | HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, |
ysr@777 | 550 | size_t word_size, |
ysr@777 | 551 | bool zero_filled) { |
ysr@777 | 552 | HeapRegion* alloc_region = NULL; |
ysr@777 | 553 | if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { |
ysr@777 | 554 | alloc_region = newAllocRegion_work(word_size, true, zero_filled); |
ysr@777 | 555 | if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
apetrusenko@980 | 556 | alloc_region->set_survivor(); |
ysr@777 | 557 | } |
ysr@777 | 558 | ++_gc_alloc_region_counts[purpose]; |
ysr@777 | 559 | } else { |
ysr@777 | 560 | g1_policy()->note_alloc_region_limit_reached(purpose); |
ysr@777 | 561 | } |
ysr@777 | 562 | return alloc_region; |
ysr@777 | 563 | } |
ysr@777 | 564 | |
ysr@777 | 565 | // If the allocation could fit into the free regions w/o expansion, try that. |
ysr@777 | 566 | // Otherwise, if we can expand, do so. |
ysr@777 | 567 | // Otherwise, if using the expansion regions might help, try with them given back. |
ysr@777 | 568 | HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { |
ysr@777 | 569 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 570 | |
ysr@777 | 571 | // We can't allocate H regions while cleanupComplete is running, since |
ysr@777 | 572 | // some of the regions we find to be empty might not yet be added to the |
ysr@777 | 573 | // unclean list. (If we're already at a safepoint, this call is |
ysr@777 | 574 | // unnecessary, not to mention wrong.) |
ysr@777 | 575 | if (!SafepointSynchronize::is_at_safepoint()) |
ysr@777 | 576 | wait_for_cleanup_complete(); |
ysr@777 | 577 | |
ysr@777 | 578 | size_t num_regions = |
ysr@777 | 579 | round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
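  // (For example, with a hypothetical HeapRegion::GrainWords of 128K words,
  // a 300K-word request rounds up to 384K words, i.e. num_regions == 3.)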
ysr@777 | 580 | |
ysr@777 | 581 | // Special case if < one region??? |
ysr@777 | 582 | |
ysr@777 | 583 | // Remember the number of regions we could still expand by. |
ysr@777 | 584 | size_t x_size = expansion_regions(); |
ysr@777 | 585 | |
ysr@777 | 586 | HeapWord* res = NULL; |
ysr@777 | 587 | bool eliminated_allocated_from_lists = false; |
ysr@777 | 588 | |
ysr@777 | 589 | // Can the allocation potentially fit in the free regions? |
ysr@777 | 590 | if (free_regions() >= num_regions) { |
ysr@777 | 591 | res = _hrs->obj_allocate(word_size); |
ysr@777 | 592 | } |
ysr@777 | 593 | if (res == NULL) { |
ysr@777 | 594 | // Try expansion. |
ysr@777 | 595 | size_t fs = _hrs->free_suffix(); |
ysr@777 | 596 | if (fs + x_size >= num_regions) { |
ysr@777 | 597 | expand((num_regions - fs) * HeapRegion::GrainBytes); |
ysr@777 | 598 | res = _hrs->obj_allocate(word_size); |
ysr@777 | 599 | assert(res != NULL, "This should have worked."); |
ysr@777 | 600 | } else { |
ysr@777 | 601 | // Expansion won't help. Are there enough free regions if we get rid |
ysr@777 | 602 | // of reservations? |
ysr@777 | 603 | size_t avail = free_regions(); |
ysr@777 | 604 | if (avail >= num_regions) { |
ysr@777 | 605 | res = _hrs->obj_allocate(word_size); |
ysr@777 | 606 | if (res != NULL) { |
ysr@777 | 607 | remove_allocated_regions_from_lists(); |
ysr@777 | 608 | eliminated_allocated_from_lists = true; |
ysr@777 | 609 | } |
ysr@777 | 610 | } |
ysr@777 | 611 | } |
ysr@777 | 612 | } |
ysr@777 | 613 | if (res != NULL) { |
ysr@777 | 614 | // Increment by the number of regions allocated. |
ysr@777 | 615 | // FIXME: Assumes regions all of size GrainBytes. |
ysr@777 | 616 | #ifndef PRODUCT |
ysr@777 | 617 | mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * |
ysr@777 | 618 | HeapRegion::GrainWords)); |
ysr@777 | 619 | #endif |
ysr@777 | 620 | if (!eliminated_allocated_from_lists) |
ysr@777 | 621 | remove_allocated_regions_from_lists(); |
ysr@777 | 622 | _summary_bytes_used += word_size * HeapWordSize; |
ysr@777 | 623 | _free_regions -= num_regions; |
ysr@777 | 624 | _num_humongous_regions += (int) num_regions; |
ysr@777 | 625 | } |
ysr@777 | 626 | assert(regions_accounted_for(), "Region Leakage"); |
ysr@777 | 627 | return res; |
ysr@777 | 628 | } |
ysr@777 | 629 | |
ysr@777 | 630 | HeapWord* |
ysr@777 | 631 | G1CollectedHeap::attempt_allocation_slow(size_t word_size, |
ysr@777 | 632 | bool permit_collection_pause) { |
ysr@777 | 633 | HeapWord* res = NULL; |
ysr@777 | 634 | HeapRegion* allocated_young_region = NULL; |
ysr@777 | 635 | |
ysr@777 | 636 | assert( SafepointSynchronize::is_at_safepoint() || |
ysr@777 | 637 | Heap_lock->owned_by_self(), "pre condition of the call" ); |
ysr@777 | 638 | |
ysr@777 | 639 | if (isHumongous(word_size)) { |
ysr@777 | 640 | // Allocation of a humongous object can, in a sense, complete a |
ysr@777 | 641 | // partial region, if the previous alloc was also humongous, and |
ysr@777 | 642 | // caused the test below to succeed. |
ysr@777 | 643 | if (permit_collection_pause) |
ysr@777 | 644 | do_collection_pause_if_appropriate(word_size); |
ysr@777 | 645 | res = humongousObjAllocate(word_size); |
ysr@777 | 646 | assert(_cur_alloc_region == NULL |
ysr@777 | 647 | || !_cur_alloc_region->isHumongous(), |
ysr@777 | 648 | "Prevent a regression of this bug."); |
ysr@777 | 649 | |
ysr@777 | 650 | } else { |
iveresov@789 | 651 | // We may have concurrent cleanup working at the time. Wait for it |
iveresov@789 | 652 | // to complete. In the future we would probably want to make the |
iveresov@789 | 653 | // concurrent cleanup truly concurrent by decoupling it from the |
iveresov@789 | 654 | // allocation. |
iveresov@789 | 655 | if (!SafepointSynchronize::is_at_safepoint()) |
iveresov@789 | 656 | wait_for_cleanup_complete(); |
ysr@777 | 657 | // If we do a collection pause, this will be reset to a non-NULL |
ysr@777 | 658 | // value. If we don't, nulling here ensures that we allocate a new |
ysr@777 | 659 | // region below. |
ysr@777 | 660 | if (_cur_alloc_region != NULL) { |
ysr@777 | 661 | // We're finished with the _cur_alloc_region. |
ysr@777 | 662 | _summary_bytes_used += _cur_alloc_region->used(); |
ysr@777 | 663 | _cur_alloc_region = NULL; |
ysr@777 | 664 | } |
ysr@777 | 665 | assert(_cur_alloc_region == NULL, "Invariant."); |
ysr@777 | 666 | // Completion of a heap region is perhaps a good point at which to do |
ysr@777 | 667 | // a collection pause. |
ysr@777 | 668 | if (permit_collection_pause) |
ysr@777 | 669 | do_collection_pause_if_appropriate(word_size); |
ysr@777 | 670 | // Make sure we have an allocation region available. |
ysr@777 | 671 | if (_cur_alloc_region == NULL) { |
ysr@777 | 672 | if (!SafepointSynchronize::is_at_safepoint()) |
ysr@777 | 673 | wait_for_cleanup_complete(); |
ysr@777 | 674 | bool next_is_young = should_set_young_locked(); |
ysr@777 | 675 | // If the next region is not young, make sure it's zero-filled. |
ysr@777 | 676 | _cur_alloc_region = newAllocRegion(word_size, !next_is_young); |
ysr@777 | 677 | if (_cur_alloc_region != NULL) { |
ysr@777 | 678 | _summary_bytes_used -= _cur_alloc_region->used(); |
ysr@777 | 679 | if (next_is_young) { |
ysr@777 | 680 | set_region_short_lived_locked(_cur_alloc_region); |
ysr@777 | 681 | allocated_young_region = _cur_alloc_region; |
ysr@777 | 682 | } |
ysr@777 | 683 | } |
ysr@777 | 684 | } |
ysr@777 | 685 | assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), |
ysr@777 | 686 | "Prevent a regression of this bug."); |
ysr@777 | 687 | |
ysr@777 | 688 | // Now retry the allocation. |
ysr@777 | 689 | if (_cur_alloc_region != NULL) { |
ysr@777 | 690 | res = _cur_alloc_region->allocate(word_size); |
ysr@777 | 691 | } |
ysr@777 | 692 | } |
ysr@777 | 693 | |
ysr@777 | 694 | // NOTE: fails frequently in PRT |
ysr@777 | 695 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 696 | |
ysr@777 | 697 | if (res != NULL) { |
ysr@777 | 698 | if (!SafepointSynchronize::is_at_safepoint()) { |
ysr@777 | 699 | assert( permit_collection_pause, "invariant" ); |
ysr@777 | 700 | assert( Heap_lock->owned_by_self(), "invariant" ); |
ysr@777 | 701 | Heap_lock->unlock(); |
ysr@777 | 702 | } |
ysr@777 | 703 | |
ysr@777 | 704 | if (allocated_young_region != NULL) { |
ysr@777 | 705 | HeapRegion* hr = allocated_young_region; |
ysr@777 | 706 | HeapWord* bottom = hr->bottom(); |
ysr@777 | 707 | HeapWord* end = hr->end(); |
ysr@777 | 708 | MemRegion mr(bottom, end); |
ysr@777 | 709 | ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); |
ysr@777 | 710 | } |
ysr@777 | 711 | } |
ysr@777 | 712 | |
ysr@777 | 713 | assert( SafepointSynchronize::is_at_safepoint() || |
ysr@777 | 714 | (res == NULL && Heap_lock->owned_by_self()) || |
ysr@777 | 715 | (res != NULL && !Heap_lock->owned_by_self()), |
ysr@777 | 716 | "post condition of the call" ); |
ysr@777 | 717 | |
ysr@777 | 718 | return res; |
ysr@777 | 719 | } |
ysr@777 | 720 | |
ysr@777 | 721 | HeapWord* |
ysr@777 | 722 | G1CollectedHeap::mem_allocate(size_t word_size, |
ysr@777 | 723 | bool is_noref, |
ysr@777 | 724 | bool is_tlab, |
ysr@777 | 725 | bool* gc_overhead_limit_was_exceeded) { |
ysr@777 | 726 | debug_only(check_for_valid_allocation_state()); |
ysr@777 | 727 | assert(no_gc_in_progress(), "Allocation during gc not allowed"); |
ysr@777 | 728 | HeapWord* result = NULL; |
ysr@777 | 729 | |
ysr@777 | 730 | // Loop until the allocation is satisfied, |
ysr@777 | 731 | // or unsatisfied after GC. |
ysr@777 | 732 | for (int try_count = 1; /* return or throw */; try_count += 1) { |
ysr@777 | 733 | int gc_count_before; |
ysr@777 | 734 | { |
ysr@777 | 735 | Heap_lock->lock(); |
ysr@777 | 736 | result = attempt_allocation(word_size); |
ysr@777 | 737 | if (result != NULL) { |
ysr@777 | 738 | // attempt_allocation should have unlocked the heap lock |
ysr@777 | 739 | assert(is_in(result), "result not in heap"); |
ysr@777 | 740 | return result; |
ysr@777 | 741 | } |
ysr@777 | 742 | // Read the gc count while the heap lock is held. |
ysr@777 | 743 | gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@777 | 744 | Heap_lock->unlock(); |
ysr@777 | 745 | } |
ysr@777 | 746 | |
ysr@777 | 747 | // Create the garbage collection operation... |
ysr@777 | 748 | VM_G1CollectForAllocation op(word_size, |
ysr@777 | 749 | gc_count_before); |
ysr@777 | 750 | |
ysr@777 | 751 | // ...and get the VM thread to execute it. |
ysr@777 | 752 | VMThread::execute(&op); |
ysr@777 | 753 | if (op.prologue_succeeded()) { |
ysr@777 | 754 | result = op.result(); |
ysr@777 | 755 | assert(result == NULL || is_in(result), "result not in heap"); |
ysr@777 | 756 | return result; |
ysr@777 | 757 | } |
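    // Otherwise the prologue failed, e.g. because another GC completed
    // first and the collection count moved on; fall through and retry.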
ysr@777 | 758 | |
ysr@777 | 759 | // Give a warning if we seem to be looping forever. |
ysr@777 | 760 | if ((QueuedAllocationWarningCount > 0) && |
ysr@777 | 761 | (try_count % QueuedAllocationWarningCount == 0)) { |
ysr@777 | 762 | warning("G1CollectedHeap::mem_allocate_work retries %d times", |
ysr@777 | 763 | try_count); |
ysr@777 | 764 | } |
ysr@777 | 765 | } |
ysr@777 | 766 | } |
ysr@777 | 767 | |
ysr@777 | 768 | void G1CollectedHeap::abandon_cur_alloc_region() { |
ysr@777 | 769 | if (_cur_alloc_region != NULL) { |
ysr@777 | 770 | // We're finished with the _cur_alloc_region. |
ysr@777 | 771 | if (_cur_alloc_region->is_empty()) { |
ysr@777 | 772 | _free_regions++; |
ysr@777 | 773 | free_region(_cur_alloc_region); |
ysr@777 | 774 | } else { |
ysr@777 | 775 | _summary_bytes_used += _cur_alloc_region->used(); |
ysr@777 | 776 | } |
ysr@777 | 777 | _cur_alloc_region = NULL; |
ysr@777 | 778 | } |
ysr@777 | 779 | } |
ysr@777 | 780 | |
ysr@777 | 781 | class PostMCRemSetClearClosure: public HeapRegionClosure { |
ysr@777 | 782 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 783 | public: |
ysr@777 | 784 | PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} |
ysr@777 | 785 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 786 | r->reset_gc_time_stamp(); |
ysr@777 | 787 | if (r->continuesHumongous()) |
ysr@777 | 788 | return false; |
ysr@777 | 789 | HeapRegionRemSet* hrrs = r->rem_set(); |
ysr@777 | 790 | if (hrrs != NULL) hrrs->clear(); |
ysr@777 | 791 | // You might think here that we could clear just the cards |
ysr@777 | 792 | // corresponding to the used region. But no: if we leave a dirty card |
ysr@777 | 793 | // in a region we might allocate into, then it would prevent that card |
ysr@777 | 794 | // from being enqueued, and cause it to be missed. |
ysr@777 | 795 | // Re: the performance cost: we shouldn't be doing full GC anyway! |
ysr@777 | 796 | _mr_bs->clear(MemRegion(r->bottom(), r->end())); |
ysr@777 | 797 | return false; |
ysr@777 | 798 | } |
ysr@777 | 799 | }; |
ysr@777 | 800 | |
ysr@777 | 801 | |
ysr@777 | 802 | class PostMCRemSetInvalidateClosure: public HeapRegionClosure { |
ysr@777 | 803 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 804 | public: |
ysr@777 | 805 | PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} |
ysr@777 | 806 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 807 | if (r->continuesHumongous()) return false; |
ysr@777 | 808 | if (r->used_region().word_size() != 0) { |
ysr@777 | 809 | _mr_bs->invalidate(r->used_region(), true /*whole heap*/); |
ysr@777 | 810 | } |
ysr@777 | 811 | return false; |
ysr@777 | 812 | } |
ysr@777 | 813 | }; |
ysr@777 | 814 | |
ysr@777 | 815 | void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, |
ysr@777 | 816 | size_t word_size) { |
ysr@777 | 817 | ResourceMark rm; |
ysr@777 | 818 | |
ysr@777 | 819 | if (full && DisableExplicitGC) { |
ysr@777 | 820 | gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); |
ysr@777 | 821 | return; |
ysr@777 | 822 | } |
ysr@777 | 823 | |
ysr@777 | 824 | assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
ysr@777 | 825 | assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
ysr@777 | 826 | |
ysr@777 | 827 | if (GC_locker::is_active()) { |
ysr@777 | 828 | return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
ysr@777 | 829 | } |
ysr@777 | 830 | |
ysr@777 | 831 | { |
ysr@777 | 832 | IsGCActiveMark x; |
ysr@777 | 833 | |
ysr@777 | 834 | // Timing |
ysr@777 | 835 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
ysr@777 | 836 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
ysr@777 | 837 | TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); |
ysr@777 | 838 | |
ysr@777 | 839 | double start = os::elapsedTime(); |
ysr@777 | 840 | GCOverheadReporter::recordSTWStart(start); |
ysr@777 | 841 | g1_policy()->record_full_collection_start(); |
ysr@777 | 842 | |
ysr@777 | 843 | gc_prologue(true); |
ysr@777 | 844 | increment_total_collections(); |
ysr@777 | 845 | |
ysr@777 | 846 | size_t g1h_prev_used = used(); |
ysr@777 | 847 | assert(used() == recalculate_used(), "Should be equal"); |
ysr@777 | 848 | |
ysr@777 | 849 | if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 850 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 851 | prepare_for_verify(); |
ysr@777 | 852 | gclog_or_tty->print(" VerifyBeforeGC:"); |
ysr@777 | 853 | Universe::verify(true); |
ysr@777 | 854 | } |
ysr@777 | 855 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 856 | |
ysr@777 | 857 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
ysr@777 | 858 | |
ysr@777 | 859 | // We want to discover references, but not process them yet. |
ysr@777 | 860 | // This mode is disabled in |
ysr@777 | 861 | // instanceRefKlass::process_discovered_references if the |
ysr@777 | 862 | // generation does some collection work, or |
ysr@777 | 863 | // instanceRefKlass::enqueue_discovered_references if the |
ysr@777 | 864 | // generation returns without doing any work. |
ysr@777 | 865 | ref_processor()->disable_discovery(); |
ysr@777 | 866 | ref_processor()->abandon_partial_discovery(); |
ysr@777 | 867 | ref_processor()->verify_no_references_recorded(); |
ysr@777 | 868 | |
ysr@777 | 869 | // Abandon current iterations of concurrent marking and concurrent |
ysr@777 | 870 | // refinement, if any are in progress. |
ysr@777 | 871 | concurrent_mark()->abort(); |
ysr@777 | 872 | |
ysr@777 | 873 | // Make sure we'll choose a new allocation region afterwards. |
ysr@777 | 874 | abandon_cur_alloc_region(); |
ysr@777 | 875 | assert(_cur_alloc_region == NULL, "Invariant."); |
ysr@777 | 876 | g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); |
ysr@777 | 877 | tear_down_region_lists(); |
ysr@777 | 878 | set_used_regions_to_need_zero_fill(); |
ysr@777 | 879 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 880 | empty_young_list(); |
ysr@777 | 881 | g1_policy()->set_full_young_gcs(true); |
ysr@777 | 882 | } |
ysr@777 | 883 | |
ysr@777 | 884 | // Temporarily make reference _discovery_ single threaded (non-MT). |
ysr@777 | 885 | ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); |
ysr@777 | 886 | |
ysr@777 | 887 | // Temporarily make refs discovery atomic |
ysr@777 | 888 | ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); |
ysr@777 | 889 | |
ysr@777 | 890 | // Temporarily clear _is_alive_non_header |
ysr@777 | 891 | ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); |
ysr@777 | 892 | |
ysr@777 | 893 | ref_processor()->enable_discovery(); |
ysr@892 | 894 | ref_processor()->setup_policy(clear_all_soft_refs); |
ysr@777 | 895 | |
ysr@777 | 896 | // Do collection work |
ysr@777 | 897 | { |
ysr@777 | 898 | HandleMark hm; // Discard invalid handles created during gc |
ysr@777 | 899 | G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); |
ysr@777 | 900 | } |
ysr@777 | 901 | // Because freeing humongous regions may have added some unclean |
ysr@777 | 902 | // regions, it is necessary to tear down again before rebuilding. |
ysr@777 | 903 | tear_down_region_lists(); |
ysr@777 | 904 | rebuild_region_lists(); |
ysr@777 | 905 | |
ysr@777 | 906 | _summary_bytes_used = recalculate_used(); |
ysr@777 | 907 | |
ysr@777 | 908 | ref_processor()->enqueue_discovered_references(); |
ysr@777 | 909 | |
ysr@777 | 910 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 911 | |
ysr@777 | 912 | if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 913 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 914 | gclog_or_tty->print(" VerifyAfterGC:"); |
ysr@777 | 915 | Universe::verify(false); |
ysr@777 | 916 | } |
ysr@777 | 917 | NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); |
ysr@777 | 918 | |
ysr@777 | 919 | reset_gc_time_stamp(); |
ysr@777 | 920 | // Since everything potentially moved, we will clear all remembered |
ysr@777 | 921 | // sets, and clear all cards.  Later we will also dirty the cards in the used |
ysr@777 | 922 | // portion of the heap after the resizing (which could be a shrinking). |
ysr@777 | 923 | // We will also reset the GC time stamps of the regions. |
ysr@777 | 924 | PostMCRemSetClearClosure rs_clear(mr_bs()); |
ysr@777 | 925 | heap_region_iterate(&rs_clear); |
ysr@777 | 926 | |
ysr@777 | 927 | // Resize the heap if necessary. |
ysr@777 | 928 | resize_if_necessary_after_full_collection(full ? 0 : word_size); |
ysr@777 | 929 | |
ysr@777 | 930 | // Since everything potentially moved, we will clear all remembered |
ysr@777 | 931 | // sets, but also dirty all cards corresponding to used regions. |
ysr@777 | 932 | PostMCRemSetInvalidateClosure rs_invalidate(mr_bs()); |
ysr@777 | 933 | heap_region_iterate(&rs_invalidate); |
ysr@777 | 934 | if (_cg1r->use_cache()) { |
ysr@777 | 935 | _cg1r->clear_and_record_card_counts(); |
ysr@777 | 936 | _cg1r->clear_hot_cache(); |
ysr@777 | 937 | } |
ysr@777 | 938 | |
ysr@777 | 939 | if (PrintGC) { |
ysr@777 | 940 | print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); |
ysr@777 | 941 | } |
ysr@777 | 942 | |
ysr@777 | 943 | if (true) { // FIXME |
ysr@777 | 944 | // Ask the permanent generation to adjust size for full collections |
ysr@777 | 945 | perm()->compute_new_size(); |
ysr@777 | 946 | } |
ysr@777 | 947 | |
ysr@777 | 948 | double end = os::elapsedTime(); |
ysr@777 | 949 | GCOverheadReporter::recordSTWEnd(end); |
ysr@777 | 950 | g1_policy()->record_full_collection_end(); |
ysr@777 | 951 | |
jmasa@981 | 952 | #ifdef TRACESPINNING |
jmasa@981 | 953 | ParallelTaskTerminator::print_termination_counts(); |
jmasa@981 | 954 | #endif |
jmasa@981 | 955 | |
ysr@777 | 956 | gc_epilogue(true); |
ysr@777 | 957 | |
ysr@777 | 958 | // Abandon concurrent refinement. This must happen last: in the |
ysr@777 | 959 | // dirty-card logging system, some cards may be dirtied by weak-ref |
ysr@777 | 960 | // processing, and may be enqueued. But the whole card table is |
ysr@777 | 961 | // dirtied, so this should abandon those logs, and set "do_traversal" |
ysr@777 | 962 | // to true. |
ysr@777 | 963 | concurrent_g1_refine()->set_pya_restart(); |
ysr@777 | 964 | |
ysr@777 | 965 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 966 | } |
ysr@777 | 967 | |
ysr@777 | 968 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 969 | _young_list->reset_sampled_info(); |
ysr@777 | 970 | assert( check_young_list_empty(false, false), |
ysr@777 | 971 | "young list should be empty at this point"); |
ysr@777 | 972 | } |
ysr@777 | 973 | } |
ysr@777 | 974 | |
ysr@777 | 975 | void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { |
ysr@777 | 976 | do_collection(true, clear_all_soft_refs, 0); |
ysr@777 | 977 | } |
ysr@777 | 978 | |
ysr@777 | 979 | // This code is mostly copied from TenuredGeneration. |
ysr@777 | 980 | void |
ysr@777 | 981 | G1CollectedHeap:: |
ysr@777 | 982 | resize_if_necessary_after_full_collection(size_t word_size) { |
ysr@777 | 983 | assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); |
ysr@777 | 984 | |
ysr@777 | 985 | // Include the current allocation, if any, and bytes that will be |
ysr@777 | 986 | // pre-allocated to support collections, as "used". |
ysr@777 | 987 | const size_t used_after_gc = used(); |
ysr@777 | 988 | const size_t capacity_after_gc = capacity(); |
ysr@777 | 989 | const size_t free_after_gc = capacity_after_gc - used_after_gc; |
ysr@777 | 990 | |
ysr@777 | 991 | // We don't have floating point command-line arguments |
ysr@777 | 992 | const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; |
ysr@777 | 993 | const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
ysr@777 | 994 | const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; |
ysr@777 | 995 | const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
ysr@777 | 996 | |
ysr@777 | 997 | size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); |
ysr@777 | 998 | size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); |
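  // (Worked example with hypothetical values MinHeapFreeRatio == 40 and
  // MaxHeapFreeRatio == 70: maximum_used_percentage == 0.6 and
  // minimum_used_percentage == 0.3, so 300M used after GC gives a
  // minimum desired capacity of 500M and a maximum of 1000M.)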
ysr@777 | 999 | |
ysr@777 | 1000 | // Don't shrink to less than the initial size. |
ysr@777 | 1001 | minimum_desired_capacity = |
ysr@777 | 1002 | MAX2(minimum_desired_capacity, |
ysr@777 | 1003 | collector_policy()->initial_heap_byte_size()); |
ysr@777 | 1004 | maximum_desired_capacity = |
ysr@777 | 1005 | MAX2(maximum_desired_capacity, |
ysr@777 | 1006 | collector_policy()->initial_heap_byte_size()); |
ysr@777 | 1007 | |
ysr@777 | 1008 | // The asserts below hold: minimum_desired_capacity is at least used_after_gc, since maximum_used_percentage is at most 1.0. |
ysr@777 | 1009 | assert(used_after_gc <= minimum_desired_capacity, "sanity check"); |
ysr@777 | 1010 | assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); |
ysr@777 | 1011 | |
ysr@777 | 1012 | if (PrintGC && Verbose) { |
ysr@777 | 1013 | const double free_percentage = ((double)free_after_gc) / capacity(); |
ysr@777 | 1014 | gclog_or_tty->print_cr("Computing new size after full GC "); |
ysr@777 | 1015 | gclog_or_tty->print_cr(" " |
ysr@777 | 1016 | " minimum_free_percentage: %6.2f", |
ysr@777 | 1017 | minimum_free_percentage); |
ysr@777 | 1018 | gclog_or_tty->print_cr(" " |
ysr@777 | 1019 | " maximum_free_percentage: %6.2f", |
ysr@777 | 1020 | maximum_free_percentage); |
ysr@777 | 1021 | gclog_or_tty->print_cr(" " |
ysr@777 | 1022 | " capacity: %6.1fK" |
ysr@777 | 1023 | " minimum_desired_capacity: %6.1fK" |
ysr@777 | 1024 | " maximum_desired_capacity: %6.1fK", |
ysr@777 | 1025 | capacity() / (double) K, |
ysr@777 | 1026 | minimum_desired_capacity / (double) K, |
ysr@777 | 1027 | maximum_desired_capacity / (double) K); |
ysr@777 | 1028 | gclog_or_tty->print_cr(" " |
ysr@777 | 1029 | " free_after_gc : %6.1fK" |
ysr@777 | 1030 | " used_after_gc : %6.1fK", |
ysr@777 | 1031 | free_after_gc / (double) K, |
ysr@777 | 1032 | used_after_gc / (double) K); |
ysr@777 | 1033 | gclog_or_tty->print_cr(" " |
ysr@777 | 1034 | " free_percentage: %6.2f", |
ysr@777 | 1035 | free_percentage); |
ysr@777 | 1036 | } |
ysr@777 | 1037 | if (capacity() < minimum_desired_capacity) { |
ysr@777 | 1038 | // Don't expand unless it's significant |
ysr@777 | 1039 | size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; |
ysr@777 | 1040 | expand(expand_bytes); |
ysr@777 | 1041 | if (PrintGC && Verbose) { |
ysr@777 | 1042 | gclog_or_tty->print_cr(" expanding:" |
ysr@777 | 1043 | " minimum_desired_capacity: %6.1fK" |
ysr@777 | 1044 | " expand_bytes: %6.1fK", |
ysr@777 | 1045 | minimum_desired_capacity / (double) K, |
ysr@777 | 1046 | expand_bytes / (double) K); |
ysr@777 | 1047 | } |
ysr@777 | 1048 | |
ysr@777 | 1049 | // No expansion; now see if we want to shrink. |
ysr@777 | 1050 | } else if (capacity() > maximum_desired_capacity) { |
ysr@777 | 1051 | // Capacity too large, compute shrinking size |
ysr@777 | 1052 | size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; |
ysr@777 | 1053 | shrink(shrink_bytes); |
ysr@777 | 1054 | if (PrintGC && Verbose) { |
ysr@777 | 1055 | gclog_or_tty->print_cr(" " |
ysr@777 | 1056 | " shrinking:" |
ysr@777 | 1057 | " initSize: %.1fK" |
ysr@777 | 1058 | " maximum_desired_capacity: %.1fK", |
ysr@777 | 1059 | collector_policy()->initial_heap_byte_size() / (double) K, |
ysr@777 | 1060 | maximum_desired_capacity / (double) K); |
ysr@777 | 1061 | gclog_or_tty->print_cr(" " |
ysr@777 | 1062 | " shrink_bytes: %.1fK", |
ysr@777 | 1063 | shrink_bytes / (double) K); |
ysr@777 | 1064 | } |
ysr@777 | 1065 | } |
ysr@777 | 1066 | } |
ysr@777 | 1067 | |
ysr@777 | 1068 | |
ysr@777 | 1069 | HeapWord* |
ysr@777 | 1070 | G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { |
ysr@777 | 1071 | HeapWord* result = NULL; |
ysr@777 | 1072 | |
ysr@777 | 1073 | // In a G1 heap, we're supposed to keep allocation from failing by |
ysr@777 | 1074 | // incremental pauses. Therefore, at least for now, we'll favor |
ysr@777 | 1075 | // expansion over collection. (This might change in the future if we can |
ysr@777 | 1076 | // do something smarter than full collection to satisfy a failed alloc.) |
ysr@777 | 1077 | |
ysr@777 | 1078 | result = expand_and_allocate(word_size); |
ysr@777 | 1079 | if (result != NULL) { |
ysr@777 | 1080 | assert(is_in(result), "result not in heap"); |
ysr@777 | 1081 | return result; |
ysr@777 | 1082 | } |
ysr@777 | 1083 | |
ysr@777 | 1084 | // OK, I guess we have to try collection. |
ysr@777 | 1085 | |
ysr@777 | 1086 | do_collection(false, false, word_size); |
ysr@777 | 1087 | |
ysr@777 | 1088 | result = attempt_allocation(word_size, /*permit_collection_pause*/false); |
ysr@777 | 1089 | |
ysr@777 | 1090 | if (result != NULL) { |
ysr@777 | 1091 | assert(is_in(result), "result not in heap"); |
ysr@777 | 1092 | return result; |
ysr@777 | 1093 | } |
ysr@777 | 1094 | |
ysr@777 | 1095 | // Try collecting soft references. |
ysr@777 | 1096 | do_collection(false, true, word_size); |
ysr@777 | 1097 | result = attempt_allocation(word_size, /*permit_collection_pause*/false); |
ysr@777 | 1098 | if (result != NULL) { |
ysr@777 | 1099 | assert(is_in(result), "result not in heap"); |
ysr@777 | 1100 | return result; |
ysr@777 | 1101 | } |
ysr@777 | 1102 | |
ysr@777 | 1103 | // What else? We might try synchronous finalization later. If the total |
ysr@777 | 1104 | // space available is large enough for the allocation, then a more |
ysr@777 | 1105 | // complete compaction phase than we've tried so far might be |
ysr@777 | 1106 | // appropriate. |
ysr@777 | 1107 | return NULL; |
ysr@777 | 1108 | } |
ysr@777 | 1109 | |
ysr@777 | 1110 | // Attempt to expand the heap sufficiently |
ysr@777 | 1111 | // to support an allocation of the given "word_size".  If |
ysr@777 | 1112 | // successful, perform the allocation and return the address of the |
ysr@777 | 1113 | // allocated block, or else return "NULL". |
ysr@777 | 1114 | |
ysr@777 | 1115 | HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { |
ysr@777 | 1116 | size_t expand_bytes = word_size * HeapWordSize; |
ysr@777 | 1117 | if (expand_bytes < MinHeapDeltaBytes) { |
ysr@777 | 1118 | expand_bytes = MinHeapDeltaBytes; |
ysr@777 | 1119 | } |
ysr@777 | 1120 | expand(expand_bytes); |
ysr@777 | 1121 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 1122 | HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); |
ysr@777 | 1123 | return result; |
ysr@777 | 1124 | } |
ysr@777 | 1125 | |
ysr@777 | 1126 | size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { |
ysr@777 | 1127 | size_t pre_used = 0; |
ysr@777 | 1128 | size_t cleared_h_regions = 0; |
ysr@777 | 1129 | size_t freed_regions = 0; |
ysr@777 | 1130 | UncleanRegionList local_list; |
ysr@777 | 1131 | free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, |
ysr@777 | 1132 | freed_regions, &local_list); |
ysr@777 | 1133 | |
ysr@777 | 1134 | finish_free_region_work(pre_used, cleared_h_regions, freed_regions, |
ysr@777 | 1135 | &local_list); |
ysr@777 | 1136 | return pre_used; |
ysr@777 | 1137 | } |
ysr@777 | 1138 | |
ysr@777 | 1139 | void |
ysr@777 | 1140 | G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, |
ysr@777 | 1141 | size_t& pre_used, |
ysr@777 | 1142 | size_t& cleared_h, |
ysr@777 | 1143 | size_t& freed_regions, |
ysr@777 | 1144 | UncleanRegionList* list, |
ysr@777 | 1145 | bool par) { |
ysr@777 | 1146 | assert(!hr->continuesHumongous(), "should have filtered these out"); |
ysr@777 | 1147 | size_t res = 0; |
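  // "Totally empty" here means that all allocated bytes are garbage, i.e.
  // hr->garbage_bytes() == hr->used(); popular and young regions are never
  // freed by this path.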
ysr@777 | 1148 | if (!hr->popular() && hr->used() > 0 && hr->garbage_bytes() == hr->used()) { |
ysr@777 | 1149 | if (!hr->is_young()) { |
ysr@777 | 1150 | if (G1PolicyVerbose > 0) |
ysr@777 | 1151 | gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" |
ysr@777 | 1152 | " during cleanup", hr, hr->used()); |
ysr@777 | 1153 | free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); |
ysr@777 | 1154 | } |
ysr@777 | 1155 | } |
ysr@777 | 1156 | } |
ysr@777 | 1157 | |
ysr@777 | 1158 | // FIXME: both this and shrink could probably be more efficient by |
ysr@777 | 1159 | // doing one "VirtualSpace::expand_by" call rather than several. |
ysr@777 | 1160 | void G1CollectedHeap::expand(size_t expand_bytes) { |
ysr@777 | 1161 | size_t old_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1162 | // We expand by a minimum of 1K. |
ysr@777 | 1163 | expand_bytes = MAX2(expand_bytes, (size_t)K); |
ysr@777 | 1164 | size_t aligned_expand_bytes = |
ysr@777 | 1165 | ReservedSpace::page_align_size_up(expand_bytes); |
ysr@777 | 1166 | aligned_expand_bytes = align_size_up(aligned_expand_bytes, |
ysr@777 | 1167 | HeapRegion::GrainBytes); |
ysr@777 | 1168 | expand_bytes = aligned_expand_bytes; |
ysr@777 | 1169 | while (expand_bytes > 0) { |
ysr@777 | 1170 | HeapWord* base = (HeapWord*)_g1_storage.high(); |
ysr@777 | 1171 | // Commit more storage. |
ysr@777 | 1172 | bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); |
ysr@777 | 1173 | if (!successful) { |
ysr@777 | 1174 | expand_bytes = 0; |
ysr@777 | 1175 | } else { |
ysr@777 | 1176 | expand_bytes -= HeapRegion::GrainBytes; |
ysr@777 | 1177 | // Expand the committed region. |
ysr@777 | 1178 | HeapWord* high = (HeapWord*) _g1_storage.high(); |
ysr@777 | 1179 | _g1_committed.set_end(high); |
ysr@777 | 1180 | // Create a new HeapRegion. |
ysr@777 | 1181 | MemRegion mr(base, high); |
ysr@777 | 1182 | bool is_zeroed = !_g1_max_committed.contains(base); |
ysr@777 | 1183 | HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); |
ysr@777 | 1184 | |
ysr@777 | 1185 | // Now update max_committed if necessary. |
ysr@777 | 1186 | _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); |
ysr@777 | 1187 | |
ysr@777 | 1188 | // Add it to the HeapRegionSeq. |
ysr@777 | 1189 | _hrs->insert(hr); |
ysr@777 | 1190 | // Set the zero-fill state, according to whether it's already |
ysr@777 | 1191 | // zeroed. |
ysr@777 | 1192 | { |
ysr@777 | 1193 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 1194 | if (is_zeroed) { |
ysr@777 | 1195 | hr->set_zero_fill_complete(); |
ysr@777 | 1196 | put_free_region_on_list_locked(hr); |
ysr@777 | 1197 | } else { |
ysr@777 | 1198 | hr->set_zero_fill_needed(); |
ysr@777 | 1199 | put_region_on_unclean_list_locked(hr); |
ysr@777 | 1200 | } |
ysr@777 | 1201 | } |
ysr@777 | 1202 | _free_regions++; |
ysr@777 | 1203 | // And we used up an expansion region to create it. |
ysr@777 | 1204 | _expansion_regions--; |
ysr@777 | 1205 | // Tell the cardtable about it. |
ysr@777 | 1206 | Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
ysr@777 | 1207 | // And the offset table as well. |
ysr@777 | 1208 | _bot_shared->resize(_g1_committed.word_size()); |
ysr@777 | 1209 | } |
ysr@777 | 1210 | } |
ysr@777 | 1211 | if (Verbose && PrintGC) { |
ysr@777 | 1212 | size_t new_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1213 | gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", |
ysr@777 | 1214 | old_mem_size/K, aligned_expand_bytes/K, |
ysr@777 | 1215 | new_mem_size/K); |
ysr@777 | 1216 | } |
ysr@777 | 1217 | } |
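ysr@777 |      | // A minimal sketch of the FIXME above -- commit the whole aligned
ysr@777 |      | // range with a single expand_by call and then carve it into regions
ysr@777 |      | // (not compiled here; the per-region bookkeeping would stay as in the
ysr@777 |      | // loop above):
ysr@777 |      | //
ysr@777 |      | //   if (_g1_storage.expand_by(aligned_expand_bytes)) {
ysr@777 |      | //     for (size_t done = 0; done < aligned_expand_bytes;
ysr@777 |      | //          done += HeapRegion::GrainBytes) {
ysr@777 |      | //       ... create and register one HeapRegion per chunk ...
ysr@777 |      | //     }
ysr@777 |      | //   }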
ysr@777 | 1218 | |
ysr@777 | 1219 | void G1CollectedHeap::shrink_helper(size_t shrink_bytes) |
ysr@777 | 1220 | { |
ysr@777 | 1221 | size_t old_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1222 | size_t aligned_shrink_bytes = |
ysr@777 | 1223 | ReservedSpace::page_align_size_down(shrink_bytes); |
ysr@777 | 1224 | aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, |
ysr@777 | 1225 | HeapRegion::GrainBytes); |
ysr@777 | 1226 | size_t num_regions_deleted = 0; |
ysr@777 | 1227 | MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); |
ysr@777 | 1228 | |
ysr@777 | 1229 | assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); |
ysr@777 | 1230 | if (mr.byte_size() > 0) |
ysr@777 | 1231 | _g1_storage.shrink_by(mr.byte_size()); |
ysr@777 | 1232 | assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); |
ysr@777 | 1233 | |
ysr@777 | 1234 | _g1_committed.set_end(mr.start()); |
ysr@777 | 1235 | _free_regions -= num_regions_deleted; |
ysr@777 | 1236 | _expansion_regions += num_regions_deleted; |
ysr@777 | 1237 | |
ysr@777 | 1238 | // Tell the cardtable about it. |
ysr@777 | 1239 | Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
ysr@777 | 1240 | |
ysr@777 | 1241 | // And the offset table as well. |
ysr@777 | 1242 | _bot_shared->resize(_g1_committed.word_size()); |
ysr@777 | 1243 | |
ysr@777 | 1244 | HeapRegionRemSet::shrink_heap(n_regions()); |
ysr@777 | 1245 | |
ysr@777 | 1246 | if (Verbose && PrintGC) { |
ysr@777 | 1247 | size_t new_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1248 | gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", |
ysr@777 | 1249 | old_mem_size/K, aligned_shrink_bytes/K, |
ysr@777 | 1250 | new_mem_size/K); |
ysr@777 | 1251 | } |
ysr@777 | 1252 | } |
ysr@777 | 1253 | |
ysr@777 | 1254 | void G1CollectedHeap::shrink(size_t shrink_bytes) { |
ysr@777 | 1255 | release_gc_alloc_regions(); |
ysr@777 | 1256 | tear_down_region_lists(); // We will rebuild them in a moment. |
ysr@777 | 1257 | shrink_helper(shrink_bytes); |
ysr@777 | 1258 | rebuild_region_lists(); |
ysr@777 | 1259 | } |
ysr@777 | 1260 | |
ysr@777 | 1261 | // Public methods. |
ysr@777 | 1262 | |
ysr@777 | 1263 | #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away |
ysr@777 | 1264 | #pragma warning( disable:4355 ) // 'this' : used in base member initializer list |
ysr@777 | 1265 | #endif // _MSC_VER |
ysr@777 | 1266 | |
ysr@777 | 1267 | |
ysr@777 | 1268 | G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : |
ysr@777 | 1269 | SharedHeap(policy_), |
ysr@777 | 1270 | _g1_policy(policy_), |
ysr@777 | 1271 | _ref_processor(NULL), |
ysr@777 | 1272 | _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), |
ysr@777 | 1273 | _bot_shared(NULL), |
ysr@777 | 1274 | _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), |
ysr@777 | 1275 | _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), |
ysr@777 | 1276 | _evac_failure_scan_stack(NULL) , |
ysr@777 | 1277 | _mark_in_progress(false), |
ysr@777 | 1278 | _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), |
ysr@777 | 1279 | _cur_alloc_region(NULL), |
ysr@777 | 1280 | _refine_cte_cl(NULL), |
ysr@777 | 1281 | _free_region_list(NULL), _free_region_list_size(0), |
ysr@777 | 1282 | _free_regions(0), |
ysr@777 | 1283 | _popular_object_boundary(NULL), |
ysr@777 | 1284 | _cur_pop_hr_index(0), |
ysr@777 | 1285 | _popular_regions_to_be_evacuated(NULL), |
ysr@777 | 1286 | _pop_obj_rc_at_copy(), |
ysr@777 | 1287 | _full_collection(false), |
ysr@777 | 1288 | _unclean_region_list(), |
ysr@777 | 1289 | _unclean_regions_coming(false), |
ysr@777 | 1290 | _young_list(new YoungList(this)), |
ysr@777 | 1291 | _gc_time_stamp(0), |
tonyp@961 | 1292 | _surviving_young_words(NULL), |
tonyp@961 | 1293 | _in_cset_fast_test(NULL), |
tonyp@961 | 1294 | _in_cset_fast_test_base(NULL) |
ysr@777 | 1295 | { |
ysr@777 | 1296 | _g1h = this; // To catch bugs. |
ysr@777 | 1297 | if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { |
ysr@777 | 1298 | vm_exit_during_initialization("Failed necessary allocation."); |
ysr@777 | 1299 | } |
ysr@777 | 1300 | int n_queues = MAX2((int)ParallelGCThreads, 1); |
ysr@777 | 1301 | _task_queues = new RefToScanQueueSet(n_queues); |
ysr@777 | 1302 | |
ysr@777 | 1303 | int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); |
ysr@777 | 1304 | assert(n_rem_sets > 0, "Invariant."); |
ysr@777 | 1305 | |
ysr@777 | 1306 | HeapRegionRemSetIterator** iter_arr = |
ysr@777 | 1307 | NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); |
ysr@777 | 1308 | for (int i = 0; i < n_queues; i++) { |
ysr@777 | 1309 | iter_arr[i] = new HeapRegionRemSetIterator(); |
ysr@777 | 1310 | } |
ysr@777 | 1311 | _rem_set_iterator = iter_arr; |
ysr@777 | 1312 | |
ysr@777 | 1313 | for (int i = 0; i < n_queues; i++) { |
ysr@777 | 1314 | RefToScanQueue* q = new RefToScanQueue(); |
ysr@777 | 1315 | q->initialize(); |
ysr@777 | 1316 | _task_queues->register_queue(i, q); |
ysr@777 | 1317 | } |
ysr@777 | 1318 | |
ysr@777 | 1319 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 1320 | _gc_alloc_regions[ap] = NULL; |
ysr@777 | 1321 | _gc_alloc_region_counts[ap] = 0; |
ysr@777 | 1322 | } |
ysr@777 | 1323 | guarantee(_task_queues != NULL, "task_queues allocation failure."); |
ysr@777 | 1324 | } |
ysr@777 | 1325 | |
ysr@777 | 1326 | jint G1CollectedHeap::initialize() { |
ysr@777 | 1327 | os::enable_vtime(); |
ysr@777 | 1328 | |
ysr@777 | 1329 | // Necessary to satisfy locking discipline assertions. |
ysr@777 | 1330 | |
ysr@777 | 1331 | MutexLocker x(Heap_lock); |
ysr@777 | 1332 | |
ysr@777 | 1333 | // While there are no constraints in the GC code that HeapWordSize |
ysr@777 | 1334 | // be any particular value, there are multiple other areas in the |
ysr@777 | 1335 | // system which believe this to be true (e.g. oop->object_size in some |
ysr@777 | 1336 | // cases incorrectly returns the size in wordSize units rather than |
ysr@777 | 1337 | // HeapWordSize). |
ysr@777 | 1338 | guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); |
ysr@777 | 1339 | |
ysr@777 | 1340 | size_t init_byte_size = collector_policy()->initial_heap_byte_size(); |
ysr@777 | 1341 | size_t max_byte_size = collector_policy()->max_heap_byte_size(); |
ysr@777 | 1342 | |
ysr@777 | 1343 | // Ensure that the sizes are properly aligned. |
ysr@777 | 1344 | Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
ysr@777 | 1345 | Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
ysr@777 | 1346 | |
ysr@777 | 1347 | // We allocate this in any case, but it does no work if the command-line
ysr@777 | 1348 | // parameter is off.
ysr@777 | 1349 | _cg1r = new ConcurrentG1Refine(); |
ysr@777 | 1350 | |
ysr@777 | 1351 | // Reserve the maximum. |
ysr@777 | 1352 | PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); |
ysr@777 | 1353 | // Includes the perm-gen. |
ysr@777 | 1354 | ReservedSpace heap_rs(max_byte_size + pgs->max_size(), |
ysr@777 | 1355 | HeapRegion::GrainBytes, |
ysr@777 | 1356 | false /*ism*/); |
ysr@777 | 1357 | |
ysr@777 | 1358 | if (!heap_rs.is_reserved()) { |
ysr@777 | 1359 | vm_exit_during_initialization("Could not reserve enough space for object heap"); |
ysr@777 | 1360 | return JNI_ENOMEM; |
ysr@777 | 1361 | } |
ysr@777 | 1362 | |
ysr@777 | 1363 | // It is important to do this in a way such that concurrent readers can't |
ysr@777 | 1364 | // temporarily think something is in the heap. (I've actually seen this
ysr@777 | 1365 | // happen in asserts: DLD.) |
ysr@777 | 1366 | _reserved.set_word_size(0); |
ysr@777 | 1367 | _reserved.set_start((HeapWord*)heap_rs.base()); |
ysr@777 | 1368 | _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); |
ysr@777 | 1369 | |
ysr@777 | 1370 | _expansion_regions = max_byte_size/HeapRegion::GrainBytes; |
ysr@777 | 1371 | |
ysr@777 | 1372 | _num_humongous_regions = 0; |
ysr@777 | 1373 | |
ysr@777 | 1374 | // Create the gen rem set (and barrier set) for the entire reserved region. |
ysr@777 | 1375 | _rem_set = collector_policy()->create_rem_set(_reserved, 2); |
ysr@777 | 1376 | set_barrier_set(rem_set()->bs()); |
ysr@777 | 1377 | if (barrier_set()->is_a(BarrierSet::ModRef)) { |
ysr@777 | 1378 | _mr_bs = (ModRefBarrierSet*)_barrier_set; |
ysr@777 | 1379 | } else { |
ysr@777 | 1380 | vm_exit_during_initialization("G1 requires a mod ref bs."); |
ysr@777 | 1381 | return JNI_ENOMEM; |
ysr@777 | 1382 | } |
ysr@777 | 1383 | |
ysr@777 | 1384 | // Also create a G1 rem set. |
ysr@777 | 1385 | if (G1UseHRIntoRS) { |
ysr@777 | 1386 | if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
ysr@777 | 1387 | _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); |
ysr@777 | 1388 | } else { |
ysr@777 | 1389 | vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
ysr@777 | 1390 | return JNI_ENOMEM; |
ysr@777 | 1391 | } |
ysr@777 | 1392 | } else { |
ysr@777 | 1393 | _g1_rem_set = new StupidG1RemSet(this); |
ysr@777 | 1394 | } |
ysr@777 | 1395 | |
ysr@777 | 1396 | // Carve out the G1 part of the heap. |
ysr@777 | 1397 | |
ysr@777 | 1398 | ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); |
ysr@777 | 1399 | _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), |
ysr@777 | 1400 | g1_rs.size()/HeapWordSize); |
ysr@777 | 1401 | ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); |
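ysr@777 |      | // That is, the reservation is carved as (low addresses on the left):
ysr@777 |      | //
ysr@777 |      | //   +------------------ heap_rs ------------------+
ysr@777 |      | //   |   g1_rs (max_byte_size)   |   perm_gen_rs   |
ysr@777 |      | //   +---------------------------------------------+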
ysr@777 | 1402 | |
ysr@777 | 1403 | _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); |
ysr@777 | 1404 | |
ysr@777 | 1405 | _g1_storage.initialize(g1_rs, 0); |
ysr@777 | 1406 | _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); |
ysr@777 | 1407 | _g1_max_committed = _g1_committed; |
iveresov@828 | 1408 | _hrs = new HeapRegionSeq(_expansion_regions); |
ysr@777 | 1409 | guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
ysr@777 | 1410 | guarantee(_cur_alloc_region == NULL, "from constructor"); |
ysr@777 | 1411 | |
ysr@777 | 1412 | _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
ysr@777 | 1413 | heap_word_size(init_byte_size)); |
ysr@777 | 1414 | |
ysr@777 | 1415 | _g1h = this; |
ysr@777 | 1416 | |
ysr@777 | 1417 | // Create the ConcurrentMark data structure and thread. |
ysr@777 | 1418 | // (Must do this late, so that "max_regions" is defined.) |
ysr@777 | 1419 | _cm = new ConcurrentMark(heap_rs, (int) max_regions()); |
ysr@777 | 1420 | _cmThread = _cm->cmThread(); |
ysr@777 | 1421 | |
ysr@777 | 1422 | // ...and the concurrent zero-fill thread, if necessary. |
ysr@777 | 1423 | if (G1ConcZeroFill) { |
ysr@777 | 1424 | _czft = new ConcurrentZFThread(); |
ysr@777 | 1425 | } |
ysr@777 | 1426 | |
ysr@777 | 1427 | |
ysr@777 | 1428 | |
ysr@777 | 1429 | // Allocate the popular regions; take them off free lists. |
ysr@777 | 1430 | size_t pop_byte_size = G1NumPopularRegions * HeapRegion::GrainBytes; |
ysr@777 | 1431 | expand(pop_byte_size); |
ysr@777 | 1432 | _popular_object_boundary = |
ysr@777 | 1433 | _g1_reserved.start() + (G1NumPopularRegions * HeapRegion::GrainWords); |
ysr@777 | 1434 | for (int i = 0; i < G1NumPopularRegions; i++) { |
ysr@777 | 1435 | HeapRegion* hr = newAllocRegion(HeapRegion::GrainWords); |
ysr@777 | 1436 | // assert(hr != NULL && hr->bottom() < _popular_object_boundary, |
ysr@777 | 1437 | // "Should be enough, and all should be below boundary."); |
ysr@777 | 1438 | hr->set_popular(true); |
ysr@777 | 1439 | } |
ysr@777 | 1440 | assert(_cur_pop_hr_index == 0, "Start allocating at the first region."); |
ysr@777 | 1441 | |
ysr@777 | 1442 | // Initialize the from_card cache structure of HeapRegionRemSet. |
ysr@777 | 1443 | HeapRegionRemSet::init_heap(max_regions()); |
ysr@777 | 1444 | |
ysr@777 | 1445 | // Now expand into the rest of the initial heap size. |
ysr@777 | 1446 | expand(init_byte_size - pop_byte_size); |
ysr@777 | 1447 | |
ysr@777 | 1448 | // Perform any initialization actions delegated to the policy. |
ysr@777 | 1449 | g1_policy()->init(); |
ysr@777 | 1450 | |
ysr@777 | 1451 | g1_policy()->note_start_of_mark_thread(); |
ysr@777 | 1452 | |
ysr@777 | 1453 | _refine_cte_cl = |
ysr@777 | 1454 | new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), |
ysr@777 | 1455 | g1_rem_set(), |
ysr@777 | 1456 | concurrent_g1_refine()); |
ysr@777 | 1457 | JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); |
ysr@777 | 1458 | |
ysr@777 | 1459 | JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, |
ysr@777 | 1460 | SATB_Q_FL_lock, |
ysr@777 | 1461 | 0, |
ysr@777 | 1462 | Shared_SATB_Q_lock); |
ysr@777 | 1463 | if (G1RSBarrierUseQueue) { |
ysr@777 | 1464 | JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
ysr@777 | 1465 | DirtyCardQ_FL_lock, |
ysr@777 | 1466 | G1DirtyCardQueueMax, |
ysr@777 | 1467 | Shared_DirtyCardQ_lock); |
ysr@777 | 1468 | } |
ysr@777 | 1469 | // In case we're keeping closure specialization stats, initialize those |
ysr@777 | 1470 | // counts and that mechanism. |
ysr@777 | 1471 | SpecializationStats::clear(); |
ysr@777 | 1472 | |
ysr@777 | 1473 | _gc_alloc_region_list = NULL; |
ysr@777 | 1474 | |
ysr@777 | 1475 | // Do later initialization work for concurrent refinement. |
ysr@777 | 1476 | _cg1r->init(); |
ysr@777 | 1477 | |
ysr@777 | 1478 | const char* group_names[] = { "CR", "ZF", "CM", "CL" }; |
ysr@777 | 1479 | GCOverheadReporter::initGCOverheadReporter(4, group_names); |
ysr@777 | 1480 | |
ysr@777 | 1481 | return JNI_OK; |
ysr@777 | 1482 | } |
ysr@777 | 1483 | |
ysr@777 | 1484 | void G1CollectedHeap::ref_processing_init() { |
ysr@777 | 1485 | SharedHeap::ref_processing_init(); |
ysr@777 | 1486 | MemRegion mr = reserved_region(); |
ysr@777 | 1487 | _ref_processor = ReferenceProcessor::create_ref_processor( |
ysr@777 | 1488 | mr, // span |
ysr@777 | 1489 | false, // Reference discovery is not atomic |
ysr@777 | 1490 | // (though it shouldn't matter here.) |
ysr@777 | 1491 | true, // mt_discovery |
ysr@777 | 1492 | NULL, // is alive closure: need to fill this in for efficiency |
ysr@777 | 1493 | ParallelGCThreads, |
ysr@777 | 1494 | ParallelRefProcEnabled, |
ysr@777 | 1495 | true); // Setting next fields of discovered |
ysr@777 | 1496 | // lists requires a barrier. |
ysr@777 | 1497 | } |
ysr@777 | 1498 | |
ysr@777 | 1499 | size_t G1CollectedHeap::capacity() const { |
ysr@777 | 1500 | return _g1_committed.byte_size(); |
ysr@777 | 1501 | } |
ysr@777 | 1502 | |
ysr@777 | 1503 | void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, |
ysr@777 | 1504 | int worker_i) { |
ysr@777 | 1505 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 1506 | int n_completed_buffers = 0; |
ysr@777 | 1507 | while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { |
ysr@777 | 1508 | n_completed_buffers++; |
ysr@777 | 1509 | } |
ysr@777 | 1510 | g1_policy()->record_update_rs_processed_buffers(worker_i, |
ysr@777 | 1511 | (double) n_completed_buffers); |
ysr@777 | 1512 | dcqs.clear_n_completed_buffers(); |
ysr@777 | 1513 | // Finish up the queue... |
ysr@777 | 1514 | if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i, |
ysr@777 | 1515 | g1_rem_set()); |
ysr@777 | 1516 | assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); |
ysr@777 | 1517 | } |
ysr@777 | 1518 | |
ysr@777 | 1519 | |
ysr@777 | 1520 | // Computes the sum of the storage used by the various regions. |
ysr@777 | 1521 | |
ysr@777 | 1522 | size_t G1CollectedHeap::used() const { |
ysr@777 | 1523 | assert(Heap_lock->owner() != NULL, |
ysr@777 | 1524 | "Should be owned on this thread's behalf."); |
ysr@777 | 1525 | size_t result = _summary_bytes_used; |
ysr@777 | 1526 | if (_cur_alloc_region != NULL) |
ysr@777 | 1527 | result += _cur_alloc_region->used(); |
ysr@777 | 1528 | return result; |
ysr@777 | 1529 | } |
ysr@777 | 1530 | |
ysr@777 | 1531 | class SumUsedClosure: public HeapRegionClosure { |
ysr@777 | 1532 | size_t _used; |
ysr@777 | 1533 | public: |
ysr@777 | 1534 | SumUsedClosure() : _used(0) {} |
ysr@777 | 1535 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1536 | if (!r->continuesHumongous()) { |
ysr@777 | 1537 | _used += r->used(); |
ysr@777 | 1538 | } |
ysr@777 | 1539 | return false; |
ysr@777 | 1540 | } |
ysr@777 | 1541 | size_t result() { return _used; } |
ysr@777 | 1542 | }; |
ysr@777 | 1543 | |
ysr@777 | 1544 | size_t G1CollectedHeap::recalculate_used() const { |
ysr@777 | 1545 | SumUsedClosure blk; |
ysr@777 | 1546 | _hrs->iterate(&blk); |
ysr@777 | 1547 | return blk.result(); |
ysr@777 | 1548 | } |
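ysr@777 |      | // Note: used() above reads the cached _summary_bytes_used (plus the
ysr@777 |      | // current alloc region), while recalculate_used() walks every region;
ysr@777 |      | // the latter is the slow, authoritative count, presumably intended for
ysr@777 |      | // checking the cached value.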
ysr@777 | 1549 | |
ysr@777 | 1550 | #ifndef PRODUCT |
ysr@777 | 1551 | class SumUsedRegionsClosure: public HeapRegionClosure { |
ysr@777 | 1552 | size_t _num; |
ysr@777 | 1553 | public: |
ysr@777 | 1554 | // _num starts at G1NumPopularRegions to account for the popular regions
ysr@777 | 1555 | SumUsedRegionsClosure() : _num(G1NumPopularRegions) {} |
ysr@777 | 1556 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1557 | if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { |
ysr@777 | 1558 | _num += 1; |
ysr@777 | 1559 | } |
ysr@777 | 1560 | return false; |
ysr@777 | 1561 | } |
ysr@777 | 1562 | size_t result() { return _num; } |
ysr@777 | 1563 | }; |
ysr@777 | 1564 | |
ysr@777 | 1565 | size_t G1CollectedHeap::recalculate_used_regions() const { |
ysr@777 | 1566 | SumUsedRegionsClosure blk; |
ysr@777 | 1567 | _hrs->iterate(&blk); |
ysr@777 | 1568 | return blk.result(); |
ysr@777 | 1569 | } |
ysr@777 | 1570 | #endif // PRODUCT |
ysr@777 | 1571 | |
ysr@777 | 1572 | size_t G1CollectedHeap::unsafe_max_alloc() { |
ysr@777 | 1573 | if (_free_regions > 0) return HeapRegion::GrainBytes; |
ysr@777 | 1574 | // otherwise, is there space in the current allocation region? |
ysr@777 | 1575 | |
ysr@777 | 1576 | // We need to store the current allocation region in a local variable |
ysr@777 | 1577 | // here. The problem is that this method doesn't take any locks and |
ysr@777 | 1578 | // there may be other threads which overwrite the current allocation |
ysr@777 | 1579 | // region field. attempt_allocation(), for example, sets it to NULL |
ysr@777 | 1580 | // and this can happen *after* the NULL check here but before the call |
ysr@777 | 1581 | // to free(), resulting in a SIGSEGV. Note that this doesn't appear |
ysr@777 | 1582 | // to be a problem in the optimized build, since the two loads of the |
ysr@777 | 1583 | // current allocation region field are optimized away. |
ysr@777 | 1584 | HeapRegion* car = _cur_alloc_region; |
ysr@777 | 1585 | |
ysr@777 | 1586 | // FIXME: should iterate over all regions? |
ysr@777 | 1587 | if (car == NULL) { |
ysr@777 | 1588 | return 0; |
ysr@777 | 1589 | } |
ysr@777 | 1590 | return car->free(); |
ysr@777 | 1591 | } |
ysr@777 | 1592 | |
ysr@777 | 1593 | void G1CollectedHeap::collect(GCCause::Cause cause) { |
ysr@777 | 1594 | // The caller doesn't have the Heap_lock |
ysr@777 | 1595 | assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
ysr@777 | 1596 | MutexLocker ml(Heap_lock); |
ysr@777 | 1597 | collect_locked(cause); |
ysr@777 | 1598 | } |
ysr@777 | 1599 | |
ysr@777 | 1600 | void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
ysr@777 | 1601 | assert(Thread::current()->is_VM_thread(), "Precondition#1"); |
ysr@777 | 1602 | assert(Heap_lock->is_locked(), "Precondition#2"); |
ysr@777 | 1603 | GCCauseSetter gcs(this, cause); |
ysr@777 | 1604 | switch (cause) { |
ysr@777 | 1605 | case GCCause::_heap_inspection: |
ysr@777 | 1606 | case GCCause::_heap_dump: { |
ysr@777 | 1607 | HandleMark hm; |
ysr@777 | 1608 | do_full_collection(false); // don't clear all soft refs |
ysr@777 | 1609 | break; |
ysr@777 | 1610 | } |
ysr@777 | 1611 | default: // XXX FIX ME |
ysr@777 | 1612 | ShouldNotReachHere(); // Unexpected use of this function |
ysr@777 | 1613 | } |
ysr@777 | 1614 | } |
ysr@777 | 1615 | |
ysr@777 | 1616 | |
ysr@777 | 1617 | void G1CollectedHeap::collect_locked(GCCause::Cause cause) { |
ysr@777 | 1618 | // Don't want to do a GC until cleanup is completed. |
ysr@777 | 1619 | wait_for_cleanup_complete(); |
ysr@777 | 1620 | |
ysr@777 | 1621 | // Read the GC count while holding the Heap_lock |
ysr@777 | 1622 | int gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@777 | 1623 | { |
ysr@777 | 1624 | MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back |
ysr@777 | 1625 | VM_G1CollectFull op(gc_count_before, cause); |
ysr@777 | 1626 | VMThread::execute(&op); |
ysr@777 | 1627 | } |
ysr@777 | 1628 | } |
ysr@777 | 1629 | |
ysr@777 | 1630 | bool G1CollectedHeap::is_in(const void* p) const { |
ysr@777 | 1631 | if (_g1_committed.contains(p)) { |
ysr@777 | 1632 | HeapRegion* hr = _hrs->addr_to_region(p); |
ysr@777 | 1633 | return hr->is_in(p); |
ysr@777 | 1634 | } else { |
ysr@777 | 1635 | return _perm_gen->as_gen()->is_in(p); |
ysr@777 | 1636 | } |
ysr@777 | 1637 | } |
ysr@777 | 1638 | |
ysr@777 | 1639 | // Iteration functions. |
ysr@777 | 1640 | |
ysr@777 | 1641 | // Iterates an OopClosure over all ref-containing fields of objects |
ysr@777 | 1642 | // within a HeapRegion. |
ysr@777 | 1643 | |
ysr@777 | 1644 | class IterateOopClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 1645 | MemRegion _mr; |
ysr@777 | 1646 | OopClosure* _cl; |
ysr@777 | 1647 | public: |
ysr@777 | 1648 | IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) |
ysr@777 | 1649 | : _mr(mr), _cl(cl) {} |
ysr@777 | 1650 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1651 | if (! r->continuesHumongous()) { |
ysr@777 | 1652 | r->oop_iterate(_cl); |
ysr@777 | 1653 | } |
ysr@777 | 1654 | return false; |
ysr@777 | 1655 | } |
ysr@777 | 1656 | }; |
ysr@777 | 1657 | |
ysr@777 | 1658 | void G1CollectedHeap::oop_iterate(OopClosure* cl) { |
ysr@777 | 1659 | IterateOopClosureRegionClosure blk(_g1_committed, cl); |
ysr@777 | 1660 | _hrs->iterate(&blk); |
ysr@777 | 1661 | } |
ysr@777 | 1662 | |
ysr@777 | 1663 | void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { |
ysr@777 | 1664 | IterateOopClosureRegionClosure blk(mr, cl); |
ysr@777 | 1665 | _hrs->iterate(&blk); |
ysr@777 | 1666 | } |
ysr@777 | 1667 | |
ysr@777 | 1668 | // Iterates an ObjectClosure over all objects within a HeapRegion. |
ysr@777 | 1669 | |
ysr@777 | 1670 | class IterateObjectClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 1671 | ObjectClosure* _cl; |
ysr@777 | 1672 | public: |
ysr@777 | 1673 | IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} |
ysr@777 | 1674 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1675 | if (! r->continuesHumongous()) { |
ysr@777 | 1676 | r->object_iterate(_cl); |
ysr@777 | 1677 | } |
ysr@777 | 1678 | return false; |
ysr@777 | 1679 | } |
ysr@777 | 1680 | }; |
ysr@777 | 1681 | |
ysr@777 | 1682 | void G1CollectedHeap::object_iterate(ObjectClosure* cl) { |
ysr@777 | 1683 | IterateObjectClosureRegionClosure blk(cl); |
ysr@777 | 1684 | _hrs->iterate(&blk); |
ysr@777 | 1685 | } |
ysr@777 | 1686 | |
ysr@777 | 1687 | void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { |
ysr@777 | 1688 | // FIXME: is this right? |
ysr@777 | 1689 | guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); |
ysr@777 | 1690 | } |
ysr@777 | 1691 | |
ysr@777 | 1692 | // Calls a SpaceClosure on a HeapRegion. |
ysr@777 | 1693 | |
ysr@777 | 1694 | class SpaceClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 1695 | SpaceClosure* _cl; |
ysr@777 | 1696 | public: |
ysr@777 | 1697 | SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} |
ysr@777 | 1698 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1699 | _cl->do_space(r); |
ysr@777 | 1700 | return false; |
ysr@777 | 1701 | } |
ysr@777 | 1702 | }; |
ysr@777 | 1703 | |
ysr@777 | 1704 | void G1CollectedHeap::space_iterate(SpaceClosure* cl) { |
ysr@777 | 1705 | SpaceClosureRegionClosure blk(cl); |
ysr@777 | 1706 | _hrs->iterate(&blk); |
ysr@777 | 1707 | } |
ysr@777 | 1708 | |
ysr@777 | 1709 | void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { |
ysr@777 | 1710 | _hrs->iterate(cl); |
ysr@777 | 1711 | } |
ysr@777 | 1712 | |
ysr@777 | 1713 | void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, |
ysr@777 | 1714 | HeapRegionClosure* cl) { |
ysr@777 | 1715 | _hrs->iterate_from(r, cl); |
ysr@777 | 1716 | } |
ysr@777 | 1717 | |
ysr@777 | 1718 | void |
ysr@777 | 1719 | G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { |
ysr@777 | 1720 | _hrs->iterate_from(idx, cl); |
ysr@777 | 1721 | } |
ysr@777 | 1722 | |
ysr@777 | 1723 | HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } |
ysr@777 | 1724 | |
ysr@777 | 1725 | void |
ysr@777 | 1726 | G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
ysr@777 | 1727 | int worker, |
ysr@777 | 1728 | jint claim_value) { |
tonyp@790 | 1729 | const size_t regions = n_regions(); |
tonyp@790 | 1730 | const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); |
tonyp@790 | 1731 | // try to spread out the starting points of the workers |
tonyp@790 | 1732 | const size_t start_index = regions / worker_num * (size_t) worker; |
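tonyp@790 |      | // For example, with 100 regions and 4 workers the starting points are
tonyp@790 |      | // regions 0, 25, 50 and 75; each worker still visits all 100 regions,
tonyp@790 |      | // wrapping around via the modulo below, but contention on the claim
tonyp@790 |      | // machinery is spread out.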
tonyp@790 | 1733 | |
tonyp@790 | 1734 | // each worker will actually look at all regions |
tonyp@790 | 1735 | for (size_t count = 0; count < regions; ++count) { |
tonyp@790 | 1736 | const size_t index = (start_index + count) % regions; |
tonyp@790 | 1737 | assert(0 <= index && index < regions, "sanity"); |
tonyp@790 | 1738 | HeapRegion* r = region_at(index); |
tonyp@790 | 1739 | // we'll ignore "continues humongous" regions (we'll process them |
tonyp@790 | 1740 | // when we come across their corresponding "start humongous" |
tonyp@790 | 1741 | // region) and regions already claimed |
tonyp@790 | 1742 | if (r->claim_value() == claim_value || r->continuesHumongous()) { |
tonyp@790 | 1743 | continue; |
tonyp@790 | 1744 | } |
tonyp@790 | 1745 | // OK, try to claim it |
ysr@777 | 1746 | if (r->claimHeapRegion(claim_value)) { |
tonyp@790 | 1747 | // success! |
tonyp@790 | 1748 | assert(!r->continuesHumongous(), "sanity"); |
tonyp@790 | 1749 | if (r->startsHumongous()) { |
tonyp@790 | 1750 | // If the region is "starts humongous" we'll iterate over its |
tonyp@790 | 1751 | // "continues humongous" first; in fact we'll do them |
tonyp@790 | 1752 | // first. The order is important. In one case, calling the
tonyp@790 | 1753 | // closure on the "starts humongous" region might de-allocate |
tonyp@790 | 1754 | // and clear all its "continues humongous" regions and, as a |
tonyp@790 | 1755 | // result, we might end up processing them twice. So, we'll do |
tonyp@790 | 1756 | // them first (notice: most closures will ignore them anyway) and |
tonyp@790 | 1757 | // then we'll do the "starts humongous" region. |
tonyp@790 | 1758 | for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { |
tonyp@790 | 1759 | HeapRegion* chr = region_at(ch_index); |
tonyp@790 | 1760 | |
tonyp@790 | 1761 | // if the region has already been claimed or it's not |
tonyp@790 | 1762 | // "continues humongous" we're done |
tonyp@790 | 1763 | if (chr->claim_value() == claim_value || |
tonyp@790 | 1764 | !chr->continuesHumongous()) { |
tonyp@790 | 1765 | break; |
tonyp@790 | 1766 | } |
tonyp@790 | 1767 | |
tonyp@790 | 1768 | // No one should have claimed it directly. We can assert
tonyp@790 | 1769 | // this, given that we claimed its "starts humongous" region.
tonyp@790 | 1770 | assert(chr->claim_value() != claim_value, "sanity"); |
tonyp@790 | 1771 | assert(chr->humongous_start_region() == r, "sanity"); |
tonyp@790 | 1772 | |
tonyp@790 | 1773 | if (chr->claimHeapRegion(claim_value)) { |
tonyp@790 | 1774 | // we should always be able to claim it; no one else should
tonyp@790 | 1775 | // be trying to claim this region |
tonyp@790 | 1776 | |
tonyp@790 | 1777 | bool res2 = cl->doHeapRegion(chr); |
tonyp@790 | 1778 | assert(!res2, "Should not abort"); |
tonyp@790 | 1779 | |
tonyp@790 | 1780 | // Right now, this holds (i.e., no closure that actually |
tonyp@790 | 1781 | // does something with "continues humongous" regions |
tonyp@790 | 1782 | // clears them). We might have to weaken it in the future, |
tonyp@790 | 1783 | // but let's leave these two asserts here for extra safety. |
tonyp@790 | 1784 | assert(chr->continuesHumongous(), "should still be the case"); |
tonyp@790 | 1785 | assert(chr->humongous_start_region() == r, "sanity"); |
tonyp@790 | 1786 | } else { |
tonyp@790 | 1787 | guarantee(false, "we should not reach here"); |
tonyp@790 | 1788 | } |
tonyp@790 | 1789 | } |
tonyp@790 | 1790 | } |
tonyp@790 | 1791 | |
tonyp@790 | 1792 | assert(!r->continuesHumongous(), "sanity"); |
tonyp@790 | 1793 | bool res = cl->doHeapRegion(r); |
tonyp@790 | 1794 | assert(!res, "Should not abort"); |
tonyp@790 | 1795 | } |
tonyp@790 | 1796 | } |
tonyp@790 | 1797 | } |
tonyp@790 | 1798 | |
tonyp@825 | 1799 | class ResetClaimValuesClosure: public HeapRegionClosure { |
tonyp@825 | 1800 | public: |
tonyp@825 | 1801 | bool doHeapRegion(HeapRegion* r) { |
tonyp@825 | 1802 | r->set_claim_value(HeapRegion::InitialClaimValue); |
tonyp@825 | 1803 | return false; |
tonyp@825 | 1804 | } |
tonyp@825 | 1805 | }; |
tonyp@825 | 1806 | |
tonyp@825 | 1807 | void |
tonyp@825 | 1808 | G1CollectedHeap::reset_heap_region_claim_values() { |
tonyp@825 | 1809 | ResetClaimValuesClosure blk; |
tonyp@825 | 1810 | heap_region_iterate(&blk); |
tonyp@825 | 1811 | } |
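tonyp@825 |      | // A sketch of the typical claim-value protocol (verify() below follows
tonyp@825 |      | // exactly this pattern):
tonyp@825 |      | //
tonyp@825 |      | //   assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), ...);
tonyp@825 |      | //   // run a parallel task whose workers call
tonyp@825 |      | //   // heap_region_par_iterate_chunked(&cl, worker_i, SomeClaimValue)
tonyp@825 |      | //   assert(check_heap_region_claim_values(SomeClaimValue), ...);
tonyp@825 |      | //   reset_heap_region_claim_values();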
tonyp@825 | 1812 | |
tonyp@790 | 1813 | #ifdef ASSERT |
tonyp@790 | 1814 | // This checks whether all regions in the heap have the correct claim |
tonyp@790 | 1815 | // value. I have also piggy-backed on it a check to ensure that the
tonyp@790 | 1816 | // humongous_start_region() information on "continues humongous" |
tonyp@790 | 1817 | // regions is correct. |
tonyp@790 | 1818 | |
tonyp@790 | 1819 | class CheckClaimValuesClosure : public HeapRegionClosure { |
tonyp@790 | 1820 | private: |
tonyp@790 | 1821 | jint _claim_value; |
tonyp@790 | 1822 | size_t _failures; |
tonyp@790 | 1823 | HeapRegion* _sh_region; |
tonyp@790 | 1824 | public: |
tonyp@790 | 1825 | CheckClaimValuesClosure(jint claim_value) : |
tonyp@790 | 1826 | _claim_value(claim_value), _failures(0), _sh_region(NULL) { } |
tonyp@790 | 1827 | bool doHeapRegion(HeapRegion* r) { |
tonyp@790 | 1828 | if (r->claim_value() != _claim_value) { |
tonyp@790 | 1829 | gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " |
tonyp@790 | 1830 | "claim value = %d, should be %d", |
tonyp@790 | 1831 | r->bottom(), r->end(), r->claim_value(), |
tonyp@790 | 1832 | _claim_value); |
tonyp@790 | 1833 | ++_failures; |
tonyp@790 | 1834 | } |
tonyp@790 | 1835 | if (!r->isHumongous()) { |
tonyp@790 | 1836 | _sh_region = NULL; |
tonyp@790 | 1837 | } else if (r->startsHumongous()) { |
tonyp@790 | 1838 | _sh_region = r; |
tonyp@790 | 1839 | } else if (r->continuesHumongous()) { |
tonyp@790 | 1840 | if (r->humongous_start_region() != _sh_region) { |
tonyp@790 | 1841 | gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " |
tonyp@790 | 1842 | "HS = "PTR_FORMAT", should be "PTR_FORMAT, |
tonyp@790 | 1843 | r->bottom(), r->end(), |
tonyp@790 | 1844 | r->humongous_start_region(), |
tonyp@790 | 1845 | _sh_region); |
tonyp@790 | 1846 | ++_failures; |
ysr@777 | 1847 | } |
ysr@777 | 1848 | } |
tonyp@790 | 1849 | return false; |
tonyp@790 | 1850 | } |
tonyp@790 | 1851 | size_t failures() { |
tonyp@790 | 1852 | return _failures; |
tonyp@790 | 1853 | } |
tonyp@790 | 1854 | }; |
tonyp@790 | 1855 | |
tonyp@790 | 1856 | bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { |
tonyp@790 | 1857 | CheckClaimValuesClosure cl(claim_value); |
tonyp@790 | 1858 | heap_region_iterate(&cl); |
tonyp@790 | 1859 | return cl.failures() == 0; |
tonyp@790 | 1860 | } |
tonyp@790 | 1861 | #endif // ASSERT |
ysr@777 | 1862 | |
ysr@777 | 1863 | void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { |
ysr@777 | 1864 | HeapRegion* r = g1_policy()->collection_set(); |
ysr@777 | 1865 | while (r != NULL) { |
ysr@777 | 1866 | HeapRegion* next = r->next_in_collection_set(); |
ysr@777 | 1867 | if (cl->doHeapRegion(r)) { |
ysr@777 | 1868 | cl->incomplete(); |
ysr@777 | 1869 | return; |
ysr@777 | 1870 | } |
ysr@777 | 1871 | r = next; |
ysr@777 | 1872 | } |
ysr@777 | 1873 | } |
ysr@777 | 1874 | |
ysr@777 | 1875 | void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, |
ysr@777 | 1876 | HeapRegionClosure *cl) { |
ysr@777 | 1877 | assert(r->in_collection_set(), |
ysr@777 | 1878 | "Start region must be a member of the collection set."); |
ysr@777 | 1879 | HeapRegion* cur = r; |
ysr@777 | 1880 | while (cur != NULL) { |
ysr@777 | 1881 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 1882 | if (cl->doHeapRegion(cur) && false) { |
ysr@777 | 1883 | cl->incomplete(); |
ysr@777 | 1884 | return; |
ysr@777 | 1885 | } |
ysr@777 | 1886 | cur = next; |
ysr@777 | 1887 | } |
ysr@777 | 1888 | cur = g1_policy()->collection_set(); |
ysr@777 | 1889 | while (cur != r) { |
ysr@777 | 1890 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 1891 | if (cl->doHeapRegion(cur) && false) { |
ysr@777 | 1892 | cl->incomplete(); |
ysr@777 | 1893 | return; |
ysr@777 | 1894 | } |
ysr@777 | 1895 | cur = next; |
ysr@777 | 1896 | } |
ysr@777 | 1897 | } |
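ysr@777 |      | // For example, if the collection set is A -> B -> C -> D and r is C,
ysr@777 |      | // the closure is applied to C and D first and then wraps to A and B,
ysr@777 |      | // so every region is visited exactly once starting from r.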
ysr@777 | 1898 | |
ysr@777 | 1899 | CompactibleSpace* G1CollectedHeap::first_compactible_space() { |
ysr@777 | 1900 | return _hrs->length() > 0 ? _hrs->at(0) : NULL; |
ysr@777 | 1901 | } |
ysr@777 | 1902 | |
ysr@777 | 1903 | |
ysr@777 | 1904 | Space* G1CollectedHeap::space_containing(const void* addr) const { |
ysr@777 | 1905 | Space* res = heap_region_containing(addr); |
ysr@777 | 1906 | if (res == NULL) |
ysr@777 | 1907 | res = perm_gen()->space_containing(addr); |
ysr@777 | 1908 | return res; |
ysr@777 | 1909 | } |
ysr@777 | 1910 | |
ysr@777 | 1911 | HeapWord* G1CollectedHeap::block_start(const void* addr) const { |
ysr@777 | 1912 | Space* sp = space_containing(addr); |
ysr@777 | 1913 | if (sp != NULL) { |
ysr@777 | 1914 | return sp->block_start(addr); |
ysr@777 | 1915 | } |
ysr@777 | 1916 | return NULL; |
ysr@777 | 1917 | } |
ysr@777 | 1918 | |
ysr@777 | 1919 | size_t G1CollectedHeap::block_size(const HeapWord* addr) const { |
ysr@777 | 1920 | Space* sp = space_containing(addr); |
ysr@777 | 1921 | assert(sp != NULL, "block_size of address outside of heap"); |
ysr@777 | 1922 | return sp->block_size(addr); |
ysr@777 | 1923 | } |
ysr@777 | 1924 | |
ysr@777 | 1925 | bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { |
ysr@777 | 1926 | Space* sp = space_containing(addr); |
ysr@777 | 1927 | return sp->block_is_obj(addr); |
ysr@777 | 1928 | } |
ysr@777 | 1929 | |
ysr@777 | 1930 | bool G1CollectedHeap::supports_tlab_allocation() const { |
ysr@777 | 1931 | return true; |
ysr@777 | 1932 | } |
ysr@777 | 1933 | |
ysr@777 | 1934 | size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { |
ysr@777 | 1935 | return HeapRegion::GrainBytes; |
ysr@777 | 1936 | } |
ysr@777 | 1937 | |
ysr@777 | 1938 | size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { |
ysr@777 | 1939 | // Return the remaining space in the cur alloc region, but not less than |
ysr@777 | 1940 | // the min TLAB size. |
ysr@777 | 1941 | // Also, no more than half the region size, since we can't allow TLABs to
ysr@777 | 1942 | // grow big enough to accommodate humongous objects.
ysr@777 | 1943 | |
ysr@777 | 1944 | // We need to store it locally, since it might change between when we
ysr@777 | 1945 | // test for NULL and when we use it later. |
ysr@777 | 1946 | ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
ysr@777 | 1947 | if (cur_alloc_space == NULL) { |
ysr@777 | 1948 | return HeapRegion::GrainBytes/2; |
ysr@777 | 1949 | } else { |
ysr@777 | 1950 | return MAX2(MIN2(cur_alloc_space->free(), |
ysr@777 | 1951 | (size_t)(HeapRegion::GrainBytes/2)), |
ysr@777 | 1952 | (size_t)MinTLABSize); |
ysr@777 | 1953 | } |
ysr@777 | 1954 | } |
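ysr@777 |      | // A worked example with illustrative numbers (1M regions, 800K free in
ysr@777 |      | // the current alloc region): MIN2(800K, 512K) caps the answer at half
ysr@777 |      | // a region, 512K, and the MAX2 with MinTLABSize only kicks in when the
ysr@777 |      | // region is nearly full.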
ysr@777 | 1955 | |
ysr@777 | 1956 | HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { |
ysr@777 | 1957 | bool dummy; |
ysr@777 | 1958 | return G1CollectedHeap::mem_allocate(size, false, true, &dummy); |
ysr@777 | 1959 | } |
ysr@777 | 1960 | |
ysr@777 | 1961 | bool G1CollectedHeap::allocs_are_zero_filled() { |
ysr@777 | 1962 | return false; |
ysr@777 | 1963 | } |
ysr@777 | 1964 | |
ysr@777 | 1965 | size_t G1CollectedHeap::large_typearray_limit() { |
ysr@777 | 1966 | // FIXME |
ysr@777 | 1967 | return HeapRegion::GrainBytes/HeapWordSize; |
ysr@777 | 1968 | } |
ysr@777 | 1969 | |
ysr@777 | 1970 | size_t G1CollectedHeap::max_capacity() const { |
ysr@777 | 1971 | return _g1_committed.byte_size(); |
ysr@777 | 1972 | } |
ysr@777 | 1973 | |
ysr@777 | 1974 | jlong G1CollectedHeap::millis_since_last_gc() { |
ysr@777 | 1975 | // assert(false, "NYI"); |
ysr@777 | 1976 | return 0; |
ysr@777 | 1977 | } |
ysr@777 | 1978 | |
ysr@777 | 1979 | |
ysr@777 | 1980 | void G1CollectedHeap::prepare_for_verify() { |
ysr@777 | 1981 | if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
ysr@777 | 1982 | ensure_parsability(false); |
ysr@777 | 1983 | } |
ysr@777 | 1984 | g1_rem_set()->prepare_for_verify(); |
ysr@777 | 1985 | } |
ysr@777 | 1986 | |
ysr@777 | 1987 | class VerifyLivenessOopClosure: public OopClosure { |
ysr@777 | 1988 | G1CollectedHeap* g1h; |
ysr@777 | 1989 | public: |
ysr@777 | 1990 | VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { |
ysr@777 | 1991 | g1h = _g1h; |
ysr@777 | 1992 | } |
ysr@777 | 1993 | void do_oop(narrowOop *p) { |
ysr@777 | 1994 | guarantee(false, "NYI"); |
ysr@777 | 1995 | } |
ysr@777 | 1996 | void do_oop(oop *p) { |
ysr@777 | 1997 | oop obj = *p; |
ysr@777 | 1998 | assert(obj == NULL || !g1h->is_obj_dead(obj), |
ysr@777 | 1999 | "Dead object referenced by a not dead object"); |
ysr@777 | 2000 | } |
ysr@777 | 2001 | }; |
ysr@777 | 2002 | |
ysr@777 | 2003 | class VerifyObjsInRegionClosure: public ObjectClosure { |
ysr@777 | 2004 | G1CollectedHeap* _g1h; |
ysr@777 | 2005 | size_t _live_bytes; |
ysr@777 | 2006 | HeapRegion *_hr; |
ysr@777 | 2007 | public: |
ysr@777 | 2008 | VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) { |
ysr@777 | 2009 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 2010 | } |
ysr@777 | 2011 | void do_object(oop o) { |
ysr@777 | 2012 | VerifyLivenessOopClosure isLive(_g1h); |
ysr@777 | 2013 | assert(o != NULL, "Huh?"); |
ysr@777 | 2014 | if (!_g1h->is_obj_dead(o)) { |
ysr@777 | 2015 | o->oop_iterate(&isLive); |
ysr@777 | 2016 | if (!_hr->obj_allocated_since_prev_marking(o)) |
ysr@777 | 2017 | _live_bytes += (o->size() * HeapWordSize); |
ysr@777 | 2018 | } |
ysr@777 | 2019 | } |
ysr@777 | 2020 | size_t live_bytes() { return _live_bytes; } |
ysr@777 | 2021 | }; |
ysr@777 | 2022 | |
ysr@777 | 2023 | class PrintObjsInRegionClosure : public ObjectClosure { |
ysr@777 | 2024 | HeapRegion *_hr; |
ysr@777 | 2025 | G1CollectedHeap *_g1; |
ysr@777 | 2026 | public: |
ysr@777 | 2027 | PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { |
ysr@777 | 2028 | _g1 = G1CollectedHeap::heap(); |
ysr@777 | 2029 | }
ysr@777 | 2030 | |
ysr@777 | 2031 | void do_object(oop o) { |
ysr@777 | 2032 | if (o != NULL) { |
ysr@777 | 2033 | HeapWord *start = (HeapWord *) o; |
ysr@777 | 2034 | size_t word_sz = o->size(); |
ysr@777 | 2035 | gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT |
ysr@777 | 2036 | " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", |
ysr@777 | 2037 | (void*) o, word_sz, |
ysr@777 | 2038 | _g1->isMarkedPrev(o), |
ysr@777 | 2039 | _g1->isMarkedNext(o), |
ysr@777 | 2040 | _hr->obj_allocated_since_prev_marking(o)); |
ysr@777 | 2041 | HeapWord *end = start + word_sz; |
ysr@777 | 2042 | HeapWord *cur; |
ysr@777 | 2043 | int *val; |
ysr@777 | 2044 | for (cur = start; cur < end; cur++) { |
ysr@777 | 2045 | val = (int *) cur; |
ysr@777 | 2046 | gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); |
ysr@777 | 2047 | } |
ysr@777 | 2048 | } |
ysr@777 | 2049 | } |
ysr@777 | 2050 | }; |
ysr@777 | 2051 | |
ysr@777 | 2052 | class VerifyRegionClosure: public HeapRegionClosure { |
ysr@777 | 2053 | public: |
ysr@777 | 2054 | bool _allow_dirty; |
tonyp@825 | 2055 | bool _par; |
tonyp@825 | 2056 | VerifyRegionClosure(bool allow_dirty, bool par = false) |
tonyp@825 | 2057 | : _allow_dirty(allow_dirty), _par(par) {} |
ysr@777 | 2058 | bool doHeapRegion(HeapRegion* r) { |
tonyp@825 | 2059 | guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
tonyp@825 | 2060 | "Should be unclaimed at verify points."); |
ysr@777 | 2061 | if (r->isHumongous()) { |
ysr@777 | 2062 | if (r->startsHumongous()) { |
ysr@777 | 2063 | // Verify the single H object. |
ysr@777 | 2064 | oop(r->bottom())->verify(); |
ysr@777 | 2065 | size_t word_sz = oop(r->bottom())->size(); |
ysr@777 | 2066 | guarantee(r->top() == r->bottom() + word_sz, |
ysr@777 | 2067 | "Only one object in a humongous region"); |
ysr@777 | 2068 | } |
ysr@777 | 2069 | } else { |
ysr@777 | 2070 | VerifyObjsInRegionClosure not_dead_yet_cl(r); |
ysr@777 | 2071 | r->verify(_allow_dirty); |
ysr@777 | 2072 | r->object_iterate(¬_dead_yet_cl); |
ysr@777 | 2073 | guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(), |
ysr@777 | 2074 | "More live objects than counted in last complete marking."); |
ysr@777 | 2075 | } |
ysr@777 | 2076 | return false; |
ysr@777 | 2077 | } |
ysr@777 | 2078 | }; |
ysr@777 | 2079 | |
ysr@777 | 2080 | class VerifyRootsClosure: public OopsInGenClosure { |
ysr@777 | 2081 | private: |
ysr@777 | 2082 | G1CollectedHeap* _g1h; |
ysr@777 | 2083 | bool _failures; |
ysr@777 | 2084 | |
ysr@777 | 2085 | public: |
ysr@777 | 2086 | VerifyRootsClosure() : |
ysr@777 | 2087 | _g1h(G1CollectedHeap::heap()), _failures(false) { } |
ysr@777 | 2088 | |
ysr@777 | 2089 | bool failures() { return _failures; } |
ysr@777 | 2090 | |
ysr@777 | 2091 | void do_oop(narrowOop* p) { |
ysr@777 | 2092 | guarantee(false, "NYI"); |
ysr@777 | 2093 | } |
ysr@777 | 2094 | |
ysr@777 | 2095 | void do_oop(oop* p) { |
ysr@777 | 2096 | oop obj = *p; |
ysr@777 | 2097 | if (obj != NULL) { |
ysr@777 | 2098 | if (_g1h->is_obj_dead(obj)) { |
ysr@777 | 2099 | gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
ysr@777 | 2100 | "points to dead obj "PTR_FORMAT, p, (void*) obj); |
ysr@777 | 2101 | obj->print_on(gclog_or_tty); |
ysr@777 | 2102 | _failures = true; |
ysr@777 | 2103 | } |
ysr@777 | 2104 | } |
ysr@777 | 2105 | } |
ysr@777 | 2106 | }; |
ysr@777 | 2107 | |
tonyp@825 | 2108 | // This is the task used for parallel heap verification. |
tonyp@825 | 2109 | |
tonyp@825 | 2110 | class G1ParVerifyTask: public AbstractGangTask { |
tonyp@825 | 2111 | private: |
tonyp@825 | 2112 | G1CollectedHeap* _g1h; |
tonyp@825 | 2113 | bool _allow_dirty; |
tonyp@825 | 2114 | |
tonyp@825 | 2115 | public: |
tonyp@825 | 2116 | G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) : |
tonyp@825 | 2117 | AbstractGangTask("Parallel verify task"), |
tonyp@825 | 2118 | _g1h(g1h), _allow_dirty(allow_dirty) { } |
tonyp@825 | 2119 | |
tonyp@825 | 2120 | void work(int worker_i) { |
tonyp@825 | 2121 | VerifyRegionClosure blk(_allow_dirty, true); |
tonyp@825 | 2122 | _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
tonyp@825 | 2123 | HeapRegion::ParVerifyClaimValue); |
tonyp@825 | 2124 | } |
tonyp@825 | 2125 | }; |
tonyp@825 | 2126 | |
ysr@777 | 2127 | void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
ysr@777 | 2128 | if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
ysr@777 | 2129 | if (!silent) { gclog_or_tty->print("roots "); } |
ysr@777 | 2130 | VerifyRootsClosure rootsCl; |
ysr@777 | 2131 | process_strong_roots(false, |
ysr@777 | 2132 | SharedHeap::SO_AllClasses, |
ysr@777 | 2133 | &rootsCl, |
ysr@777 | 2134 | &rootsCl); |
ysr@777 | 2135 | rem_set()->invalidate(perm_gen()->used_region(), false); |
ysr@777 | 2136 | if (!silent) { gclog_or_tty->print("heapRegions "); } |
tonyp@825 | 2137 | if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
tonyp@825 | 2138 | assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@825 | 2139 | "sanity check"); |
tonyp@825 | 2140 | |
tonyp@825 | 2141 | G1ParVerifyTask task(this, allow_dirty); |
tonyp@825 | 2142 | int n_workers = workers()->total_workers(); |
tonyp@825 | 2143 | set_par_threads(n_workers); |
tonyp@825 | 2144 | workers()->run_task(&task); |
tonyp@825 | 2145 | set_par_threads(0); |
tonyp@825 | 2146 | |
tonyp@825 | 2147 | assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), |
tonyp@825 | 2148 | "sanity check"); |
tonyp@825 | 2149 | |
tonyp@825 | 2150 | reset_heap_region_claim_values(); |
tonyp@825 | 2151 | |
tonyp@825 | 2152 | assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@825 | 2153 | "sanity check"); |
tonyp@825 | 2154 | } else { |
tonyp@825 | 2155 | VerifyRegionClosure blk(allow_dirty); |
tonyp@825 | 2156 | _hrs->iterate(&blk); |
tonyp@825 | 2157 | } |
ysr@777 | 2158 | if (!silent) gclog_or_tty->print("remset "); |
ysr@777 | 2159 | rem_set()->verify(); |
ysr@777 | 2160 | guarantee(!rootsCl.failures(), "should not have had failures"); |
ysr@777 | 2161 | } else { |
ysr@777 | 2162 | if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); |
ysr@777 | 2163 | } |
ysr@777 | 2164 | } |
ysr@777 | 2165 | |
ysr@777 | 2166 | class PrintRegionClosure: public HeapRegionClosure { |
ysr@777 | 2167 | outputStream* _st; |
ysr@777 | 2168 | public: |
ysr@777 | 2169 | PrintRegionClosure(outputStream* st) : _st(st) {} |
ysr@777 | 2170 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2171 | r->print_on(_st); |
ysr@777 | 2172 | return false; |
ysr@777 | 2173 | } |
ysr@777 | 2174 | }; |
ysr@777 | 2175 | |
ysr@777 | 2176 | void G1CollectedHeap::print() const { print_on(gclog_or_tty); } |
ysr@777 | 2177 | |
ysr@777 | 2178 | void G1CollectedHeap::print_on(outputStream* st) const { |
ysr@777 | 2179 | PrintRegionClosure blk(st); |
ysr@777 | 2180 | _hrs->iterate(&blk); |
ysr@777 | 2181 | } |
ysr@777 | 2182 | |
ysr@777 | 2183 | void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { |
ysr@777 | 2184 | if (ParallelGCThreads > 0) { |
ysr@777 | 2185 | workers()->print_worker_threads(); |
ysr@777 | 2186 | } |
ysr@777 | 2187 | st->print("\"G1 concurrent mark GC Thread\" "); |
ysr@777 | 2188 | _cmThread->print(); |
ysr@777 | 2189 | st->cr(); |
ysr@777 | 2190 | st->print("\"G1 concurrent refinement GC Thread\" "); |
ysr@777 | 2191 | _cg1r->cg1rThread()->print_on(st); |
ysr@777 | 2192 | st->cr(); |
ysr@777 | 2193 | st->print("\"G1 zero-fill GC Thread\" "); |
ysr@777 | 2194 | _czft->print_on(st); |
ysr@777 | 2195 | st->cr(); |
ysr@777 | 2196 | } |
ysr@777 | 2197 | |
ysr@777 | 2198 | void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
ysr@777 | 2199 | if (ParallelGCThreads > 0) { |
ysr@777 | 2200 | workers()->threads_do(tc); |
ysr@777 | 2201 | } |
ysr@777 | 2202 | tc->do_thread(_cmThread); |
ysr@777 | 2203 | tc->do_thread(_cg1r->cg1rThread()); |
ysr@777 | 2204 | tc->do_thread(_czft); |
ysr@777 | 2205 | } |
ysr@777 | 2206 | |
ysr@777 | 2207 | void G1CollectedHeap::print_tracing_info() const { |
ysr@777 | 2208 | concurrent_g1_refine()->print_final_card_counts(); |
ysr@777 | 2209 | |
ysr@777 | 2210 | // We'll overload this to mean "trace GC pause statistics." |
ysr@777 | 2211 | if (TraceGen0Time || TraceGen1Time) { |
ysr@777 | 2212 | // The "G1CollectorPolicy" is keeping track of these stats, so delegate |
ysr@777 | 2213 | // to that. |
ysr@777 | 2214 | g1_policy()->print_tracing_info(); |
ysr@777 | 2215 | } |
ysr@777 | 2216 | if (SummarizeG1RSStats) { |
ysr@777 | 2217 | g1_rem_set()->print_summary_info(); |
ysr@777 | 2218 | } |
ysr@777 | 2219 | if (SummarizeG1ConcMark) { |
ysr@777 | 2220 | concurrent_mark()->print_summary_info(); |
ysr@777 | 2221 | } |
ysr@777 | 2222 | if (SummarizeG1ZFStats) { |
ysr@777 | 2223 | ConcurrentZFThread::print_summary_info(); |
ysr@777 | 2224 | } |
ysr@777 | 2225 | if (G1SummarizePopularity) { |
ysr@777 | 2226 | print_popularity_summary_info(); |
ysr@777 | 2227 | } |
ysr@777 | 2228 | g1_policy()->print_yg_surv_rate_info(); |
ysr@777 | 2229 | |
ysr@777 | 2230 | GCOverheadReporter::printGCOverhead(); |
ysr@777 | 2231 | |
ysr@777 | 2232 | SpecializationStats::print(); |
ysr@777 | 2233 | } |
ysr@777 | 2234 | |
ysr@777 | 2235 | |
ysr@777 | 2236 | int G1CollectedHeap::addr_to_arena_id(void* addr) const { |
ysr@777 | 2237 | HeapRegion* hr = heap_region_containing(addr); |
ysr@777 | 2238 | if (hr == NULL) { |
ysr@777 | 2239 | return 0; |
ysr@777 | 2240 | } else { |
ysr@777 | 2241 | return 1; |
ysr@777 | 2242 | } |
ysr@777 | 2243 | } |
ysr@777 | 2244 | |
ysr@777 | 2245 | G1CollectedHeap* G1CollectedHeap::heap() { |
ysr@777 | 2246 | assert(_sh->kind() == CollectedHeap::G1CollectedHeap, |
ysr@777 | 2247 | "not a garbage-first heap"); |
ysr@777 | 2248 | return _g1h; |
ysr@777 | 2249 | } |
ysr@777 | 2250 | |
ysr@777 | 2251 | void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { |
ysr@777 | 2252 | if (PrintHeapAtGC) {
ysr@777 | 2253 | gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections()); |
ysr@777 | 2254 | Universe::print(); |
ysr@777 | 2255 | } |
ysr@777 | 2256 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
ysr@777 | 2257 | // Call allocation profiler |
ysr@777 | 2258 | AllocationProfiler::iterate_since_last_gc(); |
ysr@777 | 2259 | // Fill TLAB's and such |
ysr@777 | 2260 | ensure_parsability(true); |
ysr@777 | 2261 | } |
ysr@777 | 2262 | |
ysr@777 | 2263 | void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { |
ysr@777 | 2264 | // FIXME: what is this about? |
ysr@777 | 2265 | // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" |
ysr@777 | 2266 | // is set. |
ysr@777 | 2267 | COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), |
ysr@777 | 2268 | "derived pointer present")); |
ysr@777 | 2269 | |
ysr@777 | 2270 | if (PrintHeapAtGC) {
ysr@777 | 2271 | gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections()); |
ysr@777 | 2272 | Universe::print(); |
ysr@777 | 2273 | gclog_or_tty->print("} "); |
ysr@777 | 2274 | } |
ysr@777 | 2275 | } |
ysr@777 | 2276 | |
ysr@777 | 2277 | void G1CollectedHeap::do_collection_pause() { |
ysr@777 | 2278 | // Read the GC count while holding the Heap_lock |
ysr@777 | 2279 | // we need to do this _before_ wait_for_cleanup_complete(), to |
ysr@777 | 2280 | // ensure that we do not give up the heap lock and potentially |
ysr@777 | 2281 | // pick up the wrong count |
ysr@777 | 2282 | int gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@777 | 2283 | |
ysr@777 | 2284 | // Don't want to do a GC pause while cleanup is being completed! |
ysr@777 | 2285 | wait_for_cleanup_complete(); |
ysr@777 | 2286 | |
ysr@777 | 2287 | g1_policy()->record_stop_world_start(); |
ysr@777 | 2288 | { |
ysr@777 | 2289 | MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back |
ysr@777 | 2290 | VM_G1IncCollectionPause op(gc_count_before); |
ysr@777 | 2291 | VMThread::execute(&op); |
ysr@777 | 2292 | } |
ysr@777 | 2293 | } |
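ysr@777 |      | // The VM operation re-reads total_collections() once it reaches the
ysr@777 |      | // safepoint; if the count no longer matches gc_count_before, another
ysr@777 |      | // pause has presumably already done the work and the operation can be
ysr@777 |      | // skipped rather than collecting twice.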
ysr@777 | 2294 | |
ysr@777 | 2295 | void |
ysr@777 | 2296 | G1CollectedHeap::doConcurrentMark() { |
ysr@777 | 2297 | if (G1ConcMark) { |
ysr@777 | 2298 | MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 2299 | if (!_cmThread->in_progress()) { |
ysr@777 | 2300 | _cmThread->set_started(); |
ysr@777 | 2301 | CGC_lock->notify(); |
ysr@777 | 2302 | } |
ysr@777 | 2303 | } |
ysr@777 | 2304 | } |
ysr@777 | 2305 | |
ysr@777 | 2306 | class VerifyMarkedObjsClosure: public ObjectClosure { |
ysr@777 | 2307 | G1CollectedHeap* _g1h; |
ysr@777 | 2308 | public: |
ysr@777 | 2309 | VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} |
ysr@777 | 2310 | void do_object(oop obj) { |
ysr@777 | 2311 | assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, |
ysr@777 | 2312 | "mark-and-sweep mark should agree with concurrent deadness");
ysr@777 | 2313 | } |
ysr@777 | 2314 | }; |
ysr@777 | 2315 | |
ysr@777 | 2316 | void |
ysr@777 | 2317 | G1CollectedHeap::checkConcurrentMark() { |
ysr@777 | 2318 | VerifyMarkedObjsClosure verifycl(this); |
ysr@777 | 2319 | doConcurrentMark(); |
ysr@777 | 2320 | // MutexLockerEx x(getMarkBitMapLock(), |
ysr@777 | 2321 | // Mutex::_no_safepoint_check_flag); |
ysr@777 | 2322 | object_iterate(&verifycl); |
ysr@777 | 2323 | } |
ysr@777 | 2324 | |
ysr@777 | 2325 | void G1CollectedHeap::do_sync_mark() { |
ysr@777 | 2326 | _cm->checkpointRootsInitial(); |
ysr@777 | 2327 | _cm->markFromRoots(); |
ysr@777 | 2328 | _cm->checkpointRootsFinal(false); |
ysr@777 | 2329 | } |
ysr@777 | 2330 | |
ysr@777 | 2331 | // <NEW PREDICTION> |
ysr@777 | 2332 | |
ysr@777 | 2333 | double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, |
ysr@777 | 2334 | bool young) { |
ysr@777 | 2335 | return _g1_policy->predict_region_elapsed_time_ms(hr, young); |
ysr@777 | 2336 | } |
ysr@777 | 2337 | |
ysr@777 | 2338 | void G1CollectedHeap::check_if_region_is_too_expensive(double |
ysr@777 | 2339 | predicted_time_ms) { |
ysr@777 | 2340 | _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); |
ysr@777 | 2341 | } |
ysr@777 | 2342 | |
ysr@777 | 2343 | size_t G1CollectedHeap::pending_card_num() { |
ysr@777 | 2344 | size_t extra_cards = 0; |
ysr@777 | 2345 | JavaThread *curr = Threads::first(); |
ysr@777 | 2346 | while (curr != NULL) { |
ysr@777 | 2347 | DirtyCardQueue& dcq = curr->dirty_card_queue(); |
ysr@777 | 2348 | extra_cards += dcq.size(); |
ysr@777 | 2349 | curr = curr->next(); |
ysr@777 | 2350 | } |
ysr@777 | 2351 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 2352 | size_t buffer_size = dcqs.buffer_size(); |
ysr@777 | 2353 | size_t buffer_num = dcqs.completed_buffers_num(); |
ysr@777 | 2354 | return buffer_size * buffer_num + extra_cards; |
ysr@777 | 2355 | } |
ysr@777 | 2356 | |
ysr@777 | 2357 | size_t G1CollectedHeap::max_pending_card_num() { |
ysr@777 | 2358 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 2359 | size_t buffer_size = dcqs.buffer_size(); |
ysr@777 | 2360 | size_t buffer_num = dcqs.completed_buffers_num(); |
ysr@777 | 2361 | int thread_num = Threads::number_of_threads(); |
ysr@777 | 2362 | return (buffer_num + thread_num) * buffer_size; |
ysr@777 | 2363 | } |
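ysr@777 |      | // A worked example with illustrative numbers: with 256-card buffers,
ysr@777 |      | // 10 completed buffers, and two threads holding 3 and 7 cards in their
ysr@777 |      | // partial buffers, pending_card_num() is 256 * 10 + 3 + 7 = 2570; the
ysr@777 |      | // max variant instead charges every thread a full buffer, giving the
ysr@777 |      | // upper bound (10 + thread_num) * 256.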
ysr@777 | 2364 | |
ysr@777 | 2365 | size_t G1CollectedHeap::cards_scanned() { |
ysr@777 | 2366 | HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); |
ysr@777 | 2367 | return g1_rset->cardsScanned(); |
ysr@777 | 2368 | } |
ysr@777 | 2369 | |
ysr@777 | 2370 | void |
ysr@777 | 2371 | G1CollectedHeap::setup_surviving_young_words() { |
ysr@777 | 2372 | guarantee( _surviving_young_words == NULL, "pre-condition" ); |
ysr@777 | 2373 | size_t array_length = g1_policy()->young_cset_length(); |
ysr@777 | 2374 | _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); |
ysr@777 | 2375 | if (_surviving_young_words == NULL) { |
ysr@777 | 2376 | vm_exit_out_of_memory(sizeof(size_t) * array_length, |
ysr@777 | 2377 | "Not enough space for young surv words summary."); |
ysr@777 | 2378 | } |
ysr@777 | 2379 | memset(_surviving_young_words, 0, array_length * sizeof(size_t)); |
ysr@777 | 2380 | for (size_t i = 0; i < array_length; ++i) { |
ysr@777 | 2381 | guarantee( _surviving_young_words[i] == 0, "invariant" ); |
ysr@777 | 2382 | } |
ysr@777 | 2383 | } |
ysr@777 | 2384 | |
ysr@777 | 2385 | void |
ysr@777 | 2386 | G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { |
ysr@777 | 2387 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 2388 | size_t array_length = g1_policy()->young_cset_length(); |
ysr@777 | 2389 | for (size_t i = 0; i < array_length; ++i) |
ysr@777 | 2390 | _surviving_young_words[i] += surv_young_words[i]; |
ysr@777 | 2391 | } |
ysr@777 | 2392 | |
ysr@777 | 2393 | void |
ysr@777 | 2394 | G1CollectedHeap::cleanup_surviving_young_words() { |
ysr@777 | 2395 | guarantee( _surviving_young_words != NULL, "pre-condition" ); |
ysr@777 | 2396 | FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); |
ysr@777 | 2397 | _surviving_young_words = NULL; |
ysr@777 | 2398 | } |
ysr@777 | 2399 | |
ysr@777 | 2400 | // </NEW PREDICTION> |
ysr@777 | 2401 | |
ysr@777 | 2402 | void |
ysr@777 | 2403 | G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) { |
ysr@777 | 2404 | char verbose_str[128]; |
ysr@777 | 2405 | sprintf(verbose_str, "GC pause "); |
ysr@777 | 2406 | if (popular_region != NULL) |
ysr@777 | 2407 | strcat(verbose_str, "(popular)"); |
ysr@777 | 2408 | else if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 2409 | if (g1_policy()->full_young_gcs()) |
ysr@777 | 2410 | strcat(verbose_str, "(young)"); |
ysr@777 | 2411 | else |
ysr@777 | 2412 | strcat(verbose_str, "(partial)"); |
ysr@777 | 2413 | } |
ysr@777 | 2414 | bool reset_should_initiate_conc_mark = false; |
ysr@777 | 2415 | if (popular_region != NULL && g1_policy()->should_initiate_conc_mark()) { |
ysr@777 | 2416 | // we currently do not allow an initial mark phase to be piggy-backed |
ysr@777 | 2417 | // on a popular pause |
ysr@777 | 2418 | reset_should_initiate_conc_mark = true; |
ysr@777 | 2419 | g1_policy()->unset_should_initiate_conc_mark(); |
ysr@777 | 2420 | } |
ysr@777 | 2421 | if (g1_policy()->should_initiate_conc_mark()) |
ysr@777 | 2422 | strcat(verbose_str, " (initial-mark)"); |
ysr@777 | 2423 | |
ysr@777 | 2424 | GCCauseSetter x(this, (popular_region == NULL ? |
ysr@777 | 2425 | GCCause::_g1_inc_collection_pause : |
ysr@777 | 2426 | GCCause::_g1_pop_region_collection_pause)); |
ysr@777 | 2427 | |
ysr@777 | 2428 | // if PrintGCDetails is on, we'll print long statistics information |
ysr@777 | 2429 | // in the collector policy code, so let's not print this as the output |
ysr@777 | 2430 | // is messy if we do. |
ysr@777 | 2431 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
ysr@777 | 2432 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
ysr@777 | 2433 | TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
ysr@777 | 2434 | |
ysr@777 | 2435 | ResourceMark rm; |
ysr@777 | 2436 | assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
ysr@777 | 2437 | assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
ysr@777 | 2438 | guarantee(!is_gc_active(), "collection is not reentrant"); |
ysr@777 | 2439 | assert(regions_accounted_for(), "Region leakage!"); |
iveresov@788 | 2440 | |
iveresov@788 | 2441 | increment_gc_time_stamp(); |
ysr@777 | 2442 | |
ysr@777 | 2443 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 2444 | assert(check_young_list_well_formed(), |
ysr@777 | 2445 | "young list should be well formed"); |
ysr@777 | 2446 | } |
ysr@777 | 2447 | |
ysr@777 | 2448 | if (GC_locker::is_active()) { |
ysr@777 | 2449 | return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
ysr@777 | 2450 | } |
ysr@777 | 2451 | |
ysr@777 | 2452 | bool abandoned = false; |
ysr@777 | 2453 | { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
ysr@777 | 2454 | IsGCActiveMark x; |
ysr@777 | 2455 | |
ysr@777 | 2456 | gc_prologue(false); |
ysr@777 | 2457 | increment_total_collections(); |
ysr@777 | 2458 | |
ysr@777 | 2459 | #if G1_REM_SET_LOGGING |
ysr@777 | 2460 | gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
ysr@777 | 2461 | print(); |
ysr@777 | 2462 | #endif |
ysr@777 | 2463 | |
ysr@777 | 2464 | if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 2465 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 2466 | prepare_for_verify(); |
ysr@777 | 2467 | gclog_or_tty->print(" VerifyBeforeGC:"); |
ysr@777 | 2468 | Universe::verify(false); |
ysr@777 | 2469 | } |
ysr@777 | 2470 | |
ysr@777 | 2471 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
ysr@777 | 2472 | |
ysr@888 | 2473 | // We want to turn off ref discovery, if necessary, and turn it back
ysr@777 | 2474 | // on again later if we do. |
ysr@777 | 2475 | bool was_enabled = ref_processor()->discovery_enabled(); |
ysr@777 | 2476 | if (was_enabled) ref_processor()->disable_discovery(); |
ysr@777 | 2477 | |
ysr@777 | 2478 | // Forget the current alloc region (we might even choose it to be part |
ysr@777 | 2479 | // of the collection set!). |
ysr@777 | 2480 | abandon_cur_alloc_region(); |
ysr@777 | 2481 | |
ysr@777 | 2482 | // The start time recorded below deliberately excludes the time taken
ysr@777 | 2483 | // by the possible verification above.
ysr@777 | 2484 | double start_time_sec = os::elapsedTime(); |
ysr@777 | 2485 | GCOverheadReporter::recordSTWStart(start_time_sec); |
ysr@777 | 2486 | size_t start_used_bytes = used(); |
ysr@777 | 2487 | if (!G1ConcMark) { |
ysr@777 | 2488 | do_sync_mark(); |
ysr@777 | 2489 | } |
ysr@777 | 2490 | |
ysr@777 | 2491 | g1_policy()->record_collection_pause_start(start_time_sec, |
ysr@777 | 2492 | start_used_bytes); |
ysr@777 | 2493 | |
tonyp@961 | 2494 | guarantee(_in_cset_fast_test == NULL, "invariant"); |
tonyp@961 | 2495 | guarantee(_in_cset_fast_test_base == NULL, "invariant"); |
tonyp@961 | 2496 | _in_cset_fast_test_length = n_regions(); |
tonyp@961 | 2497 | _in_cset_fast_test_base = |
tonyp@961 | 2498 | NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
tonyp@961 | 2499 | memset(_in_cset_fast_test_base, false, |
tonyp@961 | 2500 | _in_cset_fast_test_length * sizeof(bool)); |
tonyp@961 | 2501 | // We're biasing _in_cset_fast_test to avoid subtracting the |
tonyp@961 | 2502 | // beginning of the heap every time we want to index; basically |
tonyp@961 | 2503 | // it's the same as what we do with the card table.
tonyp@961 | 2504 | _in_cset_fast_test = _in_cset_fast_test_base - |
tonyp@961 | 2505 | ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
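tonyp@961 |      | // With the bias applied, a heap address addr can be tested directly as
tonyp@961 |      | //   _in_cset_fast_test[((size_t) addr) >> HeapRegion::LogOfHRGrainBytes]
tonyp@961 |      | // without first subtracting the start of the heap.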
tonyp@961 | 2506 | |
ysr@777 | 2507 | #if SCAN_ONLY_VERBOSE |
ysr@777 | 2508 | _young_list->print(); |
ysr@777 | 2509 | #endif // SCAN_ONLY_VERBOSE |
ysr@777 | 2510 | |
ysr@777 | 2511 | if (g1_policy()->should_initiate_conc_mark()) { |
ysr@777 | 2512 | concurrent_mark()->checkpointRootsInitialPre(); |
ysr@777 | 2513 | } |
ysr@777 | 2514 | save_marks(); |
ysr@777 | 2515 | |
twisti@1040 | 2516 | // We must do this before any possible evacuation that should propagate |
ysr@777 | 2517 | // marks, including evacuation of popular objects in a popular pause. |
ysr@777 | 2518 | if (mark_in_progress()) { |
ysr@777 | 2519 | double start_time_sec = os::elapsedTime(); |
ysr@777 | 2520 | |
ysr@777 | 2521 | _cm->drainAllSATBBuffers(); |
ysr@777 | 2522 | double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
ysr@777 | 2523 | g1_policy()->record_satb_drain_time(finish_mark_ms); |
ysr@777 | 2524 | |
ysr@777 | 2525 | } |
ysr@777 | 2526 | // Record the number of elements currently on the mark stack, so we |
ysr@777 | 2527 | // only iterate over these. (Since evacuation may add to the mark |
ysr@777 | 2528 | // stack, doing more exposes race conditions.) If no mark is in |
ysr@777 | 2529 | // progress, this will be zero. |
ysr@777 | 2530 | _cm->set_oops_do_bound(); |
ysr@777 | 2531 | |
ysr@777 | 2532 | assert(regions_accounted_for(), "Region leakage."); |
ysr@777 | 2533 | |
ysr@777 | 2536 | if (mark_in_progress()) |
ysr@777 | 2537 | concurrent_mark()->newCSet(); |
ysr@777 | 2538 | |
ysr@777 | 2539 | // Now choose the CS. |
ysr@777 | 2540 | if (popular_region == NULL) { |
ysr@777 | 2541 | g1_policy()->choose_collection_set(); |
ysr@777 | 2542 | } else { |
ysr@777 | 2543 | // We may be evacuating a single region (for popularity). |
ysr@777 | 2544 | g1_policy()->record_popular_pause_preamble_start(); |
ysr@777 | 2545 | popularity_pause_preamble(popular_region); |
ysr@777 | 2546 | g1_policy()->record_popular_pause_preamble_end(); |
ysr@777 | 2547 | abandoned = (g1_policy()->collection_set() == NULL); |
ysr@777 | 2548 | // Now we allow more regions to be added (we have to collect |
ysr@777 | 2549 | // all popular regions). |
ysr@777 | 2550 | if (!abandoned) { |
ysr@777 | 2551 | g1_policy()->choose_collection_set(popular_region); |
ysr@777 | 2552 | } |
ysr@777 | 2553 | } |
ysr@777 | 2554 | // We may abandon a pause if we find no region that will fit within the
ysr@777 | 2555 | // pause time allowed by the MMU.
ysr@777 | 2556 | abandoned = (g1_policy()->collection_set() == NULL); |
ysr@777 | 2557 | |
ysr@777 | 2558 | // Nothing to do if we were unable to choose a collection set. |
ysr@777 | 2559 | if (!abandoned) { |
ysr@777 | 2560 | #if G1_REM_SET_LOGGING |
ysr@777 | 2561 | gclog_or_tty->print_cr("\nAfter pause, heap:"); |
ysr@777 | 2562 | print(); |
ysr@777 | 2563 | #endif |
ysr@777 | 2564 | |
ysr@777 | 2565 | setup_surviving_young_words(); |
ysr@777 | 2566 | |
ysr@777 | 2567 | // Set up the gc allocation regions. |
ysr@777 | 2568 | get_gc_alloc_regions(); |
ysr@777 | 2569 | |
ysr@777 | 2570 | // Actually do the work... |
ysr@777 | 2571 | evacuate_collection_set(); |
ysr@777 | 2572 | free_collection_set(g1_policy()->collection_set()); |
ysr@777 | 2573 | g1_policy()->clear_collection_set(); |
ysr@777 | 2574 | |
tonyp@961 | 2575 | FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); |
tonyp@961 | 2576 | // this is more for peace of mind; we're nulling them here and |
tonyp@961 | 2577 | // we're expecting them to be null at the beginning of the next GC |
tonyp@961 | 2578 | _in_cset_fast_test = NULL; |
tonyp@961 | 2579 | _in_cset_fast_test_base = NULL; |
tonyp@961 | 2580 | |
ysr@777 | 2581 | if (popular_region != NULL) { |
ysr@777 | 2582 | // We have to wait until now, because we don't want the region to |
ysr@777 | 2583 | // be rescheduled for pop-evac during RS update. |
ysr@777 | 2584 | popular_region->set_popular_pending(false); |
ysr@777 | 2585 | } |
ysr@777 | 2586 | |
ysr@777 | 2587 | release_gc_alloc_regions(); |
ysr@777 | 2588 | |
ysr@777 | 2589 | cleanup_surviving_young_words(); |
ysr@777 | 2590 | |
ysr@777 | 2591 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 2592 | _young_list->reset_sampled_info(); |
ysr@777 | 2593 | assert(check_young_list_empty(true), |
ysr@777 | 2594 | "young list should be empty"); |
ysr@777 | 2595 | |
ysr@777 | 2596 | #if SCAN_ONLY_VERBOSE |
ysr@777 | 2597 | _young_list->print(); |
ysr@777 | 2598 | #endif // SCAN_ONLY_VERBOSE |
ysr@777 | 2599 | |
apetrusenko@980 | 2600 | g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
apetrusenko@980 | 2601 | _young_list->first_survivor_region(), |
apetrusenko@980 | 2602 | _young_list->last_survivor_region()); |
ysr@777 | 2603 | _young_list->reset_auxilary_lists(); |
ysr@777 | 2604 | } |
ysr@777 | 2605 | } else { |
ysr@777 | 2606 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 2607 | } |
ysr@777 | 2608 | |
ysr@777 | 2609 | if (evacuation_failed()) { |
ysr@777 | 2610 | _summary_bytes_used = recalculate_used(); |
ysr@777 | 2611 | } else { |
ysr@777 | 2612 | // The "used" of the collection set regions has already been subtracted
ysr@777 | 2613 | // when the regions were freed. Add in the bytes evacuated.
ysr@777 | 2614 | _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
ysr@777 | 2615 | } |
ysr@777 | 2616 | |
ysr@777 | 2617 | if (g1_policy()->in_young_gc_mode() && |
ysr@777 | 2618 | g1_policy()->should_initiate_conc_mark()) { |
ysr@777 | 2619 | concurrent_mark()->checkpointRootsInitialPost(); |
ysr@777 | 2620 | set_marking_started(); |
ysr@777 | 2621 | doConcurrentMark(); |
ysr@777 | 2622 | } |
ysr@777 | 2623 | |
ysr@777 | 2624 | #if SCAN_ONLY_VERBOSE |
ysr@777 | 2625 | _young_list->print(); |
ysr@777 | 2626 | #endif // SCAN_ONLY_VERBOSE |
ysr@777 | 2627 | |
ysr@777 | 2628 | double end_time_sec = os::elapsedTime(); |
apetrusenko@980 | 2629 | if (!evacuation_failed()) { |
apetrusenko@980 | 2630 | g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0); |
apetrusenko@980 | 2631 | } |
ysr@777 | 2632 | GCOverheadReporter::recordSTWEnd(end_time_sec); |
ysr@777 | 2633 | g1_policy()->record_collection_pause_end(popular_region != NULL, |
ysr@777 | 2634 | abandoned); |
ysr@777 | 2635 | |
ysr@777 | 2636 | assert(regions_accounted_for(), "Region leakage."); |
ysr@777 | 2637 | |
ysr@777 | 2638 | if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 2639 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 2640 | gclog_or_tty->print(" VerifyAfterGC:"); |
ysr@777 | 2641 | Universe::verify(false); |
ysr@777 | 2642 | } |
ysr@777 | 2643 | |
ysr@777 | 2644 | if (was_enabled) ref_processor()->enable_discovery(); |
ysr@777 | 2645 | |
ysr@777 | 2646 | { |
ysr@777 | 2647 | size_t expand_bytes = g1_policy()->expansion_amount(); |
ysr@777 | 2648 | if (expand_bytes > 0) { |
ysr@777 | 2649 | size_t bytes_before = capacity(); |
ysr@777 | 2650 | expand(expand_bytes); |
ysr@777 | 2651 | } |
ysr@777 | 2652 | } |
ysr@777 | 2653 | |
jmasa@981 | 2654 | if (mark_in_progress()) { |
ysr@777 | 2655 | concurrent_mark()->update_g1_committed(); |
jmasa@981 | 2656 | } |
jmasa@981 | 2657 | |
jmasa@981 | 2658 | #ifdef TRACESPINNING |
jmasa@981 | 2659 | ParallelTaskTerminator::print_termination_counts(); |
jmasa@981 | 2660 | #endif |
ysr@777 | 2661 | |
ysr@777 | 2662 | gc_epilogue(false); |
ysr@777 | 2663 | } |
ysr@777 | 2664 | |
ysr@777 | 2665 | assert(verify_region_lists(), "Bad region lists."); |
ysr@777 | 2666 | |
ysr@777 | 2667 | if (reset_should_initiate_conc_mark) |
ysr@777 | 2668 | g1_policy()->set_should_initiate_conc_mark(); |
ysr@777 | 2669 | |
ysr@777 | 2670 | if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
ysr@777 | 2671 | gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
ysr@777 | 2672 | print_tracing_info(); |
ysr@777 | 2673 | vm_exit(-1); |
ysr@777 | 2674 | } |
ysr@777 | 2675 | } |
ysr@777 | 2676 | |
ysr@777 | 2677 | void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
ysr@777 | 2678 | assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); |
ysr@777 | 2679 | HeapWord* original_top = NULL; |
ysr@777 | 2680 | if (r != NULL) |
ysr@777 | 2681 | original_top = r->top(); |
ysr@777 | 2682 | |
ysr@777 | 2683 | // We will want to record the used space in r as being there before GC.
ysr@777 | 2684 | // Once we install it as a GC alloc region it's eligible for allocation.
ysr@777 | 2685 | // So record it now and use it later. |
ysr@777 | 2686 | size_t r_used = 0; |
ysr@777 | 2687 | if (r != NULL) { |
ysr@777 | 2688 | r_used = r->used(); |
ysr@777 | 2689 | |
ysr@777 | 2690 | if (ParallelGCThreads > 0) { |
ysr@777 | 2691 | // need to take the lock to guard against two threads calling |
ysr@777 | 2692 | // get_gc_alloc_region concurrently (very unlikely but...) |
ysr@777 | 2693 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 2694 | r->save_marks(); |
ysr@777 | 2695 | } |
ysr@777 | 2696 | } |
ysr@777 | 2697 | HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 2698 | _gc_alloc_regions[purpose] = r; |
ysr@777 | 2699 | if (old_alloc_region != NULL) { |
ysr@777 | 2700 | // Replace aliases too. |
ysr@777 | 2701 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 2702 | if (_gc_alloc_regions[ap] == old_alloc_region) { |
ysr@777 | 2703 | _gc_alloc_regions[ap] = r; |
ysr@777 | 2704 | } |
ysr@777 | 2705 | } |
ysr@777 | 2706 | } |
ysr@777 | 2707 | if (r != NULL) { |
ysr@777 | 2708 | push_gc_alloc_region(r); |
ysr@777 | 2709 | if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { |
ysr@777 | 2710 | // We are using a region as a GC alloc region after it has been used |
ysr@777 | 2711 | // as a mutator allocation region during the current marking cycle. |
ysr@777 | 2712 | // The mutator-allocated objects are currently implicitly marked, but |
ysr@777 | 2713 | // when we move hr->next_top_at_mark_start() forward at the end
ysr@777 | 2714 | // of the GC pause, they won't be. We therefore mark all objects in |
ysr@777 | 2715 | // the "gap". We do this object-by-object, since marking densely |
ysr@777 | 2716 | // does not currently work right with marking bitmap iteration. This |
ysr@777 | 2717 | // means we rely on TLAB filling at the start of pauses, and no |
ysr@777 | 2718 | // "resuscitation" of filled TLABs. If we want to do this, we need
ysr@777 | 2719 | // to fix the marking bitmap iteration. |
ysr@777 | 2720 | HeapWord* curhw = r->next_top_at_mark_start(); |
ysr@777 | 2721 | HeapWord* t = original_top; |
ysr@777 | 2722 | |
ysr@777 | 2723 | while (curhw < t) { |
ysr@777 | 2724 | oop cur = (oop)curhw; |
ysr@777 | 2725 | // We'll assume parallel for generality. This is rare code. |
ysr@777 | 2726 | concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? |
ysr@777 | 2727 | curhw = curhw + cur->size(); |
ysr@777 | 2728 | } |
ysr@777 | 2729 | assert(curhw == t, "Should have parsed correctly."); |
ysr@777 | 2730 | } |
ysr@777 | 2731 | if (G1PolicyVerbose > 1) { |
ysr@777 | 2732 | gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " |
ysr@777 | 2733 | "for survivors:", r->bottom(), original_top, r->end()); |
ysr@777 | 2734 | r->print(); |
ysr@777 | 2735 | } |
ysr@777 | 2736 | g1_policy()->record_before_bytes(r_used); |
ysr@777 | 2737 | } |
ysr@777 | 2738 | } |
ysr@777 | 2739 | |
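ysr@777 |      | // Links hr onto the head of the singly-linked _gc_alloc_region_list and
ysr@777 |      | // flags it as a GC alloc region; the caller must either be the VM thread
ysr@777 |      | // or hold the par-alloc-during-GC lock (asserted below).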
ysr@777 | 2740 | void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { |
ysr@777 | 2741 | assert(Thread::current()->is_VM_thread() || |
ysr@777 | 2742 | par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); |
ysr@777 | 2743 | assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
ysr@777 | 2744 | "Precondition."); |
ysr@777 | 2745 | hr->set_is_gc_alloc_region(true); |
ysr@777 | 2746 | hr->set_next_gc_alloc_region(_gc_alloc_region_list); |
ysr@777 | 2747 | _gc_alloc_region_list = hr; |
ysr@777 | 2748 | } |
ysr@777 | 2749 | |
ysr@777 | 2750 | #ifdef G1_DEBUG |
ysr@777 | 2751 | class FindGCAllocRegion: public HeapRegionClosure { |
ysr@777 | 2752 | public: |
ysr@777 | 2753 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2754 | if (r->is_gc_alloc_region()) { |
ysr@777 | 2755 | gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", |
ysr@777 | 2756 | r->hrs_index(), r->bottom()); |
ysr@777 | 2757 | } |
ysr@777 | 2758 | return false; |
ysr@777 | 2759 | } |
ysr@777 | 2760 | }; |
ysr@777 | 2761 | #endif // G1_DEBUG |
ysr@777 | 2762 | |
ysr@777 | 2763 | void G1CollectedHeap::forget_alloc_region_list() { |
ysr@777 | 2764 | assert(Thread::current()->is_VM_thread(), "Precondition"); |
ysr@777 | 2765 | while (_gc_alloc_region_list != NULL) { |
ysr@777 | 2766 | HeapRegion* r = _gc_alloc_region_list; |
ysr@777 | 2767 | assert(r->is_gc_alloc_region(), "Invariant."); |
ysr@777 | 2768 | _gc_alloc_region_list = r->next_gc_alloc_region(); |
ysr@777 | 2769 | r->set_next_gc_alloc_region(NULL); |
ysr@777 | 2770 | r->set_is_gc_alloc_region(false); |
apetrusenko@980 | 2771 | if (r->is_survivor()) { |
apetrusenko@980 | 2772 | if (r->is_empty()) { |
apetrusenko@980 | 2773 | r->set_not_young(); |
apetrusenko@980 | 2774 | } else { |
apetrusenko@980 | 2775 | _young_list->add_survivor_region(r); |
apetrusenko@980 | 2776 | } |
apetrusenko@980 | 2777 | } |
ysr@777 | 2778 | if (r->is_empty()) { |
ysr@777 | 2779 | ++_free_regions; |
ysr@777 | 2780 | } |
ysr@777 | 2781 | } |
ysr@777 | 2782 | #ifdef G1_DEBUG |
ysr@777 | 2783 | FindGCAllocRegion fa; |
ysr@777 | 2784 | heap_region_iterate(&fa); |
ysr@777 | 2785 | #endif // G1_DEBUG |
ysr@777 | 2786 | } |
ysr@777 | 2787 | |
ysr@777 | 2788 | |
ysr@777 | 2789 | bool G1CollectedHeap::check_gc_alloc_regions() { |
ysr@777 | 2790 | // TODO: allocation regions check |
ysr@777 | 2791 | return true; |
ysr@777 | 2792 | } |
ysr@777 | 2793 | |
ysr@777 | 2794 | void G1CollectedHeap::get_gc_alloc_regions() { |
ysr@777 | 2795 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 2796 | // Create new GC alloc regions. |
ysr@777 | 2797 | HeapRegion* alloc_region = _gc_alloc_regions[ap]; |
ysr@777 | 2798 | // Clear this alloc region, so that in case it turns out to be |
ysr@777 | 2799 | // unacceptable, we end up with no allocation region, rather than a bad |
ysr@777 | 2800 | // one. |
ysr@777 | 2801 | _gc_alloc_regions[ap] = NULL; |
ysr@777 | 2802 | if (alloc_region == NULL || alloc_region->in_collection_set()) { |
ysr@777 | 2803 | // Can't re-use old one. Allocate a new one. |
ysr@777 | 2804 | alloc_region = newAllocRegionWithExpansion(ap, 0); |
ysr@777 | 2805 | } |
ysr@777 | 2806 | if (alloc_region != NULL) { |
ysr@777 | 2807 | set_gc_alloc_region(ap, alloc_region); |
ysr@777 | 2808 | } |
ysr@777 | 2809 | } |
ysr@777 | 2810 | // Set alternative regions for allocation purposes that have reached |
ysr@777 | 2811 | // their limit.
ysr@777 | 2812 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 2813 | GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); |
ysr@777 | 2814 | if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { |
ysr@777 | 2815 | _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; |
ysr@777 | 2816 | } |
ysr@777 | 2817 | } |
ysr@777 | 2818 | assert(check_gc_alloc_regions(), "alloc regions messed up"); |
ysr@777 | 2819 | } |
ysr@777 | 2820 | |
ysr@777 | 2821 | void G1CollectedHeap::release_gc_alloc_regions() { |
ysr@777 | 2822 | // We keep a separate list of all regions that have been alloc regions in |
ysr@777 | 2823 | // the current collection pause. Forget that now. |
ysr@777 | 2824 | forget_alloc_region_list(); |
ysr@777 | 2825 | |
ysr@777 | 2826 | // The current alloc regions contain objs that have survived |
ysr@777 | 2827 | // collection. Make them no longer GC alloc regions. |
ysr@777 | 2828 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 2829 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 2830 | if (r != NULL && r->is_empty()) { |
ysr@777 | 2831 | { |
ysr@777 | 2832 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 2833 | r->set_zero_fill_complete(); |
ysr@777 | 2834 | put_free_region_on_list_locked(r); |
ysr@777 | 2835 | } |
ysr@777 | 2836 | } |
ysr@777 | 2837 | // set_gc_alloc_region will also NULLify all aliases to the region |
ysr@777 | 2838 | set_gc_alloc_region(ap, NULL); |
ysr@777 | 2839 | _gc_alloc_region_counts[ap] = 0; |
ysr@777 | 2840 | } |
ysr@777 | 2841 | } |
ysr@777 | 2842 | |
ysr@777 | 2843 | void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { |
ysr@777 | 2844 | _drain_in_progress = false; |
ysr@777 | 2845 | set_evac_failure_closure(cl); |
ysr@777 | 2846 | _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); |
ysr@777 | 2847 | } |
ysr@777 | 2848 | |
ysr@777 | 2849 | void G1CollectedHeap::finalize_for_evac_failure() { |
ysr@777 | 2850 | assert(_evac_failure_scan_stack != NULL && |
ysr@777 | 2851 | _evac_failure_scan_stack->length() == 0, |
ysr@777 | 2852 | "Postcondition"); |
ysr@777 | 2853 | assert(!_drain_in_progress, "Postcondition"); |
ysr@777 | 2854 | // Don't have to delete, since the scan stack is a resource object. |
ysr@777 | 2855 | _evac_failure_scan_stack = NULL; |
ysr@777 | 2856 | } |
ysr@777 | 2857 | |
ysr@777 | 2858 | |
ysr@777 | 2859 | |
ysr@777 | 2860 | // *** Sequential G1 Evacuation |
ysr@777 | 2861 | |
ysr@777 | 2862 | HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { |
ysr@777 | 2863 | HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 2864 | // let the caller handle alloc failure |
ysr@777 | 2865 | if (alloc_region == NULL) return NULL; |
ysr@777 | 2866 | assert(isHumongous(word_size) || !alloc_region->isHumongous(), |
ysr@777 | 2867 | "Either the object is humongous or the region isn't"); |
ysr@777 | 2868 | HeapWord* block = alloc_region->allocate(word_size); |
ysr@777 | 2869 | if (block == NULL) { |
ysr@777 | 2870 | block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); |
ysr@777 | 2871 | } |
ysr@777 | 2872 | return block; |
ysr@777 | 2873 | } |
ysr@777 | 2874 | |
ysr@777 | 2875 | class G1IsAliveClosure: public BoolObjectClosure { |
ysr@777 | 2876 | G1CollectedHeap* _g1; |
ysr@777 | 2877 | public: |
ysr@777 | 2878 | G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
ysr@777 | 2879 | void do_object(oop p) { assert(false, "Do not call."); } |
ysr@777 | 2880 | bool do_object_b(oop p) { |
ysr@777 | 2881 | // It is reachable if it is outside the collection set, or is inside |
ysr@777 | 2882 | // and forwarded. |
ysr@777 | 2883 | |
ysr@777 | 2884 | #ifdef G1_DEBUG |
ysr@777 | 2885 | gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", |
ysr@777 | 2886 | (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), |
ysr@777 | 2887 | !_g1->obj_in_cs(p) || p->is_forwarded()); |
ysr@777 | 2888 | #endif // G1_DEBUG |
ysr@777 | 2889 | |
ysr@777 | 2890 | return !_g1->obj_in_cs(p) || p->is_forwarded(); |
ysr@777 | 2891 | } |
ysr@777 | 2892 | }; |
ysr@777 | 2893 | |
ysr@777 | 2894 | class G1KeepAliveClosure: public OopClosure { |
ysr@777 | 2895 | G1CollectedHeap* _g1; |
ysr@777 | 2896 | public: |
ysr@777 | 2897 | G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
ysr@777 | 2898 | void do_oop(narrowOop* p) { |
ysr@777 | 2899 | guarantee(false, "NYI"); |
ysr@777 | 2900 | } |
ysr@777 | 2901 | void do_oop(oop* p) { |
ysr@777 | 2902 | oop obj = *p; |
ysr@777 | 2903 | #ifdef G1_DEBUG |
ysr@777 | 2904 | if (PrintGC && Verbose) { |
ysr@777 | 2905 | gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, |
ysr@777 | 2906 | p, (void*) obj, (void*) *p); |
ysr@777 | 2907 | } |
ysr@777 | 2908 | #endif // G1_DEBUG |
ysr@777 | 2909 | |
ysr@777 | 2910 | if (_g1->obj_in_cs(obj)) { |
ysr@777 | 2911 | assert( obj->is_forwarded(), "invariant" ); |
ysr@777 | 2912 | *p = obj->forwardee(); |
ysr@777 | 2913 | |
ysr@777 | 2914 | #ifdef G1_DEBUG |
ysr@777 | 2915 | gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, |
ysr@777 | 2916 | (void*) obj, (void*) *p); |
ysr@777 | 2917 | #endif // G1_DEBUG |
ysr@777 | 2918 | } |
ysr@777 | 2919 | } |
ysr@777 | 2920 | }; |
ysr@777 | 2921 | |
ysr@777 | 2922 | class RecreateRSetEntriesClosure: public OopClosure { |
ysr@777 | 2923 | private: |
ysr@777 | 2924 | G1CollectedHeap* _g1; |
ysr@777 | 2925 | G1RemSet* _g1_rem_set; |
ysr@777 | 2926 | HeapRegion* _from; |
ysr@777 | 2927 | public: |
ysr@777 | 2928 | RecreateRSetEntriesClosure(G1CollectedHeap* g1, HeapRegion* from) : |
ysr@777 | 2929 | _g1(g1), _g1_rem_set(g1->g1_rem_set()), _from(from) |
ysr@777 | 2930 | {} |
ysr@777 | 2931 | |
ysr@777 | 2932 | void do_oop(narrowOop* p) { |
ysr@777 | 2933 | guarantee(false, "NYI"); |
ysr@777 | 2934 | } |
ysr@777 | 2935 | void do_oop(oop* p) { |
ysr@777 | 2936 | assert(_from->is_in_reserved(p), "paranoia"); |
ysr@777 | 2937 | if (*p != NULL) { |
ysr@777 | 2938 | _g1_rem_set->write_ref(_from, p); |
ysr@777 | 2939 | } |
ysr@777 | 2940 | } |
ysr@777 | 2941 | }; |
ysr@777 | 2942 | |
ysr@777 | 2943 | class RemoveSelfPointerClosure: public ObjectClosure { |
ysr@777 | 2944 | private: |
ysr@777 | 2945 | G1CollectedHeap* _g1; |
ysr@777 | 2946 | ConcurrentMark* _cm; |
ysr@777 | 2947 | HeapRegion* _hr; |
ysr@777 | 2948 | size_t _prev_marked_bytes; |
ysr@777 | 2949 | size_t _next_marked_bytes; |
ysr@777 | 2950 | public: |
ysr@777 | 2951 | RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr) : |
ysr@777 | 2952 | _g1(g1), _cm(_g1->concurrent_mark()), _hr(hr), |
ysr@777 | 2953 | _prev_marked_bytes(0), _next_marked_bytes(0) |
ysr@777 | 2954 | {} |
ysr@777 | 2955 | |
ysr@777 | 2956 | size_t prev_marked_bytes() { return _prev_marked_bytes; } |
ysr@777 | 2957 | size_t next_marked_bytes() { return _next_marked_bytes; } |
ysr@777 | 2958 | |
iveresov@787 | 2959 | // The original idea here was to coalesce evacuated and dead objects. |
iveresov@787 | 2960 | // However that caused complications with the block offset table (BOT). |
iveresov@787 | 2961 | // In particular, consider the case of two TLABs, one of them partially refined:
iveresov@787 | 2962 | // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
iveresov@787 | 2963 | // The BOT entries of the unrefined part of TLAB_2 point to the start |
iveresov@787 | 2964 | // of TLAB_2. If the last object of the TLAB_1 and the first object |
iveresov@787 | 2965 | // of TLAB_2 are coalesced, then the cards of the unrefined part |
iveresov@787 | 2966 | // would point into the middle of the filler object.
iveresov@787 | 2967 | // |
iveresov@787 | 2968 | // The current approach is to not coalesce and leave the BOT contents intact. |
iveresov@787 | 2969 | void do_object(oop obj) { |
iveresov@787 | 2970 | if (obj->is_forwarded() && obj->forwardee() == obj) { |
iveresov@787 | 2971 | // The object failed to move. |
iveresov@787 | 2972 | assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
iveresov@787 | 2973 | _cm->markPrev(obj); |
iveresov@787 | 2974 | assert(_cm->isPrevMarked(obj), "Should be marked!"); |
iveresov@787 | 2975 | _prev_marked_bytes += (obj->size() * HeapWordSize); |
iveresov@787 | 2976 | if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
iveresov@787 | 2977 | _cm->markAndGrayObjectIfNecessary(obj); |
iveresov@787 | 2978 | } |
iveresov@787 | 2979 | obj->set_mark(markOopDesc::prototype()); |
iveresov@787 | 2980 | // While we were processing RSet buffers during the |
iveresov@787 | 2981 | // collection, we actually didn't scan any cards on the |
iveresov@787 | 2982 | // collection set, since we didn't want to update remembered
iveresov@787 | 2983 | // sets with entries that point into the collection set, given
iveresov@787 | 2984 | // that live objects from the collection set are about to move
iveresov@787 | 2985 | // and such entries will be stale very soon. This change also |
iveresov@787 | 2986 | // dealt with a reliability issue which involved scanning a |
iveresov@787 | 2987 | // card in the collection set and coming across an array that |
iveresov@787 | 2988 | // was being chunked and looking malformed. The problem is |
iveresov@787 | 2989 | // that, if evacuation fails, we might have remembered set |
iveresov@787 | 2990 | // entries missing given that we skipped cards on the |
iveresov@787 | 2991 | // collection set. So, we'll recreate such entries now. |
iveresov@787 | 2992 | RecreateRSetEntriesClosure cl(_g1, _hr); |
iveresov@787 | 2993 | obj->oop_iterate(&cl); |
iveresov@787 | 2994 | assert(_cm->isPrevMarked(obj), "Should be marked!"); |
iveresov@787 | 2995 | } else { |
iveresov@787 | 2996 | // The object has been either evacuated or is dead. Fill it with a |
iveresov@787 | 2997 | // dummy object. |
iveresov@787 | 2998 | MemRegion mr((HeapWord*)obj, obj->size()); |
jcoomes@916 | 2999 | CollectedHeap::fill_with_object(mr); |
ysr@777 | 3000 | _cm->clearRangeBothMaps(mr); |
ysr@777 | 3001 | } |
ysr@777 | 3002 | } |
ysr@777 | 3003 | }; |
ysr@777 | 3004 | |
ysr@777 | 3005 | void G1CollectedHeap::remove_self_forwarding_pointers() { |
ysr@777 | 3006 | HeapRegion* cur = g1_policy()->collection_set(); |
ysr@777 | 3007 | |
ysr@777 | 3008 | while (cur != NULL) { |
ysr@777 | 3009 | assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
ysr@777 | 3010 | |
ysr@777 | 3011 | if (cur->evacuation_failed()) { |
ysr@777 | 3012 | RemoveSelfPointerClosure rspc(_g1h, cur); |
ysr@777 | 3013 | assert(cur->in_collection_set(), "bad CS"); |
ysr@777 | 3014 | cur->object_iterate(&rspc); |
ysr@777 | 3015 | |
ysr@777 | 3016 | // A number of manipulations to make the TAMS be the current top, |
ysr@777 | 3017 | // and the marked bytes be the ones observed in the iteration. |
ysr@777 | 3018 | if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { |
ysr@777 | 3019 | // The comments below are the postconditions achieved by the |
ysr@777 | 3020 | // calls. Note especially the last such condition, which says that |
ysr@777 | 3021 | // the count of marked bytes has been properly restored. |
ysr@777 | 3022 | cur->note_start_of_marking(false); |
ysr@777 | 3023 | // _next_top_at_mark_start == top, _next_marked_bytes == 0 |
ysr@777 | 3024 | cur->add_to_marked_bytes(rspc.prev_marked_bytes()); |
ysr@777 | 3025 | // _next_marked_bytes == prev_marked_bytes. |
ysr@777 | 3026 | cur->note_end_of_marking(); |
ysr@777 | 3027 | // _prev_top_at_mark_start == top(), |
ysr@777 | 3028 | // _prev_marked_bytes == prev_marked_bytes |
ysr@777 | 3029 | } |
ysr@777 | 3030 | // If there is no mark in progress, we modified the _next variables |
ysr@777 | 3031 | // above needlessly, but harmlessly. |
ysr@777 | 3032 | if (_g1h->mark_in_progress()) { |
ysr@777 | 3033 | cur->note_start_of_marking(false); |
ysr@777 | 3034 | // _next_top_at_mark_start == top, _next_marked_bytes == 0 |
ysr@777 | 3035 | // _next_marked_bytes == next_marked_bytes. |
ysr@777 | 3036 | } |
ysr@777 | 3037 | |
ysr@777 | 3038 | // Now make sure the region has the right index in the sorted array. |
ysr@777 | 3039 | g1_policy()->note_change_in_marked_bytes(cur); |
ysr@777 | 3040 | } |
ysr@777 | 3041 | cur = cur->next_in_collection_set(); |
ysr@777 | 3042 | } |
ysr@777 | 3043 | assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
ysr@777 | 3044 | |
ysr@777 | 3045 | // Now restore saved marks, if any. |
ysr@777 | 3046 | if (_objs_with_preserved_marks != NULL) { |
ysr@777 | 3047 | assert(_preserved_marks_of_objs != NULL, "Both or none."); |
ysr@777 | 3048 | assert(_objs_with_preserved_marks->length() == |
ysr@777 | 3049 | _preserved_marks_of_objs->length(), "Both or none."); |
ysr@777 | 3050 | guarantee(_objs_with_preserved_marks->length() == |
ysr@777 | 3051 | _preserved_marks_of_objs->length(), "Both or none."); |
ysr@777 | 3052 | for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { |
ysr@777 | 3053 | oop obj = _objs_with_preserved_marks->at(i); |
ysr@777 | 3054 | markOop m = _preserved_marks_of_objs->at(i); |
ysr@777 | 3055 | obj->set_mark(m); |
ysr@777 | 3056 | } |
ysr@777 | 3057 | // Delete the preserved marks growable arrays (allocated on the C heap). |
ysr@777 | 3058 | delete _objs_with_preserved_marks; |
ysr@777 | 3059 | delete _preserved_marks_of_objs; |
ysr@777 | 3060 | _objs_with_preserved_marks = NULL; |
ysr@777 | 3061 | _preserved_marks_of_objs = NULL; |
ysr@777 | 3062 | } |
ysr@777 | 3063 | } |
ysr@777 | 3064 | |
ysr@777 | 3065 | void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { |
ysr@777 | 3066 | _evac_failure_scan_stack->push(obj); |
ysr@777 | 3067 | } |
ysr@777 | 3068 | |
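ysr@777 |      | // Pops objects whose evacuation failed and applies the registered
ysr@777 |      | // evac-failure closure to their fields; scanning a field may push
ysr@777 |      | // further objects, so we loop until the stack is empty.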
ysr@777 | 3069 | void G1CollectedHeap::drain_evac_failure_scan_stack() { |
ysr@777 | 3070 | assert(_evac_failure_scan_stack != NULL, "precondition"); |
ysr@777 | 3071 | |
ysr@777 | 3072 | while (_evac_failure_scan_stack->length() > 0) { |
ysr@777 | 3073 | oop obj = _evac_failure_scan_stack->pop(); |
ysr@777 | 3074 | _evac_failure_closure->set_region(heap_region_containing(obj)); |
ysr@777 | 3075 | obj->oop_iterate_backwards(_evac_failure_closure); |
ysr@777 | 3076 | } |
ysr@777 | 3077 | } |
ysr@777 | 3078 | |
ysr@777 | 3079 | void G1CollectedHeap::handle_evacuation_failure(oop old) { |
ysr@777 | 3080 | markOop m = old->mark(); |
ysr@777 | 3081 | // forward to self |
ysr@777 | 3082 | assert(!old->is_forwarded(), "precondition"); |
ysr@777 | 3083 | |
ysr@777 | 3084 | old->forward_to(old); |
ysr@777 | 3085 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3086 | } |
ysr@777 | 3087 | |
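ysr@777 |      | // Parallel variant: workers race to install the self-forwarding pointer
ysr@777 |      | // with an atomic CAS. The winner (forward_ptr == NULL) does the failure
ysr@777 |      | // bookkeeping; a loser returns whatever forwarding pointer another
ysr@777 |      | // worker has already installed.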
ysr@777 | 3088 | oop |
ysr@777 | 3089 | G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, |
ysr@777 | 3090 | oop old) { |
ysr@777 | 3091 | markOop m = old->mark(); |
ysr@777 | 3092 | oop forward_ptr = old->forward_to_atomic(old); |
ysr@777 | 3093 | if (forward_ptr == NULL) { |
ysr@777 | 3094 | // Forward-to-self succeeded. |
ysr@777 | 3095 | if (_evac_failure_closure != cl) { |
ysr@777 | 3096 | MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 3097 | assert(!_drain_in_progress, |
ysr@777 | 3098 | "Should only be true while someone holds the lock."); |
ysr@777 | 3099 | // Set the global evac-failure closure to the current thread's. |
ysr@777 | 3100 | assert(_evac_failure_closure == NULL, "Or locking has failed."); |
ysr@777 | 3101 | set_evac_failure_closure(cl); |
ysr@777 | 3102 | // Now do the common part. |
ysr@777 | 3103 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3104 | // Reset to NULL. |
ysr@777 | 3105 | set_evac_failure_closure(NULL); |
ysr@777 | 3106 | } else { |
ysr@777 | 3107 | // The lock is already held, and this is recursive. |
ysr@777 | 3108 | assert(_drain_in_progress, "This should only be the recursive case."); |
ysr@777 | 3109 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3110 | } |
ysr@777 | 3111 | return old; |
ysr@777 | 3112 | } else { |
ysr@777 | 3113 | // Someone else had a place to copy it. |
ysr@777 | 3114 | return forward_ptr; |
ysr@777 | 3115 | } |
ysr@777 | 3116 | } |
ysr@777 | 3117 | |
ysr@777 | 3118 | void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { |
ysr@777 | 3119 | set_evacuation_failed(true); |
ysr@777 | 3120 | |
ysr@777 | 3121 | preserve_mark_if_necessary(old, m); |
ysr@777 | 3122 | |
ysr@777 | 3123 | HeapRegion* r = heap_region_containing(old); |
ysr@777 | 3124 | if (!r->evacuation_failed()) { |
ysr@777 | 3125 | r->set_evacuation_failed(true); |
ysr@777 | 3126 | if (G1TraceRegions) { |
ysr@777 | 3127 | gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
ysr@777 | 3128 | "["PTR_FORMAT","PTR_FORMAT")\n", |
ysr@777 | 3129 | r, r->bottom(), r->end()); |
ysr@777 | 3130 | } |
ysr@777 | 3131 | } |
ysr@777 | 3132 | |
ysr@777 | 3133 | push_on_evac_failure_scan_stack(old); |
ysr@777 | 3134 | |
ysr@777 | 3135 | if (!_drain_in_progress) { |
ysr@777 | 3136 | // prevent recursion in copy_to_survivor_space() |
ysr@777 | 3137 | _drain_in_progress = true; |
ysr@777 | 3138 | drain_evac_failure_scan_stack(); |
ysr@777 | 3139 | _drain_in_progress = false; |
ysr@777 | 3140 | } |
ysr@777 | 3141 | } |
ysr@777 | 3142 | |
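ysr@777 |      | // Only non-prototype mark words (e.g. locked or hashed ones) need to be
ysr@777 |      | // preserved: installing the self-forwarding pointer clobbers the mark,
ysr@777 |      | // and prototype marks are simply reinstated afterwards.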
ysr@777 | 3143 | void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { |
ysr@777 | 3144 | if (m != markOopDesc::prototype()) { |
ysr@777 | 3145 | if (_objs_with_preserved_marks == NULL) { |
ysr@777 | 3146 | assert(_preserved_marks_of_objs == NULL, "Both or none."); |
ysr@777 | 3147 | _objs_with_preserved_marks = |
ysr@777 | 3148 | new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); |
ysr@777 | 3149 | _preserved_marks_of_objs = |
ysr@777 | 3150 | new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); |
ysr@777 | 3151 | } |
ysr@777 | 3152 | _objs_with_preserved_marks->push(obj); |
ysr@777 | 3153 | _preserved_marks_of_objs->push(m); |
ysr@777 | 3154 | } |
ysr@777 | 3155 | } |
ysr@777 | 3156 | |
ysr@777 | 3157 | // *** Parallel G1 Evacuation |
ysr@777 | 3158 | |
ysr@777 | 3159 | HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, |
ysr@777 | 3160 | size_t word_size) { |
ysr@777 | 3161 | HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 3162 | // let the caller handle alloc failure |
ysr@777 | 3163 | if (alloc_region == NULL) return NULL; |
ysr@777 | 3164 | |
ysr@777 | 3165 | HeapWord* block = alloc_region->par_allocate(word_size); |
ysr@777 | 3166 | if (block == NULL) { |
ysr@777 | 3167 | MutexLockerEx x(par_alloc_during_gc_lock(), |
ysr@777 | 3168 | Mutex::_no_safepoint_check_flag); |
ysr@777 | 3169 | block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); |
ysr@777 | 3170 | } |
ysr@777 | 3171 | return block; |
ysr@777 | 3172 | } |
ysr@777 | 3173 | |
apetrusenko@980 | 3174 | void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
apetrusenko@980 | 3175 | bool par) { |
apetrusenko@980 | 3176 | // Another thread might have obtained alloc_region for the given |
apetrusenko@980 | 3177 | // purpose, and might be attempting to allocate in it, and might |
apetrusenko@980 | 3178 | // succeed. Therefore, we can't do the "finalization" stuff on the |
apetrusenko@980 | 3179 | // region below until we're sure the last allocation has happened. |
apetrusenko@980 | 3180 | // We ensure this by allocating the remaining space with a garbage |
apetrusenko@980 | 3181 | // object. |
apetrusenko@980 | 3182 | if (par) par_allocate_remaining_space(alloc_region); |
apetrusenko@980 | 3183 | // Now we can do the post-GC stuff on the region. |
apetrusenko@980 | 3184 | alloc_region->note_end_of_copying(); |
apetrusenko@980 | 3185 | g1_policy()->record_after_bytes(alloc_region->used()); |
apetrusenko@980 | 3186 | } |
apetrusenko@980 | 3187 | |
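apetrusenko@980 |      | // Slow path for GC-time allocation: after re-checking that another
apetrusenko@980 |      | // thread has not already installed a fresh region, retire the full
apetrusenko@980 |      | // region, try the alternative purpose's region (installing an alias),
apetrusenko@980 |      | // and finally try to allocate and install a brand new region.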
ysr@777 | 3188 | HeapWord* |
ysr@777 | 3189 | G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, |
ysr@777 | 3190 | HeapRegion* alloc_region, |
ysr@777 | 3191 | bool par, |
ysr@777 | 3192 | size_t word_size) { |
ysr@777 | 3193 | HeapWord* block = NULL; |
ysr@777 | 3194 | // In the parallel case, a thread that obtained the lock before us may
ysr@777 | 3195 | // have already assigned a new gc_alloc_region.
ysr@777 | 3196 | if (alloc_region != _gc_alloc_regions[purpose]) { |
ysr@777 | 3197 | assert(par, "But should only happen in parallel case."); |
ysr@777 | 3198 | alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 3199 | if (alloc_region == NULL) return NULL; |
ysr@777 | 3200 | block = alloc_region->par_allocate(word_size); |
ysr@777 | 3201 | if (block != NULL) return block; |
ysr@777 | 3202 | // Otherwise, continue; this new region is empty, too. |
ysr@777 | 3203 | } |
ysr@777 | 3204 | assert(alloc_region != NULL, "We better have an allocation region"); |
apetrusenko@980 | 3205 | retire_alloc_region(alloc_region, par); |
ysr@777 | 3206 | |
ysr@777 | 3207 | if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { |
ysr@777 | 3208 | // Cannot allocate more regions for the given purpose. |
ysr@777 | 3209 | GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); |
ysr@777 | 3210 | // Is there an alternative? |
ysr@777 | 3211 | if (purpose != alt_purpose) { |
ysr@777 | 3212 | HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; |
ysr@777 | 3213 | // Has the alternative region not been aliased already?
apetrusenko@980 | 3214 | if (alloc_region != alt_region && alt_region != NULL) { |
ysr@777 | 3215 | // Try to allocate in the alternative region. |
ysr@777 | 3216 | if (par) { |
ysr@777 | 3217 | block = alt_region->par_allocate(word_size); |
ysr@777 | 3218 | } else { |
ysr@777 | 3219 | block = alt_region->allocate(word_size); |
ysr@777 | 3220 | } |
ysr@777 | 3221 | // Make an alias. |
ysr@777 | 3222 | _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; |
apetrusenko@980 | 3223 | if (block != NULL) { |
apetrusenko@980 | 3224 | return block; |
apetrusenko@980 | 3225 | } |
apetrusenko@980 | 3226 | retire_alloc_region(alt_region, par); |
ysr@777 | 3227 | } |
ysr@777 | 3228 | // Both the allocation region and the alternative one are full |
ysr@777 | 3229 | // and aliased, replace them with a new allocation region. |
ysr@777 | 3230 | purpose = alt_purpose; |
ysr@777 | 3231 | } else { |
ysr@777 | 3232 | set_gc_alloc_region(purpose, NULL); |
ysr@777 | 3233 | return NULL; |
ysr@777 | 3234 | } |
ysr@777 | 3235 | } |
ysr@777 | 3236 | |
ysr@777 | 3237 | // Now allocate a new region for allocation. |
ysr@777 | 3238 | alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); |
ysr@777 | 3239 | |
ysr@777 | 3240 | // let the caller handle alloc failure |
ysr@777 | 3241 | if (alloc_region != NULL) { |
ysr@777 | 3242 | |
ysr@777 | 3243 | assert(check_gc_alloc_regions(), "alloc regions messed up"); |
ysr@777 | 3244 | assert(alloc_region->saved_mark_at_top(), |
ysr@777 | 3245 | "Mark should have been saved already."); |
ysr@777 | 3246 | // We used to assert that the region was zero-filled here, but no |
ysr@777 | 3247 | // longer. |
ysr@777 | 3248 | |
ysr@777 | 3249 | // This must be done last: once it's installed, other regions may |
ysr@777 | 3250 | // allocate in it (without holding the lock.) |
ysr@777 | 3251 | set_gc_alloc_region(purpose, alloc_region); |
ysr@777 | 3252 | |
ysr@777 | 3253 | if (par) { |
ysr@777 | 3254 | block = alloc_region->par_allocate(word_size); |
ysr@777 | 3255 | } else { |
ysr@777 | 3256 | block = alloc_region->allocate(word_size); |
ysr@777 | 3257 | } |
ysr@777 | 3258 | // Caller handles alloc failure. |
ysr@777 | 3259 | } else { |
ysr@777 | 3260 | // This also sets to NULL any other purposes (aps) aliased to the same old alloc region.
ysr@777 | 3261 | set_gc_alloc_region(purpose, NULL); |
ysr@777 | 3262 | } |
ysr@777 | 3263 | return block; // May be NULL. |
ysr@777 | 3264 | } |
ysr@777 | 3265 | |
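ysr@777 |      | // Fills the region's remaining free space with a dummy object so that no
ysr@777 |      | // other thread can allocate into it; par_allocate is retried until the
ysr@777 |      | // claim succeeds or too little space is left to hold an object.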
ysr@777 | 3266 | void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { |
ysr@777 | 3267 | HeapWord* block = NULL; |
ysr@777 | 3268 | size_t free_words; |
ysr@777 | 3269 | do { |
ysr@777 | 3270 | free_words = r->free()/HeapWordSize; |
ysr@777 | 3271 | // If there's too little space, no one can allocate, so we're done. |
ysr@777 | 3272 | if (free_words < (size_t)oopDesc::header_size()) return; |
ysr@777 | 3273 | // Otherwise, try to claim it. |
ysr@777 | 3274 | block = r->par_allocate(free_words); |
ysr@777 | 3275 | } while (block == NULL); |
jcoomes@916 | 3276 | fill_with_object(block, free_words); |
ysr@777 | 3277 | } |
ysr@777 | 3278 | |
ysr@777 | 3279 | #define use_local_bitmaps 1 |
ysr@777 | 3280 | #define verify_local_bitmaps 0 |
ysr@777 | 3281 | |
ysr@777 | 3282 | #ifndef PRODUCT |
ysr@777 | 3283 | |
ysr@777 | 3284 | class GCLabBitMap; |
ysr@777 | 3285 | class GCLabBitMapClosure: public BitMapClosure { |
ysr@777 | 3286 | private: |
ysr@777 | 3287 | ConcurrentMark* _cm; |
ysr@777 | 3288 | GCLabBitMap* _bitmap; |
ysr@777 | 3289 | |
ysr@777 | 3290 | public: |
ysr@777 | 3291 | GCLabBitMapClosure(ConcurrentMark* cm, |
ysr@777 | 3292 | GCLabBitMap* bitmap) { |
ysr@777 | 3293 | _cm = cm; |
ysr@777 | 3294 | _bitmap = bitmap; |
ysr@777 | 3295 | } |
ysr@777 | 3296 | |
ysr@777 | 3297 | virtual bool do_bit(size_t offset); |
ysr@777 | 3298 | }; |
ysr@777 | 3299 | |
ysr@777 | 3300 | #endif // PRODUCT |
ysr@777 | 3301 | |
ysr@777 | 3302 | #define oop_buffer_length 256 |
ysr@777 | 3303 | |
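ysr@777 |      | // A small per-thread bitmap covering a single GCLab. Marks are first set
ysr@777 |      | // locally and then merged into the global marking bitmap when the buffer
ysr@777 |      | // is retired (see retire() below), avoiding repeated updates of the
ysr@777 |      | // shared bitmap while the buffer is being filled.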
ysr@777 | 3304 | class GCLabBitMap: public BitMap { |
ysr@777 | 3305 | private: |
ysr@777 | 3306 | ConcurrentMark* _cm; |
ysr@777 | 3307 | |
ysr@777 | 3308 | int _shifter; |
ysr@777 | 3309 | size_t _bitmap_word_covers_words; |
ysr@777 | 3310 | |
ysr@777 | 3311 | // beginning of the heap |
ysr@777 | 3312 | HeapWord* _heap_start; |
ysr@777 | 3313 | |
ysr@777 | 3314 | // this is the actual start of the GCLab |
ysr@777 | 3315 | HeapWord* _real_start_word; |
ysr@777 | 3316 | |
ysr@777 | 3317 | // this is the actual end of the GCLab |
ysr@777 | 3318 | HeapWord* _real_end_word; |
ysr@777 | 3319 | |
ysr@777 | 3320 | // this is the first word, possibly located before the actual start |
ysr@777 | 3321 | // of the GCLab, that corresponds to the first bit of the bitmap |
ysr@777 | 3322 | HeapWord* _start_word; |
ysr@777 | 3323 | |
ysr@777 | 3324 | // size of a GCLab in words |
ysr@777 | 3325 | size_t _gclab_word_size; |
ysr@777 | 3326 | |
ysr@777 | 3327 | // how many heap words does a single bitmap word correspond to?
ysr@777 | 3328 | return MinObjAlignment - 1; |
ysr@777 | 3329 | } |
ysr@777 | 3330 | |
ysr@777 | 3331 | // how many heap words does a single bitmap word corresponds to? |
ysr@777 | 3332 | static size_t bitmap_word_covers_words() { |
ysr@777 | 3333 | return BitsPerWord << shifter(); |
ysr@777 | 3334 | } |
ysr@777 | 3335 | |
ysr@777 | 3336 | static size_t gclab_word_size() { |
ysr@777 | 3337 | return ParallelGCG1AllocBufferSize / HeapWordSize; |
ysr@777 | 3338 | } |
ysr@777 | 3339 | |
ysr@777 | 3340 | static size_t bitmap_size_in_bits() { |
ysr@777 | 3341 | size_t bits_in_bitmap = gclab_word_size() >> shifter(); |
ysr@777 | 3342 | // We are going to ensure that the beginning of a word in this |
ysr@777 | 3343 | // bitmap also corresponds to the beginning of a word in the |
ysr@777 | 3344 | // global marking bitmap. To handle the case where a GCLab |
ysr@777 | 3345 | // starts from the middle of the bitmap, we need to add enough |
ysr@777 | 3346 | // space (i.e. up to a bitmap word) to ensure that we have |
ysr@777 | 3347 | // enough bits in the bitmap. |
ysr@777 | 3348 | return bits_in_bitmap + BitsPerWord - 1; |
ysr@777 | 3349 | } |
ysr@777 | 3350 | public: |
ysr@777 | 3351 | GCLabBitMap(HeapWord* heap_start) |
ysr@777 | 3352 | : BitMap(bitmap_size_in_bits()), |
ysr@777 | 3353 | _cm(G1CollectedHeap::heap()->concurrent_mark()), |
ysr@777 | 3354 | _shifter(shifter()), |
ysr@777 | 3355 | _bitmap_word_covers_words(bitmap_word_covers_words()), |
ysr@777 | 3356 | _heap_start(heap_start), |
ysr@777 | 3357 | _gclab_word_size(gclab_word_size()), |
ysr@777 | 3358 | _real_start_word(NULL), |
ysr@777 | 3359 | _real_end_word(NULL), |
ysr@777 | 3360 | _start_word(NULL) |
ysr@777 | 3361 | { |
ysr@777 | 3362 | guarantee( size_in_words() >= bitmap_size_in_words(), |
ysr@777 | 3363 | "just making sure"); |
ysr@777 | 3364 | } |
ysr@777 | 3365 | |
ysr@777 | 3366 | inline unsigned heapWordToOffset(HeapWord* addr) { |
ysr@777 | 3367 | unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; |
ysr@777 | 3368 | assert(offset < size(), "offset should be within bounds"); |
ysr@777 | 3369 | return offset; |
ysr@777 | 3370 | } |
ysr@777 | 3371 | |
ysr@777 | 3372 | inline HeapWord* offsetToHeapWord(size_t offset) { |
ysr@777 | 3373 | HeapWord* addr = _start_word + (offset << _shifter); |
ysr@777 | 3374 | assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); |
ysr@777 | 3375 | return addr; |
ysr@777 | 3376 | } |
ysr@777 | 3377 | |
ysr@777 | 3378 | bool fields_well_formed() { |
ysr@777 | 3379 | bool ret1 = (_real_start_word == NULL) && |
ysr@777 | 3380 | (_real_end_word == NULL) && |
ysr@777 | 3381 | (_start_word == NULL); |
ysr@777 | 3382 | if (ret1) |
ysr@777 | 3383 | return true; |
ysr@777 | 3384 | |
ysr@777 | 3385 | bool ret2 = _real_start_word >= _start_word && |
ysr@777 | 3386 | _start_word < _real_end_word && |
ysr@777 | 3387 | (_real_start_word + _gclab_word_size) == _real_end_word && |
ysr@777 | 3388 | (_start_word + _gclab_word_size + _bitmap_word_covers_words) |
ysr@777 | 3389 | > _real_end_word; |
ysr@777 | 3390 | return ret2; |
ysr@777 | 3391 | } |
ysr@777 | 3392 | |
ysr@777 | 3393 | inline bool mark(HeapWord* addr) { |
ysr@777 | 3394 | guarantee(use_local_bitmaps, "invariant"); |
ysr@777 | 3395 | assert(fields_well_formed(), "invariant"); |
ysr@777 | 3396 | |
ysr@777 | 3397 | if (addr >= _real_start_word && addr < _real_end_word) { |
ysr@777 | 3398 | assert(!isMarked(addr), "should not have already been marked"); |
ysr@777 | 3399 | |
ysr@777 | 3400 | // first mark it on the bitmap |
ysr@777 | 3401 | at_put(heapWordToOffset(addr), true); |
ysr@777 | 3402 | |
ysr@777 | 3403 | return true; |
ysr@777 | 3404 | } else { |
ysr@777 | 3405 | return false; |
ysr@777 | 3406 | } |
ysr@777 | 3407 | } |
ysr@777 | 3408 | |
ysr@777 | 3409 | inline bool isMarked(HeapWord* addr) { |
ysr@777 | 3410 | guarantee(use_local_bitmaps, "invariant"); |
ysr@777 | 3411 | assert(fields_well_formed(), "invariant"); |
ysr@777 | 3412 | |
ysr@777 | 3413 | return at(heapWordToOffset(addr)); |
ysr@777 | 3414 | } |
ysr@777 | 3415 | |
ysr@777 | 3416 | void set_buffer(HeapWord* start) { |
ysr@777 | 3417 | guarantee(use_local_bitmaps, "invariant"); |
ysr@777 | 3418 | clear(); |
ysr@777 | 3419 | |
ysr@777 | 3420 | assert(start != NULL, "invariant"); |
ysr@777 | 3421 | _real_start_word = start; |
ysr@777 | 3422 | _real_end_word = start + _gclab_word_size; |
ysr@777 | 3423 | |
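ysr@777 |      | // Align _start_word down so that the first bit of this bitmap coincides
ysr@777 |      | // with the start of a word in the global marking bitmap (see the comment
ysr@777 |      | // in bitmap_size_in_bits() above).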
ysr@777 | 3424 | size_t diff = |
ysr@777 | 3425 | pointer_delta(start, _heap_start) % _bitmap_word_covers_words; |
ysr@777 | 3426 | _start_word = start - diff; |
ysr@777 | 3427 | |
ysr@777 | 3428 | assert(fields_well_formed(), "invariant"); |
ysr@777 | 3429 | } |
ysr@777 | 3430 | |
ysr@777 | 3431 | #ifndef PRODUCT |
ysr@777 | 3432 | void verify() { |
ysr@777 | 3433 | // verify that the marks have been propagated |
ysr@777 | 3434 | GCLabBitMapClosure cl(_cm, this); |
ysr@777 | 3435 | iterate(&cl); |
ysr@777 | 3436 | } |
ysr@777 | 3437 | #endif // PRODUCT |
ysr@777 | 3438 | |
ysr@777 | 3439 | void retire() { |
ysr@777 | 3440 | guarantee(use_local_bitmaps, "invariant"); |
ysr@777 | 3441 | assert(fields_well_formed(), "invariant"); |
ysr@777 | 3442 | |
ysr@777 | 3443 | if (_start_word != NULL) { |
ysr@777 | 3444 | CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); |
ysr@777 | 3445 | |
ysr@777 | 3446 | // this means that the bitmap was set up for the GCLab |
ysr@777 | 3447 | assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); |
ysr@777 | 3448 | |
ysr@777 | 3449 | mark_bitmap->mostly_disjoint_range_union(this, |
ysr@777 | 3450 | 0, // always start from the start of the bitmap |
ysr@777 | 3451 | _start_word, |
ysr@777 | 3452 | size_in_words()); |
ysr@777 | 3453 | _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); |
ysr@777 | 3454 | |
ysr@777 | 3455 | #ifndef PRODUCT |
ysr@777 | 3456 | if (use_local_bitmaps && verify_local_bitmaps) |
ysr@777 | 3457 | verify(); |
ysr@777 | 3458 | #endif // PRODUCT |
ysr@777 | 3459 | } else { |
ysr@777 | 3460 | assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); |
ysr@777 | 3461 | } |
ysr@777 | 3462 | } |
ysr@777 | 3463 | |
ysr@777 | 3464 | static size_t bitmap_size_in_words() { |
ysr@777 | 3465 | return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; |
ysr@777 | 3466 | } |
ysr@777 | 3467 | }; |
ysr@777 | 3468 | |
ysr@777 | 3469 | #ifndef PRODUCT |
ysr@777 | 3470 | |
ysr@777 | 3471 | bool GCLabBitMapClosure::do_bit(size_t offset) { |
ysr@777 | 3472 | HeapWord* addr = _bitmap->offsetToHeapWord(offset); |
ysr@777 | 3473 | guarantee(_cm->isMarked(oop(addr)), "it should be!"); |
ysr@777 | 3474 | return true; |
ysr@777 | 3475 | } |
ysr@777 | 3476 | |
ysr@777 | 3477 | #endif // PRODUCT |
ysr@777 | 3478 | |
ysr@777 | 3479 | class G1ParGCAllocBuffer: public ParGCAllocBuffer { |
ysr@777 | 3480 | private: |
ysr@777 | 3481 | bool _retired; |
ysr@777 | 3482 | bool _during_marking; |
ysr@777 | 3483 | GCLabBitMap _bitmap; |
ysr@777 | 3484 | |
ysr@777 | 3485 | public: |
ysr@777 | 3486 | G1ParGCAllocBuffer() : |
ysr@777 | 3487 | ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize), |
ysr@777 | 3488 | _during_marking(G1CollectedHeap::heap()->mark_in_progress()), |
ysr@777 | 3489 | _bitmap(G1CollectedHeap::heap()->reserved_region().start()), |
ysr@777 | 3490 | _retired(false) |
ysr@777 | 3491 | { } |
ysr@777 | 3492 | |
ysr@777 | 3493 | inline bool mark(HeapWord* addr) { |
ysr@777 | 3494 | guarantee(use_local_bitmaps, "invariant"); |
ysr@777 | 3495 | assert(_during_marking, "invariant"); |
ysr@777 | 3496 | return _bitmap.mark(addr); |
ysr@777 | 3497 | } |
ysr@777 | 3498 | |
ysr@777 | 3499 | inline void set_buf(HeapWord* buf) { |
ysr@777 | 3500 | if (use_local_bitmaps && _during_marking) |
ysr@777 | 3501 | _bitmap.set_buffer(buf); |
ysr@777 | 3502 | ParGCAllocBuffer::set_buf(buf); |
ysr@777 | 3503 | _retired = false; |
ysr@777 | 3504 | } |
ysr@777 | 3505 | |
ysr@777 | 3506 | inline void retire(bool end_of_gc, bool retain) { |
ysr@777 | 3507 | if (_retired) |
ysr@777 | 3508 | return; |
ysr@777 | 3509 | if (use_local_bitmaps && _during_marking) { |
ysr@777 | 3510 | _bitmap.retire(); |
ysr@777 | 3511 | } |
ysr@777 | 3512 | ParGCAllocBuffer::retire(end_of_gc, retain); |
ysr@777 | 3513 | _retired = true; |
ysr@777 | 3514 | } |
ysr@777 | 3515 | }; |
ysr@777 | 3516 | |
ysr@777 | 3517 | |
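ysr@777 |      | // Per-worker state for a parallel evacuation pause: the worker's
ysr@777 |      | // reference-to-scan queue (plus an overflow list), its per-purpose GC
ysr@777 |      | // allocation buffers, an age table for survivors, and assorted timing
ysr@777 |      | // and statistics fields.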
ysr@777 | 3518 | class G1ParScanThreadState : public StackObj { |
ysr@777 | 3519 | protected: |
ysr@777 | 3520 | G1CollectedHeap* _g1h; |
ysr@777 | 3521 | RefToScanQueue* _refs; |
ysr@777 | 3522 | |
ysr@777 | 3523 | typedef GrowableArray<oop*> OverflowQueue; |
ysr@777 | 3524 | OverflowQueue* _overflowed_refs; |
ysr@777 | 3525 | |
ysr@777 | 3526 | G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; |
apetrusenko@980 | 3527 | ageTable _age_table; |
ysr@777 | 3528 | |
ysr@777 | 3529 | size_t _alloc_buffer_waste; |
ysr@777 | 3530 | size_t _undo_waste; |
ysr@777 | 3531 | |
ysr@777 | 3532 | OopsInHeapRegionClosure* _evac_failure_cl; |
ysr@777 | 3533 | G1ParScanHeapEvacClosure* _evac_cl; |
ysr@777 | 3534 | G1ParScanPartialArrayClosure* _partial_scan_cl; |
ysr@777 | 3535 | |
ysr@777 | 3536 | int _hash_seed; |
ysr@777 | 3537 | int _queue_num; |
ysr@777 | 3538 | |
ysr@777 | 3539 | int _term_attempts; |
ysr@777 | 3540 | #if G1_DETAILED_STATS |
ysr@777 | 3541 | int _pushes, _pops, _steals, _steal_attempts; |
ysr@777 | 3542 | int _overflow_pushes; |
ysr@777 | 3543 | #endif |
ysr@777 | 3544 | |
ysr@777 | 3545 | double _start; |
ysr@777 | 3546 | double _start_strong_roots; |
ysr@777 | 3547 | double _strong_roots_time; |
ysr@777 | 3548 | double _start_term; |
ysr@777 | 3549 | double _term_time; |
ysr@777 | 3550 | |
ysr@777 | 3551 | // Maps from young-age-index (0 == not young, 1 == youngest) to |
ysr@777 | 3552 | // surviving words; "base" is what we get back from the malloc call. |
ysr@777 | 3553 | size_t* _surviving_young_words_base; |
ysr@777 | 3554 | // This points into the array; we use the first few entries for padding. |
ysr@777 | 3555 | size_t* _surviving_young_words; |
ysr@777 | 3556 | |
ysr@777 | 3557 | #define PADDING_ELEM_NUM (64 / sizeof(size_t)) |
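ysr@777 |      | // 64 is the assumed cache line size in bytes, so PADDING_ELEM_NUM is |
ysr@777 |      | // one cache line's worth of size_t entries. The resulting layout of |
ysr@777 |      | // _surviving_young_words_base (see the constructor below) is: |
ysr@777 |      | // |
ysr@777 |      | //   [ padding ][ entry 0 ][ entries 1..N ][ padding ] |
ysr@777 |      | //               non-young   young ages |
ysr@777 |      | // |
ysr@777 |      | // with _surviving_young_words pointing at entry 0. |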
ysr@777 | 3558 | |
ysr@777 | 3559 | void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } |
ysr@777 | 3560 | |
ysr@777 | 3561 | void add_to_undo_waste(size_t waste) { _undo_waste += waste; } |
ysr@777 | 3562 | |
ysr@777 | 3563 | public: |
ysr@777 | 3564 | G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
ysr@777 | 3565 | : _g1h(g1h), |
ysr@777 | 3566 | _refs(g1h->task_queue(queue_num)), |
ysr@777 | 3567 | _hash_seed(17), _queue_num(queue_num), |
ysr@777 | 3568 | _term_attempts(0), |
apetrusenko@980 | 3569 | _age_table(false), |
ysr@777 | 3570 | #if G1_DETAILED_STATS |
ysr@777 | 3571 | _pushes(0), _pops(0), _steals(0), |
ysr@777 | 3572 | _steal_attempts(0), _overflow_pushes(0), |
ysr@777 | 3573 | #endif |
ysr@777 | 3574 | _strong_roots_time(0), _term_time(0), |
ysr@777 | 3575 | _alloc_buffer_waste(0), _undo_waste(0) |
ysr@777 | 3576 | { |
ysr@777 | 3577 | // We allocate young_cset_length() + 1 entries, since we |
ysr@777 | 3578 | // "sacrifice" entry 0 to keep track of surviving bytes for |
ysr@777 | 3579 | // non-young regions (where the age is -1). |
ysr@777 | 3580 | // We also add a few elements at the beginning and at the end in |
ysr@777 | 3581 | // an attempt to eliminate cache contention. |
ysr@777 | 3582 | size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
ysr@777 | 3583 | size_t array_length = PADDING_ELEM_NUM + |
ysr@777 | 3584 | real_length + |
ysr@777 | 3585 | PADDING_ELEM_NUM; |
ysr@777 | 3586 | _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
ysr@777 | 3587 | if (_surviving_young_words_base == NULL) |
ysr@777 | 3588 | vm_exit_out_of_memory(array_length * sizeof(size_t), |
ysr@777 | 3589 | "Not enough space for young surv histo."); |
ysr@777 | 3590 | _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
ysr@777 | 3591 | memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
ysr@777 | 3592 | |
ysr@777 | 3593 | _overflowed_refs = new OverflowQueue(10); |
ysr@777 | 3594 | |
ysr@777 | 3595 | _start = os::elapsedTime(); |
ysr@777 | 3596 | } |
ysr@777 | 3597 | |
ysr@777 | 3598 | ~G1ParScanThreadState() { |
ysr@777 | 3599 | FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); |
ysr@777 | 3600 | } |
ysr@777 | 3601 | |
ysr@777 | 3602 | RefToScanQueue* refs() { return _refs; } |
ysr@777 | 3603 | OverflowQueue* overflowed_refs() { return _overflowed_refs; } |
apetrusenko@980 | 3604 | ageTable* age_table() { return &_age_table; } |
apetrusenko@980 | 3605 | |
apetrusenko@980 | 3606 | G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { |
ysr@777 | 3607 | return &_alloc_buffers[purpose]; |
ysr@777 | 3608 | } |
ysr@777 | 3609 | |
ysr@777 | 3610 | size_t alloc_buffer_waste() { return _alloc_buffer_waste; } |
ysr@777 | 3611 | size_t undo_waste() { return _undo_waste; } |
ysr@777 | 3612 | |
ysr@777 | 3613 | void push_on_queue(oop* ref) { |
tonyp@961 | 3614 | assert(ref != NULL, "invariant"); |
tonyp@961 | 3615 | assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant"); |
tonyp@961 | 3616 | |
ysr@777 | 3617 | if (!refs()->push(ref)) { |
ysr@777 | 3618 | overflowed_refs()->push(ref); |
ysr@777 | 3619 | IF_G1_DETAILED_STATS(note_overflow_push()); |
ysr@777 | 3620 | } else { |
ysr@777 | 3621 | IF_G1_DETAILED_STATS(note_push()); |
ysr@777 | 3622 | } |
ysr@777 | 3623 | } |
ysr@777 | 3624 | |
ysr@777 | 3625 | void pop_from_queue(oop*& ref) { |
ysr@777 | 3626 | if (!refs()->pop_local(ref)) { |
ysr@777 | 3627 | ref = NULL; |
ysr@777 | 3628 | } else { |
tonyp@961 | 3629 | assert(ref != NULL, "invariant"); |
tonyp@961 | 3630 | assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), |
tonyp@961 | 3631 | "invariant"); |
tonyp@961 | 3632 | |
ysr@777 | 3633 | IF_G1_DETAILED_STATS(note_pop()); |
ysr@777 | 3634 | } |
ysr@777 | 3635 | } |
ysr@777 | 3636 | |
ysr@777 | 3637 | void pop_from_overflow_queue(oop*& ref) { |
ysr@777 | 3638 | ref = overflowed_refs()->pop(); |
ysr@777 | 3639 | } |
ysr@777 | 3640 | |
ysr@777 | 3641 | int refs_to_scan() { return refs()->size(); } |
ysr@777 | 3642 | int overflowed_refs_to_scan() { return overflowed_refs()->length(); } |
ysr@777 | 3643 | |
ysr@777 | 3644 | HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { |
ysr@777 | 3645 | |
ysr@777 | 3646 | HeapWord* obj = NULL; |
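ysr@777 |      | // If the object is small relative to the buffer (below |
ysr@777 |      | // ParallelGCBufferWastePct percent of the buffer size), retire the |
ysr@777 |      | // current buffer and allocate the object in a fresh one; otherwise |
ysr@777 |      | // allocate it directly in the heap. For example (illustrative values |
ysr@777 |      | // only), with an 8K-word buffer and a 10% limit, objects under |
ysr@777 |      | // ~819 words would go through a fresh buffer. |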
ysr@777 | 3647 | if (word_sz * 100 < |
ysr@777 | 3648 | (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) * |
ysr@777 | 3649 | ParallelGCBufferWastePct) { |
ysr@777 | 3650 | G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); |
ysr@777 | 3651 | add_to_alloc_buffer_waste(alloc_buf->words_remaining()); |
ysr@777 | 3652 | alloc_buf->retire(false, false); |
ysr@777 | 3653 | |
ysr@777 | 3654 | HeapWord* buf = |
ysr@777 | 3655 | _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize); |
ysr@777 | 3656 | if (buf == NULL) return NULL; // Let caller handle allocation failure. |
ysr@777 | 3657 | // Otherwise, install the new buffer and retry the allocation. |
ysr@777 | 3658 | alloc_buf->set_buf(buf); |
ysr@777 | 3659 | |
ysr@777 | 3660 | obj = alloc_buf->allocate(word_sz); |
ysr@777 | 3661 | assert(obj != NULL, "buffer was definitely big enough..."); |
tonyp@961 | 3662 | } else { |
ysr@777 | 3663 | obj = _g1h->par_allocate_during_gc(purpose, word_sz); |
ysr@777 | 3664 | } |
ysr@777 | 3665 | return obj; |
ysr@777 | 3666 | } |
ysr@777 | 3667 | |
ysr@777 | 3668 | HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { |
ysr@777 | 3669 | HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); |
ysr@777 | 3670 | if (obj != NULL) return obj; |
ysr@777 | 3671 | return allocate_slow(purpose, word_sz); |
ysr@777 | 3672 | } |
ysr@777 | 3673 | |
ysr@777 | 3674 | void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { |
ysr@777 | 3675 | if (alloc_buffer(purpose)->contains(obj)) { |
ysr@777 | 3676 | guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1), |
ysr@777 | 3677 | "should contain whole object"); |
ysr@777 | 3678 | alloc_buffer(purpose)->undo_allocation(obj, word_sz); |
jcoomes@916 | 3679 | } else { |
jcoomes@916 | 3680 | CollectedHeap::fill_with_object(obj, word_sz); |
ysr@777 | 3681 | add_to_undo_waste(word_sz); |
ysr@777 | 3682 | } |
ysr@777 | 3683 | } |
ysr@777 | 3684 | |
ysr@777 | 3685 | void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { |
ysr@777 | 3686 | _evac_failure_cl = evac_failure_cl; |
ysr@777 | 3687 | } |
ysr@777 | 3688 | OopsInHeapRegionClosure* evac_failure_closure() { |
ysr@777 | 3689 | return _evac_failure_cl; |
ysr@777 | 3690 | } |
ysr@777 | 3691 | |
ysr@777 | 3692 | void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { |
ysr@777 | 3693 | _evac_cl = evac_cl; |
ysr@777 | 3694 | } |
ysr@777 | 3695 | |
ysr@777 | 3696 | void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { |
ysr@777 | 3697 | _partial_scan_cl = partial_scan_cl; |
ysr@777 | 3698 | } |
ysr@777 | 3699 | |
ysr@777 | 3700 | int* hash_seed() { return &_hash_seed; } |
ysr@777 | 3701 | int queue_num() { return _queue_num; } |
ysr@777 | 3702 | |
ysr@777 | 3703 | int term_attempts() { return _term_attempts; } |
ysr@777 | 3704 | void note_term_attempt() { _term_attempts++; } |
ysr@777 | 3705 | |
ysr@777 | 3706 | #if G1_DETAILED_STATS |
ysr@777 | 3707 | int pushes() { return _pushes; } |
ysr@777 | 3708 | int pops() { return _pops; } |
ysr@777 | 3709 | int steals() { return _steals; } |
ysr@777 | 3710 | int steal_attempts() { return _steal_attempts; } |
ysr@777 | 3711 | int overflow_pushes() { return _overflow_pushes; } |
ysr@777 | 3712 | |
ysr@777 | 3713 | void note_push() { _pushes++; } |
ysr@777 | 3714 | void note_pop() { _pops++; } |
ysr@777 | 3715 | void note_steal() { _steals++; } |
ysr@777 | 3716 | void note_steal_attempt() { _steal_attempts++; } |
ysr@777 | 3717 | void note_overflow_push() { _overflow_pushes++; } |
ysr@777 | 3718 | #endif |
ysr@777 | 3719 | |
ysr@777 | 3720 | void start_strong_roots() { |
ysr@777 | 3721 | _start_strong_roots = os::elapsedTime(); |
ysr@777 | 3722 | } |
ysr@777 | 3723 | void end_strong_roots() { |
ysr@777 | 3724 | _strong_roots_time += (os::elapsedTime() - _start_strong_roots); |
ysr@777 | 3725 | } |
ysr@777 | 3726 | double strong_roots_time() { return _strong_roots_time; } |
ysr@777 | 3727 | |
ysr@777 | 3728 | void start_term_time() { |
ysr@777 | 3729 | note_term_attempt(); |
ysr@777 | 3730 | _start_term = os::elapsedTime(); |
ysr@777 | 3731 | } |
ysr@777 | 3732 | void end_term_time() { |
ysr@777 | 3733 | _term_time += (os::elapsedTime() - _start_term); |
ysr@777 | 3734 | } |
ysr@777 | 3735 | double term_time() { return _term_time; } |
ysr@777 | 3736 | |
ysr@777 | 3737 | double elapsed() { |
ysr@777 | 3738 | return os::elapsedTime() - _start; |
ysr@777 | 3739 | } |
ysr@777 | 3740 | |
ysr@777 | 3741 | size_t* surviving_young_words() { |
ysr@777 | 3742 | // Callers add one to the returned pointer to skip entry 0, which |
ysr@777 | 3743 | // accumulates surviving words for age -1 (i.e. non-young) regions. |
ysr@777 | 3744 | return _surviving_young_words; |
ysr@777 | 3745 | } |
ysr@777 | 3746 | |
ysr@777 | 3747 | void retire_alloc_buffers() { |
ysr@777 | 3748 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 3749 | size_t waste = _alloc_buffers[ap].words_remaining(); |
ysr@777 | 3750 | add_to_alloc_buffer_waste(waste); |
ysr@777 | 3751 | _alloc_buffers[ap].retire(true, false); |
ysr@777 | 3752 | } |
ysr@777 | 3753 | } |
ysr@777 | 3754 | |
tonyp@961 | 3755 | private: |
tonyp@961 | 3756 | void deal_with_reference(oop* ref_to_scan) { |
tonyp@961 | 3757 | if (has_partial_array_mask(ref_to_scan)) { |
tonyp@961 | 3758 | _partial_scan_cl->do_oop_nv(ref_to_scan); |
tonyp@961 | 3759 | } else { |
tonyp@961 | 3760 | // Note: we can use "raw" versions of "region_containing" because |
tonyp@961 | 3761 | // "obj_to_scan" is definitely in the heap, and is not in a |
tonyp@961 | 3762 | // humongous region. |
tonyp@961 | 3763 | HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); |
tonyp@961 | 3764 | _evac_cl->set_region(r); |
tonyp@961 | 3765 | _evac_cl->do_oop_nv(ref_to_scan); |
tonyp@961 | 3766 | } |
tonyp@961 | 3767 | } |
tonyp@961 | 3768 | |
tonyp@961 | 3769 | public: |
ysr@777 | 3770 | void trim_queue() { |
tonyp@961 | 3771 | // I've replicated the loop twice, first to drain the overflow |
tonyp@961 | 3772 | // queue, second to drain the task queue. This is better than |
tonyp@961 | 3773 | // having a single loop, which checks both conditions and, inside |
tonyp@961 | 3774 | // it, either pops the overflow queue or the task queue, as each |
tonyp@961 | 3775 | // loop is tighter. Also, the decision to drain the overflow queue |
tonyp@961 | 3776 | // first is not arbitrary, as the overflow queue is not visible |
tonyp@961 | 3777 | // to the other workers, whereas the task queue is. So, we want to |
tonyp@961 | 3778 | // drain the "invisible" entries first, while allowing the other |
tonyp@961 | 3779 | // workers to potentially steal the "visible" entries. |
tonyp@961 | 3780 | |
ysr@777 | 3781 | while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { |
tonyp@961 | 3782 | while (overflowed_refs_to_scan() > 0) { |
tonyp@961 | 3783 | oop *ref_to_scan = NULL; |
tonyp@961 | 3784 | pop_from_overflow_queue(ref_to_scan); |
tonyp@961 | 3785 | assert(ref_to_scan != NULL, "invariant"); |
tonyp@961 | 3786 | // We shouldn't have pushed it on the queue if it was not |
tonyp@961 | 3787 | // pointing into the CSet. |
tonyp@961 | 3788 | assert(ref_to_scan != NULL, "sanity"); |
tonyp@961 | 3789 | assert(has_partial_array_mask(ref_to_scan) || |
tonyp@961 | 3790 | _g1h->obj_in_cs(*ref_to_scan), "sanity"); |
tonyp@961 | 3791 | |
tonyp@961 | 3792 | deal_with_reference(ref_to_scan); |
tonyp@961 | 3793 | } |
tonyp@961 | 3794 | |
tonyp@961 | 3795 | while (refs_to_scan() > 0) { |
tonyp@961 | 3796 | oop *ref_to_scan = NULL; |
ysr@777 | 3797 | pop_from_queue(ref_to_scan); |
tonyp@961 | 3798 | |
tonyp@961 | 3799 | if (ref_to_scan != NULL) { |
tonyp@961 | 3800 | // We shouldn't have pushed it on the queue if it was not |
tonyp@961 | 3801 | // pointing into the CSet. |
tonyp@961 | 3802 | assert(has_partial_array_mask(ref_to_scan) || |
tonyp@961 | 3803 | _g1h->obj_in_cs(*ref_to_scan), "sanity"); |
tonyp@961 | 3804 | |
tonyp@961 | 3805 | deal_with_reference(ref_to_scan); |
ysr@777 | 3806 | } |
ysr@777 | 3807 | } |
ysr@777 | 3808 | } |
ysr@777 | 3809 | } |
ysr@777 | 3810 | }; |
ysr@777 | 3811 | |
ysr@777 | 3812 | |
ysr@777 | 3813 | G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
ysr@777 | 3814 | _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), |
ysr@777 | 3815 | _par_scan_state(par_scan_state) { } |
ysr@777 | 3816 | |
ysr@777 | 3817 | // This closure is applied to the fields of the objects that have just been copied. |
ysr@777 | 3818 | // Should probably be made inline and moved to g1OopClosures.inline.hpp. |
ysr@777 | 3819 | void G1ParScanClosure::do_oop_nv(oop* p) { |
ysr@777 | 3820 | oop obj = *p; |
tonyp@961 | 3821 | |
ysr@777 | 3822 | if (obj != NULL) { |
tonyp@961 | 3823 | if (_g1->in_cset_fast_test(obj)) { |
tonyp@961 | 3824 | // We're not going to even bother checking whether the object is |
tonyp@961 | 3825 | // already forwarded or not, as this usually causes an immediate |
tonyp@961 | 3826 | // stall. We'll try to prefetch the object (for write, given that |
tonyp@961 | 3827 | // we might need to install the forwarding reference) and we'll |
tonyp@961 | 3828 | // get back to it when we pop it from the queue. |
tonyp@961 | 3829 | Prefetch::write(obj->mark_addr(), 0); |
tonyp@961 | 3830 | Prefetch::read(obj->mark_addr(), (HeapWordSize*2)); |
tonyp@961 | 3831 | |
tonyp@961 | 3832 | // slightly paranoid test; I'm trying to catch potential |
tonyp@961 | 3833 | // problems before we go into push_on_queue to know where the |
tonyp@961 | 3834 | // problem is coming from |
tonyp@961 | 3835 | assert(obj == *p, "the value of *p should not have changed"); |
tonyp@961 | 3836 | _par_scan_state->push_on_queue(p); |
tonyp@961 | 3837 | } else { |
tonyp@961 | 3838 | _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); |
ysr@777 | 3839 | } |
ysr@777 | 3840 | } |
ysr@777 | 3841 | } |
ysr@777 | 3842 | |
ysr@777 | 3843 | void G1ParCopyHelper::mark_forwardee(oop* p) { |
ysr@777 | 3844 | // This is called _after_ do_oop_work has been called, hence after |
ysr@777 | 3845 | // the object has been relocated to its new location and *p points |
ysr@777 | 3846 | // to its new location. |
ysr@777 | 3847 | |
ysr@777 | 3848 | oop thisOop = *p; |
ysr@777 | 3849 | if (thisOop != NULL) { |
ysr@777 | 3850 | assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)), |
ysr@777 | 3851 | "shouldn't still be in the CSet if evacuation didn't fail."); |
ysr@777 | 3852 | HeapWord* addr = (HeapWord*)thisOop; |
ysr@777 | 3853 | if (_g1->is_in_g1_reserved(addr)) |
ysr@777 | 3854 | _cm->grayRoot(oop(addr)); |
ysr@777 | 3855 | } |
ysr@777 | 3856 | } |
ysr@777 | 3857 | |
ysr@777 | 3858 | oop G1ParCopyHelper::copy_to_survivor_space(oop old) { |
ysr@777 | 3859 | size_t word_sz = old->size(); |
ysr@777 | 3860 | HeapRegion* from_region = _g1->heap_region_containing_raw(old); |
ysr@777 | 3861 | // +1 to make the -1 indexes valid... |
ysr@777 | 3862 | int young_index = from_region->young_index_in_cset()+1; |
ysr@777 | 3863 | assert( (from_region->is_young() && young_index > 0) || |
ysr@777 | 3864 | (!from_region->is_young() && young_index == 0), "invariant" ); |
ysr@777 | 3865 | G1CollectorPolicy* g1p = _g1->g1_policy(); |
ysr@777 | 3866 | markOop m = old->mark(); |
apetrusenko@980 | 3867 | int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
apetrusenko@980 | 3868 | : m->age(); |
apetrusenko@980 | 3869 | GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, |
ysr@777 | 3870 | word_sz); |
ysr@777 | 3871 | HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); |
ysr@777 | 3872 | oop obj = oop(obj_ptr); |
ysr@777 | 3873 | |
ysr@777 | 3874 | if (obj_ptr == NULL) { |
ysr@777 | 3875 | // This will either forward-to-self, or detect that someone else has |
ysr@777 | 3876 | // installed a forwarding pointer. |
ysr@777 | 3877 | OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); |
ysr@777 | 3878 | return _g1->handle_evacuation_failure_par(cl, old); |
ysr@777 | 3879 | } |
ysr@777 | 3880 | |
tonyp@961 | 3881 | // We're going to allocate linearly, so might as well prefetch ahead. |
tonyp@961 | 3882 | Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); |
tonyp@961 | 3883 | |
ysr@777 | 3884 | oop forward_ptr = old->forward_to_atomic(obj); |
ysr@777 | 3885 | if (forward_ptr == NULL) { |
ysr@777 | 3886 | Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); |
tonyp@961 | 3887 | if (g1p->track_object_age(alloc_purpose)) { |
tonyp@961 | 3888 | // We could simply do obj->incr_age(). However, this causes a |
tonyp@961 | 3889 | // performance issue. obj->incr_age() will first check whether |
tonyp@961 | 3890 | // the object has a displaced mark by checking its mark word; |
tonyp@961 | 3891 | // getting the mark word from the new location of the object |
tonyp@961 | 3892 | // stalls. So, given that we already have the mark word and we |
tonyp@961 | 3893 | // are about to install it anyway, it's better to increase the |
tonyp@961 | 3894 | // age on the mark word, when the object does not have a |
tonyp@961 | 3895 | // displaced mark word. We're not expecting many objects to have |
tonyp@961 | 3896 | // a displaced mark word, so that case is not optimized |
tonyp@961 | 3897 | // further (it could be...) and we simply call obj->incr_age(). |
tonyp@961 | 3898 | |
tonyp@961 | 3899 | if (m->has_displaced_mark_helper()) { |
tonyp@961 | 3900 | // in this case, we have to install the mark word first, |
tonyp@961 | 3901 | // otherwise obj looks to be forwarded (the old mark word, |
tonyp@961 | 3902 | // which contains the forward pointer, was copied) |
tonyp@961 | 3903 | obj->set_mark(m); |
tonyp@961 | 3904 | obj->incr_age(); |
tonyp@961 | 3905 | } else { |
tonyp@961 | 3906 | m = m->incr_age(); |
apetrusenko@980 | 3907 | obj->set_mark(m); |
tonyp@961 | 3908 | } |
apetrusenko@980 | 3909 | _par_scan_state->age_table()->add(obj, word_sz); |
apetrusenko@980 | 3910 | } else { |
apetrusenko@980 | 3911 | obj->set_mark(m); |
tonyp@961 | 3912 | } |
tonyp@961 | 3913 | |
ysr@777 | 3914 | // preserve "next" mark bit |
ysr@777 | 3915 | if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { |
ysr@777 | 3916 | if (!use_local_bitmaps || |
ysr@777 | 3917 | !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { |
ysr@777 | 3918 | // if we couldn't mark it on the local bitmap (this happens when |
ysr@777 | 3919 | // the object was not allocated in the GCLab), we have to bite |
ysr@777 | 3920 | // the bullet and do the standard parallel mark |
ysr@777 | 3921 | _cm->markAndGrayObjectIfNecessary(obj); |
ysr@777 | 3922 | } |
ysr@777 | 3923 | #if 1 |
ysr@777 | 3924 | if (_g1->isMarkedNext(old)) { |
ysr@777 | 3925 | _cm->nextMarkBitMap()->parClear((HeapWord*)old); |
ysr@777 | 3926 | } |
ysr@777 | 3927 | #endif |
ysr@777 | 3928 | } |
ysr@777 | 3929 | |
ysr@777 | 3930 | size_t* surv_young_words = _par_scan_state->surviving_young_words(); |
ysr@777 | 3931 | surv_young_words[young_index] += word_sz; |
ysr@777 | 3932 | |
ysr@777 | 3933 | if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { |
ysr@777 | 3934 | arrayOop(old)->set_length(0); |
tonyp@961 | 3935 | _par_scan_state->push_on_queue(set_partial_array_mask(old)); |
ysr@777 | 3936 | } else { |
tonyp@961 | 3937 | // No point in using the slower heap_region_containing() method, |
tonyp@961 | 3938 | // given that we know obj is in the heap. |
tonyp@961 | 3939 | _scanner->set_region(_g1->heap_region_containing_raw(obj)); |
ysr@777 | 3940 | obj->oop_iterate_backwards(_scanner); |
ysr@777 | 3941 | } |
ysr@777 | 3942 | } else { |
ysr@777 | 3943 | _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); |
ysr@777 | 3944 | obj = forward_ptr; |
ysr@777 | 3945 | } |
ysr@777 | 3946 | return obj; |
ysr@777 | 3947 | } |
ysr@777 | 3948 | |
tonyp@961 | 3949 | template<bool do_gen_barrier, G1Barrier barrier, |
tonyp@961 | 3950 | bool do_mark_forwardee, bool skip_cset_test> |
tonyp@961 | 3951 | void G1ParCopyClosure<do_gen_barrier, barrier, |
tonyp@961 | 3952 | do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) { |
ysr@777 | 3953 | oop obj = *p; |
ysr@777 | 3954 | assert(barrier != G1BarrierRS || obj != NULL, |
ysr@777 | 3955 | "Precondition: G1BarrierRS implies obj is non-NULL"); |
ysr@777 | 3956 | |
tonyp@961 | 3957 | // The only time we skip the cset test is when we're scanning |
tonyp@961 | 3958 | // references popped from the queue. And we only push on the queue |
tonyp@961 | 3959 | // references that we know point into the cset, so no point in |
tonyp@961 | 3960 | // checking again. But we'll leave an assert here for peace of mind. |
tonyp@961 | 3961 | assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); |
tonyp@961 | 3962 | |
tonyp@961 | 3963 | // Here the NULL check is implicit in the in_cset_fast_test() call. |
tonyp@961 | 3964 | if (skip_cset_test || _g1->in_cset_fast_test(obj)) { |
ysr@777 | 3965 | #if G1_REM_SET_LOGGING |
tonyp@961 | 3966 | gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
tonyp@961 | 3967 | "into CS.", p, (void*) obj); |
ysr@777 | 3968 | #endif |
tonyp@961 | 3969 | if (obj->is_forwarded()) { |
tonyp@961 | 3970 | *p = obj->forwardee(); |
tonyp@961 | 3971 | } else { |
tonyp@961 | 3972 | *p = copy_to_survivor_space(obj); |
ysr@777 | 3973 | } |
tonyp@961 | 3974 | // When scanning the RS, we only care about objs in CS. |
tonyp@961 | 3975 | if (barrier == G1BarrierRS) { |
ysr@777 | 3976 | _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); |
ysr@777 | 3977 | } |
tonyp@961 | 3978 | } |
tonyp@961 | 3979 | |
tonyp@961 | 3980 | // When scanning moved objs, must look at all oops. |
tonyp@961 | 3981 | if (barrier == G1BarrierEvac && obj != NULL) { |
tonyp@961 | 3982 | _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); |
tonyp@961 | 3983 | } |
tonyp@961 | 3984 | |
tonyp@961 | 3985 | if (do_gen_barrier && obj != NULL) { |
tonyp@961 | 3986 | par_do_barrier(p); |
tonyp@961 | 3987 | } |
tonyp@961 | 3988 | } |
tonyp@961 | 3989 | |
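tonyp@961 |      | // Explicit instantiation of the specialization used elsewhere, |
tonyp@961 |      | // presumably to ensure its code is emitted in this translation unit. |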
tonyp@961 | 3990 | template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); |
tonyp@961 | 3991 | |
tonyp@961 | 3992 | template<class T> void G1ParScanPartialArrayClosure::process_array_chunk( |
ysr@777 | 3993 | oop obj, int start, int end) { |
ysr@777 | 3994 | // process our set of indices (include header in first chunk) |
ysr@777 | 3995 | assert(start < end, "invariant"); |
ysr@777 | 3996 | T* const base = (T*)objArrayOop(obj)->base(); |
tonyp@961 | 3997 | T* const start_addr = (start == 0) ? (T*) obj : base + start; |
ysr@777 | 3998 | T* const end_addr = base + end; |
ysr@777 | 3999 | MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); |
ysr@777 | 4000 | _scanner.set_region(_g1->heap_region_containing(obj)); |
ysr@777 | 4001 | obj->oop_iterate(&_scanner, mr); |
ysr@777 | 4002 | } |
ysr@777 | 4003 | |
ysr@777 | 4004 | void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { |
ysr@777 | 4005 | assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); |
tonyp@961 | 4006 | assert(has_partial_array_mask(p), "invariant"); |
tonyp@961 | 4007 | oop old = clear_partial_array_mask(p); |
ysr@777 | 4008 | assert(old->is_objArray(), "must be obj array"); |
ysr@777 | 4009 | assert(old->is_forwarded(), "must be forwarded"); |
ysr@777 | 4010 | assert(Universe::heap()->is_in_reserved(old), "must be in heap."); |
ysr@777 | 4011 | |
ysr@777 | 4012 | objArrayOop obj = objArrayOop(old->forwardee()); |
ysr@777 | 4013 | assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); |
ysr@777 | 4014 | // Process ParGCArrayScanChunk elements now |
ysr@777 | 4015 | // and push the remainder back onto queue |
ysr@777 | 4016 | int start = arrayOop(old)->length(); |
ysr@777 | 4017 | int end = obj->length(); |
ysr@777 | 4018 | int remainder = end - start; |
ysr@777 | 4019 | assert(start <= end, "just checking"); |
ysr@777 | 4020 | if (remainder > 2 * ParGCArrayScanChunk) { |
ysr@777 | 4021 | // Test above combines last partial chunk with a full chunk |
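ysr@777 |      | // E.g., with an illustrative chunk size of 50: 120 remaining |
ysr@777 |      | // elements means we process 50 now and push back 70; 80 remaining |
ysr@777 |      | // means we fall through and process all 80, avoiding a tiny last |
ysr@777 |      | // chunk. |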
ysr@777 | 4022 | end = start + ParGCArrayScanChunk; |
ysr@777 | 4023 | arrayOop(old)->set_length(end); |
ysr@777 | 4024 | // Push remainder. |
tonyp@961 | 4025 | _par_scan_state->push_on_queue(set_partial_array_mask(old)); |
ysr@777 | 4026 | } else { |
ysr@777 | 4027 | // Restore length so that the heap remains parsable in |
ysr@777 | 4028 | // case of evacuation failure. |
ysr@777 | 4029 | arrayOop(old)->set_length(end); |
ysr@777 | 4030 | } |
ysr@777 | 4031 | |
ysr@777 | 4032 | // process our set of indices (include header in first chunk) |
ysr@777 | 4033 | process_array_chunk<oop>(obj, start, end); |
ysr@777 | 4034 | } |
ysr@777 | 4035 | |
ysr@777 | 4036 | int G1ScanAndBalanceClosure::_nq = 0; |
ysr@777 | 4037 | |
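ysr@777 |      | // Run by each worker after root scanning: repeatedly drain the |
ysr@777 |      | // local queues, then try to steal from other workers' queues, and |
ysr@777 |      | // offer termination once no more work can be found. |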
ysr@777 | 4038 | class G1ParEvacuateFollowersClosure : public VoidClosure { |
ysr@777 | 4039 | protected: |
ysr@777 | 4040 | G1CollectedHeap* _g1h; |
ysr@777 | 4041 | G1ParScanThreadState* _par_scan_state; |
ysr@777 | 4042 | RefToScanQueueSet* _queues; |
ysr@777 | 4043 | ParallelTaskTerminator* _terminator; |
ysr@777 | 4044 | |
ysr@777 | 4045 | G1ParScanThreadState* par_scan_state() { return _par_scan_state; } |
ysr@777 | 4046 | RefToScanQueueSet* queues() { return _queues; } |
ysr@777 | 4047 | ParallelTaskTerminator* terminator() { return _terminator; } |
ysr@777 | 4048 | |
ysr@777 | 4049 | public: |
ysr@777 | 4050 | G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, |
ysr@777 | 4051 | G1ParScanThreadState* par_scan_state, |
ysr@777 | 4052 | RefToScanQueueSet* queues, |
ysr@777 | 4053 | ParallelTaskTerminator* terminator) |
ysr@777 | 4054 | : _g1h(g1h), _par_scan_state(par_scan_state), |
ysr@777 | 4055 | _queues(queues), _terminator(terminator) {} |
ysr@777 | 4056 | |
ysr@777 | 4057 | void do_void() { |
ysr@777 | 4058 | G1ParScanThreadState* pss = par_scan_state(); |
ysr@777 | 4059 | while (true) { |
ysr@777 | 4060 | oop* ref_to_scan; |
ysr@777 | 4061 | pss->trim_queue(); |
ysr@777 | 4062 | IF_G1_DETAILED_STATS(pss->note_steal_attempt()); |
ysr@777 | 4063 | if (queues()->steal(pss->queue_num(), |
ysr@777 | 4064 | pss->hash_seed(), |
ysr@777 | 4065 | ref_to_scan)) { |
ysr@777 | 4066 | IF_G1_DETAILED_STATS(pss->note_steal()); |
tonyp@961 | 4067 | |
tonyp@961 | 4068 | // slightly paranoid tests; I'm trying to catch potential |
tonyp@961 | 4069 | // problems before we go into push_on_queue to know where the |
tonyp@961 | 4070 | // problem is coming from |
tonyp@961 | 4071 | assert(ref_to_scan != NULL, "invariant"); |
tonyp@961 | 4072 | assert(has_partial_array_mask(ref_to_scan) || |
tonyp@961 | 4073 | _g1h->obj_in_cs(*ref_to_scan), "invariant"); |
ysr@777 | 4074 | pss->push_on_queue(ref_to_scan); |
ysr@777 | 4075 | continue; |
ysr@777 | 4076 | } |
ysr@777 | 4077 | pss->start_term_time(); |
ysr@777 | 4078 | if (terminator()->offer_termination()) break; |
ysr@777 | 4079 | pss->end_term_time(); |
ysr@777 | 4080 | } |
ysr@777 | 4081 | pss->end_term_time(); |
ysr@777 | 4082 | pss->retire_alloc_buffers(); |
ysr@777 | 4083 | } |
ysr@777 | 4084 | }; |
ysr@777 | 4085 | |
ysr@777 | 4086 | class G1ParTask : public AbstractGangTask { |
ysr@777 | 4087 | protected: |
ysr@777 | 4088 | G1CollectedHeap* _g1h; |
ysr@777 | 4089 | RefToScanQueueSet *_queues; |
ysr@777 | 4090 | ParallelTaskTerminator _terminator; |
ysr@777 | 4091 | |
ysr@777 | 4092 | Mutex _stats_lock; |
ysr@777 | 4093 | Mutex* stats_lock() { return &_stats_lock; } |
ysr@777 | 4094 | |
ysr@777 | 4095 | size_t getNCards() { |
ysr@777 | 4096 | return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) |
ysr@777 | 4097 | / G1BlockOffsetSharedArray::N_bytes; |
ysr@777 | 4098 | } |
ysr@777 | 4099 | |
ysr@777 | 4100 | public: |
ysr@777 | 4101 | G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) |
ysr@777 | 4102 | : AbstractGangTask("G1 collection"), |
ysr@777 | 4103 | _g1h(g1h), |
ysr@777 | 4104 | _queues(task_queues), |
ysr@777 | 4105 | _terminator(workers, _queues), |
ysr@777 | 4106 | _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) |
ysr@777 | 4107 | {} |
ysr@777 | 4108 | |
ysr@777 | 4109 | RefToScanQueueSet* queues() { return _queues; } |
ysr@777 | 4110 | |
ysr@777 | 4111 | RefToScanQueue *work_queue(int i) { |
ysr@777 | 4112 | return queues()->queue(i); |
ysr@777 | 4113 | } |
ysr@777 | 4114 | |
ysr@777 | 4115 | void work(int i) { |
ysr@777 | 4116 | ResourceMark rm; |
ysr@777 | 4117 | HandleMark hm; |
ysr@777 | 4118 | |
tonyp@961 | 4119 | G1ParScanThreadState pss(_g1h, i); |
tonyp@961 | 4120 | G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); |
tonyp@961 | 4121 | G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); |
tonyp@961 | 4122 | G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); |
ysr@777 | 4123 | |
ysr@777 | 4124 | pss.set_evac_closure(&scan_evac_cl); |
ysr@777 | 4125 | pss.set_evac_failure_closure(&evac_failure_cl); |
ysr@777 | 4126 | pss.set_partial_scan_closure(&partial_scan_cl); |
ysr@777 | 4127 | |
ysr@777 | 4128 | G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); |
ysr@777 | 4129 | G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); |
ysr@777 | 4130 | G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); |
ysr@777 | 4131 | G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
ysr@777 | 4132 | G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); |
ysr@777 | 4133 | G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); |
ysr@777 | 4134 | |
ysr@777 | 4135 | OopsInHeapRegionClosure *scan_root_cl; |
ysr@777 | 4136 | OopsInHeapRegionClosure *scan_perm_cl; |
ysr@777 | 4137 | OopsInHeapRegionClosure *scan_so_cl; |
ysr@777 | 4138 | |
ysr@777 | 4139 | if (_g1h->g1_policy()->should_initiate_conc_mark()) { |
ysr@777 | 4140 | scan_root_cl = &scan_mark_root_cl; |
ysr@777 | 4141 | scan_perm_cl = &scan_mark_perm_cl; |
ysr@777 | 4142 | scan_so_cl = &scan_mark_heap_rs_cl; |
ysr@777 | 4143 | } else { |
ysr@777 | 4144 | scan_root_cl = &only_scan_root_cl; |
ysr@777 | 4145 | scan_perm_cl = &only_scan_perm_cl; |
ysr@777 | 4146 | scan_so_cl = &only_scan_heap_rs_cl; |
ysr@777 | 4147 | } |
ysr@777 | 4148 | |
ysr@777 | 4149 | pss.start_strong_roots(); |
ysr@777 | 4150 | _g1h->g1_process_strong_roots(/* not collecting perm */ false, |
ysr@777 | 4151 | SharedHeap::SO_AllClasses, |
ysr@777 | 4152 | scan_root_cl, |
ysr@777 | 4153 | &only_scan_heap_rs_cl, |
ysr@777 | 4154 | scan_so_cl, |
ysr@777 | 4155 | scan_perm_cl, |
ysr@777 | 4156 | i); |
ysr@777 | 4157 | pss.end_strong_roots(); |
ysr@777 | 4158 | { |
ysr@777 | 4159 | double start = os::elapsedTime(); |
ysr@777 | 4160 | G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); |
ysr@777 | 4161 | evac.do_void(); |
ysr@777 | 4162 | double elapsed_ms = (os::elapsedTime()-start)*1000.0; |
ysr@777 | 4163 | double term_ms = pss.term_time()*1000.0; |
ysr@777 | 4164 | _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); |
ysr@777 | 4165 | _g1h->g1_policy()->record_termination_time(i, term_ms); |
ysr@777 | 4166 | } |
apetrusenko@980 | 4167 | if (G1UseSurvivorSpace) { |
apetrusenko@980 | 4168 | _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
apetrusenko@980 | 4169 | } |
ysr@777 | 4170 | _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
ysr@777 | 4171 | |
ysr@777 | 4172 | // Clean up any par-expanded rem sets. |
ysr@777 | 4173 | HeapRegionRemSet::par_cleanup(); |
ysr@777 | 4174 | |
ysr@777 | 4175 | MutexLocker x(stats_lock()); |
ysr@777 | 4176 | if (ParallelGCVerbose) { |
ysr@777 | 4177 | gclog_or_tty->print("Thread %d complete:\n", i); |
ysr@777 | 4178 | #if G1_DETAILED_STATS |
ysr@777 | 4179 | gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", |
ysr@777 | 4180 | pss.pushes(), |
ysr@777 | 4181 | pss.pops(), |
ysr@777 | 4182 | pss.overflow_pushes(), |
ysr@777 | 4183 | pss.steals(), |
ysr@777 | 4184 | pss.steal_attempts()); |
ysr@777 | 4185 | #endif |
ysr@777 | 4186 | double elapsed = pss.elapsed(); |
ysr@777 | 4187 | double strong_roots = pss.strong_roots_time(); |
ysr@777 | 4188 | double term = pss.term_time(); |
ysr@777 | 4189 | gclog_or_tty->print(" Elapsed: %7.2f ms.\n" |
ysr@777 | 4190 | " Strong roots: %7.2f ms (%6.2f%%)\n" |
ysr@777 | 4191 | " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", |
ysr@777 | 4192 | elapsed * 1000.0, |
ysr@777 | 4193 | strong_roots * 1000.0, (strong_roots*100.0/elapsed), |
ysr@777 | 4194 | term * 1000.0, (term*100.0/elapsed), |
ysr@777 | 4195 | pss.term_attempts()); |
ysr@777 | 4196 | size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); |
ysr@777 | 4197 | gclog_or_tty->print(" Waste: %8dK\n" |
ysr@777 | 4198 | " Alloc Buffer: %8dK\n" |
ysr@777 | 4199 | " Undo: %8dK\n", |
ysr@777 | 4200 | (total_waste * HeapWordSize) / K, |
ysr@777 | 4201 | (pss.alloc_buffer_waste() * HeapWordSize) / K, |
ysr@777 | 4202 | (pss.undo_waste() * HeapWordSize) / K); |
ysr@777 | 4203 | } |
ysr@777 | 4204 | |
ysr@777 | 4205 | assert(pss.refs_to_scan() == 0, "Task queue should be empty"); |
ysr@777 | 4206 | assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); |
ysr@777 | 4207 | } |
ysr@777 | 4208 | }; |
ysr@777 | 4209 | |
ysr@777 | 4210 | // *** Common G1 Evacuation Stuff |
ysr@777 | 4211 | |
ysr@777 | 4212 | class G1CountClosure: public OopsInHeapRegionClosure { |
ysr@777 | 4213 | public: |
ysr@777 | 4214 | int n; |
ysr@777 | 4215 | G1CountClosure() : n(0) {} |
ysr@777 | 4216 | void do_oop(narrowOop* p) { |
ysr@777 | 4217 | guarantee(false, "NYI"); |
ysr@777 | 4218 | } |
ysr@777 | 4219 | void do_oop(oop* p) { |
ysr@777 | 4220 | oop obj = *p; |
ysr@777 | 4221 | assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj), |
ysr@777 | 4222 | "Rem set closure called on non-rem-set pointer."); |
ysr@777 | 4223 | n++; |
ysr@777 | 4224 | } |
ysr@777 | 4225 | }; |
ysr@777 | 4226 | |
ysr@777 | 4227 | static G1CountClosure count_closure; |
ysr@777 | 4228 | |
ysr@777 | 4229 | void |
ysr@777 | 4230 | G1CollectedHeap:: |
ysr@777 | 4231 | g1_process_strong_roots(bool collecting_perm_gen, |
ysr@777 | 4232 | SharedHeap::ScanningOption so, |
ysr@777 | 4233 | OopClosure* scan_non_heap_roots, |
ysr@777 | 4234 | OopsInHeapRegionClosure* scan_rs, |
ysr@777 | 4235 | OopsInHeapRegionClosure* scan_so, |
ysr@777 | 4236 | OopsInGenClosure* scan_perm, |
ysr@777 | 4237 | int worker_i) { |
ysr@777 | 4238 | // First scan the strong roots, including the perm gen. |
ysr@777 | 4239 | double ext_roots_start = os::elapsedTime(); |
ysr@777 | 4240 | double closure_app_time_sec = 0.0; |
ysr@777 | 4241 | |
ysr@777 | 4242 | BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); |
ysr@777 | 4243 | BufferingOopsInGenClosure buf_scan_perm(scan_perm); |
ysr@777 | 4244 | buf_scan_perm.set_generation(perm_gen()); |
ysr@777 | 4245 | |
ysr@777 | 4246 | process_strong_roots(collecting_perm_gen, so, |
ysr@777 | 4247 | &buf_scan_non_heap_roots, |
ysr@777 | 4248 | &buf_scan_perm); |
ysr@777 | 4249 | // Finish up any enqueued closure apps. |
ysr@777 | 4250 | buf_scan_non_heap_roots.done(); |
ysr@777 | 4251 | buf_scan_perm.done(); |
ysr@777 | 4252 | double ext_roots_end = os::elapsedTime(); |
ysr@777 | 4253 | g1_policy()->reset_obj_copy_time(worker_i); |
ysr@777 | 4254 | double obj_copy_time_sec = |
ysr@777 | 4255 | buf_scan_non_heap_roots.closure_app_seconds() + |
ysr@777 | 4256 | buf_scan_perm.closure_app_seconds(); |
ysr@777 | 4257 | g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); |
ysr@777 | 4258 | double ext_root_time_ms = |
ysr@777 | 4259 | ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; |
ysr@777 | 4260 | g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); |
ysr@777 | 4261 | |
ysr@777 | 4262 | // Scan strong roots in mark stack. |
ysr@777 | 4263 | if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { |
ysr@777 | 4264 | concurrent_mark()->oops_do(scan_non_heap_roots); |
ysr@777 | 4265 | } |
ysr@777 | 4266 | double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; |
ysr@777 | 4267 | g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); |
ysr@777 | 4268 | |
ysr@777 | 4269 | // XXX What should this be doing in the parallel case? |
ysr@777 | 4270 | g1_policy()->record_collection_pause_end_CH_strong_roots(); |
ysr@777 | 4271 | if (G1VerifyRemSet) { |
ysr@777 | 4272 | // :::: FIXME :::: |
ysr@777 | 4273 | // The stupid remembered set doesn't know how to filter out dead |
ysr@777 | 4274 | // objects, which the smart one does; so when the two are created |
ysr@777 | 4275 | // and then compared, the number of entries in each differs and |
ysr@777 | 4276 | // the verification code fails. |
ysr@777 | 4277 | guarantee(false, "verification code is broken, see note"); |
ysr@777 | 4278 | |
ysr@777 | 4279 | // Let's make sure that the current rem set agrees with the stupidest |
ysr@777 | 4280 | // one possible! |
ysr@777 | 4281 | bool refs_enabled = ref_processor()->discovery_enabled(); |
ysr@777 | 4282 | if (refs_enabled) ref_processor()->disable_discovery(); |
ysr@777 | 4283 | StupidG1RemSet stupid(this); |
ysr@777 | 4284 | count_closure.n = 0; |
ysr@777 | 4285 | stupid.oops_into_collection_set_do(&count_closure, worker_i); |
ysr@777 | 4286 | int stupid_n = count_closure.n; |
ysr@777 | 4287 | count_closure.n = 0; |
ysr@777 | 4288 | g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i); |
ysr@777 | 4289 | guarantee(count_closure.n == stupid_n, "Old and new rem sets differ."); |
ysr@777 | 4290 | gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n); |
ysr@777 | 4291 | if (refs_enabled) ref_processor()->enable_discovery(); |
ysr@777 | 4292 | } |
ysr@777 | 4293 | if (scan_so != NULL) { |
ysr@777 | 4294 | scan_scan_only_set(scan_so, worker_i); |
ysr@777 | 4295 | } |
ysr@777 | 4296 | // Now scan the complement of the collection set. |
ysr@777 | 4297 | if (scan_rs != NULL) { |
ysr@777 | 4298 | g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); |
ysr@777 | 4299 | } |
ysr@777 | 4300 | // Finish with the ref_processor roots. |
ysr@777 | 4301 | if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { |
ysr@777 | 4302 | ref_processor()->oops_do(scan_non_heap_roots); |
ysr@777 | 4303 | } |
ysr@777 | 4304 | g1_policy()->record_collection_pause_end_G1_strong_roots(); |
ysr@777 | 4305 | _process_strong_tasks->all_tasks_completed(); |
ysr@777 | 4306 | } |
ysr@777 | 4307 | |
ysr@777 | 4308 | void |
ysr@777 | 4309 | G1CollectedHeap::scan_scan_only_region(HeapRegion* r, |
ysr@777 | 4310 | OopsInHeapRegionClosure* oc, |
ysr@777 | 4311 | int worker_i) { |
ysr@777 | 4315 | oc->set_region(r); |
ysr@777 | 4316 | |
ysr@777 | 4317 | HeapWord* p = r->bottom(); |
ysr@777 | 4318 | HeapWord* t = r->top(); |
ysr@777 | 4319 | guarantee( p == r->next_top_at_mark_start(), "invariant" ); |
ysr@777 | 4320 | while (p < t) { |
ysr@777 | 4321 | oop obj = oop(p); |
ysr@777 | 4322 | p += obj->oop_iterate(oc); |
ysr@777 | 4323 | } |
ysr@777 | 4324 | } |
ysr@777 | 4325 | |
ysr@777 | 4326 | void |
ysr@777 | 4327 | G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, |
ysr@777 | 4328 | int worker_i) { |
ysr@777 | 4329 | double start = os::elapsedTime(); |
ysr@777 | 4330 | |
ysr@777 | 4331 | BufferingOopsInHeapRegionClosure boc(oc); |
ysr@777 | 4332 | |
ysr@777 | 4333 | FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); |
ysr@777 | 4334 | FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); |
ysr@777 | 4335 | |
ysr@777 | 4336 | OopsInHeapRegionClosure *foc; |
ysr@777 | 4337 | if (g1_policy()->should_initiate_conc_mark()) |
ysr@777 | 4338 | foc = &scan_and_mark; |
ysr@777 | 4339 | else |
ysr@777 | 4340 | foc = &scan_only; |
ysr@777 | 4341 | |
ysr@777 | 4342 | HeapRegion* hr; |
ysr@777 | 4343 | int n = 0; |
ysr@777 | 4344 | while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { |
ysr@777 | 4345 | scan_scan_only_region(hr, foc, worker_i); |
ysr@777 | 4346 | ++n; |
ysr@777 | 4347 | } |
ysr@777 | 4348 | boc.done(); |
ysr@777 | 4349 | |
ysr@777 | 4350 | double closure_app_s = boc.closure_app_seconds(); |
ysr@777 | 4351 | g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); |
ysr@777 | 4352 | double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; |
ysr@777 | 4353 | g1_policy()->record_scan_only_time(worker_i, ms, n); |
ysr@777 | 4354 | } |
ysr@777 | 4355 | |
ysr@777 | 4356 | void |
ysr@777 | 4357 | G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, |
ysr@777 | 4358 | OopClosure* non_root_closure) { |
ysr@777 | 4359 | SharedHeap::process_weak_roots(root_closure, non_root_closure); |
ysr@777 | 4360 | } |
ysr@777 | 4361 | |
ysr@777 | 4362 | |
ysr@777 | 4363 | class SaveMarksClosure: public HeapRegionClosure { |
ysr@777 | 4364 | public: |
ysr@777 | 4365 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 4366 | r->save_marks(); |
ysr@777 | 4367 | return false; |
ysr@777 | 4368 | } |
ysr@777 | 4369 | }; |
ysr@777 | 4370 | |
ysr@777 | 4371 | void G1CollectedHeap::save_marks() { |
ysr@777 | 4372 | if (ParallelGCThreads == 0) { |
ysr@777 | 4373 | SaveMarksClosure sm; |
ysr@777 | 4374 | heap_region_iterate(&sm); |
ysr@777 | 4375 | } |
ysr@777 | 4376 | // We do this even in the parallel case |
ysr@777 | 4377 | perm_gen()->save_marks(); |
ysr@777 | 4378 | } |
ysr@777 | 4379 | |
ysr@777 | 4380 | void G1CollectedHeap::evacuate_collection_set() { |
ysr@777 | 4381 | set_evacuation_failed(false); |
ysr@777 | 4382 | |
ysr@777 | 4383 | g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
ysr@777 | 4384 | concurrent_g1_refine()->set_use_cache(false); |
ysr@777 | 4385 | int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
ysr@777 | 4386 | |
ysr@777 | 4387 | set_par_threads(n_workers); |
ysr@777 | 4388 | G1ParTask g1_par_task(this, n_workers, _task_queues); |
ysr@777 | 4389 | |
ysr@777 | 4390 | init_for_evac_failure(NULL); |
ysr@777 | 4391 | |
ysr@777 | 4392 | change_strong_roots_parity(); // In preparation for parallel strong roots. |
ysr@777 | 4393 | rem_set()->prepare_for_younger_refs_iterate(true); |
ysr@777 | 4394 | double start_par = os::elapsedTime(); |
ysr@777 | 4395 | |
ysr@777 | 4396 | if (ParallelGCThreads > 0) { |
ysr@777 | 4397 | // The individual threads will set their evac-failure closures. |
ysr@777 | 4398 | workers()->run_task(&g1_par_task); |
ysr@777 | 4399 | } else { |
ysr@777 | 4400 | g1_par_task.work(0); |
ysr@777 | 4401 | } |
ysr@777 | 4402 | |
ysr@777 | 4403 | double par_time = (os::elapsedTime() - start_par) * 1000.0; |
ysr@777 | 4404 | g1_policy()->record_par_time(par_time); |
ysr@777 | 4405 | set_par_threads(0); |
ysr@777 | 4406 | // Is this the right thing to do here? We don't save marks |
ysr@777 | 4407 | // on individual heap regions when we allocate from |
ysr@777 | 4408 | // them in parallel, so this seems like the correct place for this. |
apetrusenko@980 | 4409 | retire_all_alloc_regions(); |
ysr@777 | 4410 | { |
ysr@777 | 4411 | G1IsAliveClosure is_alive(this); |
ysr@777 | 4412 | G1KeepAliveClosure keep_alive(this); |
ysr@777 | 4413 | JNIHandles::weak_oops_do(&is_alive, &keep_alive); |
ysr@777 | 4414 | } |
ysr@777 | 4415 | |
ysr@777 | 4416 | g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
ysr@777 | 4417 | concurrent_g1_refine()->set_use_cache(true); |
ysr@777 | 4418 | |
ysr@777 | 4419 | finalize_for_evac_failure(); |
ysr@777 | 4420 | |
ysr@777 | 4421 | // Must do this before removing self-forwarding pointers, which clears |
ysr@777 | 4422 | // the per-region evac-failure flags. |
ysr@777 | 4423 | concurrent_mark()->complete_marking_in_collection_set(); |
ysr@777 | 4424 | |
ysr@777 | 4425 | if (evacuation_failed()) { |
ysr@777 | 4426 | remove_self_forwarding_pointers(); |
ysr@777 | 4427 | |
ysr@777 | 4428 | if (PrintGCDetails) { |
ysr@777 | 4429 | gclog_or_tty->print(" (evacuation failed)"); |
ysr@777 | 4430 | } else if (PrintGC) { |
ysr@777 | 4431 | gclog_or_tty->print("--"); |
ysr@777 | 4432 | } |
ysr@777 | 4433 | } |
ysr@777 | 4434 | |
ysr@777 | 4435 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 4436 | } |
ysr@777 | 4437 | |
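ysr@777 |      | // Frees a single region: updates the known-garbage estimate and |
ysr@777 |      | // then defers to free_region_work / finish_free_region_work below. |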
ysr@777 | 4438 | void G1CollectedHeap::free_region(HeapRegion* hr) { |
ysr@777 | 4439 | size_t pre_used = 0; |
ysr@777 | 4440 | size_t cleared_h_regions = 0; |
ysr@777 | 4441 | size_t freed_regions = 0; |
ysr@777 | 4442 | UncleanRegionList local_list; |
ysr@777 | 4443 | |
ysr@777 | 4444 | HeapWord* start = hr->bottom(); |
ysr@777 | 4445 | HeapWord* end = hr->prev_top_at_mark_start(); |
ysr@777 | 4446 | size_t used_bytes = hr->used(); |
ysr@777 | 4447 | size_t live_bytes = hr->max_live_bytes(); |
ysr@777 | 4448 | if (used_bytes > 0) { |
ysr@777 | 4449 | guarantee( live_bytes <= used_bytes, "invariant" ); |
ysr@777 | 4450 | } else { |
ysr@777 | 4451 | guarantee( live_bytes == 0, "invariant" ); |
ysr@777 | 4452 | } |
ysr@777 | 4453 | |
ysr@777 | 4454 | size_t garbage_bytes = used_bytes - live_bytes; |
ysr@777 | 4455 | if (garbage_bytes > 0) |
ysr@777 | 4456 | g1_policy()->decrease_known_garbage_bytes(garbage_bytes); |
ysr@777 | 4457 | |
ysr@777 | 4458 | free_region_work(hr, pre_used, cleared_h_regions, freed_regions, |
ysr@777 | 4459 | &local_list); |
ysr@777 | 4460 | finish_free_region_work(pre_used, cleared_h_regions, freed_regions, |
ysr@777 | 4461 | &local_list); |
ysr@777 | 4462 | } |
ysr@777 | 4463 | |
ysr@777 | 4464 | void |
ysr@777 | 4465 | G1CollectedHeap::free_region_work(HeapRegion* hr, |
ysr@777 | 4466 | size_t& pre_used, |
ysr@777 | 4467 | size_t& cleared_h_regions, |
ysr@777 | 4468 | size_t& freed_regions, |
ysr@777 | 4469 | UncleanRegionList* list, |
ysr@777 | 4470 | bool par) { |
ysr@777 | 4471 | assert(!hr->popular(), "should not free popular regions"); |
ysr@777 | 4472 | pre_used += hr->used(); |
ysr@777 | 4473 | if (hr->isHumongous()) { |
ysr@777 | 4474 | assert(hr->startsHumongous(), |
ysr@777 | 4475 | "Only the start of a humongous region should be freed."); |
ysr@777 | 4476 | int ind = _hrs->find(hr); |
ysr@777 | 4477 | assert(ind != -1, "Should have an index."); |
ysr@777 | 4478 | // Clear the start region. |
ysr@777 | 4479 | hr->hr_clear(par, true /*clear_space*/); |
ysr@777 | 4480 | list->insert_before_head(hr); |
ysr@777 | 4481 | cleared_h_regions++; |
ysr@777 | 4482 | freed_regions++; |
ysr@777 | 4483 | // Clear any continued regions. |
ysr@777 | 4484 | ind++; |
ysr@777 | 4485 | while ((size_t)ind < n_regions()) { |
ysr@777 | 4486 | HeapRegion* hrc = _hrs->at(ind); |
ysr@777 | 4487 | if (!hrc->continuesHumongous()) break; |
ysr@777 | 4488 | // Otherwise, it continues the humongous region. |
ysr@777 | 4489 | assert(hrc->humongous_start_region() == hr, "Huh?"); |
ysr@777 | 4490 | hrc->hr_clear(par, true /*clear_space*/); |
ysr@777 | 4491 | cleared_h_regions++; |
ysr@777 | 4492 | freed_regions++; |
ysr@777 | 4493 | list->insert_before_head(hrc); |
ysr@777 | 4494 | ind++; |
ysr@777 | 4495 | } |
ysr@777 | 4496 | } else { |
ysr@777 | 4497 | hr->hr_clear(par, true /*clear_space*/); |
ysr@777 | 4498 | list->insert_before_head(hr); |
ysr@777 | 4499 | freed_regions++; |
ysr@777 | 4500 | // If we're using clear2, this should not be enabled. |
ysr@777 | 4501 | // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); |
ysr@777 | 4502 | } |
ysr@777 | 4503 | } |
ysr@777 | 4504 | |
ysr@777 | 4505 | void G1CollectedHeap::finish_free_region_work(size_t pre_used, |
ysr@777 | 4506 | size_t cleared_h_regions, |
ysr@777 | 4507 | size_t freed_regions, |
ysr@777 | 4508 | UncleanRegionList* list) { |
ysr@777 | 4509 | if (list != NULL && list->sz() > 0) { |
ysr@777 | 4510 | prepend_region_list_on_unclean_list(list); |
ysr@777 | 4511 | } |
ysr@777 | 4512 | // Acquire a lock, if we're parallel, to update possibly-shared |
ysr@777 | 4513 | // variables. |
ysr@777 | 4514 | Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; |
ysr@777 | 4515 | { |
ysr@777 | 4516 | MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4517 | _summary_bytes_used -= pre_used; |
ysr@777 | 4518 | _num_humongous_regions -= (int) cleared_h_regions; |
ysr@777 | 4519 | _free_regions += freed_regions; |
ysr@777 | 4520 | } |
ysr@777 | 4521 | } |
ysr@777 | 4522 | |
ysr@777 | 4523 | |
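ysr@777 |      | // Dirties every card covering a region on the given list of young |
ysr@777 |      | // regions; used by cleanUpCardTable below to re-dirty the scan-only |
ysr@777 |      | // and survivor regions after the card table has been cleared |
ysr@777 |      | // wholesale. |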
ysr@777 | 4524 | void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { |
ysr@777 | 4525 | while (list != NULL) { |
ysr@777 | 4526 | guarantee( list->is_young(), "invariant" ); |
ysr@777 | 4527 | |
ysr@777 | 4528 | HeapWord* bottom = list->bottom(); |
ysr@777 | 4529 | HeapWord* end = list->end(); |
ysr@777 | 4530 | MemRegion mr(bottom, end); |
ysr@777 | 4531 | ct_bs->dirty(mr); |
ysr@777 | 4532 | |
ysr@777 | 4533 | list = list->get_next_young_region(); |
ysr@777 | 4534 | } |
ysr@777 | 4535 | } |
ysr@777 | 4536 | |
ysr@777 | 4537 | void G1CollectedHeap::cleanUpCardTable() { |
ysr@777 | 4538 | CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); |
ysr@777 | 4539 | double start = os::elapsedTime(); |
ysr@777 | 4540 | |
ysr@777 | 4541 | ct_bs->clear(_g1_committed); |
ysr@777 | 4542 | |
ysr@777 | 4543 | // Now, redirty the cards of the scan-only and survivor regions |
ysr@777 | 4544 | // (it seemed faster to do it this way, instead of iterating over |
ysr@777 | 4545 | // all regions and then clearing / dirtying as appropriate). |
ysr@777 | 4546 | dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); |
ysr@777 | 4547 | dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
ysr@777 | 4548 | |
ysr@777 | 4549 | double elapsed = os::elapsedTime() - start; |
ysr@777 | 4550 | g1_policy()->record_clear_ct_time( elapsed * 1000.0); |
ysr@777 | 4551 | } |
ysr@777 | 4552 | |
ysr@777 | 4553 | |
ysr@777 | 4554 | void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { |
ysr@777 | 4555 | // First do any popular regions. |
ysr@777 | 4556 | HeapRegion* hr; |
ysr@777 | 4557 | while ((hr = popular_region_to_evac()) != NULL) { |
ysr@777 | 4558 | evac_popular_region(hr); |
ysr@777 | 4559 | } |
ysr@777 | 4560 | // Now do heuristic pauses. |
ysr@777 | 4561 | if (g1_policy()->should_do_collection_pause(word_size)) { |
ysr@777 | 4562 | do_collection_pause(); |
ysr@777 | 4563 | } |
ysr@777 | 4564 | } |
ysr@777 | 4565 | |
ysr@777 | 4566 | void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { |
ysr@777 | 4567 | double young_time_ms = 0.0; |
ysr@777 | 4568 | double non_young_time_ms = 0.0; |
ysr@777 | 4569 | |
ysr@777 | 4570 | G1CollectorPolicy* policy = g1_policy(); |
ysr@777 | 4571 | |
ysr@777 | 4572 | double start_sec = os::elapsedTime(); |
ysr@777 | 4573 | bool non_young = true; |
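ysr@777 |      | // We walk the collection set list once, accumulating the elapsed |
ysr@777 |      | // time into either the non-young or the young bucket, depending on |
ysr@777 |      | // which portion of the list we are currently in. |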
ysr@777 | 4574 | |
ysr@777 | 4575 | HeapRegion* cur = cs_head; |
ysr@777 | 4576 | int age_bound = -1; |
ysr@777 | 4577 | size_t rs_lengths = 0; |
ysr@777 | 4578 | |
ysr@777 | 4579 | while (cur != NULL) { |
ysr@777 | 4580 | if (non_young) { |
ysr@777 | 4581 | if (cur->is_young()) { |
ysr@777 | 4582 | double end_sec = os::elapsedTime(); |
ysr@777 | 4583 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 4584 | non_young_time_ms += elapsed_ms; |
ysr@777 | 4585 | |
ysr@777 | 4586 | start_sec = os::elapsedTime(); |
ysr@777 | 4587 | non_young = false; |
ysr@777 | 4588 | } |
ysr@777 | 4589 | } else { |
ysr@777 | 4590 | if (!cur->is_on_free_list()) { |
ysr@777 | 4591 | double end_sec = os::elapsedTime(); |
ysr@777 | 4592 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 4593 | young_time_ms += elapsed_ms; |
ysr@777 | 4594 | |
ysr@777 | 4595 | start_sec = os::elapsedTime(); |
ysr@777 | 4596 | non_young = true; |
ysr@777 | 4597 | } |
ysr@777 | 4598 | } |
ysr@777 | 4599 | |
ysr@777 | 4600 | rs_lengths += cur->rem_set()->occupied(); |
ysr@777 | 4601 | |
ysr@777 | 4602 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 4603 | assert(cur->in_collection_set(), "bad CS"); |
ysr@777 | 4604 | cur->set_next_in_collection_set(NULL); |
ysr@777 | 4605 | cur->set_in_collection_set(false); |
ysr@777 | 4606 | |
ysr@777 | 4607 | if (cur->is_young()) { |
ysr@777 | 4608 | int index = cur->young_index_in_cset(); |
ysr@777 | 4609 | guarantee( index != -1, "invariant" ); |
ysr@777 | 4610 | guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); |
ysr@777 | 4611 | size_t words_survived = _surviving_young_words[index]; |
ysr@777 | 4612 | cur->record_surv_words_in_group(words_survived); |
ysr@777 | 4613 | } else { |
ysr@777 | 4614 | int index = cur->young_index_in_cset(); |
ysr@777 | 4615 | guarantee( index == -1, "invariant" ); |
ysr@777 | 4616 | } |
ysr@777 | 4617 | |
ysr@777 | 4618 | assert( (cur->is_young() && cur->young_index_in_cset() > -1) || |
ysr@777 | 4619 | (!cur->is_young() && cur->young_index_in_cset() == -1), |
ysr@777 | 4620 | "invariant" ); |
ysr@777 | 4621 | |
ysr@777 | 4622 | if (!cur->evacuation_failed()) { |
ysr@777 | 4623 | // The region was evacuated successfully, so we can free it. |
ysr@777 | 4624 | assert(!cur->is_empty(), |
ysr@777 | 4625 | "Should not have empty regions in a CS."); |
ysr@777 | 4626 | free_region(cur); |
ysr@777 | 4627 | } else { |
ysr@777 | 4628 | guarantee( !cur->is_scan_only(), "should not be scan only" ); |
ysr@777 | 4629 | cur->uninstall_surv_rate_group(); |
ysr@777 | 4630 | if (cur->is_young()) |
ysr@777 | 4631 | cur->set_young_index_in_cset(-1); |
ysr@777 | 4632 | cur->set_not_young(); |
ysr@777 | 4633 | cur->set_evacuation_failed(false); |
ysr@777 | 4634 | } |
ysr@777 | 4635 | cur = next; |
ysr@777 | 4636 | } |
ysr@777 | 4637 | |
ysr@777 | 4638 | policy->record_max_rs_lengths(rs_lengths); |
ysr@777 | 4639 | policy->cset_regions_freed(); |
ysr@777 | 4640 | |
ysr@777 | 4641 | double end_sec = os::elapsedTime(); |
ysr@777 | 4642 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 4643 | if (non_young) |
ysr@777 | 4644 | non_young_time_ms += elapsed_ms; |
ysr@777 | 4645 | else |
ysr@777 | 4646 | young_time_ms += elapsed_ms; |
ysr@777 | 4647 | |
ysr@777 | 4648 | policy->record_young_free_cset_time_ms(young_time_ms); |
ysr@777 | 4649 | policy->record_non_young_free_cset_time_ms(non_young_time_ms); |
ysr@777 | 4650 | } |
ysr@777 | 4651 | |
ysr@777 | 4652 | HeapRegion* |
ysr@777 | 4653 | G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { |
ysr@777 | 4654 | assert(ZF_mon->owned_by_self(), "Precondition"); |
ysr@777 | 4655 | HeapRegion* res = pop_unclean_region_list_locked(); |
ysr@777 | 4656 | if (res != NULL) { |
ysr@777 | 4657 | assert(!res->continuesHumongous() && |
ysr@777 | 4658 | res->zero_fill_state() != HeapRegion::Allocated, |
ysr@777 | 4659 | "Only free regions on unclean list."); |
ysr@777 | 4660 | if (zero_filled) { |
ysr@777 | 4661 | res->ensure_zero_filled_locked(); |
ysr@777 | 4662 | res->set_zero_fill_allocated(); |
ysr@777 | 4663 | } |
ysr@777 | 4664 | } |
ysr@777 | 4665 | return res; |
ysr@777 | 4666 | } |
ysr@777 | 4667 | |
ysr@777 | 4668 | HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { |
ysr@777 | 4669 | MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4670 | return alloc_region_from_unclean_list_locked(zero_filled); |
ysr@777 | 4671 | } |
ysr@777 | 4672 | |
ysr@777 | 4673 | void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { |
ysr@777 | 4674 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4675 | put_region_on_unclean_list_locked(r); |
ysr@777 | 4676 | if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. |
ysr@777 | 4677 | } |
ysr@777 | 4678 | |
ysr@777 | 4679 | void G1CollectedHeap::set_unclean_regions_coming(bool b) { |
ysr@777 | 4680 | MutexLockerEx x(Cleanup_mon); |
ysr@777 | 4681 | set_unclean_regions_coming_locked(b); |
ysr@777 | 4682 | } |
ysr@777 | 4683 | |
ysr@777 | 4684 | void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { |
ysr@777 | 4685 | assert(Cleanup_mon->owned_by_self(), "Precondition"); |
ysr@777 | 4686 | _unclean_regions_coming = b; |
ysr@777 | 4687 | // Wake up mutator threads that might be waiting for completeCleanup to |
ysr@777 | 4688 | // finish. |
ysr@777 | 4689 | if (!b) Cleanup_mon->notify_all(); |
ysr@777 | 4690 | } |
ysr@777 | 4691 | |
ysr@777 | 4692 | void G1CollectedHeap::wait_for_cleanup_complete() { |
ysr@777 | 4693 | MutexLockerEx x(Cleanup_mon); |
ysr@777 | 4694 | wait_for_cleanup_complete_locked(); |
ysr@777 | 4695 | } |
ysr@777 | 4696 | |
ysr@777 | 4697 | void G1CollectedHeap::wait_for_cleanup_complete_locked() { |
ysr@777 | 4698 | assert(Cleanup_mon->owned_by_self(), "precondition"); |
ysr@777 | 4699 | while (_unclean_regions_coming) { |
ysr@777 | 4700 | Cleanup_mon->wait(); |
ysr@777 | 4701 | } |
ysr@777 | 4702 | } |
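// For illustration, a consumer of this handshake simply calls the
// unlocked wrapper (sketch of a hypothetical call site; the notify side
// is set_unclean_regions_coming_locked(false) above):
//
//   wait_for_cleanup_complete();   // returns once _unclean_regions_coming
//                                  // has been reset and notify_all() run
//   // From here on, no more regions will be appended by cleanup.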
ysr@777 | 4703 | |
ysr@777 | 4704 | void |
ysr@777 | 4705 | G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { |
ysr@777 | 4706 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4707 | _unclean_region_list.insert_before_head(r); |
ysr@777 | 4708 | } |
ysr@777 | 4709 | |
ysr@777 | 4710 | void |
ysr@777 | 4711 | G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { |
ysr@777 | 4712 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4713 | prepend_region_list_on_unclean_list_locked(list); |
ysr@777 | 4714 | if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. |
ysr@777 | 4715 | } |
ysr@777 | 4716 | |
ysr@777 | 4717 | void |
ysr@777 | 4718 | G1CollectedHeap:: |
ysr@777 | 4719 | prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { |
ysr@777 | 4720 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4721 | _unclean_region_list.prepend_list(list); |
ysr@777 | 4722 | } |
ysr@777 | 4723 | |
ysr@777 | 4724 | HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { |
ysr@777 | 4725 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4726 | HeapRegion* res = _unclean_region_list.pop(); |
ysr@777 | 4727 | if (res != NULL) { |
ysr@777 | 4728 | // Inform ZF thread that there's a new unclean head. |
ysr@777 | 4729 | if (_unclean_region_list.hd() != NULL && should_zf()) |
ysr@777 | 4730 | ZF_mon->notify_all(); |
ysr@777 | 4731 | } |
ysr@777 | 4732 | return res; |
ysr@777 | 4733 | } |
ysr@777 | 4734 | |
ysr@777 | 4735 | HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { |
ysr@777 | 4736 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4737 | return _unclean_region_list.hd(); |
ysr@777 | 4738 | } |
ysr@777 | 4739 | |
ysr@777 | 4740 | |
ysr@777 | 4741 | bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { |
ysr@777 | 4742 | assert(ZF_mon->owned_by_self(), "Precondition"); |
ysr@777 | 4743 | HeapRegion* r = peek_unclean_region_list_locked(); |
ysr@777 | 4744 | if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { |
ysr@777 | 4745 | // Result of below must be equal to "r", since we hold the lock. |
ysr@777 | 4746 | (void)pop_unclean_region_list_locked(); |
ysr@777 | 4747 | put_free_region_on_list_locked(r); |
ysr@777 | 4748 | return true; |
ysr@777 | 4749 | } else { |
ysr@777 | 4750 | return false; |
ysr@777 | 4751 | } |
ysr@777 | 4752 | } |
ysr@777 | 4753 | |
ysr@777 | 4754 | bool G1CollectedHeap::move_cleaned_region_to_free_list() { |
ysr@777 | 4755 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4756 | return move_cleaned_region_to_free_list_locked(); |
ysr@777 | 4757 | } |
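// A minimal sketch of the zero-fill (ZF) thread loop these two routines
// are assumed to serve (the waiting details are an assumption, not the
// actual ConcurrentZFThread code):
//
//   while (true) {
//     MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
//     HeapRegion* r = g1h->peek_unclean_region_list_locked();
//     if (r != NULL && r->zero_fill_state() == HeapRegion::NotZeroFilled) {
//       r->ensure_zero_filled_locked();                // -> ZeroFilled
//     }
//     if (!g1h->move_cleaned_region_to_free_list_locked()) {
//       ZF_mon->wait(Mutex::_no_safepoint_check_flag);  // nothing to do yet
//     }
//   }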
ysr@777 | 4758 | |
ysr@777 | 4759 | |
ysr@777 | 4760 | void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { |
ysr@777 | 4761 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4762 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4763 | assert(r->zero_fill_state() == HeapRegion::ZeroFilled, |
ysr@777 | 4764 | "Regions on free list must be zero filled"); |
ysr@777 | 4765 | assert(!r->isHumongous(), "Must not be humongous."); |
ysr@777 | 4766 | assert(r->is_empty(), "Better be empty"); |
ysr@777 | 4767 | assert(!r->is_on_free_list(), |
ysr@777 | 4768 | "Better not already be on free list"); |
ysr@777 | 4769 | assert(!r->is_on_unclean_list(), |
ysr@777 | 4770 | "Better not already be on unclean list"); |
ysr@777 | 4771 | r->set_on_free_list(true); |
ysr@777 | 4772 | r->set_next_on_free_list(_free_region_list); |
ysr@777 | 4773 | _free_region_list = r; |
ysr@777 | 4774 | _free_region_list_size++; |
ysr@777 | 4775 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4776 | } |
ysr@777 | 4777 | |
ysr@777 | 4778 | void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { |
ysr@777 | 4779 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4780 | put_free_region_on_list_locked(r); |
ysr@777 | 4781 | } |
ysr@777 | 4782 | |
ysr@777 | 4783 | HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { |
ysr@777 | 4784 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4785 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4786 | HeapRegion* res = _free_region_list; |
ysr@777 | 4787 | if (res != NULL) { |
ysr@777 | 4788 | _free_region_list = res->next_from_free_list(); |
ysr@777 | 4789 | _free_region_list_size--; |
ysr@777 | 4790 | res->set_on_free_list(false); |
ysr@777 | 4791 | res->set_next_on_free_list(NULL); |
ysr@777 | 4792 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4793 | } |
ysr@777 | 4794 | return res; |
ysr@777 | 4795 | } |
ysr@777 | 4796 | |
ysr@777 | 4797 | |
ysr@777 | 4798 | HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { |
ysr@777 | 4799 | // By self, or on behalf of self. |
ysr@777 | 4800 | assert(Heap_lock->is_locked(), "Precondition"); |
ysr@777 | 4801 | HeapRegion* res = NULL; |
ysr@777 | 4802 | bool first = true; |
ysr@777 | 4803 | while (res == NULL) { |
ysr@777 | 4804 | if (zero_filled || !first) { |
ysr@777 | 4805 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4806 | res = pop_free_region_list_locked(); |
ysr@777 | 4807 | if (res != NULL) { |
ysr@777 | 4808 | assert(!res->zero_fill_is_allocated(), |
ysr@777 | 4809 | "No allocated regions on free list."); |
ysr@777 | 4810 | res->set_zero_fill_allocated(); |
ysr@777 | 4811 | } else if (!first) { |
ysr@777 | 4812 | break; // We tried both, time to return NULL. |
ysr@777 | 4813 | } |
ysr@777 | 4814 | } |
ysr@777 | 4815 | |
ysr@777 | 4816 | if (res == NULL) { |
ysr@777 | 4817 | res = alloc_region_from_unclean_list(zero_filled); |
ysr@777 | 4818 | } |
ysr@777 | 4819 | assert(res == NULL || |
ysr@777 | 4820 | !zero_filled || |
ysr@777 | 4821 | res->zero_fill_is_allocated(), |
ysr@777 | 4822 | "We must have allocated the region we're returning"); |
ysr@777 | 4823 | first = false; |
ysr@777 | 4824 | } |
ysr@777 | 4825 | return res; |
ysr@777 | 4826 | } |
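// Sketch of the intended calling convention (hypothetical caller; the
// Heap_lock precondition is asserted above):
//
//   assert(Heap_lock->is_locked(), "caller holds it, or holds it for us");
//   HeapRegion* r = alloc_free_region_from_lists(true /* zero_filled */);
//   if (r == NULL) {
//     // Both the free list and the unclean list were empty:
//     // expand the heap or fall back to a collection.
//   }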
ysr@777 | 4827 | |
ysr@777 | 4828 | void G1CollectedHeap::remove_allocated_regions_from_lists() { |
ysr@777 | 4829 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4830 | { |
ysr@777 | 4831 | HeapRegion* prev = NULL; |
ysr@777 | 4832 | HeapRegion* cur = _unclean_region_list.hd(); |
ysr@777 | 4833 | while (cur != NULL) { |
ysr@777 | 4834 | HeapRegion* next = cur->next_from_unclean_list(); |
ysr@777 | 4835 | if (cur->zero_fill_is_allocated()) { |
ysr@777 | 4836 | // Remove from the list. |
ysr@777 | 4837 | if (prev == NULL) { |
ysr@777 | 4838 | (void)_unclean_region_list.pop(); |
ysr@777 | 4839 | } else { |
ysr@777 | 4840 | _unclean_region_list.delete_after(prev); |
ysr@777 | 4841 | } |
ysr@777 | 4842 | cur->set_on_unclean_list(false); |
ysr@777 | 4843 | cur->set_next_on_unclean_list(NULL); |
ysr@777 | 4844 | } else { |
ysr@777 | 4845 | prev = cur; |
ysr@777 | 4846 | } |
ysr@777 | 4847 | cur = next; |
ysr@777 | 4848 | } |
ysr@777 | 4849 | assert(_unclean_region_list.sz() == unclean_region_list_length(), |
ysr@777 | 4850 | "Inv"); |
ysr@777 | 4851 | } |
ysr@777 | 4852 | |
ysr@777 | 4853 | { |
ysr@777 | 4854 | HeapRegion* prev = NULL; |
ysr@777 | 4855 | HeapRegion* cur = _free_region_list; |
ysr@777 | 4856 | while (cur != NULL) { |
ysr@777 | 4857 | HeapRegion* next = cur->next_from_free_list(); |
ysr@777 | 4858 | if (cur->zero_fill_is_allocated()) { |
ysr@777 | 4859 | // Remove from the list. |
ysr@777 | 4860 | if (prev == NULL) { |
ysr@777 | 4861 | _free_region_list = cur->next_from_free_list(); |
ysr@777 | 4862 | } else { |
ysr@777 | 4863 | prev->set_next_on_free_list(cur->next_from_free_list()); |
ysr@777 | 4864 | } |
ysr@777 | 4865 | cur->set_on_free_list(false); |
ysr@777 | 4866 | cur->set_next_on_free_list(NULL); |
ysr@777 | 4867 | _free_region_list_size--; |
ysr@777 | 4868 | } else { |
ysr@777 | 4869 | prev = cur; |
ysr@777 | 4870 | } |
ysr@777 | 4871 | cur = next; |
ysr@777 | 4872 | } |
ysr@777 | 4873 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4874 | } |
ysr@777 | 4875 | } |
ysr@777 | 4876 | |
ysr@777 | 4877 | bool G1CollectedHeap::verify_region_lists() { |
ysr@777 | 4878 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4879 | return verify_region_lists_locked(); |
ysr@777 | 4880 | } |
ysr@777 | 4881 | |
ysr@777 | 4882 | bool G1CollectedHeap::verify_region_lists_locked() { |
ysr@777 | 4883 | HeapRegion* unclean = _unclean_region_list.hd(); |
ysr@777 | 4884 | while (unclean != NULL) { |
ysr@777 | 4885 | guarantee(unclean->is_on_unclean_list(), "Well, it is!"); |
ysr@777 | 4886 | guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); |
ysr@777 | 4887 | guarantee(unclean->zero_fill_state() != HeapRegion::Allocated, |
ysr@777 | 4888 | "Everything else is possible."); |
ysr@777 | 4889 | unclean = unclean->next_from_unclean_list(); |
ysr@777 | 4890 | } |
ysr@777 | 4891 | guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); |
ysr@777 | 4892 | |
ysr@777 | 4893 | HeapRegion* free_r = _free_region_list; |
ysr@777 | 4894 | while (free_r != NULL) { |
ysr@777 | 4895 | assert(free_r->is_on_free_list(), "Well, it is!"); |
ysr@777 | 4896 | assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); |
ysr@777 | 4897 | switch (free_r->zero_fill_state()) { |
ysr@777 | 4898 | case HeapRegion::NotZeroFilled: |
ysr@777 | 4899 | case HeapRegion::ZeroFilling: |
ysr@777 | 4900 | guarantee(false, "Should not be on free list."); |
ysr@777 | 4901 | break; |
ysr@777 | 4902 | default: |
ysr@777 | 4903 | // Everything else is possible. |
ysr@777 | 4904 | break; |
ysr@777 | 4905 | } |
ysr@777 | 4906 | free_r = free_r->next_from_free_list(); |
ysr@777 | 4907 | } |
ysr@777 | 4908 | guarantee(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4909 |   // If we get here without having failed an assertion, the lists are consistent. |
ysr@777 | 4910 | return true; |
ysr@777 | 4911 | } |
ysr@777 | 4912 | |
ysr@777 | 4913 | size_t G1CollectedHeap::free_region_list_length() { |
ysr@777 | 4914 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4915 | size_t len = 0; |
ysr@777 | 4916 | HeapRegion* cur = _free_region_list; |
ysr@777 | 4917 | while (cur != NULL) { |
ysr@777 | 4918 | len++; |
ysr@777 | 4919 | cur = cur->next_from_free_list(); |
ysr@777 | 4920 | } |
ysr@777 | 4921 | return len; |
ysr@777 | 4922 | } |
ysr@777 | 4923 | |
ysr@777 | 4924 | size_t G1CollectedHeap::unclean_region_list_length() { |
ysr@777 | 4925 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4926 | return _unclean_region_list.length(); |
ysr@777 | 4927 | } |
ysr@777 | 4928 | |
ysr@777 | 4929 | size_t G1CollectedHeap::n_regions() { |
ysr@777 | 4930 | return _hrs->length(); |
ysr@777 | 4931 | } |
ysr@777 | 4932 | |
ysr@777 | 4933 | size_t G1CollectedHeap::max_regions() { |
ysr@777 | 4934 | return |
ysr@777 | 4935 | (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / |
ysr@777 | 4936 | HeapRegion::GrainBytes; |
ysr@777 | 4937 | } |
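// Worked example with illustrative numbers: if HeapRegion::GrainBytes is
// 1M and g1_reserved_obj_bytes() is 2560M + 1, align_size_up rounds the
// reservation to 2561M, so max_regions() returns 2561M / 1M = 2561.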
ysr@777 | 4938 | |
ysr@777 | 4939 | size_t G1CollectedHeap::free_regions() { |
ysr@777 | 4940 | /* Possibly-expensive assert. |
ysr@777 | 4941 | assert(_free_regions == count_free_regions(), |
ysr@777 | 4942 | "_free_regions is off."); |
ysr@777 | 4943 | */ |
ysr@777 | 4944 | return _free_regions; |
ysr@777 | 4945 | } |
ysr@777 | 4946 | |
ysr@777 | 4947 | bool G1CollectedHeap::should_zf() { |
ysr@777 | 4948 | return _free_region_list_size < (size_t) G1ConcZFMaxRegions; |
ysr@777 | 4949 | } |
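// In other words, the ZF thread is asked to keep working until
// G1ConcZFMaxRegions pre-zeroed regions are parked on the free list;
// the ZF_mon->notify_all() calls in this file are gated on this predicate.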
ysr@777 | 4950 | |
ysr@777 | 4951 | class RegionCounter: public HeapRegionClosure { |
ysr@777 | 4952 | size_t _n; |
ysr@777 | 4953 | public: |
ysr@777 | 4954 | RegionCounter() : _n(0) {} |
ysr@777 | 4955 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 4956 | if (r->is_empty() && !r->popular()) { |
ysr@777 | 4957 | assert(!r->isHumongous(), "H regions should not be empty."); |
ysr@777 | 4958 | _n++; |
ysr@777 | 4959 | } |
ysr@777 | 4960 | return false; |
ysr@777 | 4961 | } |
ysr@777 | 4962 |   size_t res() { return _n; } |
ysr@777 | 4963 | }; |
ysr@777 | 4964 | |
ysr@777 | 4965 | size_t G1CollectedHeap::count_free_regions() { |
ysr@777 | 4966 | RegionCounter rc; |
ysr@777 | 4967 | heap_region_iterate(&rc); |
ysr@777 | 4968 | size_t n = rc.res(); |
ysr@777 | 4969 | if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) |
ysr@777 | 4970 | n--; |
ysr@777 | 4971 | return n; |
ysr@777 | 4972 | } |
ysr@777 | 4973 | |
ysr@777 | 4974 | size_t G1CollectedHeap::count_free_regions_list() { |
ysr@777 | 4975 | size_t n = 0; |
ysr@777 | 4976 | size_t o = 0; |
ysr@777 | 4977 | ZF_mon->lock_without_safepoint_check(); |
ysr@777 | 4978 | HeapRegion* cur = _free_region_list; |
ysr@777 | 4979 | while (cur != NULL) { |
ysr@777 | 4980 | cur = cur->next_from_free_list(); |
ysr@777 | 4981 | n++; |
ysr@777 | 4982 | } |
ysr@777 | 4983 | size_t m = unclean_region_list_length(); |
ysr@777 | 4984 | ZF_mon->unlock(); |
ysr@777 | 4985 | return n + m; |
ysr@777 | 4986 | } |
ysr@777 | 4987 | |
ysr@777 | 4988 | bool G1CollectedHeap::should_set_young_locked() { |
ysr@777 | 4989 | assert(heap_lock_held_for_gc(), |
ysr@777 | 4990 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 4991 | return (g1_policy()->in_young_gc_mode() && |
ysr@777 | 4992 | g1_policy()->should_add_next_region_to_young_list()); |
ysr@777 | 4993 | } |
ysr@777 | 4994 | |
ysr@777 | 4995 | void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { |
ysr@777 | 4996 | assert(heap_lock_held_for_gc(), |
ysr@777 | 4997 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 4998 | _young_list->push_region(hr); |
ysr@777 | 4999 | g1_policy()->set_region_short_lived(hr); |
ysr@777 | 5000 | } |
ysr@777 | 5001 | |
ysr@777 | 5002 | class NoYoungRegionsClosure: public HeapRegionClosure { |
ysr@777 | 5003 | private: |
ysr@777 | 5004 | bool _success; |
ysr@777 | 5005 | public: |
ysr@777 | 5006 | NoYoungRegionsClosure() : _success(true) { } |
ysr@777 | 5007 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5008 | if (r->is_young()) { |
ysr@777 | 5009 | gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", |
ysr@777 | 5010 | r->bottom(), r->end()); |
ysr@777 | 5011 | _success = false; |
ysr@777 | 5012 | } |
ysr@777 | 5013 | return false; |
ysr@777 | 5014 | } |
ysr@777 | 5015 | bool success() { return _success; } |
ysr@777 | 5016 | }; |
ysr@777 | 5017 | |
ysr@777 | 5018 | bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, |
ysr@777 | 5019 | bool check_sample) { |
ysr@777 | 5020 | bool ret = true; |
ysr@777 | 5021 | |
ysr@777 | 5022 | ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); |
ysr@777 | 5023 | if (!ignore_scan_only_list) { |
ysr@777 | 5024 | NoYoungRegionsClosure closure; |
ysr@777 | 5025 | heap_region_iterate(&closure); |
ysr@777 | 5026 | ret = ret && closure.success(); |
ysr@777 | 5027 | } |
ysr@777 | 5028 | |
ysr@777 | 5029 | return ret; |
ysr@777 | 5030 | } |
ysr@777 | 5031 | |
ysr@777 | 5032 | void G1CollectedHeap::empty_young_list() { |
ysr@777 | 5033 | assert(heap_lock_held_for_gc(), |
ysr@777 | 5034 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 5035 | assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); |
ysr@777 | 5036 | |
ysr@777 | 5037 | _young_list->empty_list(); |
ysr@777 | 5038 | } |
ysr@777 | 5039 | |
ysr@777 | 5040 | bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { |
ysr@777 | 5041 | bool no_allocs = true; |
ysr@777 | 5042 | for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { |
ysr@777 | 5043 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 5044 | no_allocs = r == NULL || r->saved_mark_at_top(); |
ysr@777 | 5045 | } |
ysr@777 | 5046 | return no_allocs; |
ysr@777 | 5047 | } |
ysr@777 | 5048 | |
apetrusenko@980 | 5049 | void G1CollectedHeap::retire_all_alloc_regions() { |
ysr@777 | 5050 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 5051 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 5052 | if (r != NULL) { |
ysr@777 | 5053 | // Check for aliases. |
ysr@777 | 5054 | bool has_processed_alias = false; |
ysr@777 | 5055 | for (int i = 0; i < ap; ++i) { |
ysr@777 | 5056 | if (_gc_alloc_regions[i] == r) { |
ysr@777 | 5057 | has_processed_alias = true; |
ysr@777 | 5058 | break; |
ysr@777 | 5059 | } |
ysr@777 | 5060 | } |
ysr@777 | 5061 | if (!has_processed_alias) { |
apetrusenko@980 | 5062 | retire_alloc_region(r, false /* par */); |
ysr@777 | 5063 | } |
ysr@777 | 5064 | } |
ysr@777 | 5065 | } |
ysr@777 | 5066 | } |
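// Example of the alias case guarded against above: when two purposes
// (e.g. survivor and tenured) happen to share the same region,
// _gc_alloc_regions[0] == _gc_alloc_regions[1], and the region must be
// retired exactly once.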
ysr@777 | 5067 | |
ysr@777 | 5068 | |
ysr@777 | 5069 | // Done at the start of full GC. |
ysr@777 | 5070 | void G1CollectedHeap::tear_down_region_lists() { |
ysr@777 | 5071 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5072 | while (pop_unclean_region_list_locked() != NULL) ; |
ysr@777 | 5073 | assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, |
ysr@777 | 5074 |          "Postconditions of loop."); |
ysr@777 | 5075 | while (pop_free_region_list_locked() != NULL) ; |
ysr@777 | 5076 | assert(_free_region_list == NULL, "Postcondition of loop."); |
ysr@777 | 5077 | if (_free_region_list_size != 0) { |
ysr@777 | 5078 |     gclog_or_tty->print_cr("Size is " SIZE_FORMAT ".", _free_region_list_size); |
ysr@777 | 5079 | print(); |
ysr@777 | 5080 | } |
ysr@777 | 5081 | assert(_free_region_list_size == 0, "Postconditions of loop."); |
ysr@777 | 5082 | } |
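// Assumed bracketing of these helpers around a full GC (a sketch; the
// actual driver lives in the full-collection code):
//
//   tear_down_region_lists();              // above: empty both lists
//   set_used_regions_to_need_zero_fill();  // below: flag used regions
//   // ... the full collection itself ...
//   rebuild_region_lists();                // below: repopulate the lists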
ysr@777 | 5083 | |
ysr@777 | 5084 | |
ysr@777 | 5085 | class RegionResetter: public HeapRegionClosure { |
ysr@777 | 5086 | G1CollectedHeap* _g1; |
ysr@777 | 5087 | int _n; |
ysr@777 | 5088 | public: |
ysr@777 | 5089 | RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} |
ysr@777 | 5090 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5091 | if (r->continuesHumongous()) return false; |
ysr@777 | 5092 | if (r->top() > r->bottom()) { |
ysr@777 | 5093 | if (r->top() < r->end()) { |
ysr@777 | 5094 | Copy::fill_to_words(r->top(), |
ysr@777 | 5095 | pointer_delta(r->end(), r->top())); |
ysr@777 | 5096 | } |
ysr@777 | 5097 | r->set_zero_fill_allocated(); |
ysr@777 | 5098 | } else { |
ysr@777 | 5099 | assert(r->is_empty(), "tautology"); |
ysr@777 | 5100 | if (r->popular()) { |
ysr@777 | 5101 | if (r->zero_fill_state() != HeapRegion::Allocated) { |
ysr@777 | 5102 | r->ensure_zero_filled_locked(); |
ysr@777 | 5103 | r->set_zero_fill_allocated(); |
ysr@777 | 5104 | } |
ysr@777 | 5105 | } else { |
ysr@777 | 5106 | _n++; |
ysr@777 | 5107 | switch (r->zero_fill_state()) { |
ysr@777 | 5108 | case HeapRegion::NotZeroFilled: |
ysr@777 | 5109 | case HeapRegion::ZeroFilling: |
ysr@777 | 5110 | _g1->put_region_on_unclean_list_locked(r); |
ysr@777 | 5111 | break; |
ysr@777 | 5112 | case HeapRegion::Allocated: |
ysr@777 | 5113 | r->set_zero_fill_complete(); |
ysr@777 | 5114 | // no break; go on to put on free list. |
ysr@777 | 5115 | case HeapRegion::ZeroFilled: |
ysr@777 | 5116 | _g1->put_free_region_on_list_locked(r); |
ysr@777 | 5117 | break; |
ysr@777 | 5118 | } |
ysr@777 | 5119 | } |
ysr@777 | 5120 | } |
ysr@777 | 5121 | return false; |
ysr@777 | 5122 | } |
ysr@777 | 5123 | |
ysr@777 | 5124 |   int getFreeRegionCount() { return _n; } |
ysr@777 | 5125 | }; |
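// The zero-fill states consulted in the switch above form a small state
// machine; a sketch of the transitions as this file appears to use them
// (the authoritative definition lives in HeapRegion):
//
//   NotZeroFilled --(ZF thread picks it up)--> ZeroFilling
//   ZeroFilling   --(fill completes)---------> ZeroFilled
//   ZeroFilled    --(handed to a mutator)----> Allocated
//   Allocated     --(freed, e.g. above)------> NotZeroFilled / ZeroFilled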
ysr@777 | 5126 | |
ysr@777 | 5127 | // Done at the end of full GC. |
ysr@777 | 5128 | void G1CollectedHeap::rebuild_region_lists() { |
ysr@777 | 5129 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5130 | // This needs to go at the end of the full GC. |
ysr@777 | 5131 | RegionResetter rs; |
ysr@777 | 5132 | heap_region_iterate(&rs); |
ysr@777 | 5133 | _free_regions = rs.getFreeRegionCount(); |
ysr@777 | 5134 | // Tell the ZF thread it may have work to do. |
ysr@777 | 5135 | if (should_zf()) ZF_mon->notify_all(); |
ysr@777 | 5136 | } |
ysr@777 | 5137 | |
ysr@777 | 5138 | class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { |
ysr@777 | 5139 | G1CollectedHeap* _g1; |
ysr@777 | 5140 | int _n; |
ysr@777 | 5141 | public: |
ysr@777 | 5142 | UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} |
ysr@777 | 5143 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5144 | if (r->continuesHumongous()) return false; |
ysr@777 | 5145 | if (r->top() > r->bottom()) { |
ysr@777 | 5146 | // There are assertions in "set_zero_fill_needed()" below that |
ysr@777 | 5147 | // require top() == bottom(), so this is technically illegal. |
ysr@777 | 5148 |       // We'll skirt the law here by making that true temporarily. |
ysr@777 | 5149 | DEBUG_ONLY(HeapWord* save_top = r->top(); |
ysr@777 | 5150 | r->set_top(r->bottom())); |
ysr@777 | 5151 | r->set_zero_fill_needed(); |
ysr@777 | 5152 | DEBUG_ONLY(r->set_top(save_top)); |
ysr@777 | 5153 | } |
ysr@777 | 5154 | return false; |
ysr@777 | 5155 | } |
ysr@777 | 5156 | }; |
ysr@777 | 5157 | |
ysr@777 | 5158 | // Done at the start of full GC. |
ysr@777 | 5159 | void G1CollectedHeap::set_used_regions_to_need_zero_fill() { |
ysr@777 | 5160 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5161 |   // This needs to be done at the start of the full GC. |
ysr@777 | 5162 | UsedRegionsNeedZeroFillSetter rs; |
ysr@777 | 5163 | heap_region_iterate(&rs); |
ysr@777 | 5164 | } |
ysr@777 | 5165 | |
ysr@777 | 5166 | class CountObjClosure: public ObjectClosure { |
ysr@777 | 5167 | size_t _n; |
ysr@777 | 5168 | public: |
ysr@777 | 5169 | CountObjClosure() : _n(0) {} |
ysr@777 | 5170 | void do_object(oop obj) { _n++; } |
ysr@777 | 5171 | size_t n() { return _n; } |
ysr@777 | 5172 | }; |
ysr@777 | 5173 | |
ysr@777 | 5174 | size_t G1CollectedHeap::pop_object_used_objs() { |
ysr@777 | 5175 | size_t sum_objs = 0; |
ysr@777 | 5176 | for (int i = 0; i < G1NumPopularRegions; i++) { |
ysr@777 | 5177 | CountObjClosure cl; |
ysr@777 | 5178 | _hrs->at(i)->object_iterate(&cl); |
ysr@777 | 5179 | sum_objs += cl.n(); |
ysr@777 | 5180 | } |
ysr@777 | 5181 | return sum_objs; |
ysr@777 | 5182 | } |
ysr@777 | 5183 | |
ysr@777 | 5184 | size_t G1CollectedHeap::pop_object_used_bytes() { |
ysr@777 | 5185 | size_t sum_bytes = 0; |
ysr@777 | 5186 | for (int i = 0; i < G1NumPopularRegions; i++) { |
ysr@777 | 5187 | sum_bytes += _hrs->at(i)->used(); |
ysr@777 | 5188 | } |
ysr@777 | 5189 | return sum_bytes; |
ysr@777 | 5190 | } |
ysr@777 | 5191 | |
ysr@777 | 5192 | |
ysr@777 | 5193 | static int nq = 0; |
ysr@777 | 5194 | |
ysr@777 | 5195 | HeapWord* G1CollectedHeap::allocate_popular_object(size_t word_size) { |
ysr@777 | 5196 | while (_cur_pop_hr_index < G1NumPopularRegions) { |
ysr@777 | 5197 | HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); |
ysr@777 | 5198 | HeapWord* res = cur_pop_region->allocate(word_size); |
ysr@777 | 5199 | if (res != NULL) { |
ysr@777 | 5200 | // We account for popular objs directly in the used summary: |
ysr@777 | 5201 | _summary_bytes_used += (word_size * HeapWordSize); |
ysr@777 | 5202 | return res; |
ysr@777 | 5203 | } |
ysr@777 | 5204 | // Otherwise, try the next region (first making sure that we remember |
ysr@777 | 5205 | // the last "top" value as the "next_top_at_mark_start", so that |
ysr@777 | 5206 | // objects made popular during markings aren't automatically considered |
ysr@777 | 5207 | // live). |
ysr@777 | 5208 | cur_pop_region->note_end_of_copying(); |
ysr@777 | 5210 | _cur_pop_hr_index++; |
ysr@777 | 5211 | } |
ysr@777 | 5212 | // XXX: For now !!! |
ysr@777 | 5213 | vm_exit_out_of_memory(word_size, |
ysr@777 | 5214 | "Not enough pop obj space (To Be Fixed)"); |
ysr@777 | 5215 | return NULL; |
ysr@777 | 5216 | } |
ysr@777 | 5217 | |
ysr@777 | 5218 | class HeapRegionList: public CHeapObj { |
ysr@777 | 5219 | public: |
ysr@777 | 5220 | HeapRegion* hr; |
ysr@777 | 5221 | HeapRegionList* next; |
ysr@777 | 5222 | }; |
ysr@777 | 5223 | |
ysr@777 | 5224 | void G1CollectedHeap::schedule_popular_region_evac(HeapRegion* r) { |
ysr@777 | 5225 |   // This might happen during parallel GC, so we protect it with this lock. |
ysr@777 | 5226 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5227 |   // We don't schedule regions whose evacuation is already pending or |
ysr@777 | 5228 |   // that are already being evacuated. |
ysr@777 | 5229 | if (!r->popular_pending() && !r->in_collection_set()) { |
ysr@777 | 5230 | r->set_popular_pending(true); |
ysr@777 | 5231 | if (G1TracePopularity) { |
ysr@777 | 5232 | gclog_or_tty->print_cr("Scheduling region "PTR_FORMAT" " |
ysr@777 | 5233 | "["PTR_FORMAT", "PTR_FORMAT") for pop-object evacuation.", |
ysr@777 | 5234 | r, r->bottom(), r->end()); |
ysr@777 | 5235 | } |
ysr@777 | 5236 | HeapRegionList* hrl = new HeapRegionList; |
ysr@777 | 5237 | hrl->hr = r; |
ysr@777 | 5238 | hrl->next = _popular_regions_to_be_evacuated; |
ysr@777 | 5239 | _popular_regions_to_be_evacuated = hrl; |
ysr@777 | 5240 | } |
ysr@777 | 5241 | } |
ysr@777 | 5242 | |
ysr@777 | 5243 | HeapRegion* G1CollectedHeap::popular_region_to_evac() { |
ysr@777 | 5244 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5245 | HeapRegion* res = NULL; |
ysr@777 | 5246 | while (_popular_regions_to_be_evacuated != NULL && res == NULL) { |
ysr@777 | 5247 | HeapRegionList* hrl = _popular_regions_to_be_evacuated; |
ysr@777 | 5248 | _popular_regions_to_be_evacuated = hrl->next; |
ysr@777 | 5249 | res = hrl->hr; |
ysr@777 | 5250 | // The G1RSPopLimit may have increased, so recheck here... |
ysr@777 | 5251 | if (res->rem_set()->occupied() < (size_t) G1RSPopLimit) { |
ysr@777 | 5252 | // Hah: don't need to schedule. |
ysr@777 | 5253 | if (G1TracePopularity) { |
ysr@777 | 5254 | gclog_or_tty->print_cr("Unscheduling region "PTR_FORMAT" " |
ysr@777 | 5255 | "["PTR_FORMAT", "PTR_FORMAT") " |
ysr@777 | 5256 |                              "for pop-object evacuation (size " SIZE_FORMAT " < limit " SIZE_FORMAT ")", |
ysr@777 | 5257 |                              res, res->bottom(), res->end(), |
ysr@777 | 5258 |                              res->rem_set()->occupied(), (size_t) G1RSPopLimit); |
ysr@777 | 5259 | } |
ysr@777 | 5260 | res->set_popular_pending(false); |
ysr@777 | 5261 | res = NULL; |
ysr@777 | 5262 | } |
ysr@777 | 5263 | // We do not reset res->popular() here; if we did so, it would allow |
ysr@777 | 5264 | // the region to be "rescheduled" for popularity evacuation. Instead, |
ysr@777 | 5265 | // this is done in the collection pause, with the world stopped. |
ysr@777 | 5266 | // So the invariant is that the regions in the list have the popularity |
ysr@777 | 5267 | // boolean set, but having the boolean set does not imply membership |
ysr@777 | 5268 |     // on the list (though there can be at most one such pop-pending region |
ysr@777 | 5269 | // not on the list at any time). |
ysr@777 | 5270 | delete hrl; |
ysr@777 | 5271 | } |
ysr@777 | 5272 | return res; |
ysr@777 | 5273 | } |
ysr@777 | 5274 | |
ysr@777 | 5275 | void G1CollectedHeap::evac_popular_region(HeapRegion* hr) { |
ysr@777 | 5276 | while (true) { |
ysr@777 | 5277 | // Don't want to do a GC pause while cleanup is being completed! |
ysr@777 | 5278 | wait_for_cleanup_complete(); |
ysr@777 | 5279 | |
ysr@777 | 5280 | // Read the GC count while holding the Heap_lock |
ysr@777 | 5281 | int gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@777 | 5282 | g1_policy()->record_stop_world_start(); |
ysr@777 | 5283 | |
ysr@777 | 5284 | { |
ysr@777 | 5285 | MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back |
ysr@777 | 5286 | VM_G1PopRegionCollectionPause op(gc_count_before, hr); |
ysr@777 | 5287 | VMThread::execute(&op); |
ysr@777 | 5288 | |
ysr@777 | 5289 |       // If the prologue succeeded, we didn't do a GC for this. |
ysr@777 | 5290 | if (op.prologue_succeeded()) break; |
ysr@777 | 5291 | } |
ysr@777 | 5292 | // Otherwise we didn't. We should recheck the size, though, since |
ysr@777 | 5293 | // the limit may have increased... |
ysr@777 | 5294 | if (hr->rem_set()->occupied() < (size_t) G1RSPopLimit) { |
ysr@777 | 5295 | hr->set_popular_pending(false); |
ysr@777 | 5296 | break; |
ysr@777 | 5297 | } |
ysr@777 | 5298 | } |
ysr@777 | 5299 | } |
ysr@777 | 5300 | |
ysr@777 | 5301 | void G1CollectedHeap::atomic_inc_obj_rc(oop obj) { |
ysr@777 | 5302 | Atomic::inc(obj_rc_addr(obj)); |
ysr@777 | 5303 | } |
ysr@777 | 5304 | |
ysr@777 | 5305 | class CountRCClosure: public OopsInHeapRegionClosure { |
ysr@777 | 5306 | G1CollectedHeap* _g1h; |
ysr@777 | 5307 | bool _parallel; |
ysr@777 | 5308 | public: |
ysr@777 | 5309 | CountRCClosure(G1CollectedHeap* g1h) : |
ysr@777 | 5310 | _g1h(g1h), _parallel(ParallelGCThreads > 0) |
ysr@777 | 5311 | {} |
ysr@777 | 5312 | void do_oop(narrowOop* p) { |
ysr@777 | 5313 | guarantee(false, "NYI"); |
ysr@777 | 5314 | } |
ysr@777 | 5315 | void do_oop(oop* p) { |
ysr@777 | 5316 | oop obj = *p; |
ysr@777 | 5317 | assert(obj != NULL, "Precondition."); |
ysr@777 | 5318 | if (_parallel) { |
ysr@777 | 5319 | // We go sticky at the limit to avoid excess contention. |
ysr@777 | 5320 | // If we want to track the actual RC's further, we'll need to keep a |
ysr@777 | 5321 | // per-thread hash table or something for the popular objects. |
ysr@777 | 5322 | if (_g1h->obj_rc(obj) < G1ObjPopLimit) { |
ysr@777 | 5323 | _g1h->atomic_inc_obj_rc(obj); |
ysr@777 | 5324 | } |
ysr@777 | 5325 | } else { |
ysr@777 | 5326 | _g1h->inc_obj_rc(obj); |
ysr@777 | 5327 | } |
ysr@777 | 5328 | } |
ysr@777 | 5329 | }; |
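// Note on "sticky at the limit": the racy read-then-increment above can
// overshoot G1ObjPopLimit by at most (number of GC threads - 1), which
// is harmless here because EvacPopObjClosure below only tests
// rc >= G1ObjPopLimit; exact counts beyond the limit are never needed.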
ysr@777 | 5330 | |
ysr@777 | 5331 | class EvacPopObjClosure: public ObjectClosure { |
ysr@777 | 5332 | G1CollectedHeap* _g1h; |
ysr@777 | 5333 | size_t _pop_objs; |
ysr@777 | 5334 | size_t _max_rc; |
ysr@777 | 5335 | public: |
ysr@777 | 5336 | EvacPopObjClosure(G1CollectedHeap* g1h) : |
ysr@777 | 5337 | _g1h(g1h), _pop_objs(0), _max_rc(0) {} |
ysr@777 | 5338 | |
ysr@777 | 5339 | void do_object(oop obj) { |
ysr@777 | 5340 | size_t rc = _g1h->obj_rc(obj); |
ysr@777 | 5341 | _max_rc = MAX2(rc, _max_rc); |
ysr@777 | 5342 | if (rc >= (size_t) G1ObjPopLimit) { |
ysr@777 | 5343 | _g1h->_pop_obj_rc_at_copy.add((double)rc); |
ysr@777 | 5344 | size_t word_sz = obj->size(); |
ysr@777 | 5345 | HeapWord* new_pop_loc = _g1h->allocate_popular_object(word_sz); |
ysr@777 | 5346 | oop new_pop_obj = (oop)new_pop_loc; |
ysr@777 | 5347 | Copy::aligned_disjoint_words((HeapWord*)obj, new_pop_loc, word_sz); |
ysr@777 | 5348 | obj->forward_to(new_pop_obj); |
ysr@777 | 5349 | G1ScanAndBalanceClosure scan_and_balance(_g1h); |
ysr@777 | 5350 | new_pop_obj->oop_iterate_backwards(&scan_and_balance); |
ysr@777 | 5351 | // preserve "next" mark bit if marking is in progress. |
ysr@777 | 5352 | if (_g1h->mark_in_progress() && !_g1h->is_obj_ill(obj)) { |
ysr@777 | 5353 | _g1h->concurrent_mark()->markAndGrayObjectIfNecessary(new_pop_obj); |
ysr@777 | 5354 | } |
ysr@777 | 5355 | |
ysr@777 | 5356 | if (G1TracePopularity) { |
ysr@777 | 5357 | gclog_or_tty->print_cr("Found obj " PTR_FORMAT " of word size " SIZE_FORMAT |
ysr@777 | 5358 | " pop (%d), move to " PTR_FORMAT, |
ysr@777 | 5359 | (void*) obj, word_sz, |
ysr@777 | 5360 | _g1h->obj_rc(obj), (void*) new_pop_obj); |
ysr@777 | 5361 | } |
ysr@777 | 5362 | _pop_objs++; |
ysr@777 | 5363 | } |
ysr@777 | 5364 | } |
ysr@777 | 5365 | size_t pop_objs() { return _pop_objs; } |
ysr@777 | 5366 | size_t max_rc() { return _max_rc; } |
ysr@777 | 5367 | }; |
ysr@777 | 5368 | |
ysr@777 | 5369 | class G1ParCountRCTask : public AbstractGangTask { |
ysr@777 | 5370 | G1CollectedHeap* _g1h; |
ysr@777 | 5371 | BitMap _bm; |
ysr@777 | 5372 | |
ysr@777 | 5373 | size_t getNCards() { |
ysr@777 | 5374 | return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) |
ysr@777 | 5375 | / G1BlockOffsetSharedArray::N_bytes; |
ysr@777 | 5376 | } |
ysr@777 | 5377 | CountRCClosure _count_rc_closure; |
ysr@777 | 5378 | public: |
ysr@777 | 5379 | G1ParCountRCTask(G1CollectedHeap* g1h) : |
ysr@777 | 5380 | AbstractGangTask("G1 Par RC Count task"), |
ysr@777 | 5381 | _g1h(g1h), _bm(getNCards()), _count_rc_closure(g1h) |
ysr@777 | 5382 | {} |
ysr@777 | 5383 | |
ysr@777 | 5384 | void work(int i) { |
ysr@777 | 5385 | ResourceMark rm; |
ysr@777 | 5386 | HandleMark hm; |
ysr@777 | 5387 | _g1h->g1_rem_set()->oops_into_collection_set_do(&_count_rc_closure, i); |
ysr@777 | 5388 | } |
ysr@777 | 5389 | }; |
ysr@777 | 5390 | |
ysr@777 | 5391 | void G1CollectedHeap::popularity_pause_preamble(HeapRegion* popular_region) { |
ysr@777 | 5392 | // We're evacuating a single region (for popularity). |
ysr@777 | 5393 | if (G1TracePopularity) { |
ysr@777 | 5394 | gclog_or_tty->print_cr("Doing pop region pause for ["PTR_FORMAT", "PTR_FORMAT")", |
ysr@777 | 5395 | popular_region->bottom(), popular_region->end()); |
ysr@777 | 5396 | } |
ysr@777 | 5397 | g1_policy()->set_single_region_collection_set(popular_region); |
ysr@777 | 5398 | size_t max_rc; |
ysr@777 | 5399 | if (!compute_reference_counts_and_evac_popular(popular_region, |
ysr@777 | 5400 | &max_rc)) { |
ysr@777 | 5401 | // We didn't evacuate any popular objects. |
ysr@777 | 5402 | // We increase the RS popularity limit, to prevent this from |
ysr@777 | 5403 | // happening in the future. |
ysr@777 | 5404 | if (G1RSPopLimit < (1 << 30)) { |
ysr@777 | 5405 | G1RSPopLimit *= 2; |
ysr@777 | 5406 | } |
ysr@777 | 5407 | // For now, interesting enough for a message: |
ysr@777 | 5408 | #if 1 |
ysr@777 | 5409 | gclog_or_tty->print_cr("In pop region pause for ["PTR_FORMAT", "PTR_FORMAT"), " |
ysr@777 | 5410 |                   "failed to find a pop object (max = " SIZE_FORMAT ").", |
ysr@777 | 5411 | popular_region->bottom(), popular_region->end(), |
ysr@777 | 5412 | max_rc); |
ysr@777 | 5413 | gclog_or_tty->print_cr("Increased G1RSPopLimit to %d.", G1RSPopLimit); |
ysr@777 | 5414 | #endif // 1 |
ysr@777 | 5415 | // Also, we reset the collection set to NULL, to make the rest of |
ysr@777 | 5416 | // the collection do nothing. |
ysr@777 | 5417 | assert(popular_region->next_in_collection_set() == NULL, |
ysr@777 | 5418 | "should be single-region."); |
ysr@777 | 5419 | popular_region->set_in_collection_set(false); |
ysr@777 | 5420 | popular_region->set_popular_pending(false); |
ysr@777 | 5421 | g1_policy()->clear_collection_set(); |
ysr@777 | 5422 | } |
ysr@777 | 5423 | } |
ysr@777 | 5424 | |
ysr@777 | 5425 | bool G1CollectedHeap:: |
ysr@777 | 5426 | compute_reference_counts_and_evac_popular(HeapRegion* popular_region, |
ysr@777 | 5427 | size_t* max_rc) { |
ysr@777 | 5428 | HeapWord* rc_region_bot; |
ysr@777 | 5429 | HeapWord* rc_region_end; |
ysr@777 | 5430 | |
ysr@777 | 5431 | // Set up the reference count region. |
ysr@777 | 5432 | HeapRegion* rc_region = newAllocRegion(HeapRegion::GrainWords); |
ysr@777 | 5433 | if (rc_region != NULL) { |
ysr@777 | 5434 | rc_region_bot = rc_region->bottom(); |
ysr@777 | 5435 | rc_region_end = rc_region->end(); |
ysr@777 | 5436 | } else { |
ysr@777 | 5437 | rc_region_bot = NEW_C_HEAP_ARRAY(HeapWord, HeapRegion::GrainWords); |
ysr@777 | 5438 | if (rc_region_bot == NULL) { |
ysr@777 | 5439 | vm_exit_out_of_memory(HeapRegion::GrainWords, |
ysr@777 | 5440 | "No space for RC region."); |
ysr@777 | 5441 | } |
ysr@777 | 5442 | rc_region_end = rc_region_bot + HeapRegion::GrainWords; |
ysr@777 | 5443 | } |
ysr@777 | 5444 | |
ysr@777 | 5445 | if (G1TracePopularity) |
ysr@777 | 5446 | gclog_or_tty->print_cr("RC region is ["PTR_FORMAT", "PTR_FORMAT")", |
ysr@777 | 5447 | rc_region_bot, rc_region_end); |
ysr@777 | 5448 | if (rc_region_bot > popular_region->bottom()) { |
ysr@777 | 5449 | _rc_region_above = true; |
ysr@777 | 5450 | _rc_region_diff = |
ysr@777 | 5451 | pointer_delta(rc_region_bot, popular_region->bottom(), 1); |
ysr@777 | 5452 | } else { |
ysr@777 | 5453 | assert(rc_region_bot < popular_region->bottom(), "Can't be equal."); |
ysr@777 | 5454 | _rc_region_above = false; |
ysr@777 | 5455 | _rc_region_diff = |
ysr@777 | 5456 | pointer_delta(popular_region->bottom(), rc_region_bot, 1); |
ysr@777 | 5457 | } |
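  // With _rc_region_above and _rc_region_diff set as above, the address
  // translation behind obj_rc()/obj_rc_addr() is assumed to be a fixed
  // byte offset, along these lines (a sketch, not the actual accessor;
  // pointer_delta was taken with a unit of 1, i.e. bytes):
  //
  //   int* obj_rc_addr(oop obj) {
  //     char* base = (char*) obj;
  //     return (int*) (_rc_region_above ? base + _rc_region_diff
  //                                     : base - _rc_region_diff);
  //   }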
ysr@777 | 5458 | g1_policy()->record_pop_compute_rc_start(); |
ysr@777 | 5459 | // Count external references. |
ysr@777 | 5460 | g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
ysr@777 | 5461 | if (ParallelGCThreads > 0) { |
ysr@777 | 5462 | |
ysr@777 | 5463 | set_par_threads(workers()->total_workers()); |
ysr@777 | 5464 | G1ParCountRCTask par_count_rc_task(this); |
ysr@777 | 5465 | workers()->run_task(&par_count_rc_task); |
ysr@777 | 5466 | set_par_threads(0); |
ysr@777 | 5467 | |
ysr@777 | 5468 | } else { |
ysr@777 | 5469 | CountRCClosure count_rc_closure(this); |
ysr@777 | 5470 | g1_rem_set()->oops_into_collection_set_do(&count_rc_closure, 0); |
ysr@777 | 5471 | } |
ysr@777 | 5472 | g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
ysr@777 | 5473 | g1_policy()->record_pop_compute_rc_end(); |
ysr@777 | 5474 | |
ysr@777 | 5475 | // Now evacuate popular objects. |
ysr@777 | 5476 | g1_policy()->record_pop_evac_start(); |
ysr@777 | 5477 | EvacPopObjClosure evac_pop_obj_cl(this); |
ysr@777 | 5478 | popular_region->object_iterate(&evac_pop_obj_cl); |
ysr@777 | 5479 | *max_rc = evac_pop_obj_cl.max_rc(); |
ysr@777 | 5480 | |
ysr@777 | 5481 | // Make sure the last "top" value of the current popular region is copied |
ysr@777 | 5482 | // as the "next_top_at_mark_start", so that objects made popular during |
ysr@777 | 5483 | // markings aren't automatically considered live. |
ysr@777 | 5484 | HeapRegion* cur_pop_region = _hrs->at(_cur_pop_hr_index); |
ysr@777 | 5485 | cur_pop_region->note_end_of_copying(); |
ysr@777 | 5486 | |
ysr@777 | 5487 | if (rc_region != NULL) { |
ysr@777 | 5488 | free_region(rc_region); |
ysr@777 | 5489 | } else { |
ysr@777 | 5490 | FREE_C_HEAP_ARRAY(HeapWord, rc_region_bot); |
ysr@777 | 5491 | } |
ysr@777 | 5492 | g1_policy()->record_pop_evac_end(); |
ysr@777 | 5493 | |
ysr@777 | 5494 | return evac_pop_obj_cl.pop_objs() > 0; |
ysr@777 | 5495 | } |
ysr@777 | 5496 | |
ysr@777 | 5497 | class CountPopObjInfoClosure: public HeapRegionClosure { |
ysr@777 | 5498 | size_t _objs; |
ysr@777 | 5499 | size_t _bytes; |
ysr@777 | 5500 | |
ysr@777 | 5501 | class CountObjClosure: public ObjectClosure { |
ysr@777 | 5502 |     size_t _n; |
ysr@777 | 5503 | public: |
ysr@777 | 5504 | CountObjClosure() : _n(0) {} |
ysr@777 | 5505 | void do_object(oop obj) { _n++; } |
ysr@777 | 5506 | size_t n() { return _n; } |
ysr@777 | 5507 | }; |
ysr@777 | 5508 | |
ysr@777 | 5509 | public: |
ysr@777 | 5510 | CountPopObjInfoClosure() : _objs(0), _bytes(0) {} |
ysr@777 | 5511 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5512 | _bytes += r->used(); |
ysr@777 | 5513 | CountObjClosure blk; |
ysr@777 | 5514 | r->object_iterate(&blk); |
ysr@777 | 5515 | _objs += blk.n(); |
ysr@777 | 5516 | return false; |
ysr@777 | 5517 | } |
ysr@777 | 5518 | size_t objs() { return _objs; } |
ysr@777 | 5519 | size_t bytes() { return _bytes; } |
ysr@777 | 5520 | }; |
ysr@777 | 5521 | |
ysr@777 | 5522 | |
ysr@777 | 5523 | void G1CollectedHeap::print_popularity_summary_info() const { |
ysr@777 | 5524 | CountPopObjInfoClosure blk; |
ysr@777 | 5525 | for (int i = 0; i <= _cur_pop_hr_index; i++) { |
ysr@777 | 5526 | blk.doHeapRegion(_hrs->at(i)); |
ysr@777 | 5527 | } |
ysr@777 | 5528 |   gclog_or_tty->print_cr("\nPopular objects: " SIZE_FORMAT " objs, " SIZE_FORMAT " bytes.", |
ysr@777 | 5529 |                          blk.objs(), blk.bytes()); |
ysr@777 | 5530 | gclog_or_tty->print_cr(" RC at copy = [avg = %5.2f, max = %5.2f, sd = %5.2f].", |
ysr@777 | 5531 | _pop_obj_rc_at_copy.avg(), |
ysr@777 | 5532 | _pop_obj_rc_at_copy.maximum(), |
ysr@777 | 5533 | _pop_obj_rc_at_copy.sd()); |
ysr@777 | 5534 | } |
ysr@777 | 5535 | |
ysr@777 | 5536 | void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { |
ysr@777 | 5537 | _refine_cte_cl->set_concurrent(concurrent); |
ysr@777 | 5538 | } |
ysr@777 | 5539 | |
ysr@777 | 5540 | #ifndef PRODUCT |
ysr@777 | 5541 | |
ysr@777 | 5542 | class PrintHeapRegionClosure: public HeapRegionClosure { |
ysr@777 | 5543 | public: |
ysr@777 | 5544 | bool doHeapRegion(HeapRegion *r) { |
ysr@777 | 5545 | gclog_or_tty->print("Region: "PTR_FORMAT":", r); |
ysr@777 | 5546 | if (r != NULL) { |
ysr@777 | 5547 | if (r->is_on_free_list()) |
ysr@777 | 5548 | gclog_or_tty->print("Free "); |
ysr@777 | 5549 | if (r->is_young()) |
ysr@777 | 5550 | gclog_or_tty->print("Young "); |
ysr@777 | 5551 | if (r->isHumongous()) |
ysr@777 | 5552 | gclog_or_tty->print("Is Humongous "); |
ysr@777 | 5553 | r->print(); |
ysr@777 | 5554 | } |
ysr@777 | 5555 | return false; |
ysr@777 | 5556 | } |
ysr@777 | 5557 | }; |
ysr@777 | 5558 | |
ysr@777 | 5559 | class SortHeapRegionClosure : public HeapRegionClosure { |
ysr@777 | 5560 |   size_t young_regions, free_regions, unclean_regions; |
ysr@777 | 5561 | size_t hum_regions, count; |
ysr@777 | 5562 | size_t unaccounted, cur_unclean, cur_alloc; |
ysr@777 | 5563 | size_t total_free; |
ysr@777 | 5564 | HeapRegion* cur; |
ysr@777 | 5565 | public: |
ysr@777 | 5566 | SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0), |
ysr@777 | 5567 | free_regions(0), unclean_regions(0), |
ysr@777 | 5568 | hum_regions(0), |
ysr@777 | 5569 |                                         count(0), unaccounted(0), |
ysr@777 | 5570 |                                         cur_unclean(0), cur_alloc(0), total_free(0) |
ysr@777 | 5571 | {} |
ysr@777 | 5572 | bool doHeapRegion(HeapRegion *r) { |
ysr@777 | 5573 | count++; |
ysr@777 | 5574 | if (r->is_on_free_list()) free_regions++; |
ysr@777 | 5575 | else if (r->is_on_unclean_list()) unclean_regions++; |
ysr@777 | 5576 | else if (r->isHumongous()) hum_regions++; |
ysr@777 | 5577 | else if (r->is_young()) young_regions++; |
ysr@777 | 5578 | else if (r == cur) cur_alloc++; |
ysr@777 | 5579 | else unaccounted++; |
ysr@777 | 5580 | return false; |
ysr@777 | 5581 | } |
ysr@777 | 5582 | void print() { |
ysr@777 | 5583 | total_free = free_regions + unclean_regions; |
ysr@777 | 5584 |     gclog_or_tty->print(SIZE_FORMAT " regions\n", count); |
ysr@777 | 5585 |     gclog_or_tty->print(SIZE_FORMAT " free: free_list = " SIZE_FORMAT " unclean = " SIZE_FORMAT "\n", |
ysr@777 | 5586 |                         total_free, free_regions, unclean_regions); |
ysr@777 | 5587 |     gclog_or_tty->print(SIZE_FORMAT " humongous " SIZE_FORMAT " young\n", |
ysr@777 | 5588 |                         hum_regions, young_regions); |
ysr@777 | 5589 |     gclog_or_tty->print(SIZE_FORMAT " cur_alloc\n", cur_alloc); |
ysr@777 | 5590 |     gclog_or_tty->print("UHOH unaccounted = " SIZE_FORMAT "\n", unaccounted); |
ysr@777 | 5591 | } |
ysr@777 | 5592 | }; |
ysr@777 | 5593 | |
ysr@777 | 5594 | void G1CollectedHeap::print_region_counts() { |
ysr@777 | 5595 | SortHeapRegionClosure sc(_cur_alloc_region); |
ysr@777 | 5596 | PrintHeapRegionClosure cl; |
ysr@777 | 5597 | heap_region_iterate(&cl); |
ysr@777 | 5598 | heap_region_iterate(&sc); |
ysr@777 | 5599 | sc.print(); |
ysr@777 | 5600 | print_region_accounting_info(); |
ysr@777 | 5601 | } |
ysr@777 | 5602 | |
ysr@777 | 5603 | bool G1CollectedHeap::regions_accounted_for() { |
ysr@777 | 5604 | // TODO: regions accounting for young/survivor/tenured |
ysr@777 | 5605 | return true; |
ysr@777 | 5606 | } |
ysr@777 | 5607 | |
ysr@777 | 5608 | bool G1CollectedHeap::print_region_accounting_info() { |
ysr@777 | 5609 | gclog_or_tty->print_cr("P regions: %d.", G1NumPopularRegions); |
ysr@777 | 5610 |   gclog_or_tty->print_cr("Free regions: " SIZE_FORMAT " (count: " SIZE_FORMAT " count list " SIZE_FORMAT ") " |
ysr@777 | 5611 |                          "(clean: " SIZE_FORMAT " unclean: " SIZE_FORMAT ").", |
ysr@777 | 5612 |                          free_regions(), |
ysr@777 | 5613 |                          count_free_regions(), count_free_regions_list(), |
ysr@777 | 5614 |                          _free_region_list_size, _unclean_region_list.sz()); |
ysr@777 | 5614 | gclog_or_tty->print_cr("cur_alloc: %d.", |
ysr@777 | 5615 | (_cur_alloc_region == NULL ? 0 : 1)); |
ysr@777 | 5616 | gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); |
ysr@777 | 5617 | |
ysr@777 | 5618 | // TODO: check regions accounting for young/survivor/tenured |
ysr@777 | 5619 | return true; |
ysr@777 | 5620 | } |
ysr@777 | 5621 | |
ysr@777 | 5622 | bool G1CollectedHeap::is_in_closed_subset(const void* p) const { |
ysr@777 | 5623 | HeapRegion* hr = heap_region_containing(p); |
ysr@777 | 5624 | if (hr == NULL) { |
ysr@777 | 5625 | return is_in_permanent(p); |
ysr@777 | 5626 | } else { |
ysr@777 | 5627 | return hr->is_in(p); |
ysr@777 | 5628 | } |
ysr@777 | 5629 | } |
ysr@777 | 5630 | #endif // PRODUCT |
ysr@777 | 5631 | |
ysr@777 | 5632 | void G1CollectedHeap::g1_unimplemented() { |
ysr@777 | 5633 | // Unimplemented(); |
ysr@777 | 5634 | } |
ysr@777 | 5635 | |
ysr@777 | 5636 | |
ysr@777 | 5637 | // Local Variables: *** |
ysr@777 | 5638 | // c-indentation-style: gnu *** |
ysr@777 | 5639 | // End: *** |