Tue, 26 Jan 2010 16:52:29 -0800
6920090: G1: Disable ReduceInitialCardMarks at least until 6920109 is fixed
Summary: G1 now answers "no" to the query can_elide_initializing_store_barrier() in the product build. A debug flag allows alternate behaviour in debug builds.
Reviewed-by: iveresov, tonyp
ysr@777 | 1 | /* |
xdono@1014 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | #include "incls/_precompiled.incl" |
ysr@777 | 26 | #include "incls/_g1CollectedHeap.cpp.incl" |
ysr@777 | 27 | |
tonyp@1377 | 28 | size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; |
tonyp@1377 | 29 | |
ysr@777 | 30 | // Set SCAN_ONLY_VERBOSE to 1 so that the contents of the young list |
ysr@777 | 31 | // (scan-only / to-be-collected) are printed at "strategic" points |
ysr@777 | 32 | // before / during / after the collection --- this is useful for debugging. |
ysr@777 | 33 | #define SCAN_ONLY_VERBOSE 0 |
ysr@777 | 34 | // CURRENT STATUS |
ysr@777 | 35 | // This file is under construction. Search for "FIXME". |
ysr@777 | 36 | |
ysr@777 | 37 | // INVARIANTS/NOTES |
ysr@777 | 38 | // |
ysr@777 | 39 | // All allocation activity covered by the G1CollectedHeap interface is |
ysr@777 | 40 | // serialized by acquiring the HeapLock. This happens in |
ysr@777 | 41 | // mem_allocate_work, which all such allocation functions call. |
ysr@777 | 42 | // (Note that this does not apply to TLAB allocation, which is not part |
ysr@777 | 43 | // of this interface: it is done by clients of this interface.) |
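A minimal sketch of the serialization discipline described above, using the Heap_lock and attempt_allocation names that appear later in this file (illustrative shape only, not the real entry point):

    HeapWord* allocate_serialized(size_t word_size) {
      Heap_lock->lock();                 // serialize all heap-interface allocators
      HeapWord* res = attempt_allocation(word_size);
      if (res == NULL) {
        Heap_lock->unlock();             // on success, attempt_allocation unlocks
      }
      return res;
    }

TLAB refills bypass this path: clients obtain TLAB memory through this interface, then allocate inside the TLAB without taking the lock.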
ysr@777 | 44 | |
ysr@777 | 45 | // Local to this file. |
ysr@777 | 46 | |
ysr@777 | 47 | class RefineCardTableEntryClosure: public CardTableEntryClosure { |
ysr@777 | 48 | SuspendibleThreadSet* _sts; |
ysr@777 | 49 | G1RemSet* _g1rs; |
ysr@777 | 50 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 51 | bool _concurrent; |
ysr@777 | 52 | public: |
ysr@777 | 53 | RefineCardTableEntryClosure(SuspendibleThreadSet* sts, |
ysr@777 | 54 | G1RemSet* g1rs, |
ysr@777 | 55 | ConcurrentG1Refine* cg1r) : |
ysr@777 | 56 | _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) |
ysr@777 | 57 | {} |
ysr@777 | 58 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
ysr@777 | 59 | _g1rs->concurrentRefineOneCard(card_ptr, worker_i); |
ysr@777 | 60 | if (_concurrent && _sts->should_yield()) { |
ysr@777 | 61 | // Caller will actually yield. |
ysr@777 | 62 | return false; |
ysr@777 | 63 | } |
ysr@777 | 64 | // Otherwise, we finished successfully; return true. |
ysr@777 | 65 | return true; |
ysr@777 | 66 | } |
ysr@777 | 67 | void set_concurrent(bool b) { _concurrent = b; } |
ysr@777 | 68 | }; |
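The boolean returned by do_card_ptr() above encodes a yield protocol: false means the closure wants its caller to yield to a pending safepoint, true means the card was fully processed. A hedged sketch of a driver loop that honors this protocol (illustrative only; the real driver lives in the dirty card queue machinery):

    bool drain_cards(CardTableEntryClosure* cl, jbyte** cards, int n, int worker_i) {
      for (int i = 0; i < n; i++) {
        if (!cl->do_card_ptr(cards[i], worker_i)) {
          return false;  // closure asked to yield; caller should yield, then retry
        }
      }
      return true;       // all cards processed
    }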
ysr@777 | 69 | |
ysr@777 | 70 | |
ysr@777 | 71 | class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { |
ysr@777 | 72 | int _calls; |
ysr@777 | 73 | G1CollectedHeap* _g1h; |
ysr@777 | 74 | CardTableModRefBS* _ctbs; |
ysr@777 | 75 | int _histo[256]; |
ysr@777 | 76 | public: |
ysr@777 | 77 | ClearLoggedCardTableEntryClosure() : |
ysr@777 | 78 | _calls(0) |
ysr@777 | 79 | { |
ysr@777 | 80 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 81 | _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); |
ysr@777 | 82 | for (int i = 0; i < 256; i++) _histo[i] = 0; |
ysr@777 | 83 | } |
ysr@777 | 84 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
ysr@777 | 85 | if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { |
ysr@777 | 86 | _calls++; |
ysr@777 | 87 | unsigned char* ujb = (unsigned char*)card_ptr; |
ysr@777 | 88 | int ind = (int)(*ujb); |
ysr@777 | 89 | _histo[ind]++; |
ysr@777 | 90 | *card_ptr = -1; |
ysr@777 | 91 | } |
ysr@777 | 92 | return true; |
ysr@777 | 93 | } |
ysr@777 | 94 | int calls() { return _calls; } |
ysr@777 | 95 | void print_histo() { |
ysr@777 | 96 | gclog_or_tty->print_cr("Card table value histogram:"); |
ysr@777 | 97 | for (int i = 0; i < 256; i++) { |
ysr@777 | 98 | if (_histo[i] != 0) { |
ysr@777 | 99 | gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); |
ysr@777 | 100 | } |
ysr@777 | 101 | } |
ysr@777 | 102 | } |
ysr@777 | 103 | }; |
ysr@777 | 104 | |
ysr@777 | 105 | class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { |
ysr@777 | 106 | int _calls; |
ysr@777 | 107 | G1CollectedHeap* _g1h; |
ysr@777 | 108 | CardTableModRefBS* _ctbs; |
ysr@777 | 109 | public: |
ysr@777 | 110 | RedirtyLoggedCardTableEntryClosure() : |
ysr@777 | 111 | _calls(0) |
ysr@777 | 112 | { |
ysr@777 | 113 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 114 | _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); |
ysr@777 | 115 | } |
ysr@777 | 116 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
ysr@777 | 117 | if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { |
ysr@777 | 118 | _calls++; |
ysr@777 | 119 | *card_ptr = 0; |
ysr@777 | 120 | } |
ysr@777 | 121 | return true; |
ysr@777 | 122 | } |
ysr@777 | 123 | int calls() { return _calls; } |
ysr@777 | 124 | }; |
ysr@777 | 125 | |
iveresov@1051 | 126 | class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { |
iveresov@1051 | 127 | public: |
iveresov@1051 | 128 | bool do_card_ptr(jbyte* card_ptr, int worker_i) { |
iveresov@1051 | 129 | *card_ptr = CardTableModRefBS::dirty_card_val(); |
iveresov@1051 | 130 | return true; |
iveresov@1051 | 131 | } |
iveresov@1051 | 132 | }; |
iveresov@1051 | 133 | |
ysr@777 | 134 | YoungList::YoungList(G1CollectedHeap* g1h) |
ysr@777 | 135 | : _g1h(g1h), _head(NULL), |
ysr@777 | 136 | _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), |
ysr@777 | 137 | _length(0), _scan_only_length(0), |
ysr@777 | 138 | _last_sampled_rs_lengths(0), |
apetrusenko@980 | 139 | _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) |
ysr@777 | 140 | { |
ysr@777 | 141 | guarantee( check_list_empty(false), "just making sure..." ); |
ysr@777 | 142 | } |
ysr@777 | 143 | |
ysr@777 | 144 | void YoungList::push_region(HeapRegion *hr) { |
ysr@777 | 145 | assert(!hr->is_young(), "should not already be young"); |
ysr@777 | 146 | assert(hr->get_next_young_region() == NULL, "cause it should!"); |
ysr@777 | 147 | |
ysr@777 | 148 | hr->set_next_young_region(_head); |
ysr@777 | 149 | _head = hr; |
ysr@777 | 150 | |
ysr@777 | 151 | hr->set_young(); |
ysr@777 | 152 | double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); |
ysr@777 | 153 | ++_length; |
ysr@777 | 154 | } |
ysr@777 | 155 | |
ysr@777 | 156 | void YoungList::add_survivor_region(HeapRegion* hr) { |
apetrusenko@980 | 157 | assert(hr->is_survivor(), "should be flagged as survivor region"); |
ysr@777 | 158 | assert(hr->get_next_young_region() == NULL, "cause it should!"); |
ysr@777 | 159 | |
ysr@777 | 160 | hr->set_next_young_region(_survivor_head); |
ysr@777 | 161 | if (_survivor_head == NULL) { |
apetrusenko@980 | 162 | _survivor_tail = hr; |
ysr@777 | 163 | } |
ysr@777 | 164 | _survivor_head = hr; |
ysr@777 | 165 | |
ysr@777 | 166 | ++_survivor_length; |
ysr@777 | 167 | } |
ysr@777 | 168 | |
ysr@777 | 169 | HeapRegion* YoungList::pop_region() { |
ysr@777 | 170 | while (_head != NULL) { |
ysr@777 | 171 | assert( length() > 0, "list should not be empty" ); |
ysr@777 | 172 | HeapRegion* ret = _head; |
ysr@777 | 173 | _head = ret->get_next_young_region(); |
ysr@777 | 174 | ret->set_next_young_region(NULL); |
ysr@777 | 175 | --_length; |
ysr@777 | 176 | assert(ret->is_young(), "region should be very young"); |
ysr@777 | 177 | |
ysr@777 | 178 | // Replace 'Survivor' region type with 'Young'. So the region will |
ysr@777 | 179 | // be treated as a young region and will not be 'confused' with |
ysr@777 | 180 | // newly created survivor regions. |
ysr@777 | 181 | if (ret->is_survivor()) { |
ysr@777 | 182 | ret->set_young(); |
ysr@777 | 183 | } |
ysr@777 | 184 | |
ysr@777 | 185 | if (!ret->is_scan_only()) { |
ysr@777 | 186 | return ret; |
ysr@777 | 187 | } |
ysr@777 | 188 | |
ysr@777 | 189 | // scan-only, we'll add it to the scan-only list |
ysr@777 | 190 | if (_scan_only_tail == NULL) { |
ysr@777 | 191 | guarantee( _scan_only_head == NULL, "invariant" ); |
ysr@777 | 192 | |
ysr@777 | 193 | _scan_only_head = ret; |
ysr@777 | 194 | _curr_scan_only = ret; |
ysr@777 | 195 | } else { |
ysr@777 | 196 | guarantee( _scan_only_head != NULL, "invariant" ); |
ysr@777 | 197 | _scan_only_tail->set_next_young_region(ret); |
ysr@777 | 198 | } |
ysr@777 | 199 | guarantee( ret->get_next_young_region() == NULL, "invariant" ); |
ysr@777 | 200 | _scan_only_tail = ret; |
ysr@777 | 201 | |
ysr@777 | 202 | // no need to be tagged as scan-only any more |
ysr@777 | 203 | ret->set_young(); |
ysr@777 | 204 | |
ysr@777 | 205 | ++_scan_only_length; |
ysr@777 | 206 | } |
ysr@777 | 207 | assert( length() == 0, "list should be empty" ); |
ysr@777 | 208 | return NULL; |
ysr@777 | 209 | } |
ysr@777 | 210 | |
ysr@777 | 211 | void YoungList::empty_list(HeapRegion* list) { |
ysr@777 | 212 | while (list != NULL) { |
ysr@777 | 213 | HeapRegion* next = list->get_next_young_region(); |
ysr@777 | 214 | list->set_next_young_region(NULL); |
ysr@777 | 215 | list->uninstall_surv_rate_group(); |
ysr@777 | 216 | list->set_not_young(); |
ysr@777 | 217 | list = next; |
ysr@777 | 218 | } |
ysr@777 | 219 | } |
ysr@777 | 220 | |
ysr@777 | 221 | void YoungList::empty_list() { |
ysr@777 | 222 | assert(check_list_well_formed(), "young list should be well formed"); |
ysr@777 | 223 | |
ysr@777 | 224 | empty_list(_head); |
ysr@777 | 225 | _head = NULL; |
ysr@777 | 226 | _length = 0; |
ysr@777 | 227 | |
ysr@777 | 228 | empty_list(_scan_only_head); |
ysr@777 | 229 | _scan_only_head = NULL; |
ysr@777 | 230 | _scan_only_tail = NULL; |
ysr@777 | 231 | _scan_only_length = 0; |
ysr@777 | 232 | _curr_scan_only = NULL; |
ysr@777 | 233 | |
ysr@777 | 234 | empty_list(_survivor_head); |
ysr@777 | 235 | _survivor_head = NULL; |
apetrusenko@980 | 236 | _survivor_tail = NULL; |
ysr@777 | 237 | _survivor_length = 0; |
ysr@777 | 238 | |
ysr@777 | 239 | _last_sampled_rs_lengths = 0; |
ysr@777 | 240 | |
ysr@777 | 241 | assert(check_list_empty(false), "just making sure..."); |
ysr@777 | 242 | } |
ysr@777 | 243 | |
ysr@777 | 244 | bool YoungList::check_list_well_formed() { |
ysr@777 | 245 | bool ret = true; |
ysr@777 | 246 | |
ysr@777 | 247 | size_t length = 0; |
ysr@777 | 248 | HeapRegion* curr = _head; |
ysr@777 | 249 | HeapRegion* last = NULL; |
ysr@777 | 250 | while (curr != NULL) { |
ysr@777 | 251 | if (!curr->is_young() || curr->is_scan_only()) { |
ysr@777 | 252 | gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " |
ysr@777 | 253 | "incorrectly tagged (%d, %d)", |
ysr@777 | 254 | curr->bottom(), curr->end(), |
ysr@777 | 255 | curr->is_young(), curr->is_scan_only()); |
ysr@777 | 256 | ret = false; |
ysr@777 | 257 | } |
ysr@777 | 258 | ++length; |
ysr@777 | 259 | last = curr; |
ysr@777 | 260 | curr = curr->get_next_young_region(); |
ysr@777 | 261 | } |
ysr@777 | 262 | ret = ret && (length == _length); |
ysr@777 | 263 | |
ysr@777 | 264 | if (!ret) { |
ysr@777 | 265 | gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); |
ysr@777 | 266 | gclog_or_tty->print_cr("### list has %d entries, _length is %d", |
ysr@777 | 267 | length, _length); |
ysr@777 | 268 | } |
ysr@777 | 269 | |
ysr@777 | 270 | bool scan_only_ret = true; |
ysr@777 | 271 | length = 0; |
ysr@777 | 272 | curr = _scan_only_head; |
ysr@777 | 273 | last = NULL; |
ysr@777 | 274 | while (curr != NULL) { |
ysr@777 | 275 | if (!curr->is_young() || curr->is_scan_only()) { |
ysr@777 | 276 | gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " |
ysr@777 | 277 | "incorrectly tagged (%d, %d)", |
ysr@777 | 278 | curr->bottom(), curr->end(), |
ysr@777 | 279 | curr->is_young(), curr->is_scan_only()); |
ysr@777 | 280 | scan_only_ret = false; |
ysr@777 | 281 | } |
ysr@777 | 282 | ++length; |
ysr@777 | 283 | last = curr; |
ysr@777 | 284 | curr = curr->get_next_young_region(); |
ysr@777 | 285 | } |
ysr@777 | 286 | scan_only_ret = scan_only_ret && (length == _scan_only_length); |
ysr@777 | 287 | |
ysr@777 | 288 | if ( (last != _scan_only_tail) || |
ysr@777 | 289 | (_scan_only_head == NULL && _scan_only_tail != NULL) || |
ysr@777 | 290 | (_scan_only_head != NULL && _scan_only_tail == NULL) ) { |
ysr@777 | 291 | gclog_or_tty->print_cr("### _scan_only_tail is set incorrectly"); |
ysr@777 | 292 | scan_only_ret = false; |
ysr@777 | 293 | } |
ysr@777 | 294 | |
ysr@777 | 295 | if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { |
ysr@777 | 296 | gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); |
ysr@777 | 297 | scan_only_ret = false; |
ysr@777 | 298 | } |
ysr@777 | 299 | |
ysr@777 | 300 | if (!scan_only_ret) { |
ysr@777 | 301 | gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); |
ysr@777 | 302 | gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", |
ysr@777 | 303 | length, _scan_only_length); |
ysr@777 | 304 | } |
ysr@777 | 305 | |
ysr@777 | 306 | return ret && scan_only_ret; |
ysr@777 | 307 | } |
ysr@777 | 308 | |
ysr@777 | 309 | bool YoungList::check_list_empty(bool ignore_scan_only_list, |
ysr@777 | 310 | bool check_sample) { |
ysr@777 | 311 | bool ret = true; |
ysr@777 | 312 | |
ysr@777 | 313 | if (_length != 0) { |
ysr@777 | 314 | gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", |
ysr@777 | 315 | _length); |
ysr@777 | 316 | ret = false; |
ysr@777 | 317 | } |
ysr@777 | 318 | if (check_sample && _last_sampled_rs_lengths != 0) { |
ysr@777 | 319 | gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); |
ysr@777 | 320 | ret = false; |
ysr@777 | 321 | } |
ysr@777 | 322 | if (_head != NULL) { |
ysr@777 | 323 | gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); |
ysr@777 | 324 | ret = false; |
ysr@777 | 325 | } |
ysr@777 | 326 | if (!ret) { |
ysr@777 | 327 | gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); |
ysr@777 | 328 | } |
ysr@777 | 329 | |
ysr@777 | 330 | if (ignore_scan_only_list) |
ysr@777 | 331 | return ret; |
ysr@777 | 332 | |
ysr@777 | 333 | bool scan_only_ret = true; |
ysr@777 | 334 | if (_scan_only_length != 0) { |
ysr@777 | 335 | gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", |
ysr@777 | 336 | _scan_only_length); |
ysr@777 | 337 | scan_only_ret = false; |
ysr@777 | 338 | } |
ysr@777 | 339 | if (_scan_only_head != NULL) { |
ysr@777 | 340 | gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); |
ysr@777 | 341 | scan_only_ret = false; |
ysr@777 | 342 | } |
ysr@777 | 343 | if (_scan_only_tail != NULL) { |
ysr@777 | 344 | gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); |
ysr@777 | 345 | scan_only_ret = false; |
ysr@777 | 346 | } |
ysr@777 | 347 | if (!scan_only_ret) { |
ysr@777 | 348 | gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); |
ysr@777 | 349 | } |
ysr@777 | 350 | |
ysr@777 | 351 | return ret && scan_only_ret; |
ysr@777 | 352 | } |
ysr@777 | 353 | |
ysr@777 | 354 | void |
ysr@777 | 355 | YoungList::rs_length_sampling_init() { |
ysr@777 | 356 | _sampled_rs_lengths = 0; |
ysr@777 | 357 | _curr = _head; |
ysr@777 | 358 | } |
ysr@777 | 359 | |
ysr@777 | 360 | bool |
ysr@777 | 361 | YoungList::rs_length_sampling_more() { |
ysr@777 | 362 | return _curr != NULL; |
ysr@777 | 363 | } |
ysr@777 | 364 | |
ysr@777 | 365 | void |
ysr@777 | 366 | YoungList::rs_length_sampling_next() { |
ysr@777 | 367 | assert( _curr != NULL, "invariant" ); |
ysr@777 | 368 | _sampled_rs_lengths += _curr->rem_set()->occupied(); |
ysr@777 | 369 | _curr = _curr->get_next_young_region(); |
ysr@777 | 370 | if (_curr == NULL) { |
ysr@777 | 371 | _last_sampled_rs_lengths = _sampled_rs_lengths; |
ysr@777 | 372 | // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); |
ysr@777 | 373 | } |
ysr@777 | 374 | } |
ysr@777 | 375 | |
ysr@777 | 376 | void |
ysr@777 | 377 | YoungList::reset_auxilary_lists() { |
ysr@777 | 378 | // We could have just "moved" the scan-only list to the young list. |
ysr@777 | 379 | // However, the scan-only list is ordered according to the region |
ysr@777 | 380 | // age in descending order, so, by moving one entry at a time, we |
ysr@777 | 381 | // ensure that it is recreated in ascending order. |
ysr@777 | 382 | |
ysr@777 | 383 | guarantee( is_empty(), "young list should be empty" ); |
ysr@777 | 384 | assert(check_list_well_formed(), "young list should be well formed"); |
ysr@777 | 385 | |
ysr@777 | 386 | // Add survivor regions to SurvRateGroup. |
ysr@777 | 387 | _g1h->g1_policy()->note_start_adding_survivor_regions(); |
apetrusenko@980 | 388 | _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); |
ysr@777 | 389 | for (HeapRegion* curr = _survivor_head; |
ysr@777 | 390 | curr != NULL; |
ysr@777 | 391 | curr = curr->get_next_young_region()) { |
ysr@777 | 392 | _g1h->g1_policy()->set_region_survivors(curr); |
ysr@777 | 393 | } |
ysr@777 | 394 | _g1h->g1_policy()->note_stop_adding_survivor_regions(); |
ysr@777 | 395 | |
ysr@777 | 396 | if (_survivor_head != NULL) { |
ysr@777 | 397 | _head = _survivor_head; |
ysr@777 | 398 | _length = _survivor_length + _scan_only_length; |
apetrusenko@980 | 399 | _survivor_tail->set_next_young_region(_scan_only_head); |
ysr@777 | 400 | } else { |
ysr@777 | 401 | _head = _scan_only_head; |
ysr@777 | 402 | _length = _scan_only_length; |
ysr@777 | 403 | } |
ysr@777 | 404 | |
ysr@777 | 405 | for (HeapRegion* curr = _scan_only_head; |
ysr@777 | 406 | curr != NULL; |
ysr@777 | 407 | curr = curr->get_next_young_region()) { |
ysr@777 | 408 | curr->recalculate_age_in_surv_rate_group(); |
ysr@777 | 409 | } |
ysr@777 | 410 | _scan_only_head = NULL; |
ysr@777 | 411 | _scan_only_tail = NULL; |
ysr@777 | 412 | _scan_only_length = 0; |
ysr@777 | 413 | _curr_scan_only = NULL; |
ysr@777 | 414 | |
ysr@777 | 415 | _survivor_head = NULL; |
apetrusenko@980 | 416 | _survivor_tail = NULL; |
ysr@777 | 417 | _survivor_length = 0; |
apetrusenko@980 | 418 | _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); |
ysr@777 | 419 | |
ysr@777 | 420 | assert(check_list_well_formed(), "young list should be well formed"); |
ysr@777 | 421 | } |
ysr@777 | 422 | |
ysr@777 | 423 | void YoungList::print() { |
ysr@777 | 424 | HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; |
ysr@777 | 425 | const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; |
ysr@777 | 426 | |
ysr@777 | 427 | for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { |
ysr@777 | 428 | gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); |
ysr@777 | 429 | HeapRegion *curr = lists[list]; |
ysr@777 | 430 | if (curr == NULL) |
ysr@777 | 431 | gclog_or_tty->print_cr(" empty"); |
ysr@777 | 432 | while (curr != NULL) { |
ysr@777 | 433 | gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " |
ysr@777 | 434 | "age: %4d, y: %d, s-o: %d, surv: %d", |
ysr@777 | 435 | curr->bottom(), curr->end(), |
ysr@777 | 436 | curr->top(), |
ysr@777 | 437 | curr->prev_top_at_mark_start(), |
ysr@777 | 438 | curr->next_top_at_mark_start(), |
ysr@777 | 439 | curr->top_at_conc_mark_count(), |
ysr@777 | 440 | curr->age_in_surv_rate_group_cond(), |
ysr@777 | 441 | curr->is_young(), |
ysr@777 | 442 | curr->is_scan_only(), |
ysr@777 | 443 | curr->is_survivor()); |
ysr@777 | 444 | curr = curr->get_next_young_region(); |
ysr@777 | 445 | } |
ysr@777 | 446 | } |
ysr@777 | 447 | |
ysr@777 | 448 | gclog_or_tty->print_cr(""); |
ysr@777 | 449 | } |
ysr@777 | 450 | |
apetrusenko@1231 | 451 | void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
apetrusenko@1231 | 452 | { |
apetrusenko@1231 | 453 | // Claim the right to put the region on the dirty cards region list |
apetrusenko@1231 | 454 | // by installing a self pointer. |
apetrusenko@1231 | 455 | HeapRegion* next = hr->get_next_dirty_cards_region(); |
apetrusenko@1231 | 456 | if (next == NULL) { |
apetrusenko@1231 | 457 | HeapRegion* res = (HeapRegion*) |
apetrusenko@1231 | 458 | Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), |
apetrusenko@1231 | 459 | NULL); |
apetrusenko@1231 | 460 | if (res == NULL) { |
apetrusenko@1231 | 461 | HeapRegion* head; |
apetrusenko@1231 | 462 | do { |
apetrusenko@1231 | 463 | // Put the region to the dirty cards region list. |
apetrusenko@1231 | 464 | head = _dirty_cards_region_list; |
apetrusenko@1231 | 465 | next = (HeapRegion*) |
apetrusenko@1231 | 466 | Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); |
apetrusenko@1231 | 467 | if (next == head) { |
apetrusenko@1231 | 468 | assert(hr->get_next_dirty_cards_region() == hr, |
apetrusenko@1231 | 469 | "hr->get_next_dirty_cards_region() != hr"); |
apetrusenko@1231 | 470 | if (next == NULL) { |
apetrusenko@1231 | 471 | // The last region in the list points to itself. |
apetrusenko@1231 | 472 | hr->set_next_dirty_cards_region(hr); |
apetrusenko@1231 | 473 | } else { |
apetrusenko@1231 | 474 | hr->set_next_dirty_cards_region(next); |
apetrusenko@1231 | 475 | } |
apetrusenko@1231 | 476 | } |
apetrusenko@1231 | 477 | } while (next != head); |
apetrusenko@1231 | 478 | } |
apetrusenko@1231 | 479 | } |
apetrusenko@1231 | 480 | } |
apetrusenko@1231 | 481 | |
apetrusenko@1231 | 482 | HeapRegion* G1CollectedHeap::pop_dirty_cards_region() |
apetrusenko@1231 | 483 | { |
apetrusenko@1231 | 484 | HeapRegion* head; |
apetrusenko@1231 | 485 | HeapRegion* hr; |
apetrusenko@1231 | 486 | do { |
apetrusenko@1231 | 487 | head = _dirty_cards_region_list; |
apetrusenko@1231 | 488 | if (head == NULL) { |
apetrusenko@1231 | 489 | return NULL; |
apetrusenko@1231 | 490 | } |
apetrusenko@1231 | 491 | HeapRegion* new_head = head->get_next_dirty_cards_region(); |
apetrusenko@1231 | 492 | if (head == new_head) { |
apetrusenko@1231 | 493 | // The last region. |
apetrusenko@1231 | 494 | new_head = NULL; |
apetrusenko@1231 | 495 | } |
apetrusenko@1231 | 496 | hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, |
apetrusenko@1231 | 497 | head); |
apetrusenko@1231 | 498 | } while (hr != head); |
apetrusenko@1231 | 499 | assert(hr != NULL, "invariant"); |
apetrusenko@1231 | 500 | hr->set_next_dirty_cards_region(NULL); |
apetrusenko@1231 | 501 | return hr; |
apetrusenko@1231 | 502 | } |
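The push/pop pair above is a lock-free singly-linked list in which a region claims list membership by installing a self-pointer (NULL means unclaimed; the last node marks the end of the list by pointing to itself instead of to NULL, so "on the list" is never confused with "unclaimed"). A self-contained sketch of the same protocol in C++11 atomics (illustrative; the real code uses Atomic::cmpxchg_ptr, and this sketch shares the usual ABA caveats of CAS-based list pops):

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};  // nullptr = unclaimed; self = claimed end-of-list
    };

    void push(std::atomic<Node*>& list, Node* n) {
      Node* expected = nullptr;
      // Claim the node by CASing a self-pointer into 'next'; a loser gives up.
      if (!n->next.compare_exchange_strong(expected, n)) return;
      Node* head = list.load();
      do {
        // The last node points to itself rather than to nullptr.
        n->next.store(head == nullptr ? n : head);
      } while (!list.compare_exchange_weak(head, n));
    }

    Node* pop(std::atomic<Node*>& list) {
      Node* head = list.load();
      Node* new_head;
      do {
        if (head == nullptr) return nullptr;
        new_head = head->next.load();
        if (new_head == head) new_head = nullptr;  // popping the last node
      } while (!list.compare_exchange_weak(head, new_head));
      head->next.store(nullptr);                   // unclaim, as the code above does
      return head;
    }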
apetrusenko@1231 | 503 | |
ysr@777 | 504 | void G1CollectedHeap::stop_conc_gc_threads() { |
iveresov@1229 | 505 | _cg1r->stop(); |
ysr@777 | 506 | _czft->stop(); |
ysr@777 | 507 | _cmThread->stop(); |
ysr@777 | 508 | } |
ysr@777 | 509 | |
ysr@777 | 510 | |
ysr@777 | 511 | void G1CollectedHeap::check_ct_logs_at_safepoint() { |
ysr@777 | 512 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 513 | CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); |
ysr@777 | 514 | |
ysr@777 | 515 | // Count the dirty cards at the start. |
ysr@777 | 516 | CountNonCleanMemRegionClosure count1(this); |
ysr@777 | 517 | ct_bs->mod_card_iterate(&count1); |
ysr@777 | 518 | int orig_count = count1.n(); |
ysr@777 | 519 | |
ysr@777 | 520 | // First clear the logged cards. |
ysr@777 | 521 | ClearLoggedCardTableEntryClosure clear; |
ysr@777 | 522 | dcqs.set_closure(&clear); |
ysr@777 | 523 | dcqs.apply_closure_to_all_completed_buffers(); |
ysr@777 | 524 | dcqs.iterate_closure_all_threads(false); |
ysr@777 | 525 | clear.print_histo(); |
ysr@777 | 526 | |
ysr@777 | 527 | // Now ensure that there are no dirty cards. |
ysr@777 | 528 | CountNonCleanMemRegionClosure count2(this); |
ysr@777 | 529 | ct_bs->mod_card_iterate(&count2); |
ysr@777 | 530 | if (count2.n() != 0) { |
ysr@777 | 531 | gclog_or_tty->print_cr("Card table has %d entries; %d originally", |
ysr@777 | 532 | count2.n(), orig_count); |
ysr@777 | 533 | } |
ysr@777 | 534 | guarantee(count2.n() == 0, "Card table should be clean."); |
ysr@777 | 535 | |
ysr@777 | 536 | RedirtyLoggedCardTableEntryClosure redirty; |
ysr@777 | 537 | JavaThread::dirty_card_queue_set().set_closure(&redirty); |
ysr@777 | 538 | dcqs.apply_closure_to_all_completed_buffers(); |
ysr@777 | 539 | dcqs.iterate_closure_all_threads(false); |
ysr@777 | 540 | gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", |
ysr@777 | 541 | clear.calls(), orig_count); |
ysr@777 | 542 | guarantee(redirty.calls() == clear.calls(), |
ysr@777 | 543 | "Or else mechanism is broken."); |
ysr@777 | 544 | |
ysr@777 | 545 | CountNonCleanMemRegionClosure count3(this); |
ysr@777 | 546 | ct_bs->mod_card_iterate(&count3); |
ysr@777 | 547 | if (count3.n() != orig_count) { |
ysr@777 | 548 | gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", |
ysr@777 | 549 | orig_count, count3.n()); |
ysr@777 | 550 | guarantee(count3.n() >= orig_count, "Should have restored them all."); |
ysr@777 | 551 | } |
ysr@777 | 552 | |
ysr@777 | 553 | JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); |
ysr@777 | 554 | } |
ysr@777 | 555 | |
ysr@777 | 556 | // Private class members. |
ysr@777 | 557 | |
ysr@777 | 558 | G1CollectedHeap* G1CollectedHeap::_g1h; |
ysr@777 | 559 | |
ysr@777 | 560 | // Private methods. |
ysr@777 | 561 | |
ysr@777 | 562 | // Finds a HeapRegion that can be used to allocate a block of the given size. |
ysr@777 | 563 | |
ysr@777 | 564 | |
ysr@777 | 565 | HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, |
ysr@777 | 566 | bool do_expand, |
ysr@777 | 567 | bool zero_filled) { |
ysr@777 | 568 | ConcurrentZFThread::note_region_alloc(); |
ysr@777 | 569 | HeapRegion* res = alloc_free_region_from_lists(zero_filled); |
ysr@777 | 570 | if (res == NULL && do_expand) { |
ysr@777 | 571 | expand(word_size * HeapWordSize); |
ysr@777 | 572 | res = alloc_free_region_from_lists(zero_filled); |
ysr@777 | 573 | assert(res == NULL || |
ysr@777 | 574 | (!res->isHumongous() && |
ysr@777 | 575 | (!zero_filled || |
ysr@777 | 576 | res->zero_fill_state() == HeapRegion::Allocated)), |
ysr@777 | 577 | "Alloc Regions must be zero filled (and non-H)"); |
ysr@777 | 578 | } |
ysr@777 | 579 | if (res != NULL && res->is_empty()) _free_regions--; |
ysr@777 | 580 | assert(res == NULL || |
ysr@777 | 581 | (!res->isHumongous() && |
ysr@777 | 582 | (!zero_filled || |
ysr@777 | 583 | res->zero_fill_state() == HeapRegion::Allocated)), |
ysr@777 | 584 | "Non-young alloc Regions must be zero filled (and non-H)"); |
ysr@777 | 585 | |
johnc@1186 | 586 | if (G1PrintRegions) { |
ysr@777 | 587 | if (res != NULL) { |
ysr@777 | 588 | gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " |
ysr@777 | 589 | "top "PTR_FORMAT, |
ysr@777 | 590 | res->hrs_index(), res->bottom(), res->end(), res->top()); |
ysr@777 | 591 | } |
ysr@777 | 592 | } |
ysr@777 | 593 | |
ysr@777 | 594 | return res; |
ysr@777 | 595 | } |
ysr@777 | 596 | |
ysr@777 | 597 | HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, |
ysr@777 | 598 | size_t word_size, |
ysr@777 | 599 | bool zero_filled) { |
ysr@777 | 600 | HeapRegion* alloc_region = NULL; |
ysr@777 | 601 | if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { |
ysr@777 | 602 | alloc_region = newAllocRegion_work(word_size, true, zero_filled); |
ysr@777 | 603 | if (purpose == GCAllocForSurvived && alloc_region != NULL) { |
apetrusenko@980 | 604 | alloc_region->set_survivor(); |
ysr@777 | 605 | } |
ysr@777 | 606 | ++_gc_alloc_region_counts[purpose]; |
ysr@777 | 607 | } else { |
ysr@777 | 608 | g1_policy()->note_alloc_region_limit_reached(purpose); |
ysr@777 | 609 | } |
ysr@777 | 610 | return alloc_region; |
ysr@777 | 611 | } |
ysr@777 | 612 | |
ysr@777 | 613 | // If the allocation could fit into the free regions w/o expansion, try that. |
ysr@777 | 614 | // Otherwise, if we can expand the heap, do so. |
ysr@777 | 615 | // Otherwise, if giving back reserved regions might help, try with them given back. |
ysr@777 | 616 | HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { |
ysr@777 | 617 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 618 | |
ysr@777 | 619 | // We can't allocate H regions while cleanupComplete is running, since |
ysr@777 | 620 | // some of the regions we find to be empty might not yet be added to the |
ysr@777 | 621 | // unclean list. (If we're already at a safepoint, this call is |
ysr@777 | 622 | // unnecessary, not to mention wrong.) |
ysr@777 | 623 | if (!SafepointSynchronize::is_at_safepoint()) |
ysr@777 | 624 | wait_for_cleanup_complete(); |
ysr@777 | 625 | |
ysr@777 | 626 | size_t num_regions = |
ysr@777 | 627 | round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; |
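  // Worked example with assumed numbers: if GrainWords were 128K words and
  // word_size were 300K words, round_to would yield 384K words, so
  // num_regions would be 3.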
ysr@777 | 628 | |
ysr@777 | 629 | // Special case if < one region??? |
ysr@777 | 630 | |
ysr@777 | 631 | // Remember the number of regions we could still expand by. |
ysr@777 | 632 | size_t x_size = expansion_regions(); |
ysr@777 | 633 | |
ysr@777 | 634 | HeapWord* res = NULL; |
ysr@777 | 635 | bool eliminated_allocated_from_lists = false; |
ysr@777 | 636 | |
ysr@777 | 637 | // Can the allocation potentially fit in the free regions? |
ysr@777 | 638 | if (free_regions() >= num_regions) { |
ysr@777 | 639 | res = _hrs->obj_allocate(word_size); |
ysr@777 | 640 | } |
ysr@777 | 641 | if (res == NULL) { |
ysr@777 | 642 | // Try expansion. |
ysr@777 | 643 | size_t fs = _hrs->free_suffix(); |
ysr@777 | 644 | if (fs + x_size >= num_regions) { |
ysr@777 | 645 | expand((num_regions - fs) * HeapRegion::GrainBytes); |
ysr@777 | 646 | res = _hrs->obj_allocate(word_size); |
ysr@777 | 647 | assert(res != NULL, "This should have worked."); |
ysr@777 | 648 | } else { |
ysr@777 | 649 | // Expansion won't help. Are there enough free regions if we get rid |
ysr@777 | 650 | // of reservations? |
ysr@777 | 651 | size_t avail = free_regions(); |
ysr@777 | 652 | if (avail >= num_regions) { |
ysr@777 | 653 | res = _hrs->obj_allocate(word_size); |
ysr@777 | 654 | if (res != NULL) { |
ysr@777 | 655 | remove_allocated_regions_from_lists(); |
ysr@777 | 656 | eliminated_allocated_from_lists = true; |
ysr@777 | 657 | } |
ysr@777 | 658 | } |
ysr@777 | 659 | } |
ysr@777 | 660 | } |
ysr@777 | 661 | if (res != NULL) { |
ysr@777 | 662 | // Increment by the number of regions allocated. |
ysr@777 | 663 | // FIXME: Assumes regions all of size GrainBytes. |
ysr@777 | 664 | #ifndef PRODUCT |
ysr@777 | 665 | mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * |
ysr@777 | 666 | HeapRegion::GrainWords)); |
ysr@777 | 667 | #endif |
ysr@777 | 668 | if (!eliminated_allocated_from_lists) |
ysr@777 | 669 | remove_allocated_regions_from_lists(); |
ysr@777 | 670 | _summary_bytes_used += word_size * HeapWordSize; |
ysr@777 | 671 | _free_regions -= num_regions; |
ysr@777 | 672 | _num_humongous_regions += (int) num_regions; |
ysr@777 | 673 | } |
ysr@777 | 674 | assert(regions_accounted_for(), "Region Leakage"); |
ysr@777 | 675 | return res; |
ysr@777 | 676 | } |
ysr@777 | 677 | |
ysr@777 | 678 | HeapWord* |
ysr@777 | 679 | G1CollectedHeap::attempt_allocation_slow(size_t word_size, |
ysr@777 | 680 | bool permit_collection_pause) { |
ysr@777 | 681 | HeapWord* res = NULL; |
ysr@777 | 682 | HeapRegion* allocated_young_region = NULL; |
ysr@777 | 683 | |
ysr@777 | 684 | assert( SafepointSynchronize::is_at_safepoint() || |
ysr@777 | 685 | Heap_lock->owned_by_self(), "pre condition of the call" ); |
ysr@777 | 686 | |
ysr@777 | 687 | if (isHumongous(word_size)) { |
ysr@777 | 688 | // Allocation of a humongous object can, in a sense, complete a |
ysr@777 | 689 | // partial region, if the previous alloc was also humongous, and |
ysr@777 | 690 | // caused the test below to succeed. |
ysr@777 | 691 | if (permit_collection_pause) |
ysr@777 | 692 | do_collection_pause_if_appropriate(word_size); |
ysr@777 | 693 | res = humongousObjAllocate(word_size); |
ysr@777 | 694 | assert(_cur_alloc_region == NULL |
ysr@777 | 695 | || !_cur_alloc_region->isHumongous(), |
ysr@777 | 696 | "Prevent a regression of this bug."); |
ysr@777 | 697 | |
ysr@777 | 698 | } else { |
iveresov@789 | 699 | // We may have concurrent cleanup working at the time. Wait for it |
iveresov@789 | 700 | // to complete. In the future we would probably want to make the |
iveresov@789 | 701 | // concurrent cleanup truly concurrent by decoupling it from the |
iveresov@789 | 702 | // allocation. |
iveresov@789 | 703 | if (!SafepointSynchronize::is_at_safepoint()) |
iveresov@789 | 704 | wait_for_cleanup_complete(); |
ysr@777 | 705 | // If we do a collection pause, this will be reset to a non-NULL |
ysr@777 | 706 | // value. If we don't, nulling here ensures that we allocate a new |
ysr@777 | 707 | // region below. |
ysr@777 | 708 | if (_cur_alloc_region != NULL) { |
ysr@777 | 709 | // We're finished with the _cur_alloc_region. |
ysr@777 | 710 | _summary_bytes_used += _cur_alloc_region->used(); |
ysr@777 | 711 | _cur_alloc_region = NULL; |
ysr@777 | 712 | } |
ysr@777 | 713 | assert(_cur_alloc_region == NULL, "Invariant."); |
ysr@777 | 714 | // Completion of a heap region is perhaps a good point at which to do |
ysr@777 | 715 | // a collection pause. |
ysr@777 | 716 | if (permit_collection_pause) |
ysr@777 | 717 | do_collection_pause_if_appropriate(word_size); |
ysr@777 | 718 | // Make sure we have an allocation region available. |
ysr@777 | 719 | if (_cur_alloc_region == NULL) { |
ysr@777 | 720 | if (!SafepointSynchronize::is_at_safepoint()) |
ysr@777 | 721 | wait_for_cleanup_complete(); |
ysr@777 | 722 | bool next_is_young = should_set_young_locked(); |
ysr@777 | 723 | // If the next region is not young, make sure it's zero-filled. |
ysr@777 | 724 | _cur_alloc_region = newAllocRegion(word_size, !next_is_young); |
ysr@777 | 725 | if (_cur_alloc_region != NULL) { |
ysr@777 | 726 | _summary_bytes_used -= _cur_alloc_region->used(); |
ysr@777 | 727 | if (next_is_young) { |
ysr@777 | 728 | set_region_short_lived_locked(_cur_alloc_region); |
ysr@777 | 729 | allocated_young_region = _cur_alloc_region; |
ysr@777 | 730 | } |
ysr@777 | 731 | } |
ysr@777 | 732 | } |
ysr@777 | 733 | assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), |
ysr@777 | 734 | "Prevent a regression of this bug."); |
ysr@777 | 735 | |
ysr@777 | 736 | // Now retry the allocation. |
ysr@777 | 737 | if (_cur_alloc_region != NULL) { |
ysr@777 | 738 | res = _cur_alloc_region->allocate(word_size); |
ysr@777 | 739 | } |
ysr@777 | 740 | } |
ysr@777 | 741 | |
ysr@777 | 742 | // NOTE: fails frequently in PRT |
ysr@777 | 743 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 744 | |
ysr@777 | 745 | if (res != NULL) { |
ysr@777 | 746 | if (!SafepointSynchronize::is_at_safepoint()) { |
ysr@777 | 747 | assert( permit_collection_pause, "invariant" ); |
ysr@777 | 748 | assert( Heap_lock->owned_by_self(), "invariant" ); |
ysr@777 | 749 | Heap_lock->unlock(); |
ysr@777 | 750 | } |
ysr@777 | 751 | |
ysr@777 | 752 | if (allocated_young_region != NULL) { |
ysr@777 | 753 | HeapRegion* hr = allocated_young_region; |
ysr@777 | 754 | HeapWord* bottom = hr->bottom(); |
ysr@777 | 755 | HeapWord* end = hr->end(); |
ysr@777 | 756 | MemRegion mr(bottom, end); |
ysr@777 | 757 | ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); |
ysr@777 | 758 | } |
ysr@777 | 759 | } |
ysr@777 | 760 | |
ysr@777 | 761 | assert( SafepointSynchronize::is_at_safepoint() || |
ysr@777 | 762 | (res == NULL && Heap_lock->owned_by_self()) || |
ysr@777 | 763 | (res != NULL && !Heap_lock->owned_by_self()), |
ysr@777 | 764 | "post condition of the call" ); |
ysr@777 | 765 | |
ysr@777 | 766 | return res; |
ysr@777 | 767 | } |
ysr@777 | 768 | |
ysr@777 | 769 | HeapWord* |
ysr@777 | 770 | G1CollectedHeap::mem_allocate(size_t word_size, |
ysr@777 | 771 | bool is_noref, |
ysr@777 | 772 | bool is_tlab, |
ysr@777 | 773 | bool* gc_overhead_limit_was_exceeded) { |
ysr@777 | 774 | debug_only(check_for_valid_allocation_state()); |
ysr@777 | 775 | assert(no_gc_in_progress(), "Allocation during gc not allowed"); |
ysr@777 | 776 | HeapWord* result = NULL; |
ysr@777 | 777 | |
ysr@777 | 778 | // Loop until the allocation is satisfied, |
ysr@777 | 779 | // or unsatisfied after GC. |
ysr@777 | 780 | for (int try_count = 1; /* return or throw */; try_count += 1) { |
ysr@777 | 781 | int gc_count_before; |
ysr@777 | 782 | { |
ysr@777 | 783 | Heap_lock->lock(); |
ysr@777 | 784 | result = attempt_allocation(word_size); |
ysr@777 | 785 | if (result != NULL) { |
ysr@777 | 786 | // attempt_allocation should have unlocked the heap lock |
ysr@777 | 787 | assert(is_in(result), "result not in heap"); |
ysr@777 | 788 | return result; |
ysr@777 | 789 | } |
ysr@777 | 790 | // Read the gc count while the heap lock is held. |
ysr@777 | 791 | gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@777 | 792 | Heap_lock->unlock(); |
ysr@777 | 793 | } |
ysr@777 | 794 | |
ysr@777 | 795 | // Create the garbage collection operation... |
ysr@777 | 796 | VM_G1CollectForAllocation op(word_size, |
ysr@777 | 797 | gc_count_before); |
ysr@777 | 798 | |
ysr@777 | 799 | // ...and get the VM thread to execute it. |
ysr@777 | 800 | VMThread::execute(&op); |
ysr@777 | 801 | if (op.prologue_succeeded()) { |
ysr@777 | 802 | result = op.result(); |
ysr@777 | 803 | assert(result == NULL || is_in(result), "result not in heap"); |
ysr@777 | 804 | return result; |
ysr@777 | 805 | } |
ysr@777 | 806 | |
ysr@777 | 807 | // Give a warning if we seem to be looping forever. |
ysr@777 | 808 | if ((QueuedAllocationWarningCount > 0) && |
ysr@777 | 809 | (try_count % QueuedAllocationWarningCount == 0)) { |
ysr@777 | 810 | warning("G1CollectedHeap::mem_allocate_work retries %d times", |
ysr@777 | 811 | try_count); |
ysr@777 | 812 | } |
ysr@777 | 813 | } |
ysr@777 | 814 | } |
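The gc_count_before handshake in the loop above is what keeps the retry correct: the collection count is read while Heap_lock is held, and the VM operation can refuse to run if some other collection has already happened in the meantime, in which case the loop simply retries the allocation. A hedged sketch of that check on the VM-operation side (assumed shape; the real logic lives in the GC VM-operation prologue):

    bool prologue_succeeded_sketch(int gc_count_before) {
      // If another GC ran since the caller sampled the count, skip this one
      // and let the caller retry its allocation first.
      return SharedHeap::heap()->total_collections() == gc_count_before;
    }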
ysr@777 | 815 | |
ysr@777 | 816 | void G1CollectedHeap::abandon_cur_alloc_region() { |
ysr@777 | 817 | if (_cur_alloc_region != NULL) { |
ysr@777 | 818 | // We're finished with the _cur_alloc_region. |
ysr@777 | 819 | if (_cur_alloc_region->is_empty()) { |
ysr@777 | 820 | _free_regions++; |
ysr@777 | 821 | free_region(_cur_alloc_region); |
ysr@777 | 822 | } else { |
ysr@777 | 823 | _summary_bytes_used += _cur_alloc_region->used(); |
ysr@777 | 824 | } |
ysr@777 | 825 | _cur_alloc_region = NULL; |
ysr@777 | 826 | } |
ysr@777 | 827 | } |
ysr@777 | 828 | |
tonyp@1071 | 829 | void G1CollectedHeap::abandon_gc_alloc_regions() { |
tonyp@1071 | 830 | // first, make sure that the GC alloc region list is empty (it should!) |
tonyp@1071 | 831 | assert(_gc_alloc_region_list == NULL, "invariant"); |
tonyp@1071 | 832 | release_gc_alloc_regions(true /* totally */); |
tonyp@1071 | 833 | } |
tonyp@1071 | 834 | |
ysr@777 | 835 | class PostMCRemSetClearClosure: public HeapRegionClosure { |
ysr@777 | 836 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 837 | public: |
ysr@777 | 838 | PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} |
ysr@777 | 839 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 840 | r->reset_gc_time_stamp(); |
ysr@777 | 841 | if (r->continuesHumongous()) |
ysr@777 | 842 | return false; |
ysr@777 | 843 | HeapRegionRemSet* hrrs = r->rem_set(); |
ysr@777 | 844 | if (hrrs != NULL) hrrs->clear(); |
ysr@777 | 845 | // You might think here that we could clear just the cards |
ysr@777 | 846 | // corresponding to the used region. But no: if we leave a dirty card |
ysr@777 | 847 | // in a region we might allocate into, then it would prevent that card |
ysr@777 | 848 | // from being enqueued, and cause it to be missed. |
ysr@777 | 849 | // Re: the performance cost: we shouldn't be doing full GC anyway! |
ysr@777 | 850 | _mr_bs->clear(MemRegion(r->bottom(), r->end())); |
ysr@777 | 851 | return false; |
ysr@777 | 852 | } |
ysr@777 | 853 | }; |
ysr@777 | 854 | |
ysr@777 | 855 | |
ysr@777 | 856 | class PostMCRemSetInvalidateClosure: public HeapRegionClosure { |
ysr@777 | 857 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 858 | public: |
ysr@777 | 859 | PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} |
ysr@777 | 860 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 861 | if (r->continuesHumongous()) return false; |
ysr@777 | 862 | if (r->used_region().word_size() != 0) { |
ysr@777 | 863 | _mr_bs->invalidate(r->used_region(), true /*whole heap*/); |
ysr@777 | 864 | } |
ysr@777 | 865 | return false; |
ysr@777 | 866 | } |
ysr@777 | 867 | }; |
ysr@777 | 868 | |
apetrusenko@1061 | 869 | class RebuildRSOutOfRegionClosure: public HeapRegionClosure { |
apetrusenko@1061 | 870 | G1CollectedHeap* _g1h; |
apetrusenko@1061 | 871 | UpdateRSOopClosure _cl; |
apetrusenko@1061 | 872 | int _worker_i; |
apetrusenko@1061 | 873 | public: |
apetrusenko@1061 | 874 | RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : |
apetrusenko@1061 | 875 | _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i), |
apetrusenko@1061 | 876 | _worker_i(worker_i), |
apetrusenko@1061 | 877 | _g1h(g1) |
apetrusenko@1061 | 878 | { } |
apetrusenko@1061 | 879 | bool doHeapRegion(HeapRegion* r) { |
apetrusenko@1061 | 880 | if (!r->continuesHumongous()) { |
apetrusenko@1061 | 881 | _cl.set_from(r); |
apetrusenko@1061 | 882 | r->oop_iterate(&_cl); |
apetrusenko@1061 | 883 | } |
apetrusenko@1061 | 884 | return false; |
apetrusenko@1061 | 885 | } |
apetrusenko@1061 | 886 | }; |
apetrusenko@1061 | 887 | |
apetrusenko@1061 | 888 | class ParRebuildRSTask: public AbstractGangTask { |
apetrusenko@1061 | 889 | G1CollectedHeap* _g1; |
apetrusenko@1061 | 890 | public: |
apetrusenko@1061 | 891 | ParRebuildRSTask(G1CollectedHeap* g1) |
apetrusenko@1061 | 892 | : AbstractGangTask("ParRebuildRSTask"), |
apetrusenko@1061 | 893 | _g1(g1) |
apetrusenko@1061 | 894 | { } |
apetrusenko@1061 | 895 | |
apetrusenko@1061 | 896 | void work(int i) { |
apetrusenko@1061 | 897 | RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); |
apetrusenko@1061 | 898 | _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, |
apetrusenko@1061 | 899 | HeapRegion::RebuildRSClaimValue); |
apetrusenko@1061 | 900 | } |
apetrusenko@1061 | 901 | }; |
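ParRebuildRSTask splits the regions among workers with the claim-value protocol suggested by InitialClaimValue / RebuildRSClaimValue: every worker may visit every region, but only the worker that first flips a region's claim value processes it. A self-contained sketch of the idea (assumed simplification; the real claiming and chunking live in heap_region_par_iterate_chunked, which also staggers worker start positions so they rarely collide):

    #include <atomic>
    #include <vector>

    struct RegionStub { std::atomic<int> claim{0}; };  // 0 plays InitialClaimValue

    void rebuild_worker(std::vector<RegionStub>& regions,
                        int rebuild_claim /* plays RebuildRSClaimValue */) {
      for (RegionStub& r : regions) {
        int expected = 0;
        // Only the first worker to flip the claim value processes the region,
        // so the work is partitioned without any locks.
        if (r.claim.compare_exchange_strong(expected, rebuild_claim)) {
          // ... iterate this region's oops and rebuild its remembered set ...
        }
      }
    }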
apetrusenko@1061 | 902 | |
ysr@777 | 903 | void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, |
ysr@777 | 904 | size_t word_size) { |
ysr@777 | 905 | ResourceMark rm; |
ysr@777 | 906 | |
tonyp@1273 | 907 | if (PrintHeapAtGC) { |
tonyp@1273 | 908 | Universe::print_heap_before_gc(); |
tonyp@1273 | 909 | } |
tonyp@1273 | 910 | |
ysr@777 | 911 | if (full && DisableExplicitGC) { |
ysr@777 | 912 | gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); |
ysr@777 | 913 | return; |
ysr@777 | 914 | } |
ysr@777 | 915 | |
ysr@777 | 916 | assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
ysr@777 | 917 | assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
ysr@777 | 918 | |
ysr@777 | 919 | if (GC_locker::is_active()) { |
ysr@777 | 920 | return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
ysr@777 | 921 | } |
ysr@777 | 922 | |
ysr@777 | 923 | { |
ysr@777 | 924 | IsGCActiveMark x; |
ysr@777 | 925 | |
ysr@777 | 926 | // Timing |
ysr@777 | 927 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
ysr@777 | 928 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
ysr@777 | 929 | TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); |
ysr@777 | 930 | |
tonyp@1524 | 931 | TraceMemoryManagerStats tms(true /* fullGC */); |
tonyp@1524 | 932 | |
ysr@777 | 933 | double start = os::elapsedTime(); |
ysr@777 | 934 | g1_policy()->record_full_collection_start(); |
ysr@777 | 935 | |
ysr@777 | 936 | gc_prologue(true); |
tonyp@1273 | 937 | increment_total_collections(true /* full gc */); |
ysr@777 | 938 | |
ysr@777 | 939 | size_t g1h_prev_used = used(); |
ysr@777 | 940 | assert(used() == recalculate_used(), "Should be equal"); |
ysr@777 | 941 | |
ysr@777 | 942 | if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 943 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 944 | prepare_for_verify(); |
ysr@777 | 945 | gclog_or_tty->print(" VerifyBeforeGC:"); |
ysr@777 | 946 | Universe::verify(true); |
ysr@777 | 947 | } |
ysr@777 | 948 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 949 | |
ysr@777 | 950 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
ysr@777 | 951 | |
ysr@777 | 952 | // We want to discover references, but not process them yet. |
ysr@777 | 953 | // This mode is disabled in |
ysr@777 | 954 | // instanceRefKlass::process_discovered_references if the |
ysr@777 | 955 | // generation does some collection work, or |
ysr@777 | 956 | // instanceRefKlass::enqueue_discovered_references if the |
ysr@777 | 957 | // generation returns without doing any work. |
ysr@777 | 958 | ref_processor()->disable_discovery(); |
ysr@777 | 959 | ref_processor()->abandon_partial_discovery(); |
ysr@777 | 960 | ref_processor()->verify_no_references_recorded(); |
ysr@777 | 961 | |
ysr@777 | 962 | // Abandon current iterations of concurrent marking and concurrent |
ysr@777 | 963 | // refinement, if any are in progress. |
ysr@777 | 964 | concurrent_mark()->abort(); |
ysr@777 | 965 | |
ysr@777 | 966 | // Make sure we'll choose a new allocation region afterwards. |
ysr@777 | 967 | abandon_cur_alloc_region(); |
tonyp@1071 | 968 | abandon_gc_alloc_regions(); |
ysr@777 | 969 | assert(_cur_alloc_region == NULL, "Invariant."); |
ysr@777 | 970 | g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); |
ysr@777 | 971 | tear_down_region_lists(); |
ysr@777 | 972 | set_used_regions_to_need_zero_fill(); |
ysr@777 | 973 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 974 | empty_young_list(); |
ysr@777 | 975 | g1_policy()->set_full_young_gcs(true); |
ysr@777 | 976 | } |
ysr@777 | 977 | |
ysr@777 | 978 | // Temporarily make reference _discovery_ single threaded (non-MT). |
ysr@777 | 979 | ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); |
ysr@777 | 980 | |
ysr@777 | 981 | // Temporarily make refs discovery atomic |
ysr@777 | 982 | ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); |
ysr@777 | 983 | |
ysr@777 | 984 | // Temporarily clear _is_alive_non_header |
ysr@777 | 985 | ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); |
ysr@777 | 986 | |
ysr@777 | 987 | ref_processor()->enable_discovery(); |
ysr@892 | 988 | ref_processor()->setup_policy(clear_all_soft_refs); |
ysr@777 | 989 | |
ysr@777 | 990 | // Do collection work |
ysr@777 | 991 | { |
ysr@777 | 992 | HandleMark hm; // Discard invalid handles created during gc |
ysr@777 | 993 | G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); |
ysr@777 | 994 | } |
ysr@777 | 995 | // Because freeing humongous regions may have added some unclean |
ysr@777 | 996 | // regions, it is necessary to tear down again before rebuilding. |
ysr@777 | 997 | tear_down_region_lists(); |
ysr@777 | 998 | rebuild_region_lists(); |
ysr@777 | 999 | |
ysr@777 | 1000 | _summary_bytes_used = recalculate_used(); |
ysr@777 | 1001 | |
ysr@777 | 1002 | ref_processor()->enqueue_discovered_references(); |
ysr@777 | 1003 | |
ysr@777 | 1004 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 1005 | |
tonyp@1524 | 1006 | MemoryService::track_memory_usage(); |
tonyp@1524 | 1007 | |
ysr@777 | 1008 | if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
ysr@777 | 1009 | HandleMark hm; // Discard invalid handles created during verification |
ysr@777 | 1010 | gclog_or_tty->print(" VerifyAfterGC:"); |
iveresov@1072 | 1011 | prepare_for_verify(); |
ysr@777 | 1012 | Universe::verify(false); |
ysr@777 | 1013 | } |
ysr@777 | 1014 | NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); |
ysr@777 | 1015 | |
ysr@777 | 1016 | reset_gc_time_stamp(); |
ysr@777 | 1017 | // Since everything potentially moved, we will clear all remembered |
apetrusenko@1061 | 1018 | // sets, and clear all cards. Later we will rebuild remembered |
apetrusenko@1061 | 1019 | // sets. We will also reset the GC time stamps of the regions. |
ysr@777 | 1020 | PostMCRemSetClearClosure rs_clear(mr_bs()); |
ysr@777 | 1021 | heap_region_iterate(&rs_clear); |
ysr@777 | 1022 | |
ysr@777 | 1023 | // Resize the heap if necessary. |
ysr@777 | 1024 | resize_if_necessary_after_full_collection(full ? 0 : word_size); |
ysr@777 | 1025 | |
ysr@777 | 1026 | if (_cg1r->use_cache()) { |
ysr@777 | 1027 | _cg1r->clear_and_record_card_counts(); |
ysr@777 | 1028 | _cg1r->clear_hot_cache(); |
ysr@777 | 1029 | } |
ysr@777 | 1030 | |
apetrusenko@1061 | 1031 | // Rebuild remembered sets of all regions. |
apetrusenko@1061 | 1032 | if (ParallelGCThreads > 0) { |
apetrusenko@1061 | 1033 | ParRebuildRSTask rebuild_rs_task(this); |
apetrusenko@1061 | 1034 | assert(check_heap_region_claim_values( |
apetrusenko@1061 | 1035 | HeapRegion::InitialClaimValue), "sanity check"); |
apetrusenko@1061 | 1036 | set_par_threads(workers()->total_workers()); |
apetrusenko@1061 | 1037 | workers()->run_task(&rebuild_rs_task); |
apetrusenko@1061 | 1038 | set_par_threads(0); |
apetrusenko@1061 | 1039 | assert(check_heap_region_claim_values( |
apetrusenko@1061 | 1040 | HeapRegion::RebuildRSClaimValue), "sanity check"); |
apetrusenko@1061 | 1041 | reset_heap_region_claim_values(); |
apetrusenko@1061 | 1042 | } else { |
apetrusenko@1061 | 1043 | RebuildRSOutOfRegionClosure rebuild_rs(this); |
apetrusenko@1061 | 1044 | heap_region_iterate(&rebuild_rs); |
apetrusenko@1061 | 1045 | } |
apetrusenko@1061 | 1046 | |
ysr@777 | 1047 | if (PrintGC) { |
ysr@777 | 1048 | print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); |
ysr@777 | 1049 | } |
ysr@777 | 1050 | |
ysr@777 | 1051 | if (true) { // FIXME |
ysr@777 | 1052 | // Ask the permanent generation to adjust size for full collections |
ysr@777 | 1053 | perm()->compute_new_size(); |
ysr@777 | 1054 | } |
ysr@777 | 1055 | |
ysr@777 | 1056 | double end = os::elapsedTime(); |
ysr@777 | 1057 | g1_policy()->record_full_collection_end(); |
ysr@777 | 1058 | |
jmasa@981 | 1059 | #ifdef TRACESPINNING |
jmasa@981 | 1060 | ParallelTaskTerminator::print_termination_counts(); |
jmasa@981 | 1061 | #endif |
jmasa@981 | 1062 | |
ysr@777 | 1063 | gc_epilogue(true); |
ysr@777 | 1064 | |
iveresov@1229 | 1065 | // Discard all rset updates |
iveresov@1229 | 1066 | JavaThread::dirty_card_queue_set().abandon_logs(); |
iveresov@1051 | 1067 | assert(!G1DeferredRSUpdate |
iveresov@1051 | 1068 | || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); |
ysr@777 | 1069 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 1070 | } |
ysr@777 | 1071 | |
ysr@777 | 1072 | if (g1_policy()->in_young_gc_mode()) { |
ysr@777 | 1073 | _young_list->reset_sampled_info(); |
ysr@777 | 1074 | assert( check_young_list_empty(false, false), |
ysr@777 | 1075 | "young list should be empty at this point"); |
ysr@777 | 1076 | } |
tonyp@1273 | 1077 | |
tonyp@1273 | 1078 | if (PrintHeapAtGC) { |
tonyp@1273 | 1079 | Universe::print_heap_after_gc(); |
tonyp@1273 | 1080 | } |
ysr@777 | 1081 | } |
ysr@777 | 1082 | |
ysr@777 | 1083 | void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { |
ysr@777 | 1084 | do_collection(true, clear_all_soft_refs, 0); |
ysr@777 | 1085 | } |
ysr@777 | 1086 | |
ysr@777 | 1087 | // This code is mostly copied from TenuredGeneration. |
ysr@777 | 1088 | void |
ysr@777 | 1089 | G1CollectedHeap:: |
ysr@777 | 1090 | resize_if_necessary_after_full_collection(size_t word_size) { |
ysr@777 | 1091 | assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); |
ysr@777 | 1092 | |
ysr@777 | 1093 | // Include the current allocation, if any, and bytes that will be |
ysr@777 | 1094 | // pre-allocated to support collections, as "used". |
ysr@777 | 1095 | const size_t used_after_gc = used(); |
ysr@777 | 1096 | const size_t capacity_after_gc = capacity(); |
ysr@777 | 1097 | const size_t free_after_gc = capacity_after_gc - used_after_gc; |
ysr@777 | 1098 | |
ysr@777 | 1099 | // We don't have floating point command-line arguments |
ysr@777 | 1100 | const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; |
ysr@777 | 1101 | const double maximum_used_percentage = 1.0 - minimum_free_percentage; |
ysr@777 | 1102 | const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; |
ysr@777 | 1103 | const double minimum_used_percentage = 1.0 - maximum_free_percentage; |
ysr@777 | 1104 | |
ysr@777 | 1105 | size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); |
ysr@777 | 1106 | size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); |
ysr@777 | 1107 | |
ysr@777 | 1108 | // Don't shrink less than the initial size. |
ysr@777 | 1109 | minimum_desired_capacity = |
ysr@777 | 1110 | MAX2(minimum_desired_capacity, |
ysr@777 | 1111 | collector_policy()->initial_heap_byte_size()); |
ysr@777 | 1112 | maximum_desired_capacity = |
ysr@777 | 1113 | MAX2(maximum_desired_capacity, |
ysr@777 | 1114 | collector_policy()->initial_heap_byte_size()); |
ysr@777 | 1115 | |
ysr@777 | 1116 | // By construction, minimum_desired_capacity >= used_after_gc, since maximum_used_percentage <= 1.0. |
ysr@777 | 1117 | assert(used_after_gc <= minimum_desired_capacity, "sanity check"); |
ysr@777 | 1118 | assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); |
ysr@777 | 1119 | |
ysr@777 | 1120 | if (PrintGC && Verbose) { |
ysr@777 | 1121 | const double free_percentage = ((double)free_after_gc) / capacity(); |
ysr@777 | 1122 | gclog_or_tty->print_cr("Computing new size after full GC "); |
ysr@777 | 1123 | gclog_or_tty->print_cr(" " |
ysr@777 | 1124 | " minimum_free_percentage: %6.2f", |
ysr@777 | 1125 | minimum_free_percentage); |
ysr@777 | 1126 | gclog_or_tty->print_cr(" " |
ysr@777 | 1127 | " maximum_free_percentage: %6.2f", |
ysr@777 | 1128 | maximum_free_percentage); |
ysr@777 | 1129 | gclog_or_tty->print_cr(" " |
ysr@777 | 1130 | " capacity: %6.1fK" |
ysr@777 | 1131 | " minimum_desired_capacity: %6.1fK" |
ysr@777 | 1132 | " maximum_desired_capacity: %6.1fK", |
ysr@777 | 1133 | capacity() / (double) K, |
ysr@777 | 1134 | minimum_desired_capacity / (double) K, |
ysr@777 | 1135 | maximum_desired_capacity / (double) K); |
ysr@777 | 1136 | gclog_or_tty->print_cr(" " |
ysr@777 | 1137 | " free_after_gc : %6.1fK" |
ysr@777 | 1138 | " used_after_gc : %6.1fK", |
ysr@777 | 1139 | free_after_gc / (double) K, |
ysr@777 | 1140 | used_after_gc / (double) K); |
ysr@777 | 1141 | gclog_or_tty->print_cr(" " |
ysr@777 | 1142 | " free_percentage: %6.2f", |
ysr@777 | 1143 | free_percentage); |
ysr@777 | 1144 | } |
ysr@777 | 1145 | if (capacity() < minimum_desired_capacity) { |
ysr@777 | 1146 | // Don't expand unless it's significant |
ysr@777 | 1147 | size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; |
ysr@777 | 1148 | expand(expand_bytes); |
ysr@777 | 1149 | if (PrintGC && Verbose) { |
ysr@777 | 1150 | gclog_or_tty->print_cr(" expanding:" |
ysr@777 | 1151 | " minimum_desired_capacity: %6.1fK" |
ysr@777 | 1152 | " expand_bytes: %6.1fK", |
ysr@777 | 1153 | minimum_desired_capacity / (double) K, |
ysr@777 | 1154 | expand_bytes / (double) K); |
ysr@777 | 1155 | } |
ysr@777 | 1156 | |
ysr@777 | 1157 | // No expansion, now see if we want to shrink |
ysr@777 | 1158 | } else if (capacity() > maximum_desired_capacity) { |
ysr@777 | 1159 | // Capacity too large, compute shrinking size |
ysr@777 | 1160 | size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; |
ysr@777 | 1161 | shrink(shrink_bytes); |
ysr@777 | 1162 | if (PrintGC && Verbose) { |
ysr@777 | 1163 | gclog_or_tty->print_cr(" " |
ysr@777 | 1164 | " shrinking:" |
ysr@777 | 1165 | " initSize: %.1fK" |
ysr@777 | 1166 | " maximum_desired_capacity: %.1fK", |
ysr@777 | 1167 | collector_policy()->initial_heap_byte_size() / (double) K, |
ysr@777 | 1168 | maximum_desired_capacity / (double) K); |
ysr@777 | 1169 | gclog_or_tty->print_cr(" " |
ysr@777 | 1170 | " shrink_bytes: %.1fK", |
ysr@777 | 1171 | shrink_bytes / (double) K); |
ysr@777 | 1172 | } |
ysr@777 | 1173 | } |
ysr@777 | 1174 | } |
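To make the sizing math above concrete, a worked example with assumed flag values (illustrative, not necessarily the defaults):

    MinHeapFreeRatio = 40  =>  maximum_used_percentage = 1.0 - 0.40 = 0.60
    MaxHeapFreeRatio = 70  =>  minimum_used_percentage = 1.0 - 0.70 = 0.30
    used_after_gc = 600 MB:
      minimum_desired_capacity = 600 / 0.60 = 1000 MB
      maximum_desired_capacity = 600 / 0.30 = 2000 MB

Under those numbers a 900 MB heap expands by 100 MB, a 2500 MB heap shrinks by 500 MB, and any capacity between 1000 MB and 2000 MB is left alone.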
ysr@777 | 1175 | |
ysr@777 | 1176 | |
ysr@777 | 1177 | HeapWord* |
ysr@777 | 1178 | G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { |
ysr@777 | 1179 | HeapWord* result = NULL; |
ysr@777 | 1180 | |
ysr@777 | 1181 | // In a G1 heap, we're supposed to keep allocation from failing by |
ysr@777 | 1182 | // incremental pauses. Therefore, at least for now, we'll favor |
ysr@777 | 1183 | // expansion over collection. (This might change in the future if we can |
ysr@777 | 1184 | // do something smarter than full collection to satisfy a failed alloc.) |
ysr@777 | 1185 | |
ysr@777 | 1186 | result = expand_and_allocate(word_size); |
ysr@777 | 1187 | if (result != NULL) { |
ysr@777 | 1188 | assert(is_in(result), "result not in heap"); |
ysr@777 | 1189 | return result; |
ysr@777 | 1190 | } |
ysr@777 | 1191 | |
ysr@777 | 1192 | // OK, I guess we have to try collection. |
ysr@777 | 1193 | |
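// A sketch of the fallback order from here on: a full collection that
// keeps soft references, a retry of the allocation, then a full
// collection that also clears soft references (the second argument to
// do_collection -- presumably the soft-reference clearing flag, given
// the comment on the second call below), and a final retry.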
ysr@777 | 1194 | do_collection(false, false, word_size); |
ysr@777 | 1195 | |
ysr@777 | 1196 | result = attempt_allocation(word_size, /*permit_collection_pause*/false); |
ysr@777 | 1197 | |
ysr@777 | 1198 | if (result != NULL) { |
ysr@777 | 1199 | assert(is_in(result), "result not in heap"); |
ysr@777 | 1200 | return result; |
ysr@777 | 1201 | } |
ysr@777 | 1202 | |
ysr@777 | 1203 | // Try collecting soft references. |
ysr@777 | 1204 | do_collection(false, true, word_size); |
ysr@777 | 1205 | result = attempt_allocation(word_size, /*permit_collection_pause*/false); |
ysr@777 | 1206 | if (result != NULL) { |
ysr@777 | 1207 | assert(is_in(result), "result not in heap"); |
ysr@777 | 1208 | return result; |
ysr@777 | 1209 | } |
ysr@777 | 1210 | |
ysr@777 | 1211 | // What else? We might try synchronous finalization later. If the total |
ysr@777 | 1212 | // space available is large enough for the allocation, then a more |
ysr@777 | 1213 | // complete compaction phase than we've tried so far might be |
ysr@777 | 1214 | // appropriate. |
ysr@777 | 1215 | return NULL; |
ysr@777 | 1216 | } |
ysr@777 | 1217 | |
ysr@777 | 1218 | // Attempt to expand the heap sufficiently |
ysr@777 | 1219 | // to support an allocation of the given "word_size". If |
ysr@777 | 1220 | // successful, perform the allocation and return the address of the |
ysr@777 | 1221 | // allocated block, or else "NULL". |
ysr@777 | 1222 | |
ysr@777 | 1223 | HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { |
ysr@777 | 1224 | size_t expand_bytes = word_size * HeapWordSize; |
ysr@777 | 1225 | if (expand_bytes < MinHeapDeltaBytes) { |
ysr@777 | 1226 | expand_bytes = MinHeapDeltaBytes; |
ysr@777 | 1227 | } |
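// Expanding by at least MinHeapDeltaBytes avoids a stream of tiny,
// allocation-sized expansions when the heap is growing steadily.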
ysr@777 | 1228 | expand(expand_bytes); |
ysr@777 | 1229 | assert(regions_accounted_for(), "Region leakage!"); |
ysr@777 | 1230 | HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); |
ysr@777 | 1231 | return result; |
ysr@777 | 1232 | } |
ysr@777 | 1233 | |
ysr@777 | 1234 | size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { |
ysr@777 | 1235 | size_t pre_used = 0; |
ysr@777 | 1236 | size_t cleared_h_regions = 0; |
ysr@777 | 1237 | size_t freed_regions = 0; |
ysr@777 | 1238 | UncleanRegionList local_list; |
ysr@777 | 1239 | free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, |
ysr@777 | 1240 | freed_regions, &local_list); |
ysr@777 | 1241 | |
ysr@777 | 1242 | finish_free_region_work(pre_used, cleared_h_regions, freed_regions, |
ysr@777 | 1243 | &local_list); |
ysr@777 | 1244 | return pre_used; |
ysr@777 | 1245 | } |
ysr@777 | 1246 | |
ysr@777 | 1247 | void |
ysr@777 | 1248 | G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, |
ysr@777 | 1249 | size_t& pre_used, |
ysr@777 | 1250 | size_t& cleared_h, |
ysr@777 | 1251 | size_t& freed_regions, |
ysr@777 | 1252 | UncleanRegionList* list, |
ysr@777 | 1253 | bool par) { |
ysr@777 | 1254 | assert(!hr->continuesHumongous(), "should have filtered these out"); |
ysr@777 | 1255 | size_t res = 0; |
apetrusenko@1112 | 1256 | if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && |
apetrusenko@1112 | 1257 | !hr->is_young()) { |
apetrusenko@1112 | 1258 | if (G1PolicyVerbose > 0) |
apetrusenko@1112 | 1259 | gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT" (" SIZE_FORMAT " bytes)" |
apetrusenko@1112 | 1260 | " during cleanup", hr, hr->used()); |
apetrusenko@1112 | 1261 | free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); |
ysr@777 | 1262 | } |
ysr@777 | 1263 | } |
ysr@777 | 1264 | |
ysr@777 | 1265 | // FIXME: both this and shrink could probably be more efficient by |
ysr@777 | 1266 | // doing one "VirtualSpace::expand_by" call rather than several. |
ysr@777 | 1267 | void G1CollectedHeap::expand(size_t expand_bytes) { |
ysr@777 | 1268 | size_t old_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1269 | // We expand by a minimum of 1K. |
ysr@777 | 1270 | expand_bytes = MAX2(expand_bytes, (size_t)K); |
ysr@777 | 1271 | size_t aligned_expand_bytes = |
ysr@777 | 1272 | ReservedSpace::page_align_size_up(expand_bytes); |
ysr@777 | 1273 | aligned_expand_bytes = align_size_up(aligned_expand_bytes, |
ysr@777 | 1274 | HeapRegion::GrainBytes); |
ysr@777 | 1275 | expand_bytes = aligned_expand_bytes; |
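// The loop below commits storage one region (GrainBytes) at a time,
// wrapping each newly committed chunk in a HeapRegion and updating the
// committed range, card table and block-offset table as it goes.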
ysr@777 | 1276 | while (expand_bytes > 0) { |
ysr@777 | 1277 | HeapWord* base = (HeapWord*)_g1_storage.high(); |
ysr@777 | 1278 | // Commit more storage. |
ysr@777 | 1279 | bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); |
ysr@777 | 1280 | if (!successful) { |
ysr@777 | 1281 | expand_bytes = 0; |
ysr@777 | 1282 | } else { |
ysr@777 | 1283 | expand_bytes -= HeapRegion::GrainBytes; |
ysr@777 | 1284 | // Expand the committed region. |
ysr@777 | 1285 | HeapWord* high = (HeapWord*) _g1_storage.high(); |
ysr@777 | 1286 | _g1_committed.set_end(high); |
ysr@777 | 1287 | // Create a new HeapRegion. |
ysr@777 | 1288 | MemRegion mr(base, high); |
ysr@777 | 1289 | bool is_zeroed = !_g1_max_committed.contains(base); |
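// Memory beyond the historical high-water mark (_g1_max_committed) has
// never been committed before, so the OS supplies it zero-filled;
// anything below the mark may hold stale data and must be re-zeroed
// (see the zero-fill state handling below).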
ysr@777 | 1290 | HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); |
ysr@777 | 1291 | |
ysr@777 | 1292 | // Now update max_committed if necessary. |
ysr@777 | 1293 | _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); |
ysr@777 | 1294 | |
ysr@777 | 1295 | // Add it to the HeapRegionSeq. |
ysr@777 | 1296 | _hrs->insert(hr); |
ysr@777 | 1297 | // Set the zero-fill state, according to whether it's already |
ysr@777 | 1298 | // zeroed. |
ysr@777 | 1299 | { |
ysr@777 | 1300 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 1301 | if (is_zeroed) { |
ysr@777 | 1302 | hr->set_zero_fill_complete(); |
ysr@777 | 1303 | put_free_region_on_list_locked(hr); |
ysr@777 | 1304 | } else { |
ysr@777 | 1305 | hr->set_zero_fill_needed(); |
ysr@777 | 1306 | put_region_on_unclean_list_locked(hr); |
ysr@777 | 1307 | } |
ysr@777 | 1308 | } |
ysr@777 | 1309 | _free_regions++; |
ysr@777 | 1310 | // And we used up an expansion region to create it. |
ysr@777 | 1311 | _expansion_regions--; |
ysr@777 | 1312 | // Tell the cardtable about it. |
ysr@777 | 1313 | Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
ysr@777 | 1314 | // And the offset table as well. |
ysr@777 | 1315 | _bot_shared->resize(_g1_committed.word_size()); |
ysr@777 | 1316 | } |
ysr@777 | 1317 | } |
ysr@777 | 1318 | if (Verbose && PrintGC) { |
ysr@777 | 1319 | size_t new_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1320 | gclog_or_tty->print_cr("Expanding garbage-first heap from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K", |
ysr@777 | 1321 | old_mem_size/K, aligned_expand_bytes/K, |
ysr@777 | 1322 | new_mem_size/K); |
ysr@777 | 1323 | } |
ysr@777 | 1324 | } |
ysr@777 | 1325 | |
ysr@777 | 1326 | void G1CollectedHeap::shrink_helper(size_t shrink_bytes) |
ysr@777 | 1327 | { |
ysr@777 | 1328 | size_t old_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1329 | size_t aligned_shrink_bytes = |
ysr@777 | 1330 | ReservedSpace::page_align_size_down(shrink_bytes); |
ysr@777 | 1331 | aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, |
ysr@777 | 1332 | HeapRegion::GrainBytes); |
ysr@777 | 1333 | size_t num_regions_deleted = 0; |
ysr@777 | 1334 | MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); |
ysr@777 | 1335 | |
ysr@777 | 1336 | assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); |
ysr@777 | 1337 | if (mr.byte_size() > 0) |
ysr@777 | 1338 | _g1_storage.shrink_by(mr.byte_size()); |
ysr@777 | 1339 | assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); |
ysr@777 | 1340 | |
ysr@777 | 1341 | _g1_committed.set_end(mr.start()); |
ysr@777 | 1342 | _free_regions -= num_regions_deleted; |
ysr@777 | 1343 | _expansion_regions += num_regions_deleted; |
ysr@777 | 1344 | |
ysr@777 | 1345 | // Tell the cardtable about it. |
ysr@777 | 1346 | Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); |
ysr@777 | 1347 | |
ysr@777 | 1348 | // And the offset table as well. |
ysr@777 | 1349 | _bot_shared->resize(_g1_committed.word_size()); |
ysr@777 | 1350 | |
ysr@777 | 1351 | HeapRegionRemSet::shrink_heap(n_regions()); |
ysr@777 | 1352 | |
ysr@777 | 1353 | if (Verbose && PrintGC) { |
ysr@777 | 1354 | size_t new_mem_size = _g1_storage.committed_size(); |
ysr@777 | 1355 | gclog_or_tty->print_cr("Shrinking garbage-first heap from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K", |
ysr@777 | 1356 | old_mem_size/K, aligned_shrink_bytes/K, |
ysr@777 | 1357 | new_mem_size/K); |
ysr@777 | 1358 | } |
ysr@777 | 1359 | } |
ysr@777 | 1360 | |
ysr@777 | 1361 | void G1CollectedHeap::shrink(size_t shrink_bytes) { |
tonyp@1071 | 1362 | release_gc_alloc_regions(true /* totally */); |
ysr@777 | 1363 | tear_down_region_lists(); // We will rebuild them in a moment. |
ysr@777 | 1364 | shrink_helper(shrink_bytes); |
ysr@777 | 1365 | rebuild_region_lists(); |
ysr@777 | 1366 | } |
ysr@777 | 1367 | |
ysr@777 | 1368 | // Public methods. |
ysr@777 | 1369 | |
ysr@777 | 1370 | #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away |
ysr@777 | 1371 | #pragma warning( disable:4355 ) // 'this' : used in base member initializer list |
ysr@777 | 1372 | #endif // _MSC_VER |
ysr@777 | 1373 | |
ysr@777 | 1374 | |
ysr@777 | 1375 | G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : |
ysr@777 | 1376 | SharedHeap(policy_), |
ysr@777 | 1377 | _g1_policy(policy_), |
iveresov@1546 | 1378 | _dirty_card_queue_set(false), |
ysr@777 | 1379 | _ref_processor(NULL), |
ysr@777 | 1380 | _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), |
ysr@777 | 1381 | _bot_shared(NULL), |
ysr@777 | 1382 | _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), |
ysr@777 | 1383 | _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), |
ysr@777 | 1384 | _evac_failure_scan_stack(NULL) , |
ysr@777 | 1385 | _mark_in_progress(false), |
ysr@777 | 1386 | _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), |
ysr@777 | 1387 | _cur_alloc_region(NULL), |
ysr@777 | 1388 | _refine_cte_cl(NULL), |
ysr@777 | 1389 | _free_region_list(NULL), _free_region_list_size(0), |
ysr@777 | 1390 | _free_regions(0), |
ysr@777 | 1391 | _full_collection(false), |
ysr@777 | 1392 | _unclean_region_list(), |
ysr@777 | 1393 | _unclean_regions_coming(false), |
ysr@777 | 1394 | _young_list(new YoungList(this)), |
ysr@777 | 1395 | _gc_time_stamp(0), |
tonyp@961 | 1396 | _surviving_young_words(NULL), |
tonyp@961 | 1397 | _in_cset_fast_test(NULL), |
apetrusenko@1231 | 1398 | _in_cset_fast_test_base(NULL), |
apetrusenko@1231 | 1399 | _dirty_cards_region_list(NULL) { |
ysr@777 | 1400 | _g1h = this; // To catch bugs. |
ysr@777 | 1401 | if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { |
ysr@777 | 1402 | vm_exit_during_initialization("Failed necessary allocation."); |
ysr@777 | 1403 | } |
tonyp@1377 | 1404 | |
tonyp@1377 | 1405 | _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; |
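// i.e. any object of at least half a region is treated as humongous
// and gets its own dedicated region (or sequence of regions) rather
// than sharing one with other allocations.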
tonyp@1377 | 1406 | |
ysr@777 | 1407 | int n_queues = MAX2((int)ParallelGCThreads, 1); |
ysr@777 | 1408 | _task_queues = new RefToScanQueueSet(n_queues); |
ysr@777 | 1409 | |
ysr@777 | 1410 | int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); |
ysr@777 | 1411 | assert(n_rem_sets > 0, "Invariant."); |
ysr@777 | 1412 | |
ysr@777 | 1413 | HeapRegionRemSetIterator** iter_arr = |
ysr@777 | 1414 | NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); |
ysr@777 | 1415 | for (int i = 0; i < n_queues; i++) { |
ysr@777 | 1416 | iter_arr[i] = new HeapRegionRemSetIterator(); |
ysr@777 | 1417 | } |
ysr@777 | 1418 | _rem_set_iterator = iter_arr; |
ysr@777 | 1419 | |
ysr@777 | 1420 | for (int i = 0; i < n_queues; i++) { |
ysr@777 | 1421 | RefToScanQueue* q = new RefToScanQueue(); |
ysr@777 | 1422 | q->initialize(); |
ysr@777 | 1423 | _task_queues->register_queue(i, q); |
ysr@777 | 1424 | } |
ysr@777 | 1425 | |
ysr@777 | 1426 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
tonyp@1071 | 1427 | _gc_alloc_regions[ap] = NULL; |
tonyp@1071 | 1428 | _gc_alloc_region_counts[ap] = 0; |
tonyp@1071 | 1429 | _retained_gc_alloc_regions[ap] = NULL; |
tonyp@1071 | 1430 | // by default, we do not retain a GC alloc region for each ap; |
tonyp@1071 | 1431 | // we'll override this, when appropriate, below |
tonyp@1071 | 1432 | _retain_gc_alloc_region[ap] = false; |
tonyp@1071 | 1433 | } |
tonyp@1071 | 1434 | |
tonyp@1071 | 1435 | // We will try to remember the last half-full tenured region we |
tonyp@1071 | 1436 | // allocated to at the end of a collection so that we can re-use it |
tonyp@1071 | 1437 | // during the next collection. |
tonyp@1071 | 1438 | _retain_gc_alloc_region[GCAllocForTenured] = true; |
tonyp@1071 | 1439 | |
ysr@777 | 1440 | guarantee(_task_queues != NULL, "task_queues allocation failure."); |
ysr@777 | 1441 | } |
ysr@777 | 1442 | |
ysr@777 | 1443 | jint G1CollectedHeap::initialize() { |
ysr@1601 | 1444 | CollectedHeap::pre_initialize(); |
ysr@777 | 1445 | os::enable_vtime(); |
ysr@777 | 1446 | |
ysr@777 | 1447 | // Necessary to satisfy locking discipline assertions. |
ysr@777 | 1448 | |
ysr@777 | 1449 | MutexLocker x(Heap_lock); |
ysr@777 | 1450 | |
ysr@777 | 1451 | // While there are no constraints in the GC code that HeapWordSize |
ysr@777 | 1452 | // be any particular value, there are multiple other areas in the |
ysr@777 | 1453 | // system which believe this to be true (e.g. oop->object_size in some |
ysr@777 | 1454 | // cases incorrectly returns the size in wordSize units rather than |
ysr@777 | 1455 | // HeapWordSize). |
ysr@777 | 1456 | guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); |
ysr@777 | 1457 | |
ysr@777 | 1458 | size_t init_byte_size = collector_policy()->initial_heap_byte_size(); |
ysr@777 | 1459 | size_t max_byte_size = collector_policy()->max_heap_byte_size(); |
ysr@777 | 1460 | |
ysr@777 | 1461 | // Ensure that the sizes are properly aligned. |
ysr@777 | 1462 | Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
ysr@777 | 1463 | Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); |
ysr@777 | 1464 | |
ysr@777 | 1465 | _cg1r = new ConcurrentG1Refine(); |
ysr@777 | 1466 | |
ysr@777 | 1467 | // Reserve the maximum. |
ysr@777 | 1468 | PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); |
ysr@777 | 1469 | // Includes the perm-gen. |
kvn@1077 | 1470 | |
kvn@1077 | 1471 | const size_t total_reserved = max_byte_size + pgs->max_size(); |
kvn@1077 | 1472 | char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); |
kvn@1077 | 1473 | |
ysr@777 | 1474 | ReservedSpace heap_rs(total_reserved, |
ysr@777 | 1475 | HeapRegion::GrainBytes, |
kvn@1077 | 1476 | false /*ism*/, addr); |
kvn@1077 | 1477 | |
kvn@1077 | 1478 | if (UseCompressedOops) { |
kvn@1077 | 1479 | if (addr != NULL && !heap_rs.is_reserved()) { |
kvn@1077 | 1480 | // Failed to reserve at specified address - the requested memory |
kvn@1077 | 1481 | // region is taken already, for example, by 'java' launcher. |
kvn@1077 | 1482 | // Try again to reserve the heap at a higher address. |
kvn@1077 | 1483 | addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); |
kvn@1077 | 1484 | ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, |
kvn@1077 | 1485 | false /*ism*/, addr); |
kvn@1077 | 1486 | if (addr != NULL && !heap_rs0.is_reserved()) { |
kvn@1077 | 1487 | // Failed to reserve at specified address again - give up. |
kvn@1077 | 1488 | addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); |
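// With HeapBasedNarrowOop there is no preferred base address, so
// preferred_heap_base() is expected to return NULL and the reservation
// may be placed anywhere.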
kvn@1077 | 1489 | assert(addr == NULL, ""); |
kvn@1077 | 1490 | ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, |
kvn@1077 | 1491 | false /*ism*/, addr); |
kvn@1077 | 1492 | heap_rs = heap_rs1; |
kvn@1077 | 1493 | } else { |
kvn@1077 | 1494 | heap_rs = heap_rs0; |
kvn@1077 | 1495 | } |
kvn@1077 | 1496 | } |
kvn@1077 | 1497 | } |
ysr@777 | 1498 | |
ysr@777 | 1499 | if (!heap_rs.is_reserved()) { |
ysr@777 | 1500 | vm_exit_during_initialization("Could not reserve enough space for object heap"); |
ysr@777 | 1501 | return JNI_ENOMEM; |
ysr@777 | 1502 | } |
ysr@777 | 1503 | |
ysr@777 | 1504 | // It is important to do this in a way such that concurrent readers can't |
ysr@777 | 1505 | // temporarily think something is in the heap. (I've actually seen this |
ysr@777 | 1506 | // happen in asserts: DLD.) |
ysr@777 | 1507 | _reserved.set_word_size(0); |
ysr@777 | 1508 | _reserved.set_start((HeapWord*)heap_rs.base()); |
ysr@777 | 1509 | _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); |
ysr@777 | 1510 | |
ysr@777 | 1511 | _expansion_regions = max_byte_size/HeapRegion::GrainBytes; |
ysr@777 | 1512 | |
ysr@777 | 1513 | _num_humongous_regions = 0; |
ysr@777 | 1514 | |
ysr@777 | 1515 | // Create the gen rem set (and barrier set) for the entire reserved region. |
ysr@777 | 1516 | _rem_set = collector_policy()->create_rem_set(_reserved, 2); |
ysr@777 | 1517 | set_barrier_set(rem_set()->bs()); |
ysr@777 | 1518 | if (barrier_set()->is_a(BarrierSet::ModRef)) { |
ysr@777 | 1519 | _mr_bs = (ModRefBarrierSet*)_barrier_set; |
ysr@777 | 1520 | } else { |
ysr@777 | 1521 | vm_exit_during_initialization("G1 requires a mod ref bs."); |
ysr@777 | 1522 | return JNI_ENOMEM; |
ysr@777 | 1523 | } |
ysr@777 | 1524 | |
ysr@777 | 1525 | // Also create a G1 rem set. |
ysr@777 | 1526 | if (G1UseHRIntoRS) { |
ysr@777 | 1527 | if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { |
ysr@777 | 1528 | _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs()); |
ysr@777 | 1529 | } else { |
ysr@777 | 1530 | vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); |
ysr@777 | 1531 | return JNI_ENOMEM; |
ysr@777 | 1532 | } |
ysr@777 | 1533 | } else { |
ysr@777 | 1534 | _g1_rem_set = new StupidG1RemSet(this); |
ysr@777 | 1535 | } |
ysr@777 | 1536 | |
ysr@777 | 1537 | // Carve out the G1 part of the heap. |
ysr@777 | 1538 | |
ysr@777 | 1539 | ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); |
ysr@777 | 1540 | _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), |
ysr@777 | 1541 | g1_rs.size()/HeapWordSize); |
ysr@777 | 1542 | ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); |
ysr@777 | 1543 | |
ysr@777 | 1544 | _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); |
ysr@777 | 1545 | |
ysr@777 | 1546 | _g1_storage.initialize(g1_rs, 0); |
ysr@777 | 1547 | _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); |
ysr@777 | 1548 | _g1_max_committed = _g1_committed; |
iveresov@828 | 1549 | _hrs = new HeapRegionSeq(_expansion_regions); |
ysr@777 | 1550 | guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); |
ysr@777 | 1551 | guarantee(_cur_alloc_region == NULL, "from constructor"); |
ysr@777 | 1552 | |
johnc@1242 | 1553 | // 6843694 - ensure that the maximum region index can fit |
johnc@1242 | 1554 | // in the remembered set structures. |
johnc@1242 | 1555 | const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
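// e.g. for a 16-bit RegionIdx_t this is (1 << 15) - 1 = 32767; the
// shift count leaves the top bit of the type unused (presumably so the
// indices also fit in its signed range).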
johnc@1242 | 1556 | guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
johnc@1242 | 1557 | |
johnc@1242 | 1558 | size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; |
tonyp@1377 | 1559 | guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); |
tonyp@1377 | 1560 | guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, |
tonyp@1377 | 1561 | "too many cards per region"); |
johnc@1242 | 1562 | |
ysr@777 | 1563 | _bot_shared = new G1BlockOffsetSharedArray(_reserved, |
ysr@777 | 1564 | heap_word_size(init_byte_size)); |
ysr@777 | 1565 | |
ysr@777 | 1566 | _g1h = this; |
ysr@777 | 1567 | |
ysr@777 | 1568 | // Create the ConcurrentMark data structure and thread. |
ysr@777 | 1569 | // (Must do this late, so that "max_regions" is defined.) |
ysr@777 | 1570 | _cm = new ConcurrentMark(heap_rs, (int) max_regions()); |
ysr@777 | 1571 | _cmThread = _cm->cmThread(); |
ysr@777 | 1572 | |
ysr@777 | 1573 | // ...and the concurrent zero-fill thread, if necessary. |
ysr@777 | 1574 | if (G1ConcZeroFill) { |
ysr@777 | 1575 | _czft = new ConcurrentZFThread(); |
ysr@777 | 1576 | } |
ysr@777 | 1577 | |
ysr@777 | 1578 | // Initialize the from_card cache structure of HeapRegionRemSet. |
ysr@777 | 1579 | HeapRegionRemSet::init_heap(max_regions()); |
ysr@777 | 1580 | |
apetrusenko@1112 | 1581 | // Now expand into the initial heap size. |
apetrusenko@1112 | 1582 | expand(init_byte_size); |
ysr@777 | 1583 | |
ysr@777 | 1584 | // Perform any initialization actions delegated to the policy. |
ysr@777 | 1585 | g1_policy()->init(); |
ysr@777 | 1586 | |
ysr@777 | 1587 | g1_policy()->note_start_of_mark_thread(); |
ysr@777 | 1588 | |
ysr@777 | 1589 | _refine_cte_cl = |
ysr@777 | 1590 | new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), |
ysr@777 | 1591 | g1_rem_set(), |
ysr@777 | 1592 | concurrent_g1_refine()); |
ysr@777 | 1593 | JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); |
ysr@777 | 1594 | |
ysr@777 | 1595 | JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, |
ysr@777 | 1596 | SATB_Q_FL_lock, |
iveresov@1546 | 1597 | G1SATBProcessCompletedThreshold, |
ysr@777 | 1598 | Shared_SATB_Q_lock); |
iveresov@1229 | 1599 | |
iveresov@1229 | 1600 | JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
iveresov@1229 | 1601 | DirtyCardQ_FL_lock, |
iveresov@1546 | 1602 | concurrent_g1_refine()->yellow_zone(), |
iveresov@1546 | 1603 | concurrent_g1_refine()->red_zone(), |
iveresov@1229 | 1604 | Shared_DirtyCardQ_lock); |
iveresov@1229 | 1605 | |
iveresov@1051 | 1606 | if (G1DeferredRSUpdate) { |
iveresov@1051 | 1607 | dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, |
iveresov@1051 | 1608 | DirtyCardQ_FL_lock, |
iveresov@1546 | 1609 | -1, // never trigger processing |
iveresov@1546 | 1610 | -1, // no limit on length |
iveresov@1051 | 1611 | Shared_DirtyCardQ_lock, |
iveresov@1051 | 1612 | &JavaThread::dirty_card_queue_set()); |
iveresov@1051 | 1613 | } |
ysr@777 | 1614 | // In case we're keeping closure specialization stats, initialize those |
ysr@777 | 1615 | // counts and that mechanism. |
ysr@777 | 1616 | SpecializationStats::clear(); |
ysr@777 | 1617 | |
ysr@777 | 1618 | _gc_alloc_region_list = NULL; |
ysr@777 | 1619 | |
ysr@777 | 1620 | // Do later initialization work for concurrent refinement. |
ysr@777 | 1621 | _cg1r->init(); |
ysr@777 | 1622 | |
ysr@777 | 1623 | return JNI_OK; |
ysr@777 | 1624 | } |
ysr@777 | 1625 | |
ysr@777 | 1626 | void G1CollectedHeap::ref_processing_init() { |
ysr@777 | 1627 | SharedHeap::ref_processing_init(); |
ysr@777 | 1628 | MemRegion mr = reserved_region(); |
ysr@777 | 1629 | _ref_processor = ReferenceProcessor::create_ref_processor( |
ysr@777 | 1630 | mr, // span |
ysr@777 | 1631 | false, // Reference discovery is not atomic |
ysr@777 | 1632 | // (though it shouldn't matter here.) |
ysr@777 | 1633 | true, // mt_discovery |
ysr@777 | 1634 | NULL, // is alive closure: need to fill this in for efficiency |
ysr@777 | 1635 | ParallelGCThreads, |
ysr@777 | 1636 | ParallelRefProcEnabled, |
ysr@777 | 1637 | true); // Setting next fields of discovered |
ysr@777 | 1638 | // lists requires a barrier. |
ysr@777 | 1639 | } |
ysr@777 | 1640 | |
ysr@777 | 1641 | size_t G1CollectedHeap::capacity() const { |
ysr@777 | 1642 | return _g1_committed.byte_size(); |
ysr@777 | 1643 | } |
ysr@777 | 1644 | |
ysr@777 | 1645 | void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent, |
ysr@777 | 1646 | int worker_i) { |
johnc@1324 | 1647 | // Clean cards in the hot card cache |
johnc@1324 | 1648 | concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set()); |
johnc@1324 | 1649 | |
ysr@777 | 1650 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 1651 | int n_completed_buffers = 0; |
ysr@777 | 1652 | while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) { |
ysr@777 | 1653 | n_completed_buffers++; |
ysr@777 | 1654 | } |
ysr@777 | 1655 | g1_policy()->record_update_rs_processed_buffers(worker_i, |
ysr@777 | 1656 | (double) n_completed_buffers); |
ysr@777 | 1657 | dcqs.clear_n_completed_buffers(); |
ysr@777 | 1658 | assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); |
ysr@777 | 1659 | } |
ysr@777 | 1660 | |
ysr@777 | 1661 | |
ysr@777 | 1662 | // Computes the sum of the storage used by the various regions. |
ysr@777 | 1663 | |
ysr@777 | 1664 | size_t G1CollectedHeap::used() const { |
ysr@1297 | 1665 | assert(Heap_lock->owner() != NULL, |
ysr@1297 | 1666 | "Should be owned on this thread's behalf."); |
ysr@777 | 1667 | size_t result = _summary_bytes_used; |
ysr@1280 | 1668 | // Read only once in case it is set to NULL concurrently |
ysr@1280 | 1669 | HeapRegion* hr = _cur_alloc_region; |
ysr@1280 | 1670 | if (hr != NULL) |
ysr@1280 | 1671 | result += hr->used(); |
ysr@777 | 1672 | return result; |
ysr@777 | 1673 | } |
ysr@777 | 1674 | |
tonyp@1281 | 1675 | size_t G1CollectedHeap::used_unlocked() const { |
tonyp@1281 | 1676 | size_t result = _summary_bytes_used; |
tonyp@1281 | 1677 | return result; |
tonyp@1281 | 1678 | } |
tonyp@1281 | 1679 | |
ysr@777 | 1680 | class SumUsedClosure: public HeapRegionClosure { |
ysr@777 | 1681 | size_t _used; |
ysr@777 | 1682 | public: |
ysr@777 | 1683 | SumUsedClosure() : _used(0) {} |
ysr@777 | 1684 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1685 | if (!r->continuesHumongous()) { |
ysr@777 | 1686 | _used += r->used(); |
ysr@777 | 1687 | } |
ysr@777 | 1688 | return false; |
ysr@777 | 1689 | } |
ysr@777 | 1690 | size_t result() { return _used; } |
ysr@777 | 1691 | }; |
ysr@777 | 1692 | |
ysr@777 | 1693 | size_t G1CollectedHeap::recalculate_used() const { |
ysr@777 | 1694 | SumUsedClosure blk; |
ysr@777 | 1695 | _hrs->iterate(&blk); |
ysr@777 | 1696 | return blk.result(); |
ysr@777 | 1697 | } |
ysr@777 | 1698 | |
ysr@777 | 1699 | #ifndef PRODUCT |
ysr@777 | 1700 | class SumUsedRegionsClosure: public HeapRegionClosure { |
ysr@777 | 1701 | size_t _num; |
ysr@777 | 1702 | public: |
apetrusenko@1112 | 1703 | SumUsedRegionsClosure() : _num(0) {} |
ysr@777 | 1704 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1705 | if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { |
ysr@777 | 1706 | _num += 1; |
ysr@777 | 1707 | } |
ysr@777 | 1708 | return false; |
ysr@777 | 1709 | } |
ysr@777 | 1710 | size_t result() { return _num; } |
ysr@777 | 1711 | }; |
ysr@777 | 1712 | |
ysr@777 | 1713 | size_t G1CollectedHeap::recalculate_used_regions() const { |
ysr@777 | 1714 | SumUsedRegionsClosure blk; |
ysr@777 | 1715 | _hrs->iterate(&blk); |
ysr@777 | 1716 | return blk.result(); |
ysr@777 | 1717 | } |
ysr@777 | 1718 | #endif // PRODUCT |
ysr@777 | 1719 | |
ysr@777 | 1720 | size_t G1CollectedHeap::unsafe_max_alloc() { |
ysr@777 | 1721 | if (_free_regions > 0) return HeapRegion::GrainBytes; |
ysr@777 | 1722 | // otherwise, is there space in the current allocation region? |
ysr@777 | 1723 | |
ysr@777 | 1724 | // We need to store the current allocation region in a local variable |
ysr@777 | 1725 | // here. The problem is that this method doesn't take any locks and |
ysr@777 | 1726 | // there may be other threads which overwrite the current allocation |
ysr@777 | 1727 | // region field. attempt_allocation(), for example, sets it to NULL |
ysr@777 | 1728 | // and this can happen *after* the NULL check here but before the call |
ysr@777 | 1729 | // to free(), resulting in a SIGSEGV. Note that this doesn't appear |
ysr@777 | 1730 | // to be a problem in the optimized build, since the two loads of the |
ysr@777 | 1731 | // current allocation region field are optimized away. |
ysr@777 | 1732 | HeapRegion* car = _cur_alloc_region; |
ysr@777 | 1733 | |
ysr@777 | 1734 | // FIXME: should iterate over all regions? |
ysr@777 | 1735 | if (car == NULL) { |
ysr@777 | 1736 | return 0; |
ysr@777 | 1737 | } |
ysr@777 | 1738 | return car->free(); |
ysr@777 | 1739 | } |
ysr@777 | 1740 | |
ysr@777 | 1741 | void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { |
ysr@777 | 1742 | assert(Thread::current()->is_VM_thread(), "Precondition#1"); |
ysr@777 | 1743 | assert(Heap_lock->is_locked(), "Precondition#2"); |
ysr@777 | 1744 | GCCauseSetter gcs(this, cause); |
ysr@777 | 1745 | switch (cause) { |
ysr@777 | 1746 | case GCCause::_heap_inspection: |
ysr@777 | 1747 | case GCCause::_heap_dump: { |
ysr@777 | 1748 | HandleMark hm; |
ysr@777 | 1749 | do_full_collection(false); // don't clear all soft refs |
ysr@777 | 1750 | break; |
ysr@777 | 1751 | } |
ysr@777 | 1752 | default: // XXX FIX ME |
ysr@777 | 1753 | ShouldNotReachHere(); // Unexpected use of this function |
ysr@777 | 1754 | } |
ysr@777 | 1755 | } |
ysr@777 | 1756 | |
ysr@1523 | 1757 | void G1CollectedHeap::collect(GCCause::Cause cause) { |
ysr@1523 | 1758 | // The caller doesn't have the Heap_lock |
ysr@1523 | 1759 | assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
ysr@1523 | 1760 | |
ysr@1523 | 1761 | int gc_count_before; |
ysr@777 | 1762 | { |
ysr@1523 | 1763 | MutexLocker ml(Heap_lock); |
ysr@1523 | 1764 | // Read the GC count while holding the Heap_lock |
ysr@1523 | 1765 | gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@1523 | 1766 | |
ysr@1523 | 1767 | // Don't want to do a GC until cleanup is completed. |
ysr@1523 | 1768 | wait_for_cleanup_complete(); |
ysr@1523 | 1769 | } // We give up heap lock; VMThread::execute gets it back below |
ysr@1523 | 1770 | switch (cause) { |
ysr@1523 | 1771 | case GCCause::_scavenge_alot: { |
ysr@1523 | 1772 | // Do an incremental pause, which might sometimes be abandoned. |
ysr@1523 | 1773 | VM_G1IncCollectionPause op(gc_count_before, cause); |
ysr@1523 | 1774 | VMThread::execute(&op); |
ysr@1523 | 1775 | break; |
ysr@1523 | 1776 | } |
ysr@1523 | 1777 | default: { |
ysr@1523 | 1778 | // In all other cases, we currently do a full gc. |
ysr@1523 | 1779 | VM_G1CollectFull op(gc_count_before, cause); |
ysr@1523 | 1780 | VMThread::execute(&op); |
ysr@1523 | 1781 | } |
ysr@777 | 1782 | } |
ysr@777 | 1783 | } |
ysr@777 | 1784 | |
ysr@777 | 1785 | bool G1CollectedHeap::is_in(const void* p) const { |
ysr@777 | 1786 | if (_g1_committed.contains(p)) { |
ysr@777 | 1787 | HeapRegion* hr = _hrs->addr_to_region(p); |
ysr@777 | 1788 | return hr->is_in(p); |
ysr@777 | 1789 | } else { |
ysr@777 | 1790 | return _perm_gen->as_gen()->is_in(p); |
ysr@777 | 1791 | } |
ysr@777 | 1792 | } |
ysr@777 | 1793 | |
ysr@777 | 1794 | // Iteration functions. |
ysr@777 | 1795 | |
ysr@777 | 1796 | // Iterates an OopClosure over all ref-containing fields of objects |
ysr@777 | 1797 | // within a HeapRegion. |
ysr@777 | 1798 | |
ysr@777 | 1799 | class IterateOopClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 1800 | MemRegion _mr; |
ysr@777 | 1801 | OopClosure* _cl; |
ysr@777 | 1802 | public: |
ysr@777 | 1803 | IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) |
ysr@777 | 1804 | : _mr(mr), _cl(cl) {} |
ysr@777 | 1805 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1806 | if (! r->continuesHumongous()) { |
ysr@777 | 1807 | r->oop_iterate(_cl); |
ysr@777 | 1808 | } |
ysr@777 | 1809 | return false; |
ysr@777 | 1810 | } |
ysr@777 | 1811 | }; |
ysr@777 | 1812 | |
iveresov@1113 | 1813 | void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { |
ysr@777 | 1814 | IterateOopClosureRegionClosure blk(_g1_committed, cl); |
ysr@777 | 1815 | _hrs->iterate(&blk); |
iveresov@1113 | 1816 | if (do_perm) { |
iveresov@1113 | 1817 | perm_gen()->oop_iterate(cl); |
iveresov@1113 | 1818 | } |
ysr@777 | 1819 | } |
ysr@777 | 1820 | |
iveresov@1113 | 1821 | void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { |
ysr@777 | 1822 | IterateOopClosureRegionClosure blk(mr, cl); |
ysr@777 | 1823 | _hrs->iterate(&blk); |
iveresov@1113 | 1824 | if (do_perm) { |
iveresov@1113 | 1825 | perm_gen()->oop_iterate(cl); |
iveresov@1113 | 1826 | } |
ysr@777 | 1827 | } |
ysr@777 | 1828 | |
ysr@777 | 1829 | // Iterates an ObjectClosure over all objects within a HeapRegion. |
ysr@777 | 1830 | |
ysr@777 | 1831 | class IterateObjectClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 1832 | ObjectClosure* _cl; |
ysr@777 | 1833 | public: |
ysr@777 | 1834 | IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} |
ysr@777 | 1835 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1836 | if (! r->continuesHumongous()) { |
ysr@777 | 1837 | r->object_iterate(_cl); |
ysr@777 | 1838 | } |
ysr@777 | 1839 | return false; |
ysr@777 | 1840 | } |
ysr@777 | 1841 | }; |
ysr@777 | 1842 | |
iveresov@1113 | 1843 | void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { |
ysr@777 | 1844 | IterateObjectClosureRegionClosure blk(cl); |
ysr@777 | 1845 | _hrs->iterate(&blk); |
iveresov@1113 | 1846 | if (do_perm) { |
iveresov@1113 | 1847 | perm_gen()->object_iterate(cl); |
iveresov@1113 | 1848 | } |
ysr@777 | 1849 | } |
ysr@777 | 1850 | |
ysr@777 | 1851 | void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { |
ysr@777 | 1852 | // FIXME: is this right? |
ysr@777 | 1853 | guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); |
ysr@777 | 1854 | } |
ysr@777 | 1855 | |
ysr@777 | 1856 | // Calls a SpaceClosure on a HeapRegion. |
ysr@777 | 1857 | |
ysr@777 | 1858 | class SpaceClosureRegionClosure: public HeapRegionClosure { |
ysr@777 | 1859 | SpaceClosure* _cl; |
ysr@777 | 1860 | public: |
ysr@777 | 1861 | SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} |
ysr@777 | 1862 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 1863 | _cl->do_space(r); |
ysr@777 | 1864 | return false; |
ysr@777 | 1865 | } |
ysr@777 | 1866 | }; |
ysr@777 | 1867 | |
ysr@777 | 1868 | void G1CollectedHeap::space_iterate(SpaceClosure* cl) { |
ysr@777 | 1869 | SpaceClosureRegionClosure blk(cl); |
ysr@777 | 1870 | _hrs->iterate(&blk); |
ysr@777 | 1871 | } |
ysr@777 | 1872 | |
ysr@777 | 1873 | void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { |
ysr@777 | 1874 | _hrs->iterate(cl); |
ysr@777 | 1875 | } |
ysr@777 | 1876 | |
ysr@777 | 1877 | void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, |
ysr@777 | 1878 | HeapRegionClosure* cl) { |
ysr@777 | 1879 | _hrs->iterate_from(r, cl); |
ysr@777 | 1880 | } |
ysr@777 | 1881 | |
ysr@777 | 1882 | void |
ysr@777 | 1883 | G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { |
ysr@777 | 1884 | _hrs->iterate_from(idx, cl); |
ysr@777 | 1885 | } |
ysr@777 | 1886 | |
ysr@777 | 1887 | HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } |
ysr@777 | 1888 | |
ysr@777 | 1889 | void |
ysr@777 | 1890 | G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
ysr@777 | 1891 | int worker, |
ysr@777 | 1892 | jint claim_value) { |
tonyp@790 | 1893 | const size_t regions = n_regions(); |
tonyp@790 | 1894 | const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); |
tonyp@790 | 1895 | // try to spread out the starting points of the workers |
tonyp@790 | 1896 | const size_t start_index = regions / worker_num * (size_t) worker; |
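// e.g. with regions = 64 and worker_num = 4, workers 0..3 start at
// regions 0, 16, 32 and 48 respectively, and each wraps around below
// so that every region is examined.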
tonyp@790 | 1897 | |
tonyp@790 | 1898 | // each worker will actually look at all regions |
tonyp@790 | 1899 | for (size_t count = 0; count < regions; ++count) { |
tonyp@790 | 1900 | const size_t index = (start_index + count) % regions; |
tonyp@790 | 1901 | assert(index < regions, "sanity"); // index is unsigned, so only the upper bound can fail |
tonyp@790 | 1902 | HeapRegion* r = region_at(index); |
tonyp@790 | 1903 | // we'll ignore "continues humongous" regions (we'll process them |
tonyp@790 | 1904 | // when we come across their corresponding "start humongous" |
tonyp@790 | 1905 | // region) and regions already claimed |
tonyp@790 | 1906 | if (r->claim_value() == claim_value || r->continuesHumongous()) { |
tonyp@790 | 1907 | continue; |
tonyp@790 | 1908 | } |
tonyp@790 | 1909 | // OK, try to claim it |
ysr@777 | 1910 | if (r->claimHeapRegion(claim_value)) { |
tonyp@790 | 1911 | // success! |
tonyp@790 | 1912 | assert(!r->continuesHumongous(), "sanity"); |
tonyp@790 | 1913 | if (r->startsHumongous()) { |
tonyp@790 | 1914 | // If the region is "starts humongous" we'll iterate over its |
tonyp@790 | 1915 | // "continues humongous" regions first. The order is important: |
tonyp@790 | 1916 | // in one case, calling the closure on the "starts humongous" |
tonyp@790 | 1917 | // region first might de-allocate and clear all its "continues |
tonyp@790 | 1918 | // humongous" regions and, as a result, we might end up |
tonyp@790 | 1919 | // processing them twice. So, we'll do them first (notice: most |
tonyp@790 | 1920 | // closures will ignore them anyway) and then we'll do the |
tonyp@790 | 1921 | // "starts humongous" region. |
tonyp@790 | 1922 | for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { |
tonyp@790 | 1923 | HeapRegion* chr = region_at(ch_index); |
tonyp@790 | 1924 | |
tonyp@790 | 1925 | // if the region has already been claimed or it's not |
tonyp@790 | 1926 | // "continues humongous" we're done |
tonyp@790 | 1927 | if (chr->claim_value() == claim_value || |
tonyp@790 | 1928 | !chr->continuesHumongous()) { |
tonyp@790 | 1929 | break; |
tonyp@790 | 1930 | } |
tonyp@790 | 1931 | |
tonyp@790 | 1932 | // No one should have claimed it directly. We can assert this, |
tonyp@790 | 1933 | // given that we claimed its "starts humongous" region. |
tonyp@790 | 1934 | assert(chr->claim_value() != claim_value, "sanity"); |
tonyp@790 | 1935 | assert(chr->humongous_start_region() == r, "sanity"); |
tonyp@790 | 1936 | |
tonyp@790 | 1937 | if (chr->claimHeapRegion(claim_value)) { |
tonyp@790 | 1938 | // we should always be able to claim it; no one else should |
tonyp@790 | 1939 | // be trying to claim this region |
tonyp@790 | 1940 | |
tonyp@790 | 1941 | bool res2 = cl->doHeapRegion(chr); |
tonyp@790 | 1942 | assert(!res2, "Should not abort"); |
tonyp@790 | 1943 | |
tonyp@790 | 1944 | // Right now, this holds (i.e., no closure that actually |
tonyp@790 | 1945 | // does something with "continues humongous" regions |
tonyp@790 | 1946 | // clears them). We might have to weaken it in the future, |
tonyp@790 | 1947 | // but let's leave these two asserts here for extra safety. |
tonyp@790 | 1948 | assert(chr->continuesHumongous(), "should still be the case"); |
tonyp@790 | 1949 | assert(chr->humongous_start_region() == r, "sanity"); |
tonyp@790 | 1950 | } else { |
tonyp@790 | 1951 | guarantee(false, "we should not reach here"); |
tonyp@790 | 1952 | } |
tonyp@790 | 1953 | } |
tonyp@790 | 1954 | } |
tonyp@790 | 1955 | |
tonyp@790 | 1956 | assert(!r->continuesHumongous(), "sanity"); |
tonyp@790 | 1957 | bool res = cl->doHeapRegion(r); |
tonyp@790 | 1958 | assert(!res, "Should not abort"); |
tonyp@790 | 1959 | } |
tonyp@790 | 1960 | } |
tonyp@790 | 1961 | } |
tonyp@790 | 1962 | |
tonyp@825 | 1963 | class ResetClaimValuesClosure: public HeapRegionClosure { |
tonyp@825 | 1964 | public: |
tonyp@825 | 1965 | bool doHeapRegion(HeapRegion* r) { |
tonyp@825 | 1966 | r->set_claim_value(HeapRegion::InitialClaimValue); |
tonyp@825 | 1967 | return false; |
tonyp@825 | 1968 | } |
tonyp@825 | 1969 | }; |
tonyp@825 | 1970 | |
tonyp@825 | 1971 | void |
tonyp@825 | 1972 | G1CollectedHeap::reset_heap_region_claim_values() { |
tonyp@825 | 1973 | ResetClaimValuesClosure blk; |
tonyp@825 | 1974 | heap_region_iterate(&blk); |
tonyp@825 | 1975 | } |
tonyp@825 | 1976 | |
tonyp@790 | 1977 | #ifdef ASSERT |
tonyp@790 | 1978 | // This checks whether all regions in the heap have the correct claim |
tonyp@790 | 1979 | // value. I also piggy-backed a check onto this to ensure that the |
tonyp@790 | 1980 | // humongous_start_region() information on "continues humongous" |
tonyp@790 | 1981 | // regions is correct. |
tonyp@790 | 1982 | |
tonyp@790 | 1983 | class CheckClaimValuesClosure : public HeapRegionClosure { |
tonyp@790 | 1984 | private: |
tonyp@790 | 1985 | jint _claim_value; |
tonyp@790 | 1986 | size_t _failures; |
tonyp@790 | 1987 | HeapRegion* _sh_region; |
tonyp@790 | 1988 | public: |
tonyp@790 | 1989 | CheckClaimValuesClosure(jint claim_value) : |
tonyp@790 | 1990 | _claim_value(claim_value), _failures(0), _sh_region(NULL) { } |
tonyp@790 | 1991 | bool doHeapRegion(HeapRegion* r) { |
tonyp@790 | 1992 | if (r->claim_value() != _claim_value) { |
tonyp@790 | 1993 | gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " |
tonyp@790 | 1994 | "claim value = %d, should be %d", |
tonyp@790 | 1995 | r->bottom(), r->end(), r->claim_value(), |
tonyp@790 | 1996 | _claim_value); |
tonyp@790 | 1997 | ++_failures; |
tonyp@790 | 1998 | } |
tonyp@790 | 1999 | if (!r->isHumongous()) { |
tonyp@790 | 2000 | _sh_region = NULL; |
tonyp@790 | 2001 | } else if (r->startsHumongous()) { |
tonyp@790 | 2002 | _sh_region = r; |
tonyp@790 | 2003 | } else if (r->continuesHumongous()) { |
tonyp@790 | 2004 | if (r->humongous_start_region() != _sh_region) { |
tonyp@790 | 2005 | gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " |
tonyp@790 | 2006 | "HS = "PTR_FORMAT", should be "PTR_FORMAT, |
tonyp@790 | 2007 | r->bottom(), r->end(), |
tonyp@790 | 2008 | r->humongous_start_region(), |
tonyp@790 | 2009 | _sh_region); |
tonyp@790 | 2010 | ++_failures; |
ysr@777 | 2011 | } |
ysr@777 | 2012 | } |
tonyp@790 | 2013 | return false; |
tonyp@790 | 2014 | } |
tonyp@790 | 2015 | size_t failures() { |
tonyp@790 | 2016 | return _failures; |
tonyp@790 | 2017 | } |
tonyp@790 | 2018 | }; |
tonyp@790 | 2019 | |
tonyp@790 | 2020 | bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { |
tonyp@790 | 2021 | CheckClaimValuesClosure cl(claim_value); |
tonyp@790 | 2022 | heap_region_iterate(&cl); |
tonyp@790 | 2023 | return cl.failures() == 0; |
tonyp@790 | 2024 | } |
tonyp@790 | 2025 | #endif // ASSERT |
ysr@777 | 2026 | |
ysr@777 | 2027 | void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { |
ysr@777 | 2028 | HeapRegion* r = g1_policy()->collection_set(); |
ysr@777 | 2029 | while (r != NULL) { |
ysr@777 | 2030 | HeapRegion* next = r->next_in_collection_set(); |
ysr@777 | 2031 | if (cl->doHeapRegion(r)) { |
ysr@777 | 2032 | cl->incomplete(); |
ysr@777 | 2033 | return; |
ysr@777 | 2034 | } |
ysr@777 | 2035 | r = next; |
ysr@777 | 2036 | } |
ysr@777 | 2037 | } |
ysr@777 | 2038 | |
ysr@777 | 2039 | void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, |
ysr@777 | 2040 | HeapRegionClosure *cl) { |
ysr@777 | 2041 | assert(r->in_collection_set(), |
ysr@777 | 2042 | "Start region must be a member of the collection set."); |
ysr@777 | 2043 | HeapRegion* cur = r; |
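// Note: the "&& false" in the two loop conditions below makes the
// incomplete()/early-return branch unreachable, so an abort request
// from the closure is currently ignored and the whole collection set
// is always walked.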
ysr@777 | 2044 | while (cur != NULL) { |
ysr@777 | 2045 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 2046 | if (cl->doHeapRegion(cur) && false) { |
ysr@777 | 2047 | cl->incomplete(); |
ysr@777 | 2048 | return; |
ysr@777 | 2049 | } |
ysr@777 | 2050 | cur = next; |
ysr@777 | 2051 | } |
ysr@777 | 2052 | cur = g1_policy()->collection_set(); |
ysr@777 | 2053 | while (cur != r) { |
ysr@777 | 2054 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 2055 | if (cl->doHeapRegion(cur) && false) { |
ysr@777 | 2056 | cl->incomplete(); |
ysr@777 | 2057 | return; |
ysr@777 | 2058 | } |
ysr@777 | 2059 | cur = next; |
ysr@777 | 2060 | } |
ysr@777 | 2061 | } |
ysr@777 | 2062 | |
ysr@777 | 2063 | CompactibleSpace* G1CollectedHeap::first_compactible_space() { |
ysr@777 | 2064 | return _hrs->length() > 0 ? _hrs->at(0) : NULL; |
ysr@777 | 2065 | } |
ysr@777 | 2066 | |
ysr@777 | 2067 | |
ysr@777 | 2068 | Space* G1CollectedHeap::space_containing(const void* addr) const { |
ysr@777 | 2069 | Space* res = heap_region_containing(addr); |
ysr@777 | 2070 | if (res == NULL) |
ysr@777 | 2071 | res = perm_gen()->space_containing(addr); |
ysr@777 | 2072 | return res; |
ysr@777 | 2073 | } |
ysr@777 | 2074 | |
ysr@777 | 2075 | HeapWord* G1CollectedHeap::block_start(const void* addr) const { |
ysr@777 | 2076 | Space* sp = space_containing(addr); |
ysr@777 | 2077 | if (sp != NULL) { |
ysr@777 | 2078 | return sp->block_start(addr); |
ysr@777 | 2079 | } |
ysr@777 | 2080 | return NULL; |
ysr@777 | 2081 | } |
ysr@777 | 2082 | |
ysr@777 | 2083 | size_t G1CollectedHeap::block_size(const HeapWord* addr) const { |
ysr@777 | 2084 | Space* sp = space_containing(addr); |
ysr@777 | 2085 | assert(sp != NULL, "block_size of address outside of heap"); |
ysr@777 | 2086 | return sp->block_size(addr); |
ysr@777 | 2087 | } |
ysr@777 | 2088 | |
ysr@777 | 2089 | bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { |
ysr@777 | 2090 | Space* sp = space_containing(addr); |
ysr@777 | 2091 | return sp->block_is_obj(addr); |
ysr@777 | 2092 | } |
ysr@777 | 2093 | |
ysr@777 | 2094 | bool G1CollectedHeap::supports_tlab_allocation() const { |
ysr@777 | 2095 | return true; |
ysr@777 | 2096 | } |
ysr@777 | 2097 | |
ysr@777 | 2098 | size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { |
ysr@777 | 2099 | return HeapRegion::GrainBytes; |
ysr@777 | 2100 | } |
ysr@777 | 2101 | |
ysr@777 | 2102 | size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { |
ysr@777 | 2103 | // Return the remaining space in the cur alloc region, but not less than |
ysr@777 | 2104 | // the min TLAB size. |
ysr@777 | 2105 | // Also, no more than half the region size, since we can't allow tlabs to |
ysr@777 | 2106 | // grow big enough to accommodate humongous objects. |
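// (Illustrative clamp: with GrainBytes = 1M, a region with 900K free
// yields MIN2(900K, 512K) = 512K, while a nearly full region still
// yields at least MinTLABSize.)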
ysr@777 | 2107 | |
ysr@777 | 2108 | // We need to store it locally, since it might change between when we |
ysr@777 | 2109 | // test for NULL and when we use it later. |
ysr@777 | 2110 | ContiguousSpace* cur_alloc_space = _cur_alloc_region; |
ysr@777 | 2111 | if (cur_alloc_space == NULL) { |
ysr@777 | 2112 | return HeapRegion::GrainBytes/2; |
ysr@777 | 2113 | } else { |
ysr@777 | 2114 | return MAX2(MIN2(cur_alloc_space->free(), |
ysr@777 | 2115 | (size_t)(HeapRegion::GrainBytes/2)), |
ysr@777 | 2116 | (size_t)MinTLABSize); |
ysr@777 | 2117 | } |
ysr@777 | 2118 | } |
ysr@777 | 2119 | |
ysr@777 | 2120 | HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) { |
ysr@777 | 2121 | bool dummy; |
ysr@777 | 2122 | return G1CollectedHeap::mem_allocate(size, false, true, &dummy); |
ysr@777 | 2123 | } |
ysr@777 | 2124 | |
ysr@777 | 2125 | bool G1CollectedHeap::allocs_are_zero_filled() { |
ysr@777 | 2126 | return false; |
ysr@777 | 2127 | } |
ysr@777 | 2128 | |
ysr@777 | 2129 | size_t G1CollectedHeap::large_typearray_limit() { |
ysr@777 | 2130 | // FIXME |
ysr@777 | 2131 | return HeapRegion::GrainBytes/HeapWordSize; |
ysr@777 | 2132 | } |
ysr@777 | 2133 | |
ysr@777 | 2134 | size_t G1CollectedHeap::max_capacity() const { |
tonyp@1527 | 2135 | return g1_reserved_obj_bytes(); |
ysr@777 | 2136 | } |
ysr@777 | 2137 | |
ysr@777 | 2138 | jlong G1CollectedHeap::millis_since_last_gc() { |
ysr@777 | 2139 | // assert(false, "NYI"); |
ysr@777 | 2140 | return 0; |
ysr@777 | 2141 | } |
ysr@777 | 2142 | |
ysr@777 | 2143 | |
ysr@777 | 2144 | void G1CollectedHeap::prepare_for_verify() { |
ysr@777 | 2145 | if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
ysr@777 | 2146 | ensure_parsability(false); |
ysr@777 | 2147 | } |
ysr@777 | 2148 | g1_rem_set()->prepare_for_verify(); |
ysr@777 | 2149 | } |
ysr@777 | 2150 | |
ysr@777 | 2151 | class VerifyLivenessOopClosure: public OopClosure { |
ysr@777 | 2152 | G1CollectedHeap* g1h; |
ysr@777 | 2153 | public: |
ysr@777 | 2154 | VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { |
ysr@777 | 2155 | g1h = _g1h; |
ysr@777 | 2156 | } |
ysr@1280 | 2157 | void do_oop(narrowOop *p) { do_oop_work(p); } |
ysr@1280 | 2158 | void do_oop( oop *p) { do_oop_work(p); } |
ysr@1280 | 2159 | |
ysr@1280 | 2160 | template <class T> void do_oop_work(T *p) { |
ysr@1280 | 2161 | oop obj = oopDesc::load_decode_heap_oop(p); |
ysr@1280 | 2162 | guarantee(obj == NULL || !g1h->is_obj_dead(obj), |
ysr@1280 | 2163 | "Dead object referenced by a not dead object"); |
ysr@777 | 2164 | } |
ysr@777 | 2165 | }; |
ysr@777 | 2166 | |
ysr@777 | 2167 | class VerifyObjsInRegionClosure: public ObjectClosure { |
tonyp@1246 | 2168 | private: |
ysr@777 | 2169 | G1CollectedHeap* _g1h; |
ysr@777 | 2170 | size_t _live_bytes; |
ysr@777 | 2171 | HeapRegion *_hr; |
tonyp@1246 | 2172 | bool _use_prev_marking; |
ysr@777 | 2173 | public: |
tonyp@1246 | 2174 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2175 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2176 | VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) |
tonyp@1246 | 2177 | : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { |
ysr@777 | 2178 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 2179 | } |
ysr@777 | 2180 | void do_object(oop o) { |
ysr@777 | 2181 | VerifyLivenessOopClosure isLive(_g1h); |
ysr@777 | 2182 | assert(o != NULL, "Huh?"); |
tonyp@1246 | 2183 | if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { |
ysr@777 | 2184 | o->oop_iterate(&isLive); |
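// Note: live bytes are accumulated against the "prev" marking
// regardless of _use_prev_marking; the caller compares this total
// against the region's max_live_bytes().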
ysr@777 | 2185 | if (!_hr->obj_allocated_since_prev_marking(o)) |
ysr@777 | 2186 | _live_bytes += (o->size() * HeapWordSize); |
ysr@777 | 2187 | } |
ysr@777 | 2188 | } |
ysr@777 | 2189 | size_t live_bytes() { return _live_bytes; } |
ysr@777 | 2190 | }; |
ysr@777 | 2191 | |
ysr@777 | 2192 | class PrintObjsInRegionClosure : public ObjectClosure { |
ysr@777 | 2193 | HeapRegion *_hr; |
ysr@777 | 2194 | G1CollectedHeap *_g1; |
ysr@777 | 2195 | public: |
ysr@777 | 2196 | PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { |
ysr@777 | 2197 | _g1 = G1CollectedHeap::heap(); |
ysr@777 | 2198 | } |
ysr@777 | 2199 | |
ysr@777 | 2200 | void do_object(oop o) { |
ysr@777 | 2201 | if (o != NULL) { |
ysr@777 | 2202 | HeapWord *start = (HeapWord *) o; |
ysr@777 | 2203 | size_t word_sz = o->size(); |
ysr@777 | 2204 | gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT |
ysr@777 | 2205 | " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", |
ysr@777 | 2206 | (void*) o, word_sz, |
ysr@777 | 2207 | _g1->isMarkedPrev(o), |
ysr@777 | 2208 | _g1->isMarkedNext(o), |
ysr@777 | 2209 | _hr->obj_allocated_since_prev_marking(o)); |
ysr@777 | 2210 | HeapWord *end = start + word_sz; |
ysr@777 | 2211 | HeapWord *cur; |
ysr@777 | 2212 | int *val; |
ysr@777 | 2213 | for (cur = start; cur < end; cur++) { |
ysr@777 | 2214 | val = (int *) cur; |
ysr@777 | 2215 | gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); |
ysr@777 | 2216 | } |
ysr@777 | 2217 | } |
ysr@777 | 2218 | } |
ysr@777 | 2219 | }; |
ysr@777 | 2220 | |
ysr@777 | 2221 | class VerifyRegionClosure: public HeapRegionClosure { |
tonyp@1246 | 2222 | private: |
ysr@777 | 2223 | bool _allow_dirty; |
tonyp@825 | 2224 | bool _par; |
tonyp@1246 | 2225 | bool _use_prev_marking; |
tonyp@1455 | 2226 | bool _failures; |
tonyp@1246 | 2227 | public: |
tonyp@1246 | 2228 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2229 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2230 | VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) |
ysr@1280 | 2231 | : _allow_dirty(allow_dirty), |
ysr@1280 | 2232 | _par(par), |
tonyp@1455 | 2233 | _use_prev_marking(use_prev_marking), |
tonyp@1455 | 2234 | _failures(false) {} |
tonyp@1455 | 2235 | |
tonyp@1455 | 2236 | bool failures() { |
tonyp@1455 | 2237 | return _failures; |
tonyp@1455 | 2238 | } |
ysr@1280 | 2239 | |
ysr@777 | 2240 | bool doHeapRegion(HeapRegion* r) { |
tonyp@825 | 2241 | guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, |
tonyp@825 | 2242 | "Should be unclaimed at verify points."); |
iveresov@1072 | 2243 | if (!r->continuesHumongous()) { |
tonyp@1455 | 2244 | bool failures = false; |
tonyp@1455 | 2245 | r->verify(_allow_dirty, _use_prev_marking, &failures); |
tonyp@1455 | 2246 | if (failures) { |
tonyp@1455 | 2247 | _failures = true; |
tonyp@1455 | 2248 | } else { |
tonyp@1455 | 2249 | VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); |
tonyp@1455 | 2250 | r->object_iterate(¬_dead_yet_cl); |
tonyp@1455 | 2251 | if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
tonyp@1455 | 2252 | gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " |
tonyp@1455 | 2253 | "max_live_bytes "SIZE_FORMAT" " |
tonyp@1455 | 2254 | "< calculated "SIZE_FORMAT, |
tonyp@1455 | 2255 | r->bottom(), r->end(), |
tonyp@1455 | 2256 | r->max_live_bytes(), |
tonyp@1455 | 2257 | not_dead_yet_cl.live_bytes()); |
tonyp@1455 | 2258 | _failures = true; |
tonyp@1455 | 2259 | } |
tonyp@1455 | 2260 | } |
ysr@777 | 2261 | } |
tonyp@1455 | 2262 | return false; // continue the iteration; any failure has been recorded in _failures |
ysr@777 | 2263 | } |
ysr@777 | 2264 | }; |
ysr@777 | 2265 | |
ysr@777 | 2266 | class VerifyRootsClosure: public OopsInGenClosure { |
ysr@777 | 2267 | private: |
ysr@777 | 2268 | G1CollectedHeap* _g1h; |
tonyp@1455 | 2269 | bool _use_prev_marking; |
ysr@777 | 2270 | bool _failures; |
ysr@777 | 2271 | public: |
tonyp@1246 | 2272 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2273 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2274 | VerifyRootsClosure(bool use_prev_marking) : |
ysr@1280 | 2275 | _g1h(G1CollectedHeap::heap()), |
tonyp@1455 | 2276 | _use_prev_marking(use_prev_marking), |
tonyp@1455 | 2277 | _failures(false) { } |
ysr@777 | 2278 | |
ysr@777 | 2279 | bool failures() { return _failures; } |
ysr@777 | 2280 | |
ysr@1280 | 2281 | template <class T> void do_oop_nv(T* p) { |
ysr@1280 | 2282 | T heap_oop = oopDesc::load_heap_oop(p); |
ysr@1280 | 2283 | if (!oopDesc::is_null(heap_oop)) { |
ysr@1280 | 2284 | oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
tonyp@1246 | 2285 | if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { |
ysr@777 | 2286 | gclog_or_tty->print_cr("Root location "PTR_FORMAT" " |
tonyp@1455 | 2287 | "points to dead obj "PTR_FORMAT, p, (void*) obj); |
ysr@777 | 2288 | obj->print_on(gclog_or_tty); |
ysr@777 | 2289 | _failures = true; |
ysr@777 | 2290 | } |
ysr@777 | 2291 | } |
ysr@777 | 2292 | } |
ysr@1280 | 2293 | |
ysr@1280 | 2294 | void do_oop(oop* p) { do_oop_nv(p); } |
ysr@1280 | 2295 | void do_oop(narrowOop* p) { do_oop_nv(p); } |
ysr@777 | 2296 | }; |
ysr@777 | 2297 | |
tonyp@825 | 2298 | // This is the task used for parallel heap verification. |
tonyp@825 | 2299 | |
tonyp@825 | 2300 | class G1ParVerifyTask: public AbstractGangTask { |
tonyp@825 | 2301 | private: |
tonyp@825 | 2302 | G1CollectedHeap* _g1h; |
tonyp@825 | 2303 | bool _allow_dirty; |
tonyp@1246 | 2304 | bool _use_prev_marking; |
tonyp@1455 | 2305 | bool _failures; |
tonyp@825 | 2306 | |
tonyp@825 | 2307 | public: |
tonyp@1246 | 2308 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 2309 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 2310 | G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, |
tonyp@1246 | 2311 | bool use_prev_marking) : |
tonyp@825 | 2312 | AbstractGangTask("Parallel verify task"), |
ysr@1280 | 2313 | _g1h(g1h), |
ysr@1280 | 2314 | _allow_dirty(allow_dirty), |
tonyp@1455 | 2315 | _use_prev_marking(use_prev_marking), |
tonyp@1455 | 2316 | _failures(false) { } |
tonyp@1455 | 2317 | |
tonyp@1455 | 2318 | bool failures() { |
tonyp@1455 | 2319 | return _failures; |
tonyp@1455 | 2320 | } |
tonyp@825 | 2321 | |
tonyp@825 | 2322 | void work(int worker_i) { |
iveresov@1072 | 2323 | HandleMark hm; |
tonyp@1246 | 2324 | VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); |
tonyp@825 | 2325 | _g1h->heap_region_par_iterate_chunked(&blk, worker_i, |
tonyp@825 | 2326 | HeapRegion::ParVerifyClaimValue); |
tonyp@1455 | 2327 | if (blk.failures()) { |
tonyp@1455 | 2328 | _failures = true; |
tonyp@1455 | 2329 | } |
tonyp@825 | 2330 | } |
tonyp@825 | 2331 | }; |
tonyp@825 | 2332 | |
ysr@777 | 2333 | void G1CollectedHeap::verify(bool allow_dirty, bool silent) { |
tonyp@1246 | 2334 | verify(allow_dirty, silent, /* use_prev_marking */ true); |
tonyp@1246 | 2335 | } |
tonyp@1246 | 2336 | |
tonyp@1246 | 2337 | void G1CollectedHeap::verify(bool allow_dirty, |
tonyp@1246 | 2338 | bool silent, |
tonyp@1246 | 2339 | bool use_prev_marking) { |
ysr@777 | 2340 | if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { |
ysr@777 | 2341 | if (!silent) { gclog_or_tty->print("roots "); } |
tonyp@1246 | 2342 | VerifyRootsClosure rootsCl(use_prev_marking); |
jrose@1424 | 2343 | CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); |
jrose@1424 | 2344 | process_strong_roots(true, // activate StrongRootsScope |
jrose@1424 | 2345 | false, |
ysr@777 | 2346 | SharedHeap::SO_AllClasses, |
ysr@777 | 2347 | &rootsCl, |
jrose@1424 | 2348 | &blobsCl, |
ysr@777 | 2349 | &rootsCl); |
tonyp@1455 | 2350 | bool failures = rootsCl.failures(); |
ysr@777 | 2351 | rem_set()->invalidate(perm_gen()->used_region(), false); |
ysr@777 | 2352 | if (!silent) { gclog_or_tty->print("heapRegions "); } |
tonyp@825 | 2353 | if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
tonyp@825 | 2354 | assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@825 | 2355 | "sanity check"); |
tonyp@825 | 2356 | |
tonyp@1246 | 2357 | G1ParVerifyTask task(this, allow_dirty, use_prev_marking); |
tonyp@825 | 2358 | int n_workers = workers()->total_workers(); |
tonyp@825 | 2359 | set_par_threads(n_workers); |
tonyp@825 | 2360 | workers()->run_task(&task); |
tonyp@825 | 2361 | set_par_threads(0); |
tonyp@1455 | 2362 | if (task.failures()) { |
tonyp@1455 | 2363 | failures = true; |
tonyp@1455 | 2364 | } |
tonyp@825 | 2365 | |
tonyp@825 | 2366 | assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), |
tonyp@825 | 2367 | "sanity check"); |
tonyp@825 | 2368 | |
tonyp@825 | 2369 | reset_heap_region_claim_values(); |
tonyp@825 | 2370 | |
tonyp@825 | 2371 | assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@825 | 2372 | "sanity check"); |
tonyp@825 | 2373 | } else { |
tonyp@1246 | 2374 | VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); |
tonyp@825 | 2375 | _hrs->iterate(&blk); |
tonyp@1455 | 2376 | if (blk.failures()) { |
tonyp@1455 | 2377 | failures = true; |
tonyp@1455 | 2378 | } |
tonyp@825 | 2379 | } |
ysr@777 | 2380 | if (!silent) gclog_or_tty->print("remset "); |
ysr@777 | 2381 | rem_set()->verify(); |
tonyp@1455 | 2382 | |
tonyp@1455 | 2383 | if (failures) { |
tonyp@1455 | 2384 | gclog_or_tty->print_cr("Heap:"); |
tonyp@1455 | 2385 | print_on(gclog_or_tty, true /* extended */); |
tonyp@1455 | 2386 | gclog_or_tty->print_cr(""); |
tonyp@1479 | 2387 | if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { |
tonyp@1479 | 2388 | concurrent_mark()->print_reachable(use_prev_marking, |
tonyp@1479 | 2389 | "failed-verification"); |
tonyp@1455 | 2390 | } |
tonyp@1455 | 2391 | gclog_or_tty->flush(); |
tonyp@1455 | 2392 | } |
tonyp@1455 | 2393 | guarantee(!failures, "there should not have been any failures"); |
ysr@777 | 2394 | } else { |
ysr@777 | 2395 | if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); |
ysr@777 | 2396 | } |
ysr@777 | 2397 | } |
ysr@777 | 2398 | |
ysr@777 | 2399 | class PrintRegionClosure: public HeapRegionClosure { |
ysr@777 | 2400 | outputStream* _st; |
ysr@777 | 2401 | public: |
ysr@777 | 2402 | PrintRegionClosure(outputStream* st) : _st(st) {} |
ysr@777 | 2403 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2404 | r->print_on(_st); |
ysr@777 | 2405 | return false; |
ysr@777 | 2406 | } |
ysr@777 | 2407 | }; |
ysr@777 | 2408 | |
tonyp@1273 | 2409 | void G1CollectedHeap::print() const { print_on(tty); } |
ysr@777 | 2410 | |
ysr@777 | 2411 | void G1CollectedHeap::print_on(outputStream* st) const { |
tonyp@1273 | 2412 | print_on(st, PrintHeapAtGCExtended); |
tonyp@1273 | 2413 | } |
tonyp@1273 | 2414 | |
tonyp@1273 | 2415 | void G1CollectedHeap::print_on(outputStream* st, bool extended) const { |
tonyp@1273 | 2416 | st->print(" %-20s", "garbage-first heap"); |
tonyp@1273 | 2417 | st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
tonyp@1281 | 2418 | capacity()/K, used_unlocked()/K); |
tonyp@1273 | 2419 | st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
tonyp@1273 | 2420 | _g1_storage.low_boundary(), |
tonyp@1273 | 2421 | _g1_storage.high(), |
tonyp@1273 | 2422 | _g1_storage.high_boundary()); |
tonyp@1273 | 2423 | st->cr(); |
tonyp@1273 | 2424 | st->print(" region size " SIZE_FORMAT "K, ", |
tonyp@1273 | 2425 | HeapRegion::GrainBytes/K); |
tonyp@1273 | 2426 | size_t young_regions = _young_list->length(); |
tonyp@1273 | 2427 | st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", |
tonyp@1273 | 2428 | young_regions, young_regions * HeapRegion::GrainBytes / K); |
tonyp@1273 | 2429 | size_t survivor_regions = g1_policy()->recorded_survivor_regions(); |
tonyp@1273 | 2430 | st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", |
tonyp@1273 | 2431 | survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); |
tonyp@1273 | 2432 | st->cr(); |
tonyp@1273 | 2433 | perm()->as_gen()->print_on(st); |
tonyp@1273 | 2434 | if (extended) { |
tonyp@1455 | 2435 | st->cr(); |
tonyp@1273 | 2436 | print_on_extended(st); |
tonyp@1273 | 2437 | } |
tonyp@1273 | 2438 | } |
tonyp@1273 | 2439 | |
tonyp@1273 | 2440 | void G1CollectedHeap::print_on_extended(outputStream* st) const { |
ysr@777 | 2441 | PrintRegionClosure blk(st); |
ysr@777 | 2442 | _hrs->iterate(&blk); |
ysr@777 | 2443 | } |
ysr@777 | 2444 | |
ysr@777 | 2445 | void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { |
ysr@777 | 2446 | if (ParallelGCThreads > 0) { |
tonyp@1454 | 2447 | workers()->print_worker_threads_on(st); |
tonyp@1454 | 2448 | } |
tonyp@1454 | 2449 | |
tonyp@1454 | 2450 | _cmThread->print_on(st); |
ysr@777 | 2451 | st->cr(); |
tonyp@1454 | 2452 | |
tonyp@1454 | 2453 | _cm->print_worker_threads_on(st); |
tonyp@1454 | 2454 | |
tonyp@1454 | 2455 | _cg1r->print_worker_threads_on(st); |
tonyp@1454 | 2456 | |
ysr@777 | 2457 | _czft->print_on(st); |
ysr@777 | 2458 | st->cr(); |
ysr@777 | 2459 | } |
ysr@777 | 2460 | |
ysr@777 | 2461 | void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
ysr@777 | 2462 | if (ParallelGCThreads > 0) { |
ysr@777 | 2463 | workers()->threads_do(tc); |
ysr@777 | 2464 | } |
ysr@777 | 2465 | tc->do_thread(_cmThread); |
iveresov@1229 | 2466 | _cg1r->threads_do(tc); |
ysr@777 | 2467 | tc->do_thread(_czft); |
ysr@777 | 2468 | } |
ysr@777 | 2469 | |
ysr@777 | 2470 | void G1CollectedHeap::print_tracing_info() const { |
ysr@777 | 2471 | // We'll overload this to mean "trace GC pause statistics." |
ysr@777 | 2472 | if (TraceGen0Time || TraceGen1Time) { |
ysr@777 | 2473 | // The "G1CollectorPolicy" is keeping track of these stats, so delegate |
ysr@777 | 2474 | // to that. |
ysr@777 | 2475 | g1_policy()->print_tracing_info(); |
ysr@777 | 2476 | } |
johnc@1186 | 2477 | if (G1SummarizeRSetStats) { |
ysr@777 | 2478 | g1_rem_set()->print_summary_info(); |
ysr@777 | 2479 | } |
johnc@1186 | 2480 | if (G1SummarizeConcurrentMark) { |
ysr@777 | 2481 | concurrent_mark()->print_summary_info(); |
ysr@777 | 2482 | } |
johnc@1186 | 2483 | if (G1SummarizeZFStats) { |
ysr@777 | 2484 | ConcurrentZFThread::print_summary_info(); |
ysr@777 | 2485 | } |
ysr@777 | 2486 | g1_policy()->print_yg_surv_rate_info(); |
ysr@777 | 2487 | |
ysr@777 | 2488 | SpecializationStats::print(); |
ysr@777 | 2489 | } |
ysr@777 | 2490 | |
ysr@777 | 2491 | |
ysr@777 | 2492 | int G1CollectedHeap::addr_to_arena_id(void* addr) const { |
ysr@777 | 2493 | HeapRegion* hr = heap_region_containing(addr); |
ysr@777 | 2494 | if (hr == NULL) { |
ysr@777 | 2495 | return 0; |
ysr@777 | 2496 | } else { |
ysr@777 | 2497 | return 1; |
ysr@777 | 2498 | } |
ysr@777 | 2499 | } |
ysr@777 | 2500 | |
ysr@777 | 2501 | G1CollectedHeap* G1CollectedHeap::heap() { |
ysr@777 | 2502 | assert(_sh->kind() == CollectedHeap::G1CollectedHeap, |
ysr@777 | 2503 | "not a garbage-first heap"); |
ysr@777 | 2504 | return _g1h; |
ysr@777 | 2505 | } |
ysr@777 | 2506 | |
ysr@777 | 2507 | void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { |
ysr@777 | 2508 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
ysr@777 | 2509 | // Call allocation profiler |
ysr@777 | 2510 | AllocationProfiler::iterate_since_last_gc(); |
ysr@777 | 2511 | // Fill TLAB's and such |
ysr@777 | 2512 | ensure_parsability(true); |
ysr@777 | 2513 | } |
ysr@777 | 2514 | |
ysr@777 | 2515 | void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { |
ysr@777 | 2516 | // FIXME: what is this about? |
ysr@777 | 2517 | // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" |
ysr@777 | 2518 | // is set. |
ysr@777 | 2519 | COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), |
ysr@777 | 2520 | "derived pointer present")); |
ysr@777 | 2521 | } |
ysr@777 | 2522 | |
ysr@777 | 2523 | void G1CollectedHeap::do_collection_pause() { |
ysr@777 | 2524 | // Read the GC count while holding the Heap_lock. |
ysr@777 | 2525 | // We need to do this _before_ wait_for_cleanup_complete(), to |
ysr@777 | 2526 | // ensure that we do not give up the heap lock and potentially |
ysr@777 | 2527 | // pick up the wrong count. |
ysr@777 | 2528 | int gc_count_before = SharedHeap::heap()->total_collections(); |
ysr@777 | 2529 | |
ysr@777 | 2530 | // Don't want to do a GC pause while cleanup is being completed! |
ysr@777 | 2531 | wait_for_cleanup_complete(); |
ysr@777 | 2532 | |
ysr@777 | 2533 | g1_policy()->record_stop_world_start(); |
ysr@777 | 2534 | { |
ysr@777 | 2535 | MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back |
ysr@777 | 2536 | VM_G1IncCollectionPause op(gc_count_before); |
ysr@777 | 2537 | VMThread::execute(&op); |
ysr@777 | 2538 | } |
ysr@777 | 2539 | } |
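// The gc_count_before sample above is what makes the pause request safe
// against races: the count is read while the Heap_lock is still held, and
// the VM operation can compare it with the then-current total_collections()
// to detect that another thread already triggered a collection in the
// window where the lock was given up. A hedged sketch of the consumer side
// (the names here are assumptions, not the actual VM_G1IncCollectionPause
// implementation):
//
//   void VM_ExamplePause::doit() {
//     if (G1CollectedHeap::heap()->total_collections() != _gc_count_before) {
//       return;  // a GC already happened; this request is stale
//     }
//     // ... perform the evacuation pause ...
//   }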
ysr@777 | 2540 | |
ysr@777 | 2541 | void |
ysr@777 | 2542 | G1CollectedHeap::doConcurrentMark() { |
ysr@1280 | 2543 | MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); |
ysr@1280 | 2544 | if (!_cmThread->in_progress()) { |
ysr@1280 | 2545 | _cmThread->set_started(); |
ysr@1280 | 2546 | CGC_lock->notify(); |
ysr@777 | 2547 | } |
ysr@777 | 2548 | } |
ysr@777 | 2549 | |
ysr@777 | 2550 | class VerifyMarkedObjsClosure: public ObjectClosure { |
ysr@777 | 2551 | G1CollectedHeap* _g1h; |
ysr@777 | 2552 | public: |
ysr@777 | 2553 | VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} |
ysr@777 | 2554 | void do_object(oop obj) { |
ysr@777 | 2555 | assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, |
ysr@777 | 2556 | "mark-and-sweep mark should agree with concurrent deadness"); |
ysr@777 | 2557 | } |
ysr@777 | 2558 | }; |
ysr@777 | 2559 | |
ysr@777 | 2560 | void |
ysr@777 | 2561 | G1CollectedHeap::checkConcurrentMark() { |
ysr@777 | 2562 | VerifyMarkedObjsClosure verifycl(this); |
ysr@777 | 2563 | // MutexLockerEx x(getMarkBitMapLock(), |
ysr@777 | 2564 | // Mutex::_no_safepoint_check_flag); |
iveresov@1113 | 2565 | object_iterate(&verifycl, false); |
ysr@777 | 2566 | } |
ysr@777 | 2567 | |
ysr@777 | 2568 | void G1CollectedHeap::do_sync_mark() { |
ysr@777 | 2569 | _cm->checkpointRootsInitial(); |
ysr@777 | 2570 | _cm->markFromRoots(); |
ysr@777 | 2571 | _cm->checkpointRootsFinal(false); |
ysr@777 | 2572 | } |
ysr@777 | 2573 | |
ysr@777 | 2574 | // <NEW PREDICTION> |
ysr@777 | 2575 | |
ysr@777 | 2576 | double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, |
ysr@777 | 2577 | bool young) { |
ysr@777 | 2578 | return _g1_policy->predict_region_elapsed_time_ms(hr, young); |
ysr@777 | 2579 | } |
ysr@777 | 2580 | |
ysr@777 | 2581 | void G1CollectedHeap::check_if_region_is_too_expensive(double |
ysr@777 | 2582 | predicted_time_ms) { |
ysr@777 | 2583 | _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); |
ysr@777 | 2584 | } |
ysr@777 | 2585 | |
ysr@777 | 2586 | size_t G1CollectedHeap::pending_card_num() { |
ysr@777 | 2587 | size_t extra_cards = 0; |
ysr@777 | 2588 | JavaThread *curr = Threads::first(); |
ysr@777 | 2589 | while (curr != NULL) { |
ysr@777 | 2590 | DirtyCardQueue& dcq = curr->dirty_card_queue(); |
ysr@777 | 2591 | extra_cards += dcq.size(); |
ysr@777 | 2592 | curr = curr->next(); |
ysr@777 | 2593 | } |
ysr@777 | 2594 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 2595 | size_t buffer_size = dcqs.buffer_size(); |
ysr@777 | 2596 | size_t buffer_num = dcqs.completed_buffers_num(); |
ysr@777 | 2597 | return buffer_size * buffer_num + extra_cards; |
ysr@777 | 2598 | } |
ysr@777 | 2599 | |
ysr@777 | 2600 | size_t G1CollectedHeap::max_pending_card_num() { |
ysr@777 | 2601 | DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); |
ysr@777 | 2602 | size_t buffer_size = dcqs.buffer_size(); |
ysr@777 | 2603 | size_t buffer_num = dcqs.completed_buffers_num(); |
ysr@777 | 2604 | int thread_num = Threads::number_of_threads(); |
ysr@777 | 2605 | return (buffer_num + thread_num) * buffer_size; |
ysr@777 | 2606 | } |
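// Both counts above are in cards: pending_card_num() sums the cards held
// in completed buffers plus whatever each Java thread has accumulated in
// its private dirty card queue, while max_pending_card_num() bounds this
// from above by assuming every thread's private buffer is completely full.
// A worked example with assumed (not actual) parameter values:
//
//   size_t buffer_size = 256;  // cards per buffer (assumption)
//   size_t buffer_num  = 10;   // completed buffers
//   int    thread_num  = 4;    // Java threads
//   // pending   <= 256 * 10 + (per-thread partial entries)
//   // max bound == (10 + 4) * 256 == 3584 cards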
ysr@777 | 2607 | |
ysr@777 | 2608 | size_t G1CollectedHeap::cards_scanned() { |
ysr@777 | 2609 | HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set(); |
ysr@777 | 2610 | return g1_rset->cardsScanned(); |
ysr@777 | 2611 | } |
ysr@777 | 2612 | |
ysr@777 | 2613 | void |
ysr@777 | 2614 | G1CollectedHeap::setup_surviving_young_words() { |
ysr@777 | 2615 | guarantee( _surviving_young_words == NULL, "pre-condition" ); |
ysr@777 | 2616 | size_t array_length = g1_policy()->young_cset_length(); |
ysr@777 | 2617 | _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); |
ysr@777 | 2618 | if (_surviving_young_words == NULL) { |
ysr@777 | 2619 | vm_exit_out_of_memory(sizeof(size_t) * array_length, |
ysr@777 | 2620 | "Not enough space for young surv words summary."); |
ysr@777 | 2621 | } |
ysr@777 | 2622 | memset(_surviving_young_words, 0, array_length * sizeof(size_t)); |
ysr@1280 | 2623 | #ifdef ASSERT |
ysr@777 | 2624 | for (size_t i = 0; i < array_length; ++i) { |
ysr@1280 | 2625 | assert( _surviving_young_words[i] == 0, "memset above" ); |
ysr@1280 | 2626 | } |
ysr@1280 | 2627 | #endif // ASSERT |
ysr@777 | 2628 | } |
ysr@777 | 2629 | |
ysr@777 | 2630 | void |
ysr@777 | 2631 | G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { |
ysr@777 | 2632 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 2633 | size_t array_length = g1_policy()->young_cset_length(); |
ysr@777 | 2634 | for (size_t i = 0; i < array_length; ++i) |
ysr@777 | 2635 | _surviving_young_words[i] += surv_young_words[i]; |
ysr@777 | 2636 | } |
ysr@777 | 2637 | |
ysr@777 | 2638 | void |
ysr@777 | 2639 | G1CollectedHeap::cleanup_surviving_young_words() { |
ysr@777 | 2640 | guarantee( _surviving_young_words != NULL, "pre-condition" ); |
ysr@777 | 2641 | FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); |
ysr@777 | 2642 | _surviving_young_words = NULL; |
ysr@777 | 2643 | } |
ysr@777 | 2644 | |
ysr@777 | 2645 | // </NEW PREDICTION> |
ysr@777 | 2646 | |
ysr@777 | 2647 | void |
apetrusenko@1112 | 2648 | G1CollectedHeap::do_collection_pause_at_safepoint() { |
tonyp@1273 | 2649 | if (PrintHeapAtGC) { |
tonyp@1273 | 2650 | Universe::print_heap_before_gc(); |
tonyp@1273 | 2651 | } |
tonyp@1273 | 2652 | |
tonyp@1273 | 2653 | { |
tonyp@1524 | 2654 | ResourceMark rm; |
tonyp@1524 | 2655 | |
tonyp@1273 | 2656 | char verbose_str[128]; |
tonyp@1273 | 2657 | sprintf(verbose_str, "GC pause "); |
tonyp@1273 | 2658 | if (g1_policy()->in_young_gc_mode()) { |
tonyp@1273 | 2659 | if (g1_policy()->full_young_gcs()) |
tonyp@1273 | 2660 | strcat(verbose_str, "(young)"); |
tonyp@1273 | 2661 | else |
tonyp@1273 | 2662 | strcat(verbose_str, "(partial)"); |
tonyp@1273 | 2663 | } |
tonyp@1273 | 2664 | if (g1_policy()->should_initiate_conc_mark()) |
tonyp@1273 | 2665 | strcat(verbose_str, " (initial-mark)"); |
tonyp@1273 | 2666 | |
tonyp@1273 | 2667 | // if PrintGCDetails is on, we'll print long statistics information |
tonyp@1273 | 2668 | // in the collector policy code, so let's not print this as the output |
tonyp@1273 | 2669 | // is messy if we do. |
tonyp@1273 | 2670 | gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); |
tonyp@1273 | 2671 | TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); |
tonyp@1273 | 2672 | TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); |
tonyp@1273 | 2673 | |
tonyp@1524 | 2674 | TraceMemoryManagerStats tms(false /* fullGC */); |
tonyp@1524 | 2675 | |
tonyp@1273 | 2676 | assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
tonyp@1273 | 2677 | assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); |
tonyp@1273 | 2678 | guarantee(!is_gc_active(), "collection is not reentrant"); |
tonyp@1273 | 2679 | assert(regions_accounted_for(), "Region leakage!"); |
tonyp@1273 | 2680 | |
tonyp@1273 | 2681 | increment_gc_time_stamp(); |
tonyp@1273 | 2682 | |
tonyp@1273 | 2683 | if (g1_policy()->in_young_gc_mode()) { |
tonyp@1273 | 2684 | assert(check_young_list_well_formed(), |
tonyp@1273 | 2685 | "young list should be well formed"); |
tonyp@1273 | 2686 | } |
tonyp@1273 | 2687 | |
tonyp@1273 | 2688 | if (GC_locker::is_active()) { |
tonyp@1273 | 2689 | return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
tonyp@1273 | 2690 | } |
tonyp@1273 | 2691 | |
tonyp@1273 | 2692 | bool abandoned = false; |
tonyp@1273 | 2693 | { // Call to jvmpi::post_class_unload_events must occur outside of active GC |
tonyp@1273 | 2694 | IsGCActiveMark x; |
tonyp@1273 | 2695 | |
tonyp@1273 | 2696 | gc_prologue(false); |
tonyp@1273 | 2697 | increment_total_collections(false /* full gc */); |
ysr@777 | 2698 | |
ysr@777 | 2699 | #if G1_REM_SET_LOGGING |
tonyp@1273 | 2700 | gclog_or_tty->print_cr("\nJust chose CS, heap:"); |
ysr@777 | 2701 | print(); |
ysr@777 | 2702 | #endif |
ysr@777 | 2703 | |
tonyp@1273 | 2704 | if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { |
tonyp@1273 | 2705 | HandleMark hm; // Discard invalid handles created during verification |
tonyp@1273 | 2706 | prepare_for_verify(); |
tonyp@1273 | 2707 | gclog_or_tty->print(" VerifyBeforeGC:"); |
tonyp@1273 | 2708 | Universe::verify(false); |
tonyp@1273 | 2709 | } |
tonyp@1273 | 2710 | |
tonyp@1273 | 2711 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
tonyp@1273 | 2712 | |
tonyp@1273 | 2713 | // We want to turn off ref discovery, if necessary, and turn it back |
ysr@1280 | 2714 | // on again later if we do. XXX Dubious: why is discovery disabled? |
tonyp@1273 | 2715 | bool was_enabled = ref_processor()->discovery_enabled(); |
tonyp@1273 | 2716 | if (was_enabled) ref_processor()->disable_discovery(); |
tonyp@1273 | 2717 | |
tonyp@1273 | 2718 | // Forget the current alloc region (we might even choose it to be part |
tonyp@1273 | 2719 | // of the collection set!). |
tonyp@1273 | 2720 | abandon_cur_alloc_region(); |
tonyp@1273 | 2721 | |
tonyp@1273 | 2722 | // The start time below is deliberately taken after the possible |
tonyp@1273 | 2723 | // verification above, so that verification time is not included. |
tonyp@1273 | 2724 | double start_time_sec = os::elapsedTime(); |
tonyp@1273 | 2725 | size_t start_used_bytes = used(); |
tonyp@1273 | 2726 | |
tonyp@1273 | 2727 | g1_policy()->record_collection_pause_start(start_time_sec, |
tonyp@1273 | 2728 | start_used_bytes); |
tonyp@1273 | 2729 | |
tonyp@1273 | 2730 | guarantee(_in_cset_fast_test == NULL, "invariant"); |
tonyp@1273 | 2731 | guarantee(_in_cset_fast_test_base == NULL, "invariant"); |
tonyp@1273 | 2732 | _in_cset_fast_test_length = max_regions(); |
tonyp@1273 | 2733 | _in_cset_fast_test_base = |
tonyp@1273 | 2734 | NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); |
tonyp@1273 | 2735 | memset(_in_cset_fast_test_base, false, |
tonyp@1273 | 2736 | _in_cset_fast_test_length * sizeof(bool)); |
tonyp@1273 | 2737 | // We're biasing _in_cset_fast_test to avoid subtracting the |
tonyp@1273 | 2738 | // beginning of the heap every time we want to index; basically |
tonyp@1273 | 2739 | // it's the same as what we do with the card table. |
tonyp@1273 | 2740 | _in_cset_fast_test = _in_cset_fast_test_base - |
tonyp@1273 | 2741 | ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); |
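// With the biased base above, mapping a heap address to its entry takes a
// single shift and index, with no explicit subtraction of the heap start.
// An illustrative lookup (a sketch, not the actual accessor):
//
//   bool in_cset_fast(HeapWord* addr) {
//     size_t index = (size_t) addr >> HeapRegion::LogOfHRGrainBytes;
//     return _in_cset_fast_test[index];  // base is already biased
//   }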
ysr@777 | 2742 | |
ysr@777 | 2743 | #if SCAN_ONLY_VERBOSE |
tonyp@1273 | 2744 | _young_list->print(); |
ysr@777 | 2745 | #endif // SCAN_ONLY_VERBOSE |
ysr@777 | 2746 | |
tonyp@1273 | 2747 | if (g1_policy()->should_initiate_conc_mark()) { |
tonyp@1273 | 2748 | concurrent_mark()->checkpointRootsInitialPre(); |
ysr@777 | 2749 | } |
tonyp@1273 | 2750 | save_marks(); |
tonyp@1273 | 2751 | |
tonyp@1273 | 2752 | // We must do this before any possible evacuation that should propagate |
tonyp@1273 | 2753 | // marks. |
tonyp@1273 | 2754 | if (mark_in_progress()) { |
tonyp@1273 | 2755 | double start_time_sec = os::elapsedTime(); |
tonyp@1273 | 2756 | |
tonyp@1273 | 2757 | _cm->drainAllSATBBuffers(); |
tonyp@1273 | 2758 | double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; |
tonyp@1273 | 2759 | g1_policy()->record_satb_drain_time(finish_mark_ms); |
tonyp@1273 | 2760 | } |
tonyp@1273 | 2761 | // Record the number of elements currently on the mark stack, so we |
tonyp@1273 | 2762 | // only iterate over these. (Since evacuation may add to the mark |
tonyp@1273 | 2763 | // stack, doing more exposes race conditions.) If no mark is in |
tonyp@1273 | 2764 | // progress, this will be zero. |
tonyp@1273 | 2765 | _cm->set_oops_do_bound(); |
tonyp@1273 | 2766 | |
tonyp@1273 | 2767 | assert(regions_accounted_for(), "Region leakage."); |
tonyp@1273 | 2768 | |
tonyp@1273 | 2769 | if (mark_in_progress()) |
tonyp@1273 | 2770 | concurrent_mark()->newCSet(); |
tonyp@1273 | 2771 | |
tonyp@1273 | 2772 | // Now choose the CS. |
tonyp@1273 | 2773 | g1_policy()->choose_collection_set(); |
tonyp@1273 | 2774 | |
tonyp@1273 | 2775 | // We may abandon a pause if we find no region that will fit within |
tonyp@1273 | 2776 | // the MMU-allowed pause time. |
tonyp@1273 | 2777 | abandoned = (g1_policy()->collection_set() == NULL); |
tonyp@1273 | 2778 | |
tonyp@1273 | 2779 | // Nothing to do if we were unable to choose a collection set. |
tonyp@1273 | 2780 | if (!abandoned) { |
tonyp@1273 | 2781 | #if G1_REM_SET_LOGGING |
tonyp@1273 | 2782 | gclog_or_tty->print_cr("\nAfter pause, heap:"); |
tonyp@1273 | 2783 | print(); |
tonyp@1273 | 2784 | #endif |
tonyp@1273 | 2785 | |
tonyp@1273 | 2786 | setup_surviving_young_words(); |
tonyp@1273 | 2787 | |
tonyp@1273 | 2788 | // Set up the gc allocation regions. |
tonyp@1273 | 2789 | get_gc_alloc_regions(); |
tonyp@1273 | 2790 | |
tonyp@1273 | 2791 | // Actually do the work... |
tonyp@1273 | 2792 | evacuate_collection_set(); |
tonyp@1273 | 2793 | free_collection_set(g1_policy()->collection_set()); |
tonyp@1273 | 2794 | g1_policy()->clear_collection_set(); |
tonyp@1273 | 2795 | |
tonyp@1273 | 2796 | FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); |
tonyp@1273 | 2797 | // this is more for peace of mind; we're nulling them here and |
tonyp@1273 | 2798 | // we're expecting them to be null at the beginning of the next GC |
tonyp@1273 | 2799 | _in_cset_fast_test = NULL; |
tonyp@1273 | 2800 | _in_cset_fast_test_base = NULL; |
tonyp@1273 | 2801 | |
tonyp@1273 | 2802 | cleanup_surviving_young_words(); |
tonyp@1273 | 2803 | |
tonyp@1273 | 2804 | if (g1_policy()->in_young_gc_mode()) { |
tonyp@1273 | 2805 | _young_list->reset_sampled_info(); |
tonyp@1273 | 2806 | assert(check_young_list_empty(true), |
tonyp@1273 | 2807 | "young list should be empty"); |
tonyp@1273 | 2808 | |
tonyp@1273 | 2809 | #if SCAN_ONLY_VERBOSE |
tonyp@1273 | 2810 | _young_list->print(); |
tonyp@1273 | 2811 | #endif // SCAN_ONLY_VERBOSE |
tonyp@1273 | 2812 | |
tonyp@1273 | 2813 | g1_policy()->record_survivor_regions(_young_list->survivor_length(), |
tonyp@1273 | 2814 | _young_list->first_survivor_region(), |
tonyp@1273 | 2815 | _young_list->last_survivor_region()); |
tonyp@1273 | 2816 | _young_list->reset_auxilary_lists(); |
tonyp@1273 | 2817 | } |
tonyp@1273 | 2818 | } else { |
ysr@1523 | 2819 | if (_in_cset_fast_test != NULL) { |
ysr@1523 | 2820 | assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't"); |
ysr@1523 | 2821 | FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); |
ysr@1523 | 2822 | // this is more for peace of mind; we're nulling them here and |
ysr@1523 | 2823 | // we're expecting them to be null at the beginning of the next GC |
ysr@1523 | 2824 | _in_cset_fast_test = NULL; |
ysr@1523 | 2825 | _in_cset_fast_test_base = NULL; |
ysr@1523 | 2826 | } |
ysr@1523 | 2827 | // This looks confusing, because the DPT should really be empty |
ysr@1523 | 2828 | // at this point -- since we have not done any collection work, |
ysr@1523 | 2829 | // there should not be any derived pointers in the table to update; |
ysr@1523 | 2830 | // however, there is some additional state in the DPT which is |
ysr@1523 | 2831 | // reset at the end of the (null) "gc" here via the following call. |
ysr@1523 | 2832 | // A better approach might be to split off that state resetting work |
ysr@1523 | 2833 | // into a separate method that asserts that the DPT is empty and call |
ysr@1523 | 2834 | // that here. That is deferred for now. |
tonyp@1273 | 2835 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
tonyp@1273 | 2836 | } |
tonyp@1273 | 2837 | |
tonyp@1273 | 2838 | if (evacuation_failed()) { |
tonyp@1273 | 2839 | _summary_bytes_used = recalculate_used(); |
tonyp@1273 | 2840 | } else { |
tonyp@1273 | 2841 | // The "used" of the collection set regions has already been subtracted |
tonyp@1273 | 2842 | // when they were freed. Add in the bytes evacuated. |
tonyp@1273 | 2843 | _summary_bytes_used += g1_policy()->bytes_in_to_space(); |
tonyp@1273 | 2844 | } |
tonyp@1273 | 2845 | |
tonyp@1273 | 2846 | if (g1_policy()->in_young_gc_mode() && |
tonyp@1273 | 2847 | g1_policy()->should_initiate_conc_mark()) { |
tonyp@1273 | 2848 | concurrent_mark()->checkpointRootsInitialPost(); |
tonyp@1273 | 2849 | set_marking_started(); |
ysr@1280 | 2850 | // CAUTION: after the doConcurrentMark() call below, |
ysr@1280 | 2851 | // the concurrent marking thread(s) could be running |
ysr@1280 | 2852 | // concurrently with us. Make sure that anything after |
ysr@1280 | 2853 | // this point does not assume that we are the only GC thread |
ysr@1280 | 2854 | // running. Note: of course, the actual marking work will |
ysr@1280 | 2855 | // not start until the safepoint itself is released in |
ysr@1280 | 2856 | // ConcurrentGCThread::safepoint_desynchronize(). |
tonyp@1273 | 2857 | doConcurrentMark(); |
tonyp@1273 | 2858 | } |
tonyp@1273 | 2859 | |
tonyp@1273 | 2860 | #if SCAN_ONLY_VERBOSE |
tonyp@1273 | 2861 | _young_list->print(); |
tonyp@1273 | 2862 | #endif // SCAN_ONLY_VERBOSE |
tonyp@1273 | 2863 | |
tonyp@1273 | 2864 | double end_time_sec = os::elapsedTime(); |
tonyp@1273 | 2865 | double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; |
tonyp@1273 | 2866 | g1_policy()->record_pause_time_ms(pause_time_ms); |
tonyp@1273 | 2867 | g1_policy()->record_collection_pause_end(abandoned); |
tonyp@1273 | 2868 | |
tonyp@1273 | 2869 | assert(regions_accounted_for(), "Region leakage."); |
tonyp@1273 | 2870 | |
tonyp@1524 | 2871 | MemoryService::track_memory_usage(); |
tonyp@1524 | 2872 | |
tonyp@1273 | 2873 | if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { |
tonyp@1273 | 2874 | HandleMark hm; // Discard invalid handles created during verification |
tonyp@1273 | 2875 | gclog_or_tty->print(" VerifyAfterGC:"); |
tonyp@1273 | 2876 | prepare_for_verify(); |
tonyp@1273 | 2877 | Universe::verify(false); |
tonyp@1273 | 2878 | } |
tonyp@1273 | 2879 | |
tonyp@1273 | 2880 | if (was_enabled) ref_processor()->enable_discovery(); |
tonyp@1273 | 2881 | |
tonyp@1273 | 2882 | { |
tonyp@1273 | 2883 | size_t expand_bytes = g1_policy()->expansion_amount(); |
tonyp@1273 | 2884 | if (expand_bytes > 0) { |
tonyp@1273 | 2885 | size_t bytes_before = capacity(); |
tonyp@1273 | 2886 | expand(expand_bytes); |
tonyp@1273 | 2887 | } |
tonyp@1273 | 2888 | } |
tonyp@1273 | 2889 | |
tonyp@1273 | 2890 | if (mark_in_progress()) { |
tonyp@1273 | 2891 | concurrent_mark()->update_g1_committed(); |
tonyp@1273 | 2892 | } |
tonyp@1273 | 2893 | |
tonyp@1273 | 2894 | #ifdef TRACESPINNING |
tonyp@1273 | 2895 | ParallelTaskTerminator::print_termination_counts(); |
tonyp@1273 | 2896 | #endif |
tonyp@1273 | 2897 | |
tonyp@1273 | 2898 | gc_epilogue(false); |
ysr@777 | 2899 | } |
ysr@777 | 2900 | |
tonyp@1273 | 2901 | assert(verify_region_lists(), "Bad region lists."); |
tonyp@1273 | 2902 | |
tonyp@1273 | 2903 | if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { |
tonyp@1273 | 2904 | gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); |
tonyp@1273 | 2905 | print_tracing_info(); |
tonyp@1273 | 2906 | vm_exit(-1); |
ysr@777 | 2907 | } |
tonyp@1273 | 2908 | } |
tonyp@1273 | 2909 | |
tonyp@1273 | 2910 | if (PrintHeapAtGC) { |
tonyp@1273 | 2911 | Universe::print_heap_after_gc(); |
ysr@777 | 2912 | } |
tonyp@1319 | 2913 | if (G1SummarizeRSetStats && |
tonyp@1319 | 2914 | (G1SummarizeRSetStatsPeriod > 0) && |
tonyp@1319 | 2915 | (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { |
tonyp@1319 | 2916 | g1_rem_set()->print_summary_info(); |
tonyp@1319 | 2917 | } |
ysr@777 | 2918 | } |
ysr@777 | 2919 | |
ysr@777 | 2920 | void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { |
ysr@777 | 2921 | assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); |
tonyp@1071 | 2922 | // make sure we don't call set_gc_alloc_region() multiple times on |
tonyp@1071 | 2923 | // the same region |
tonyp@1071 | 2924 | assert(r == NULL || !r->is_gc_alloc_region(), |
tonyp@1071 | 2925 | "shouldn't already be a GC alloc region"); |
ysr@777 | 2926 | HeapWord* original_top = NULL; |
ysr@777 | 2927 | if (r != NULL) |
ysr@777 | 2928 | original_top = r->top(); |
ysr@777 | 2929 | |
ysr@777 | 2930 | // We will want to record the used space in r as being there before the GC. |
ysr@777 | 2931 | // Once we install it as a GC alloc region it's eligible for allocation. |
ysr@777 | 2932 | // So record it now and use it later. |
ysr@777 | 2933 | size_t r_used = 0; |
ysr@777 | 2934 | if (r != NULL) { |
ysr@777 | 2935 | r_used = r->used(); |
ysr@777 | 2936 | |
ysr@777 | 2937 | if (ParallelGCThreads > 0) { |
ysr@777 | 2938 | // need to take the lock to guard against two threads calling |
ysr@777 | 2939 | // get_gc_alloc_region concurrently (very unlikely but...) |
ysr@777 | 2940 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 2941 | r->save_marks(); |
ysr@777 | 2942 | } |
ysr@777 | 2943 | } |
ysr@777 | 2944 | HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 2945 | _gc_alloc_regions[purpose] = r; |
ysr@777 | 2946 | if (old_alloc_region != NULL) { |
ysr@777 | 2947 | // Replace aliases too. |
ysr@777 | 2948 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 2949 | if (_gc_alloc_regions[ap] == old_alloc_region) { |
ysr@777 | 2950 | _gc_alloc_regions[ap] = r; |
ysr@777 | 2951 | } |
ysr@777 | 2952 | } |
ysr@777 | 2953 | } |
ysr@777 | 2954 | if (r != NULL) { |
ysr@777 | 2955 | push_gc_alloc_region(r); |
ysr@777 | 2956 | if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { |
ysr@777 | 2957 | // We are using a region as a GC alloc region after it has been used |
ysr@777 | 2958 | // as a mutator allocation region during the current marking cycle. |
ysr@777 | 2959 | // The mutator-allocated objects are currently implicitly marked, but |
ysr@777 | 2960 | // when we move hr->next_top_at_mark_start() forward at the end |
ysr@777 | 2961 | // of the GC pause, they won't be. We therefore mark all objects in |
ysr@777 | 2962 | // the "gap". We do this object-by-object, since marking densely |
ysr@777 | 2963 | // does not currently work right with marking bitmap iteration. This |
ysr@777 | 2964 | // means we rely on TLAB filling at the start of pauses, and no |
ysr@777 | 2965 | // "resuscitation" of filled TLABs. If we want to do this, we need |
ysr@777 | 2966 | // to fix the marking bitmap iteration. |
ysr@777 | 2967 | HeapWord* curhw = r->next_top_at_mark_start(); |
ysr@777 | 2968 | HeapWord* t = original_top; |
ysr@777 | 2969 | |
ysr@777 | 2970 | while (curhw < t) { |
ysr@777 | 2971 | oop cur = (oop)curhw; |
ysr@777 | 2972 | // We'll assume parallel for generality. This is rare code. |
ysr@777 | 2973 | concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? |
ysr@777 | 2974 | curhw = curhw + cur->size(); |
ysr@777 | 2975 | } |
ysr@777 | 2976 | assert(curhw == t, "Should have parsed correctly."); |
ysr@777 | 2977 | } |
ysr@777 | 2978 | if (G1PolicyVerbose > 1) { |
ysr@777 | 2979 | gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " |
ysr@777 | 2980 | "for survivors:", r->bottom(), original_top, r->end()); |
ysr@777 | 2981 | r->print(); |
ysr@777 | 2982 | } |
ysr@777 | 2983 | g1_policy()->record_before_bytes(r_used); |
ysr@777 | 2984 | } |
ysr@777 | 2985 | } |
ysr@777 | 2986 | |
ysr@777 | 2987 | void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { |
ysr@777 | 2988 | assert(Thread::current()->is_VM_thread() || |
ysr@777 | 2989 | par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); |
ysr@777 | 2990 | assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), |
ysr@777 | 2991 | "Precondition."); |
ysr@777 | 2992 | hr->set_is_gc_alloc_region(true); |
ysr@777 | 2993 | hr->set_next_gc_alloc_region(_gc_alloc_region_list); |
ysr@777 | 2994 | _gc_alloc_region_list = hr; |
ysr@777 | 2995 | } |
ysr@777 | 2996 | |
ysr@777 | 2997 | #ifdef G1_DEBUG |
ysr@777 | 2998 | class FindGCAllocRegion: public HeapRegionClosure { |
ysr@777 | 2999 | public: |
ysr@777 | 3000 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 3001 | if (r->is_gc_alloc_region()) { |
ysr@777 | 3002 | gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", |
ysr@777 | 3003 | r->hrs_index(), r->bottom()); |
ysr@777 | 3004 | } |
ysr@777 | 3005 | return false; |
ysr@777 | 3006 | } |
ysr@777 | 3007 | }; |
ysr@777 | 3008 | #endif // G1_DEBUG |
ysr@777 | 3009 | |
ysr@777 | 3010 | void G1CollectedHeap::forget_alloc_region_list() { |
ysr@777 | 3011 | assert(Thread::current()->is_VM_thread(), "Precondition"); |
ysr@777 | 3012 | while (_gc_alloc_region_list != NULL) { |
ysr@777 | 3013 | HeapRegion* r = _gc_alloc_region_list; |
ysr@777 | 3014 | assert(r->is_gc_alloc_region(), "Invariant."); |
iveresov@1072 | 3015 | // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on |
iveresov@1072 | 3016 | // newly allocated data in order to be able to apply deferred updates |
iveresov@1072 | 3017 | // before the GC is done, for verification purposes (i.e. to allow |
iveresov@1072 | 3018 | // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the |
iveresov@1072 | 3019 | // collection. |
iveresov@1072 | 3020 | r->ContiguousSpace::set_saved_mark(); |
ysr@777 | 3021 | _gc_alloc_region_list = r->next_gc_alloc_region(); |
ysr@777 | 3022 | r->set_next_gc_alloc_region(NULL); |
ysr@777 | 3023 | r->set_is_gc_alloc_region(false); |
apetrusenko@980 | 3024 | if (r->is_survivor()) { |
apetrusenko@980 | 3025 | if (r->is_empty()) { |
apetrusenko@980 | 3026 | r->set_not_young(); |
apetrusenko@980 | 3027 | } else { |
apetrusenko@980 | 3028 | _young_list->add_survivor_region(r); |
apetrusenko@980 | 3029 | } |
apetrusenko@980 | 3030 | } |
ysr@777 | 3031 | if (r->is_empty()) { |
ysr@777 | 3032 | ++_free_regions; |
ysr@777 | 3033 | } |
ysr@777 | 3034 | } |
ysr@777 | 3035 | #ifdef G1_DEBUG |
ysr@777 | 3036 | FindGCAllocRegion fa; |
ysr@777 | 3037 | heap_region_iterate(&fa); |
ysr@777 | 3038 | #endif // G1_DEBUG |
ysr@777 | 3039 | } |
ysr@777 | 3040 | |
ysr@777 | 3041 | |
ysr@777 | 3042 | bool G1CollectedHeap::check_gc_alloc_regions() { |
ysr@777 | 3043 | // TODO: allocation regions check |
ysr@777 | 3044 | return true; |
ysr@777 | 3045 | } |
ysr@777 | 3046 | |
ysr@777 | 3047 | void G1CollectedHeap::get_gc_alloc_regions() { |
tonyp@1071 | 3048 | // First, let's check that the GC alloc region list is empty (it should be) |
tonyp@1071 | 3049 | assert(_gc_alloc_region_list == NULL, "invariant"); |
tonyp@1071 | 3050 | |
ysr@777 | 3051 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
tonyp@1071 | 3052 | assert(_gc_alloc_regions[ap] == NULL, "invariant"); |
apetrusenko@1296 | 3053 | assert(_gc_alloc_region_counts[ap] == 0, "invariant"); |
tonyp@1071 | 3054 | |
ysr@777 | 3055 | // Create new GC alloc regions. |
tonyp@1071 | 3056 | HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; |
tonyp@1071 | 3057 | _retained_gc_alloc_regions[ap] = NULL; |
tonyp@1071 | 3058 | |
tonyp@1071 | 3059 | if (alloc_region != NULL) { |
tonyp@1071 | 3060 | assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); |
tonyp@1071 | 3061 | |
tonyp@1071 | 3062 | // let's make sure that the GC alloc region is not tagged as such |
tonyp@1071 | 3063 | // outside a GC operation |
tonyp@1071 | 3064 | assert(!alloc_region->is_gc_alloc_region(), "sanity"); |
tonyp@1071 | 3065 | |
tonyp@1071 | 3066 | if (alloc_region->in_collection_set() || |
tonyp@1071 | 3067 | alloc_region->top() == alloc_region->end() || |
tonyp@1071 | 3068 | alloc_region->top() == alloc_region->bottom()) { |
tonyp@1071 | 3069 | // we will discard the current GC alloc region if it's in the |
tonyp@1071 | 3070 | // collection set (it can happen!), if it's already full (no |
tonyp@1071 | 3071 | // point in using it), or if it's empty (this means that it |
tonyp@1071 | 3072 | // was emptied during a cleanup and it should be on the free |
tonyp@1071 | 3073 | // list now). |
tonyp@1071 | 3074 | |
tonyp@1071 | 3075 | alloc_region = NULL; |
tonyp@1071 | 3076 | } |
tonyp@1071 | 3077 | } |
tonyp@1071 | 3078 | |
tonyp@1071 | 3079 | if (alloc_region == NULL) { |
tonyp@1071 | 3080 | // we will get a new GC alloc region |
ysr@777 | 3081 | alloc_region = newAllocRegionWithExpansion(ap, 0); |
apetrusenko@1296 | 3082 | } else { |
apetrusenko@1296 | 3083 | // the region was retained from the last collection |
apetrusenko@1296 | 3084 | ++_gc_alloc_region_counts[ap]; |
ysr@777 | 3085 | } |
tonyp@1071 | 3086 | |
ysr@777 | 3087 | if (alloc_region != NULL) { |
tonyp@1071 | 3088 | assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); |
ysr@777 | 3089 | set_gc_alloc_region(ap, alloc_region); |
ysr@777 | 3090 | } |
tonyp@1071 | 3091 | |
tonyp@1071 | 3092 | assert(_gc_alloc_regions[ap] == NULL || |
tonyp@1071 | 3093 | _gc_alloc_regions[ap]->is_gc_alloc_region(), |
tonyp@1071 | 3094 | "the GC alloc region should be tagged as such"); |
tonyp@1071 | 3095 | assert(_gc_alloc_regions[ap] == NULL || |
tonyp@1071 | 3096 | _gc_alloc_regions[ap] == _gc_alloc_region_list, |
tonyp@1071 | 3097 | "the GC alloc region should be the same as the GC alloc list head"); |
ysr@777 | 3098 | } |
ysr@777 | 3099 | // Set alternative regions for allocation purposes that have reached |
tonyp@1071 | 3100 | // their limit. |
ysr@777 | 3101 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 3102 | GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); |
ysr@777 | 3103 | if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { |
ysr@777 | 3104 | _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; |
ysr@777 | 3105 | } |
ysr@777 | 3106 | } |
ysr@777 | 3107 | assert(check_gc_alloc_regions(), "alloc regions messed up"); |
ysr@777 | 3108 | } |
ysr@777 | 3109 | |
tonyp@1071 | 3110 | void G1CollectedHeap::release_gc_alloc_regions(bool totally) { |
ysr@777 | 3111 | // We keep a separate list of all regions that have been alloc regions in |
tonyp@1071 | 3112 | // the current collection pause. Forget that now. This method will |
tonyp@1071 | 3113 | // untag the GC alloc regions and tear down the GC alloc region |
tonyp@1071 | 3114 | // list. It's desirable that no regions are tagged as GC alloc |
tonyp@1071 | 3115 | // outside GCs. |
ysr@777 | 3116 | forget_alloc_region_list(); |
ysr@777 | 3117 | |
ysr@777 | 3118 | // The current alloc regions contain objs that have survived |
ysr@777 | 3119 | // collection. Make them no longer GC alloc regions. |
ysr@777 | 3120 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 3121 | HeapRegion* r = _gc_alloc_regions[ap]; |
tonyp@1071 | 3122 | _retained_gc_alloc_regions[ap] = NULL; |
apetrusenko@1296 | 3123 | _gc_alloc_region_counts[ap] = 0; |
tonyp@1071 | 3124 | |
tonyp@1071 | 3125 | if (r != NULL) { |
tonyp@1071 | 3126 | // we retain nothing on _gc_alloc_regions between GCs |
tonyp@1071 | 3127 | set_gc_alloc_region(ap, NULL); |
tonyp@1071 | 3128 | |
tonyp@1071 | 3129 | if (r->is_empty()) { |
tonyp@1071 | 3130 | // we didn't actually allocate anything in it; let's just put |
tonyp@1071 | 3131 | // it on the free list |
ysr@777 | 3132 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 3133 | r->set_zero_fill_complete(); |
ysr@777 | 3134 | put_free_region_on_list_locked(r); |
tonyp@1071 | 3135 | } else if (_retain_gc_alloc_region[ap] && !totally) { |
tonyp@1071 | 3136 | // retain it so that we can use it at the beginning of the next GC |
tonyp@1071 | 3137 | _retained_gc_alloc_regions[ap] = r; |
ysr@777 | 3138 | } |
ysr@777 | 3139 | } |
tonyp@1071 | 3140 | } |
tonyp@1071 | 3141 | } |
tonyp@1071 | 3142 | |
tonyp@1071 | 3143 | #ifndef PRODUCT |
tonyp@1071 | 3144 | // Useful for debugging |
tonyp@1071 | 3145 | |
tonyp@1071 | 3146 | void G1CollectedHeap::print_gc_alloc_regions() { |
tonyp@1071 | 3147 | gclog_or_tty->print_cr("GC alloc regions"); |
tonyp@1071 | 3148 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
tonyp@1071 | 3149 | HeapRegion* r = _gc_alloc_regions[ap]; |
tonyp@1071 | 3150 | if (r == NULL) { |
tonyp@1071 | 3151 | gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); |
tonyp@1071 | 3152 | } else { |
tonyp@1071 | 3153 | gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, |
tonyp@1071 | 3154 | ap, r->bottom(), r->used()); |
tonyp@1071 | 3155 | } |
tonyp@1071 | 3156 | } |
tonyp@1071 | 3157 | } |
tonyp@1071 | 3158 | #endif // PRODUCT |
ysr@777 | 3159 | |
ysr@777 | 3160 | void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { |
ysr@777 | 3161 | _drain_in_progress = false; |
ysr@777 | 3162 | set_evac_failure_closure(cl); |
ysr@777 | 3163 | _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); |
ysr@777 | 3164 | } |
ysr@777 | 3165 | |
ysr@777 | 3166 | void G1CollectedHeap::finalize_for_evac_failure() { |
ysr@777 | 3167 | assert(_evac_failure_scan_stack != NULL && |
ysr@777 | 3168 | _evac_failure_scan_stack->length() == 0, |
ysr@777 | 3169 | "Postcondition"); |
ysr@777 | 3170 | assert(!_drain_in_progress, "Postcondition"); |
apetrusenko@1480 | 3171 | delete _evac_failure_scan_stack; |
ysr@777 | 3172 | _evac_failure_scan_stack = NULL; |
ysr@777 | 3173 | } |
ysr@777 | 3174 | |
ysr@777 | 3175 | |
ysr@777 | 3176 | |
ysr@777 | 3177 | // *** Sequential G1 Evacuation |
ysr@777 | 3178 | |
ysr@777 | 3179 | HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { |
ysr@777 | 3180 | HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 3181 | // let the caller handle alloc failure |
ysr@777 | 3182 | if (alloc_region == NULL) return NULL; |
ysr@777 | 3183 | assert(isHumongous(word_size) || !alloc_region->isHumongous(), |
ysr@777 | 3184 | "Either the object is humongous or the region isn't"); |
ysr@777 | 3185 | HeapWord* block = alloc_region->allocate(word_size); |
ysr@777 | 3186 | if (block == NULL) { |
ysr@777 | 3187 | block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); |
ysr@777 | 3188 | } |
ysr@777 | 3189 | return block; |
ysr@777 | 3190 | } |
ysr@777 | 3191 | |
ysr@777 | 3192 | class G1IsAliveClosure: public BoolObjectClosure { |
ysr@777 | 3193 | G1CollectedHeap* _g1; |
ysr@777 | 3194 | public: |
ysr@777 | 3195 | G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
ysr@777 | 3196 | void do_object(oop p) { assert(false, "Do not call."); } |
ysr@777 | 3197 | bool do_object_b(oop p) { |
ysr@777 | 3198 | // It is reachable if it is outside the collection set, or is inside |
ysr@777 | 3199 | // and forwarded. |
ysr@777 | 3200 | |
ysr@777 | 3201 | #ifdef G1_DEBUG |
ysr@777 | 3202 | gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", |
ysr@777 | 3203 | (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), |
ysr@777 | 3204 | !_g1->obj_in_cs(p) || p->is_forwarded()); |
ysr@777 | 3205 | #endif // G1_DEBUG |
ysr@777 | 3206 | |
ysr@777 | 3207 | return !_g1->obj_in_cs(p) || p->is_forwarded(); |
ysr@777 | 3208 | } |
ysr@777 | 3209 | }; |
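// The predicate above encodes the pause-time notion of liveness: an object
// outside the collection set was never a candidate for copying and so is
// trivially reachable; an object inside the collection set is reachable
// exactly when it has been forwarded (copied, or self-forwarded on
// evacuation failure). The three possible outcomes:
//
//   !obj_in_cs(p)                        -> alive
//    obj_in_cs(p) &&  p->is_forwarded()  -> alive (follow forwardee)
//    obj_in_cs(p) && !p->is_forwarded()  -> dead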
ysr@777 | 3210 | |
ysr@777 | 3211 | class G1KeepAliveClosure: public OopClosure { |
ysr@777 | 3212 | G1CollectedHeap* _g1; |
ysr@777 | 3213 | public: |
ysr@777 | 3214 | G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
ysr@1280 | 3215 | void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } |
ysr@1280 | 3216 | void do_oop( oop* p) { |
ysr@777 | 3217 | oop obj = *p; |
ysr@777 | 3218 | #ifdef G1_DEBUG |
ysr@777 | 3219 | if (PrintGC && Verbose) { |
ysr@777 | 3220 | gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, |
ysr@777 | 3221 | p, (void*) obj, (void*) *p); |
ysr@777 | 3222 | } |
ysr@777 | 3223 | #endif // G1_DEBUG |
ysr@777 | 3224 | |
ysr@777 | 3225 | if (_g1->obj_in_cs(obj)) { |
ysr@777 | 3226 | assert( obj->is_forwarded(), "invariant" ); |
ysr@777 | 3227 | *p = obj->forwardee(); |
ysr@777 | 3228 | #ifdef G1_DEBUG |
ysr@777 | 3229 | gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, |
ysr@777 | 3230 | (void*) obj, (void*) *p); |
ysr@777 | 3231 | #endif // G1_DEBUG |
ysr@777 | 3232 | } |
ysr@777 | 3233 | } |
ysr@777 | 3234 | }; |
ysr@777 | 3235 | |
iveresov@1051 | 3236 | class UpdateRSetImmediate : public OopsInHeapRegionClosure { |
ysr@777 | 3237 | private: |
ysr@777 | 3238 | G1CollectedHeap* _g1; |
ysr@777 | 3239 | G1RemSet* _g1_rem_set; |
ysr@777 | 3240 | public: |
iveresov@1051 | 3241 | UpdateRSetImmediate(G1CollectedHeap* g1) : |
iveresov@1051 | 3242 | _g1(g1), _g1_rem_set(g1->g1_rem_set()) {} |
ysr@777 | 3243 | |
ysr@1280 | 3244 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 3245 | virtual void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 3246 | template <class T> void do_oop_work(T* p) { |
ysr@777 | 3247 | assert(_from->is_in_reserved(p), "paranoia"); |
ysr@1280 | 3248 | T heap_oop = oopDesc::load_heap_oop(p); |
ysr@1280 | 3249 | if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) { |
iveresov@1051 | 3250 | _g1_rem_set->par_write_ref(_from, p, 0); |
ysr@777 | 3251 | } |
ysr@777 | 3252 | } |
ysr@777 | 3253 | }; |
ysr@777 | 3254 | |
iveresov@1051 | 3255 | class UpdateRSetDeferred : public OopsInHeapRegionClosure { |
iveresov@1051 | 3256 | private: |
iveresov@1051 | 3257 | G1CollectedHeap* _g1; |
iveresov@1051 | 3258 | DirtyCardQueue *_dcq; |
iveresov@1051 | 3259 | CardTableModRefBS* _ct_bs; |
iveresov@1051 | 3260 | |
iveresov@1051 | 3261 | public: |
iveresov@1051 | 3262 | UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : |
iveresov@1051 | 3263 | _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} |
iveresov@1051 | 3264 | |
ysr@1280 | 3265 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 3266 | virtual void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 3267 | template <class T> void do_oop_work(T* p) { |
iveresov@1051 | 3268 | assert(_from->is_in_reserved(p), "paranoia"); |
ysr@1280 | 3269 | if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && |
ysr@1280 | 3270 | !_from->is_survivor()) { |
iveresov@1051 | 3271 | size_t card_index = _ct_bs->index_for(p); |
iveresov@1051 | 3272 | if (_ct_bs->mark_card_deferred(card_index)) { |
iveresov@1051 | 3273 | _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); |
iveresov@1051 | 3274 | } |
iveresov@1051 | 3275 | } |
iveresov@1051 | 3276 | } |
iveresov@1051 | 3277 | }; |
iveresov@1051 | 3278 | |
iveresov@1051 | 3279 | |
iveresov@1051 | 3280 | |
ysr@777 | 3281 | class RemoveSelfPointerClosure: public ObjectClosure { |
ysr@777 | 3282 | private: |
ysr@777 | 3283 | G1CollectedHeap* _g1; |
ysr@777 | 3284 | ConcurrentMark* _cm; |
ysr@777 | 3285 | HeapRegion* _hr; |
ysr@777 | 3286 | size_t _prev_marked_bytes; |
ysr@777 | 3287 | size_t _next_marked_bytes; |
iveresov@1051 | 3288 | OopsInHeapRegionClosure *_cl; |
ysr@777 | 3289 | public: |
iveresov@1051 | 3290 | RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : |
iveresov@1051 | 3291 | _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), |
iveresov@1051 | 3292 | _next_marked_bytes(0), _cl(cl) {} |
ysr@777 | 3293 | |
ysr@777 | 3294 | size_t prev_marked_bytes() { return _prev_marked_bytes; } |
ysr@777 | 3295 | size_t next_marked_bytes() { return _next_marked_bytes; } |
ysr@777 | 3296 | |
iveresov@787 | 3297 | // The original idea here was to coalesce evacuated and dead objects. |
iveresov@787 | 3298 | // However that caused complications with the block offset table (BOT). |
iveresov@787 | 3299 | // In particular, consider two TLABs, one of them partially refined: |
iveresov@787 | 3300 | // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| |
iveresov@787 | 3301 | // The BOT entries of the unrefined part of TLAB_2 point to the start |
iveresov@787 | 3302 | // of TLAB_2. If the last object of the TLAB_1 and the first object |
iveresov@787 | 3303 | // of TLAB_2 are coalesced, then the cards of the unrefined part |
iveresov@787 | 3304 | // would point into the middle of the filler object. |
iveresov@787 | 3305 | // |
iveresov@787 | 3306 | // The current approach is to not coalesce and leave the BOT contents intact. |
iveresov@787 | 3307 | void do_object(oop obj) { |
iveresov@787 | 3308 | if (obj->is_forwarded() && obj->forwardee() == obj) { |
iveresov@787 | 3309 | // The object failed to move. |
iveresov@787 | 3310 | assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs."); |
iveresov@787 | 3311 | _cm->markPrev(obj); |
iveresov@787 | 3312 | assert(_cm->isPrevMarked(obj), "Should be marked!"); |
iveresov@787 | 3313 | _prev_marked_bytes += (obj->size() * HeapWordSize); |
iveresov@787 | 3314 | if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) { |
iveresov@787 | 3315 | _cm->markAndGrayObjectIfNecessary(obj); |
iveresov@787 | 3316 | } |
iveresov@787 | 3317 | obj->set_mark(markOopDesc::prototype()); |
iveresov@787 | 3318 | // While we were processing RSet buffers during the |
iveresov@787 | 3319 | // collection, we actually didn't scan any cards on the |
iveresov@787 | 3320 | // collection set, since we didn't want to update remembered |
iveresov@787 | 3321 | // sets with entries that point into the collection set, given |
iveresov@787 | 3322 | // that live objects from the collection set are about to move |
iveresov@787 | 3323 | // and such entries will be stale very soon. This change also |
iveresov@787 | 3324 | // dealt with a reliability issue which involved scanning a |
iveresov@787 | 3325 | // card in the collection set and coming across an array that |
iveresov@787 | 3326 | // was being chunked and looking malformed. The problem is |
iveresov@787 | 3327 | // that, if evacuation fails, we might have remembered set |
iveresov@787 | 3328 | // entries missing given that we skipped cards on the |
iveresov@787 | 3329 | // collection set. So, we'll recreate such entries now. |
iveresov@1051 | 3330 | obj->oop_iterate(_cl); |
iveresov@787 | 3331 | assert(_cm->isPrevMarked(obj), "Should be marked!"); |
iveresov@787 | 3332 | } else { |
iveresov@787 | 3333 | // The object has been either evacuated or is dead. Fill it with a |
iveresov@787 | 3334 | // dummy object. |
iveresov@787 | 3335 | MemRegion mr((HeapWord*)obj, obj->size()); |
jcoomes@916 | 3336 | CollectedHeap::fill_with_object(mr); |
ysr@777 | 3337 | _cm->clearRangeBothMaps(mr); |
ysr@777 | 3338 | } |
ysr@777 | 3339 | } |
ysr@777 | 3340 | }; |
ysr@777 | 3341 | |
ysr@777 | 3342 | void G1CollectedHeap::remove_self_forwarding_pointers() { |
iveresov@1051 | 3343 | UpdateRSetImmediate immediate_update(_g1h); |
iveresov@1051 | 3344 | DirtyCardQueue dcq(&_g1h->dirty_card_queue_set()); |
iveresov@1051 | 3345 | UpdateRSetDeferred deferred_update(_g1h, &dcq); |
iveresov@1051 | 3346 | OopsInHeapRegionClosure *cl; |
iveresov@1051 | 3347 | if (G1DeferredRSUpdate) { |
iveresov@1051 | 3348 | cl = &deferred_update; |
iveresov@1051 | 3349 | } else { |
iveresov@1051 | 3350 | cl = &immediate_update; |
iveresov@1051 | 3351 | } |
ysr@777 | 3352 | HeapRegion* cur = g1_policy()->collection_set(); |
ysr@777 | 3353 | while (cur != NULL) { |
ysr@777 | 3354 | assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
ysr@777 | 3355 | |
iveresov@1051 | 3356 | RemoveSelfPointerClosure rspc(_g1h, cl); |
ysr@777 | 3357 | if (cur->evacuation_failed()) { |
ysr@777 | 3358 | assert(cur->in_collection_set(), "bad CS"); |
iveresov@1051 | 3359 | cl->set_region(cur); |
ysr@777 | 3360 | cur->object_iterate(&rspc); |
ysr@777 | 3361 | |
ysr@777 | 3362 | // A number of manipulations to make the TAMS be the current top, |
ysr@777 | 3363 | // and the marked bytes be the ones observed in the iteration. |
ysr@777 | 3364 | if (_g1h->concurrent_mark()->at_least_one_mark_complete()) { |
ysr@777 | 3365 | // The comments below are the postconditions achieved by the |
ysr@777 | 3366 | // calls. Note especially the last such condition, which says that |
ysr@777 | 3367 | // the count of marked bytes has been properly restored. |
ysr@777 | 3368 | cur->note_start_of_marking(false); |
ysr@777 | 3369 | // _next_top_at_mark_start == top, _next_marked_bytes == 0 |
ysr@777 | 3370 | cur->add_to_marked_bytes(rspc.prev_marked_bytes()); |
ysr@777 | 3371 | // _next_marked_bytes == prev_marked_bytes. |
ysr@777 | 3372 | cur->note_end_of_marking(); |
ysr@777 | 3373 | // _prev_top_at_mark_start == top(), |
ysr@777 | 3374 | // _prev_marked_bytes == prev_marked_bytes |
ysr@777 | 3375 | } |
ysr@777 | 3376 | // If there is no mark in progress, we modified the _next variables |
ysr@777 | 3377 | // above needlessly, but harmlessly. |
ysr@777 | 3378 | if (_g1h->mark_in_progress()) { |
ysr@777 | 3379 | cur->note_start_of_marking(false); |
ysr@777 | 3380 | // _next_top_at_mark_start == top, _next_marked_bytes == 0 |
ysr@777 | 3381 | // _next_marked_bytes == next_marked_bytes. |
ysr@777 | 3382 | } |
ysr@777 | 3383 | |
ysr@777 | 3384 | // Now make sure the region has the right index in the sorted array. |
ysr@777 | 3385 | g1_policy()->note_change_in_marked_bytes(cur); |
ysr@777 | 3386 | } |
ysr@777 | 3387 | cur = cur->next_in_collection_set(); |
ysr@777 | 3388 | } |
ysr@777 | 3389 | assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); |
ysr@777 | 3390 | |
ysr@777 | 3391 | // Now restore saved marks, if any. |
ysr@777 | 3392 | if (_objs_with_preserved_marks != NULL) { |
ysr@777 | 3393 | assert(_preserved_marks_of_objs != NULL, "Both or none."); |
ysr@777 | 3394 | assert(_objs_with_preserved_marks->length() == |
ysr@777 | 3395 | _preserved_marks_of_objs->length(), "Both or none."); |
ysr@777 | 3396 | guarantee(_objs_with_preserved_marks->length() == |
ysr@777 | 3397 | _preserved_marks_of_objs->length(), "Both or none."); |
ysr@777 | 3398 | for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { |
ysr@777 | 3399 | oop obj = _objs_with_preserved_marks->at(i); |
ysr@777 | 3400 | markOop m = _preserved_marks_of_objs->at(i); |
ysr@777 | 3401 | obj->set_mark(m); |
ysr@777 | 3402 | } |
ysr@777 | 3403 | // Delete the preserved marks growable arrays (allocated on the C heap). |
ysr@777 | 3404 | delete _objs_with_preserved_marks; |
ysr@777 | 3405 | delete _preserved_marks_of_objs; |
ysr@777 | 3406 | _objs_with_preserved_marks = NULL; |
ysr@777 | 3407 | _preserved_marks_of_objs = NULL; |
ysr@777 | 3408 | } |
ysr@777 | 3409 | } |
ysr@777 | 3410 | |
ysr@777 | 3411 | void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { |
ysr@777 | 3412 | _evac_failure_scan_stack->push(obj); |
ysr@777 | 3413 | } |
ysr@777 | 3414 | |
ysr@777 | 3415 | void G1CollectedHeap::drain_evac_failure_scan_stack() { |
ysr@777 | 3416 | assert(_evac_failure_scan_stack != NULL, "precondition"); |
ysr@777 | 3417 | |
ysr@777 | 3418 | while (_evac_failure_scan_stack->length() > 0) { |
ysr@777 | 3419 | oop obj = _evac_failure_scan_stack->pop(); |
ysr@777 | 3420 | _evac_failure_closure->set_region(heap_region_containing(obj)); |
ysr@777 | 3421 | obj->oop_iterate_backwards(_evac_failure_closure); |
ysr@777 | 3422 | } |
ysr@777 | 3423 | } |
ysr@777 | 3424 | |
ysr@777 | 3425 | void G1CollectedHeap::handle_evacuation_failure(oop old) { |
ysr@777 | 3426 | markOop m = old->mark(); |
ysr@777 | 3427 | // forward to self |
ysr@777 | 3428 | assert(!old->is_forwarded(), "precondition"); |
ysr@777 | 3429 | |
ysr@777 | 3430 | old->forward_to(old); |
ysr@777 | 3431 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3432 | } |
ysr@777 | 3433 | |
ysr@777 | 3434 | oop |
ysr@777 | 3435 | G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, |
ysr@777 | 3436 | oop old) { |
ysr@777 | 3437 | markOop m = old->mark(); |
ysr@777 | 3438 | oop forward_ptr = old->forward_to_atomic(old); |
ysr@777 | 3439 | if (forward_ptr == NULL) { |
ysr@777 | 3440 | // Forward-to-self succeeded. |
ysr@777 | 3441 | if (_evac_failure_closure != cl) { |
ysr@777 | 3442 | MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 3443 | assert(!_drain_in_progress, |
ysr@777 | 3444 | "Should only be true while someone holds the lock."); |
ysr@777 | 3445 | // Set the global evac-failure closure to the current thread's. |
ysr@777 | 3446 | assert(_evac_failure_closure == NULL, "Or locking has failed."); |
ysr@777 | 3447 | set_evac_failure_closure(cl); |
ysr@777 | 3448 | // Now do the common part. |
ysr@777 | 3449 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3450 | // Reset to NULL. |
ysr@777 | 3451 | set_evac_failure_closure(NULL); |
ysr@777 | 3452 | } else { |
ysr@777 | 3453 | // The lock is already held, and this is recursive. |
ysr@777 | 3454 | assert(_drain_in_progress, "This should only be the recursive case."); |
ysr@777 | 3455 | handle_evacuation_failure_common(old, m); |
ysr@777 | 3456 | } |
ysr@777 | 3457 | return old; |
ysr@777 | 3458 | } else { |
ysr@777 | 3459 | // Someone else had a place to copy it. |
ysr@777 | 3460 | return forward_ptr; |
ysr@777 | 3461 | } |
ysr@777 | 3462 | } |
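// Editor's note: forward_to_atomic(old) is a CAS on old's mark word; a
// NULL result means this thread installed the self-forward first and
// therefore owns the failure handling, while a non-NULL result is the
// forwardee some other thread won with. A hedged standalone sketch of
// that claim/lose protocol (std::atomic stands in for the mark word):
//
//   #include <atomic>
//   struct Node { std::atomic<Node*> fwd; Node() : fwd(0) {} };
//   // Returns NULL if we claimed n, else the winner's forwardee.
//   static Node* forward_to_atomic(Node* n, Node* new_loc) {
//     Node* expected = 0;
//     if (n->fwd.compare_exchange_strong(expected, new_loc))
//       return 0;          // we won the race
//     return expected;     // already forwarded by someone else
//   }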
ysr@777 | 3463 | |
ysr@777 | 3464 | void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { |
ysr@777 | 3465 | set_evacuation_failed(true); |
ysr@777 | 3466 | |
ysr@777 | 3467 | preserve_mark_if_necessary(old, m); |
ysr@777 | 3468 | |
ysr@777 | 3469 | HeapRegion* r = heap_region_containing(old); |
ysr@777 | 3470 | if (!r->evacuation_failed()) { |
ysr@777 | 3471 | r->set_evacuation_failed(true); |
johnc@1186 | 3472 | if (G1PrintRegions) { |
ysr@777 | 3473 | gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" " |
ysr@777 | 3474 | "["PTR_FORMAT","PTR_FORMAT")\n", |
ysr@777 | 3475 | r, r->bottom(), r->end()); |
ysr@777 | 3476 | } |
ysr@777 | 3477 | } |
ysr@777 | 3478 | |
ysr@777 | 3479 | push_on_evac_failure_scan_stack(old); |
ysr@777 | 3480 | |
ysr@777 | 3481 | if (!_drain_in_progress) { |
ysr@777 | 3482 | // prevent recursion in copy_to_survivor_space() |
ysr@777 | 3483 | _drain_in_progress = true; |
ysr@777 | 3484 | drain_evac_failure_scan_stack(); |
ysr@777 | 3485 | _drain_in_progress = false; |
ysr@777 | 3486 | } |
ysr@777 | 3487 | } |
ysr@777 | 3488 | |
ysr@777 | 3489 | void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { |
ysr@777 | 3490 | if (m != markOopDesc::prototype()) { |
ysr@777 | 3491 | if (_objs_with_preserved_marks == NULL) { |
ysr@777 | 3492 | assert(_preserved_marks_of_objs == NULL, "Both or none."); |
ysr@777 | 3493 | _objs_with_preserved_marks = |
ysr@777 | 3494 | new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); |
ysr@777 | 3495 | _preserved_marks_of_objs = |
ysr@777 | 3496 | new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); |
ysr@777 | 3497 | } |
ysr@777 | 3498 | _objs_with_preserved_marks->push(obj); |
ysr@777 | 3499 | _preserved_marks_of_objs->push(m); |
ysr@777 | 3500 | } |
ysr@777 | 3501 | } |
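// Editor's note: only marks that differ from markOopDesc::prototype()
// are saved -- a default header ("no hash, no lock, age 0") carries no
// information and can simply be re-installed when the self-forwarding
// pointers are removed. Sketch of the filter (editor's restatement):
//
//   if (m != markOopDesc::prototype()) {
//     objs.push(obj); marks.push(m);  // parallel arrays, same index
//   } // else: nothing worth preserving; the prototype is re-created later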
ysr@777 | 3502 | |
ysr@777 | 3503 | // *** Parallel G1 Evacuation |
ysr@777 | 3504 | |
ysr@777 | 3505 | HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, |
ysr@777 | 3506 | size_t word_size) { |
ysr@777 | 3507 | HeapRegion* alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 3508 | // let the caller handle alloc failure |
ysr@777 | 3509 | if (alloc_region == NULL) return NULL; |
ysr@777 | 3510 | |
ysr@777 | 3511 | HeapWord* block = alloc_region->par_allocate(word_size); |
ysr@777 | 3512 | if (block == NULL) { |
ysr@777 | 3513 | MutexLockerEx x(par_alloc_during_gc_lock(), |
ysr@777 | 3514 | Mutex::_no_safepoint_check_flag); |
ysr@777 | 3515 | block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); |
ysr@777 | 3516 | } |
ysr@777 | 3517 | return block; |
ysr@777 | 3518 | } |
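// Editor's note: this is the "lock-free fast path, locked slow path"
// idiom -- par_allocate() CAS-bumps the region's top, and the lock is
// taken only when the region is exhausted and must be retired/replaced.
// A minimal standalone sketch of such a bump-pointer fast path over a
// region [top, end) (std::atomic used for illustration):
//
//   #include <atomic>
//   #include <cstddef>
//   static std::atomic<char*> _top;
//   static char* _end;
//   static char* par_allocate(size_t bytes) {
//     char* old = _top.load();
//     do {
//       if (old + bytes > _end) return 0;  // full: caller takes the lock
//     } while (!_top.compare_exchange_weak(old, old + bytes));
//     return old;                          // we own [old, old + bytes)
//   }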
ysr@777 | 3519 | |
apetrusenko@980 | 3520 | void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, |
apetrusenko@980 | 3521 | bool par) { |
apetrusenko@980 | 3522 | // Another thread might have obtained alloc_region for the given |
apetrusenko@980 | 3523 | // purpose, and might be attempting to allocate in it, and might |
apetrusenko@980 | 3524 | // succeed. Therefore, we can't do the "finalization" stuff on the |
apetrusenko@980 | 3525 | // region below until we're sure the last allocation has happened. |
apetrusenko@980 | 3526 | // We ensure this by allocating the remaining space with a garbage |
apetrusenko@980 | 3527 | // object. |
apetrusenko@980 | 3528 | if (par) par_allocate_remaining_space(alloc_region); |
apetrusenko@980 | 3529 | // Now we can do the post-GC stuff on the region. |
apetrusenko@980 | 3530 | alloc_region->note_end_of_copying(); |
apetrusenko@980 | 3531 | g1_policy()->record_after_bytes(alloc_region->used()); |
apetrusenko@980 | 3532 | } |
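// Editor's note: the filler allocation is what makes retirement safe --
// once par_allocate_remaining_space() has claimed the tail, no racing
// par_allocate() can succeed, so used() below is final. Rough order of
// events (editor's summary):
//
//   par_allocate_remaining_space(r);  // claim + format the leftover tail
//   r->note_end_of_copying();         // safe: no further allocation
//   record_after_bytes(r->used());    // stable by construction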
apetrusenko@980 | 3533 | |
ysr@777 | 3534 | HeapWord* |
ysr@777 | 3535 | G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, |
ysr@777 | 3536 | HeapRegion* alloc_region, |
ysr@777 | 3537 | bool par, |
ysr@777 | 3538 | size_t word_size) { |
ysr@777 | 3539 | HeapWord* block = NULL; |
ysr@777 | 3540 | // In the parallel case, another thread that obtained the lock before
ysr@777 | 3541 | // us may have already assigned a new gc_alloc_region.
ysr@777 | 3542 | if (alloc_region != _gc_alloc_regions[purpose]) { |
ysr@777 | 3543 | assert(par, "But should only happen in parallel case."); |
ysr@777 | 3544 | alloc_region = _gc_alloc_regions[purpose]; |
ysr@777 | 3545 | if (alloc_region == NULL) return NULL; |
ysr@777 | 3546 | block = alloc_region->par_allocate(word_size); |
ysr@777 | 3547 | if (block != NULL) return block; |
ysr@777 | 3548 | // Otherwise, continue; this new region is empty, too. |
ysr@777 | 3549 | } |
ysr@777 | 3550 | assert(alloc_region != NULL, "We better have an allocation region"); |
apetrusenko@980 | 3551 | retire_alloc_region(alloc_region, par); |
ysr@777 | 3552 | |
ysr@777 | 3553 | if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { |
ysr@777 | 3554 | // Cannot allocate more regions for the given purpose. |
ysr@777 | 3555 | GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); |
ysr@777 | 3556 | // Is there an alternative? |
ysr@777 | 3557 | if (purpose != alt_purpose) { |
ysr@777 | 3558 | HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; |
ysr@777 | 3559 | // Has the alternative region not been aliased yet?
apetrusenko@980 | 3560 | if (alloc_region != alt_region && alt_region != NULL) { |
ysr@777 | 3561 | // Try to allocate in the alternative region. |
ysr@777 | 3562 | if (par) { |
ysr@777 | 3563 | block = alt_region->par_allocate(word_size); |
ysr@777 | 3564 | } else { |
ysr@777 | 3565 | block = alt_region->allocate(word_size); |
ysr@777 | 3566 | } |
ysr@777 | 3567 | // Make an alias. |
ysr@777 | 3568 | _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; |
apetrusenko@980 | 3569 | if (block != NULL) { |
apetrusenko@980 | 3570 | return block; |
apetrusenko@980 | 3571 | } |
apetrusenko@980 | 3572 | retire_alloc_region(alt_region, par); |
ysr@777 | 3573 | } |
ysr@777 | 3574 | // Both the allocation region and the alternative one are full |
ysr@777 | 3575 | // and aliased; replace them with a new allocation region.
ysr@777 | 3576 | purpose = alt_purpose; |
ysr@777 | 3577 | } else { |
ysr@777 | 3578 | set_gc_alloc_region(purpose, NULL); |
ysr@777 | 3579 | return NULL; |
ysr@777 | 3580 | } |
ysr@777 | 3581 | } |
ysr@777 | 3582 | |
ysr@777 | 3583 | // Now allocate a new region for allocation. |
ysr@777 | 3584 | alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); |
ysr@777 | 3585 | |
ysr@777 | 3586 | // let the caller handle alloc failure |
ysr@777 | 3587 | if (alloc_region != NULL) { |
ysr@777 | 3588 | |
ysr@777 | 3589 | assert(check_gc_alloc_regions(), "alloc regions messed up"); |
ysr@777 | 3590 | assert(alloc_region->saved_mark_at_top(), |
ysr@777 | 3591 | "Mark should have been saved already."); |
ysr@777 | 3592 | // We used to assert that the region was zero-filled here, but no |
ysr@777 | 3593 | // longer. |
ysr@777 | 3594 | |
ysr@777 | 3595 | // This must be done last: once it's installed, other threads may
ysr@777 | 3596 | // allocate in it (without holding the lock).
ysr@777 | 3597 | set_gc_alloc_region(purpose, alloc_region); |
ysr@777 | 3598 | |
ysr@777 | 3599 | if (par) { |
ysr@777 | 3600 | block = alloc_region->par_allocate(word_size); |
ysr@777 | 3601 | } else { |
ysr@777 | 3602 | block = alloc_region->allocate(word_size); |
ysr@777 | 3603 | } |
ysr@777 | 3604 | // Caller handles alloc failure. |
ysr@777 | 3605 | } else { |
ysr@777 | 3606 | // This also sets any other aliases of the same old alloc region to NULL.
ysr@777 | 3607 | set_gc_alloc_region(purpose, NULL); |
ysr@777 | 3608 | } |
ysr@777 | 3609 | return block; // May be NULL. |
ysr@777 | 3610 | } |
ysr@777 | 3611 | |
ysr@777 | 3612 | void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { |
ysr@777 | 3613 | HeapWord* block = NULL; |
ysr@777 | 3614 | size_t free_words; |
ysr@777 | 3615 | do { |
ysr@777 | 3616 | free_words = r->free()/HeapWordSize; |
ysr@777 | 3617 | // If there's too little space, no one can allocate, so we're done. |
ysr@777 | 3618 | if (free_words < (size_t)oopDesc::header_size()) return; |
ysr@777 | 3619 | // Otherwise, try to claim it. |
ysr@777 | 3620 | block = r->par_allocate(free_words); |
ysr@777 | 3621 | } while (block == NULL); |
jcoomes@916 | 3622 | fill_with_object(block, free_words); |
ysr@777 | 3623 | } |
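// Editor's note: the loop above races to claim *all* remaining words in
// one shot; losing a race only shrinks free(), so we re-read it and
// retry until we either win or less than a minimal object's worth is
// left. fill_with_object() then formats the claimed tail as a dummy
// object so heap walkers can still parse the region. Editor's sketch
// (min_object_size stands in for oopDesc::header_size()):
//
//   do {
//     free_words = r->free() / HeapWordSize;        // re-read each try
//     if (free_words < min_object_size) return;     // nothing to fill
//   } while ((block = r->par_allocate(free_words)) == NULL);
//   fill_with_object(block, free_words);            // keep heap parsable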
ysr@777 | 3624 | |
ysr@777 | 3625 | #ifndef PRODUCT |
ysr@777 | 3626 | bool GCLabBitMapClosure::do_bit(size_t offset) { |
ysr@777 | 3627 | HeapWord* addr = _bitmap->offsetToHeapWord(offset); |
ysr@777 | 3628 | guarantee(_cm->isMarked(oop(addr)), "it should be!"); |
ysr@777 | 3629 | return true; |
ysr@777 | 3630 | } |
ysr@777 | 3631 | #endif // PRODUCT |
ysr@777 | 3632 | |
ysr@1280 | 3633 | G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) |
ysr@1280 | 3634 | : _g1h(g1h), |
ysr@1280 | 3635 | _refs(g1h->task_queue(queue_num)), |
ysr@1280 | 3636 | _dcq(&g1h->dirty_card_queue_set()), |
ysr@1280 | 3637 | _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), |
ysr@1280 | 3638 | _g1_rem(g1h->g1_rem_set()), |
ysr@1280 | 3639 | _hash_seed(17), _queue_num(queue_num), |
ysr@1280 | 3640 | _term_attempts(0), |
ysr@1280 | 3641 | _age_table(false), |
ysr@777 | 3642 | #if G1_DETAILED_STATS |
ysr@1280 | 3643 | _pushes(0), _pops(0), _steals(0), |
ysr@1280 | 3644 | _steal_attempts(0), _overflow_pushes(0), |
ysr@777 | 3645 | #endif |
ysr@1280 | 3646 | _strong_roots_time(0), _term_time(0), |
ysr@1280 | 3647 | _alloc_buffer_waste(0), _undo_waste(0) |
ysr@1280 | 3648 | { |
ysr@1280 | 3649 | // We allocate young_cset_length() plus one entries, since we
ysr@1280 | 3650 | // "sacrifice" entry 0 to keep track of surviving bytes for
ysr@1280 | 3651 | // non-young regions (where the age is -1).
ysr@1280 | 3652 | // We also add a few padding elements at the beginning and at the
ysr@1280 | 3653 | // end in an attempt to eliminate cache contention.
ysr@1280 | 3654 | size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); |
ysr@1280 | 3655 | size_t array_length = PADDING_ELEM_NUM + |
ysr@1280 | 3656 | real_length + |
ysr@1280 | 3657 | PADDING_ELEM_NUM; |
ysr@1280 | 3658 | _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); |
ysr@1280 | 3659 | if (_surviving_young_words_base == NULL) |
ysr@1280 | 3660 | vm_exit_out_of_memory(array_length * sizeof(size_t), |
ysr@1280 | 3661 | "Not enough space for young surv histo."); |
ysr@1280 | 3662 | _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; |
ysr@1280 | 3663 | memset(_surviving_young_words, 0, real_length * sizeof(size_t)); |
ysr@1280 | 3664 | |
ysr@1280 | 3665 | _overflowed_refs = new OverflowQueue(10); |
ysr@1280 | 3666 | |
ysr@1280 | 3667 | _start = os::elapsedTime(); |
ysr@1280 | 3668 | } |
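// Editor's note: the PADDING_ELEM_NUM elements on each side keep the hot
// _surviving_young_words slots off cache lines shared with neighbouring
// allocations (false sharing between GC worker threads). Standalone
// sketch of the layout trick, assuming 64-byte cache lines:
//
//   #include <cstring>
//   static const size_t PAD = 64 / sizeof(size_t); // one line of padding
//   static size_t* alloc_padded(size_t n) {
//     size_t* base = new size_t[PAD + n + PAD];
//     size_t* arr  = base + PAD;        // hot data, line-isolated
//     memset(arr, 0, n * sizeof(size_t));
//     return arr;                       // free via delete[] (arr - PAD)
//   }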
ysr@777 | 3669 | |
ysr@777 | 3670 | G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : |
ysr@777 | 3671 | _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), |
ysr@777 | 3672 | _par_scan_state(par_scan_state) { } |
ysr@777 | 3673 | |
ysr@1280 | 3674 | template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { |
ysr@777 | 3675 | // This is called _after_ do_oop_work has been called, hence after |
ysr@777 | 3676 | // the object has been relocated to its new location and *p points |
ysr@777 | 3677 | // to its new location. |
ysr@777 | 3678 | |
ysr@1280 | 3679 | T heap_oop = oopDesc::load_heap_oop(p); |
ysr@1280 | 3680 | if (!oopDesc::is_null(heap_oop)) { |
ysr@1280 | 3681 | oop obj = oopDesc::decode_heap_oop(heap_oop); |
ysr@1280 | 3682 | assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), |
ysr@777 | 3683 | "shouldn't still be in the CSet if evacuation didn't fail."); |
ysr@1280 | 3684 | HeapWord* addr = (HeapWord*)obj; |
ysr@777 | 3685 | if (_g1->is_in_g1_reserved(addr)) |
ysr@777 | 3686 | _cm->grayRoot(oop(addr)); |
ysr@777 | 3687 | } |
ysr@777 | 3688 | } |
ysr@777 | 3689 | |
ysr@777 | 3690 | oop G1ParCopyHelper::copy_to_survivor_space(oop old) { |
ysr@777 | 3691 | size_t word_sz = old->size(); |
ysr@777 | 3692 | HeapRegion* from_region = _g1->heap_region_containing_raw(old); |
ysr@777 | 3693 | // +1 to make the -1 indexes valid... |
ysr@777 | 3694 | int young_index = from_region->young_index_in_cset()+1; |
ysr@777 | 3695 | assert( (from_region->is_young() && young_index > 0) || |
ysr@777 | 3696 | (!from_region->is_young() && young_index == 0), "invariant" ); |
ysr@777 | 3697 | G1CollectorPolicy* g1p = _g1->g1_policy(); |
ysr@777 | 3698 | markOop m = old->mark(); |
apetrusenko@980 | 3699 | int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() |
apetrusenko@980 | 3700 | : m->age(); |
apetrusenko@980 | 3701 | GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, |
ysr@777 | 3702 | word_sz); |
ysr@777 | 3703 | HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); |
ysr@777 | 3704 | oop obj = oop(obj_ptr); |
ysr@777 | 3705 | |
ysr@777 | 3706 | if (obj_ptr == NULL) { |
ysr@777 | 3707 | // This will either forward-to-self, or detect that someone else has |
ysr@777 | 3708 | // installed a forwarding pointer. |
ysr@777 | 3709 | OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); |
ysr@777 | 3710 | return _g1->handle_evacuation_failure_par(cl, old); |
ysr@777 | 3711 | } |
ysr@777 | 3712 | |
tonyp@961 | 3713 | // We're going to allocate linearly, so might as well prefetch ahead. |
tonyp@961 | 3714 | Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); |
tonyp@961 | 3715 | |
ysr@777 | 3716 | oop forward_ptr = old->forward_to_atomic(obj); |
ysr@777 | 3717 | if (forward_ptr == NULL) { |
ysr@777 | 3718 | Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); |
tonyp@961 | 3719 | if (g1p->track_object_age(alloc_purpose)) { |
tonyp@961 | 3720 | // We could simply do obj->incr_age(). However, this causes a |
tonyp@961 | 3721 | // performance issue. obj->incr_age() will first check whether |
tonyp@961 | 3722 | // the object has a displaced mark by checking its mark word; |
tonyp@961 | 3723 | // getting the mark word from the new location of the object |
tonyp@961 | 3724 | // stalls. So, given that we already have the mark word and we |
tonyp@961 | 3725 | // are about to install it anyway, it's better to increase the |
tonyp@961 | 3726 | // age on the mark word, when the object does not have a |
tonyp@961 | 3727 | // displaced mark word. We're not expecting many objects to have |
tonyp@961 | 3728 | // a displaced mark word, so that case is not optimized
tonyp@961 | 3729 | // further (it could be...) and we simply call obj->incr_age(). |
tonyp@961 | 3730 | |
tonyp@961 | 3731 | if (m->has_displaced_mark_helper()) { |
tonyp@961 | 3732 | // in this case, we have to install the mark word first, |
tonyp@961 | 3733 | // otherwise obj looks to be forwarded (the old mark word, |
tonyp@961 | 3734 | // which contains the forward pointer, was copied) |
tonyp@961 | 3735 | obj->set_mark(m); |
tonyp@961 | 3736 | obj->incr_age(); |
tonyp@961 | 3737 | } else { |
tonyp@961 | 3738 | m = m->incr_age(); |
apetrusenko@980 | 3739 | obj->set_mark(m); |
tonyp@961 | 3740 | } |
apetrusenko@980 | 3741 | _par_scan_state->age_table()->add(obj, word_sz); |
apetrusenko@980 | 3742 | } else { |
apetrusenko@980 | 3743 | obj->set_mark(m); |
tonyp@961 | 3744 | } |
tonyp@961 | 3745 | |
ysr@777 | 3746 | // preserve "next" mark bit |
ysr@777 | 3747 | if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { |
ysr@777 | 3748 | if (!use_local_bitmaps || |
ysr@777 | 3749 | !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { |
ysr@777 | 3750 | // if we couldn't mark it on the local bitmap (this happens when |
ysr@777 | 3751 | // the object was not allocated in the GCLab), we have to bite |
ysr@777 | 3752 | // the bullet and do the standard parallel mark |
ysr@777 | 3753 | _cm->markAndGrayObjectIfNecessary(obj); |
ysr@777 | 3754 | } |
ysr@777 | 3755 | #if 1 |
ysr@777 | 3756 | if (_g1->isMarkedNext(old)) { |
ysr@777 | 3757 | _cm->nextMarkBitMap()->parClear((HeapWord*)old); |
ysr@777 | 3758 | } |
ysr@777 | 3759 | #endif |
ysr@777 | 3760 | } |
ysr@777 | 3761 | |
ysr@777 | 3762 | size_t* surv_young_words = _par_scan_state->surviving_young_words(); |
ysr@777 | 3763 | surv_young_words[young_index] += word_sz; |
ysr@777 | 3764 | |
ysr@777 | 3765 | if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { |
ysr@777 | 3766 | arrayOop(old)->set_length(0); |
ysr@1280 | 3767 | oop* old_p = set_partial_array_mask(old); |
ysr@1280 | 3768 | _par_scan_state->push_on_queue(old_p); |
ysr@777 | 3769 | } else { |
tonyp@961 | 3770 | // No point in using the slower heap_region_containing() method, |
tonyp@961 | 3771 | // given that we know obj is in the heap. |
tonyp@961 | 3772 | _scanner->set_region(_g1->heap_region_containing_raw(obj)); |
ysr@777 | 3773 | obj->oop_iterate_backwards(_scanner); |
ysr@777 | 3774 | } |
ysr@777 | 3775 | } else { |
ysr@777 | 3776 | _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); |
ysr@777 | 3777 | obj = forward_ptr; |
ysr@777 | 3778 | } |
ysr@777 | 3779 | return obj; |
ysr@777 | 3780 | } |
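// Editor's note: the age bump above works on the already-loaded mark
// word to avoid re-reading the just-copied header (which stalls). Order
// matters when the mark is displaced: install the real header first,
// then incr_age(), otherwise the copied word still looks like a
// forwarding pointer. Editor's decision table:
//
//   displaced mark?  | action
//   -----------------+---------------------------------------
//   yes              | obj->set_mark(m);  obj->incr_age();
//   no               | m = m->incr_age(); obj->set_mark(m);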
ysr@777 | 3781 | |
ysr@1280 | 3782 | template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test> |
ysr@1280 | 3783 | template <class T> |
ysr@1280 | 3784 | void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test> |
ysr@1280 | 3785 | ::do_oop_work(T* p) { |
ysr@1280 | 3786 | oop obj = oopDesc::load_decode_heap_oop(p); |
ysr@777 | 3787 | assert(barrier != G1BarrierRS || obj != NULL, |
ysr@777 | 3788 | "Precondition: G1BarrierRS implies obj is nonNull"); |
ysr@777 | 3789 | |
tonyp@961 | 3790 | // The only time we skip the cset test is when we're scanning |
tonyp@961 | 3791 | // references popped from the queue. And we only push on the queue |
tonyp@961 | 3792 | // references that we know point into the cset, so no point in |
tonyp@961 | 3793 | // checking again. But we'll leave an assert here for peace of mind. |
tonyp@961 | 3794 | assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant"); |
tonyp@961 | 3795 | |
tonyp@961 | 3796 | // here the NULL check is implicit in the in_cset_fast_test() call
tonyp@961 | 3797 | if (skip_cset_test || _g1->in_cset_fast_test(obj)) { |
ysr@777 | 3798 | #if G1_REM_SET_LOGGING |
tonyp@961 | 3799 | gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " |
tonyp@961 | 3800 | "into CS.", p, (void*) obj); |
ysr@777 | 3801 | #endif |
tonyp@961 | 3802 | if (obj->is_forwarded()) { |
ysr@1280 | 3803 | oopDesc::encode_store_heap_oop(p, obj->forwardee()); |
tonyp@961 | 3804 | } else { |
ysr@1280 | 3805 | oop copy_oop = copy_to_survivor_space(obj); |
ysr@1280 | 3806 | oopDesc::encode_store_heap_oop(p, copy_oop); |
ysr@777 | 3807 | } |
tonyp@961 | 3808 | // When scanning the RS, we only care about objs in CS. |
tonyp@961 | 3809 | if (barrier == G1BarrierRS) { |
iveresov@1051 | 3810 | _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
ysr@777 | 3811 | } |
tonyp@961 | 3812 | } |
tonyp@961 | 3813 | |
tonyp@961 | 3814 | // When scanning moved objs, must look at all oops. |
tonyp@961 | 3815 | if (barrier == G1BarrierEvac && obj != NULL) { |
iveresov@1051 | 3816 | _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); |
tonyp@961 | 3817 | } |
tonyp@961 | 3818 | |
tonyp@961 | 3819 | if (do_gen_barrier && obj != NULL) { |
tonyp@961 | 3820 | par_do_barrier(p); |
tonyp@961 | 3821 | } |
tonyp@961 | 3822 | } |
tonyp@961 | 3823 | |
tonyp@961 | 3824 | template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p); |
ysr@1280 | 3825 | template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p); |
ysr@1280 | 3826 | |
ysr@1280 | 3827 | template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { |
tonyp@961 | 3828 | assert(has_partial_array_mask(p), "invariant"); |
tonyp@961 | 3829 | oop old = clear_partial_array_mask(p); |
ysr@777 | 3830 | assert(old->is_objArray(), "must be obj array"); |
ysr@777 | 3831 | assert(old->is_forwarded(), "must be forwarded"); |
ysr@777 | 3832 | assert(Universe::heap()->is_in_reserved(old), "must be in heap."); |
ysr@777 | 3833 | |
ysr@777 | 3834 | objArrayOop obj = objArrayOop(old->forwardee()); |
ysr@777 | 3835 | assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); |
ysr@777 | 3836 | // Process ParGCArrayScanChunk elements now |
ysr@777 | 3837 | // and push the remainder back onto queue |
ysr@777 | 3838 | int start = arrayOop(old)->length(); |
ysr@777 | 3839 | int end = obj->length(); |
ysr@777 | 3840 | int remainder = end - start; |
ysr@777 | 3841 | assert(start <= end, "just checking"); |
ysr@777 | 3842 | if (remainder > 2 * ParGCArrayScanChunk) { |
ysr@777 | 3843 | // Test above combines last partial chunk with a full chunk |
ysr@777 | 3844 | end = start + ParGCArrayScanChunk; |
ysr@777 | 3845 | arrayOop(old)->set_length(end); |
ysr@777 | 3846 | // Push remainder. |
ysr@1280 | 3847 | oop* old_p = set_partial_array_mask(old); |
ysr@1280 | 3848 | assert(arrayOop(old)->length() < obj->length(), "Empty push?"); |
ysr@1280 | 3849 | _par_scan_state->push_on_queue(old_p); |
ysr@777 | 3850 | } else { |
ysr@777 | 3851 | // Restore length so that the heap remains parsable in |
ysr@777 | 3852 | // case of evacuation failure. |
ysr@777 | 3853 | arrayOop(old)->set_length(end); |
ysr@777 | 3854 | } |
ysr@1280 | 3855 | _scanner.set_region(_g1->heap_region_containing_raw(obj)); |
ysr@777 | 3856 | // process our set of indices (include header in first chunk) |
ysr@1280 | 3857 | obj->oop_iterate_range(&_scanner, start, end); |
ysr@777 | 3858 | } |
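// Editor's note: scan progress for a big array is encoded in the length
// field of the *old* (from-space) copy: old->length() is the next start
// index, while the forwardee keeps the true length. Sketch of the
// chunking arithmetic, with C = ParGCArrayScanChunk:
//
//   int start = old_copy_length;   // elements already handed out
//   int end   = true_length;
//   if (end - start > 2 * C) {     // enough left for another pusher
//     end = start + C;             // take exactly one chunk
//     old_copy_length = end;       // publish progress, re-push the rest
//   }                              // else: take the whole remainder
//   // scan elements [start, end)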
ysr@777 | 3859 | |
ysr@777 | 3860 | class G1ParEvacuateFollowersClosure : public VoidClosure { |
ysr@777 | 3861 | protected: |
ysr@777 | 3862 | G1CollectedHeap* _g1h; |
ysr@777 | 3863 | G1ParScanThreadState* _par_scan_state; |
ysr@777 | 3864 | RefToScanQueueSet* _queues; |
ysr@777 | 3865 | ParallelTaskTerminator* _terminator; |
ysr@777 | 3866 | |
ysr@777 | 3867 | G1ParScanThreadState* par_scan_state() { return _par_scan_state; } |
ysr@777 | 3868 | RefToScanQueueSet* queues() { return _queues; } |
ysr@777 | 3869 | ParallelTaskTerminator* terminator() { return _terminator; } |
ysr@777 | 3870 | |
ysr@777 | 3871 | public: |
ysr@777 | 3872 | G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, |
ysr@777 | 3873 | G1ParScanThreadState* par_scan_state, |
ysr@777 | 3874 | RefToScanQueueSet* queues, |
ysr@777 | 3875 | ParallelTaskTerminator* terminator) |
ysr@777 | 3876 | : _g1h(g1h), _par_scan_state(par_scan_state), |
ysr@777 | 3877 | _queues(queues), _terminator(terminator) {} |
ysr@777 | 3878 | |
ysr@777 | 3879 | void do_void() { |
ysr@777 | 3880 | G1ParScanThreadState* pss = par_scan_state(); |
ysr@777 | 3881 | while (true) { |
ysr@777 | 3882 | pss->trim_queue(); |
ysr@777 | 3883 | IF_G1_DETAILED_STATS(pss->note_steal_attempt()); |
ysr@1280 | 3884 | |
ysr@1280 | 3885 | StarTask stolen_task; |
ysr@1280 | 3886 | if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { |
ysr@777 | 3887 | IF_G1_DETAILED_STATS(pss->note_steal()); |
tonyp@961 | 3888 | |
tonyp@961 | 3889 | // slightly paranoid tests; I'm trying to catch potential |
tonyp@961 | 3890 | // problems before we go into push_on_queue to know where the |
tonyp@961 | 3891 | // problem is coming from |
ysr@1280 | 3892 | assert((oop*)stolen_task != NULL, "Error"); |
ysr@1280 | 3893 | if (stolen_task.is_narrow()) { |
ysr@1280 | 3894 | assert(UseCompressedOops, "Error"); |
ysr@1280 | 3895 | narrowOop* p = (narrowOop*) stolen_task; |
ysr@1280 | 3896 | assert(has_partial_array_mask(p) || |
ysr@1280 | 3897 | _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error"); |
ysr@1280 | 3898 | pss->push_on_queue(p); |
ysr@1280 | 3899 | } else { |
ysr@1280 | 3900 | oop* p = (oop*) stolen_task; |
ysr@1280 | 3901 | assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error"); |
ysr@1280 | 3902 | pss->push_on_queue(p); |
ysr@1280 | 3903 | } |
ysr@777 | 3904 | continue; |
ysr@777 | 3905 | } |
ysr@777 | 3906 | pss->start_term_time(); |
ysr@777 | 3907 | if (terminator()->offer_termination()) break; |
ysr@777 | 3908 | pss->end_term_time(); |
ysr@777 | 3909 | } |
ysr@777 | 3910 | pss->end_term_time(); |
ysr@777 | 3911 | pss->retire_alloc_buffers(); |
ysr@777 | 3912 | } |
ysr@777 | 3913 | }; |
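// Editor's note: do_void() above is the classic work-stealing tripod:
// (1) drain the local queue, (2) try to steal from a random victim,
// (3) offer termination and loop again if the terminator refuses
// (meaning some other worker still has work). Editor's skeleton:
//
//   for (;;) {
//     drain_local_queue();
//     if (steal_from_someone()) continue;  // more work appeared
//     if (offer_termination()) break;      // all workers are idle
//   }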
ysr@777 | 3914 | |
ysr@777 | 3915 | class G1ParTask : public AbstractGangTask { |
ysr@777 | 3916 | protected: |
ysr@777 | 3917 | G1CollectedHeap* _g1h; |
ysr@777 | 3918 | RefToScanQueueSet *_queues; |
ysr@777 | 3919 | ParallelTaskTerminator _terminator; |
ysr@1280 | 3920 | int _n_workers; |
ysr@777 | 3921 | |
ysr@777 | 3922 | Mutex _stats_lock; |
ysr@777 | 3923 | Mutex* stats_lock() { return &_stats_lock; } |
ysr@777 | 3924 | |
ysr@777 | 3925 | size_t getNCards() { |
ysr@777 | 3926 | return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) |
ysr@777 | 3927 | / G1BlockOffsetSharedArray::N_bytes; |
ysr@777 | 3928 | } |
ysr@777 | 3929 | |
ysr@777 | 3930 | public: |
ysr@777 | 3931 | G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) |
ysr@777 | 3932 | : AbstractGangTask("G1 collection"), |
ysr@777 | 3933 | _g1h(g1h), |
ysr@777 | 3934 | _queues(task_queues), |
ysr@777 | 3935 | _terminator(workers, _queues), |
ysr@1280 | 3936 | _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), |
ysr@1280 | 3937 | _n_workers(workers) |
ysr@777 | 3938 | {} |
ysr@777 | 3939 | |
ysr@777 | 3940 | RefToScanQueueSet* queues() { return _queues; } |
ysr@777 | 3941 | |
ysr@777 | 3942 | RefToScanQueue *work_queue(int i) { |
ysr@777 | 3943 | return queues()->queue(i); |
ysr@777 | 3944 | } |
ysr@777 | 3945 | |
ysr@777 | 3946 | void work(int i) { |
ysr@1280 | 3947 | if (i >= _n_workers) return; // no work needed this round |
ysr@777 | 3948 | ResourceMark rm; |
ysr@777 | 3949 | HandleMark hm; |
ysr@777 | 3950 | |
tonyp@961 | 3951 | G1ParScanThreadState pss(_g1h, i); |
tonyp@961 | 3952 | G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); |
tonyp@961 | 3953 | G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); |
tonyp@961 | 3954 | G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); |
ysr@777 | 3955 | |
ysr@777 | 3956 | pss.set_evac_closure(&scan_evac_cl); |
ysr@777 | 3957 | pss.set_evac_failure_closure(&evac_failure_cl); |
ysr@777 | 3958 | pss.set_partial_scan_closure(&partial_scan_cl); |
ysr@777 | 3959 | |
ysr@777 | 3960 | G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); |
ysr@777 | 3961 | G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); |
ysr@777 | 3962 | G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); |
iveresov@1051 | 3963 | |
ysr@777 | 3964 | G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); |
ysr@777 | 3965 | G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); |
ysr@777 | 3966 | G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); |
ysr@777 | 3967 | |
ysr@777 | 3968 | OopsInHeapRegionClosure *scan_root_cl; |
ysr@777 | 3969 | OopsInHeapRegionClosure *scan_perm_cl; |
ysr@777 | 3970 | OopsInHeapRegionClosure *scan_so_cl; |
ysr@777 | 3971 | |
ysr@777 | 3972 | if (_g1h->g1_policy()->should_initiate_conc_mark()) { |
ysr@777 | 3973 | scan_root_cl = &scan_mark_root_cl; |
ysr@777 | 3974 | scan_perm_cl = &scan_mark_perm_cl; |
ysr@777 | 3975 | scan_so_cl = &scan_mark_heap_rs_cl; |
ysr@777 | 3976 | } else { |
ysr@777 | 3977 | scan_root_cl = &only_scan_root_cl; |
ysr@777 | 3978 | scan_perm_cl = &only_scan_perm_cl; |
ysr@777 | 3979 | scan_so_cl = &only_scan_heap_rs_cl; |
ysr@777 | 3980 | } |
ysr@777 | 3981 | |
ysr@777 | 3982 | pss.start_strong_roots(); |
ysr@777 | 3983 | _g1h->g1_process_strong_roots(/* not collecting perm */ false, |
ysr@777 | 3984 | SharedHeap::SO_AllClasses, |
ysr@777 | 3985 | scan_root_cl, |
ysr@777 | 3986 | &only_scan_heap_rs_cl, |
ysr@777 | 3987 | scan_so_cl, |
ysr@777 | 3988 | scan_perm_cl, |
ysr@777 | 3989 | i); |
ysr@777 | 3990 | pss.end_strong_roots(); |
ysr@777 | 3991 | { |
ysr@777 | 3992 | double start = os::elapsedTime(); |
ysr@777 | 3993 | G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); |
ysr@777 | 3994 | evac.do_void(); |
ysr@777 | 3995 | double elapsed_ms = (os::elapsedTime()-start)*1000.0; |
ysr@777 | 3996 | double term_ms = pss.term_time()*1000.0; |
ysr@777 | 3997 | _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); |
ysr@777 | 3998 | _g1h->g1_policy()->record_termination_time(i, term_ms); |
ysr@777 | 3999 | } |
johnc@1186 | 4000 | if (G1UseSurvivorSpaces) { |
apetrusenko@980 | 4001 | _g1h->g1_policy()->record_thread_age_table(pss.age_table()); |
apetrusenko@980 | 4002 | } |
ysr@777 | 4003 | _g1h->update_surviving_young_words(pss.surviving_young_words()+1); |
ysr@777 | 4004 | |
ysr@777 | 4005 | // Clean up any par-expanded rem sets. |
ysr@777 | 4006 | HeapRegionRemSet::par_cleanup(); |
ysr@777 | 4007 | |
ysr@777 | 4008 | MutexLocker x(stats_lock()); |
ysr@777 | 4009 | if (ParallelGCVerbose) { |
ysr@777 | 4010 | gclog_or_tty->print("Thread %d complete:\n", i); |
ysr@777 | 4011 | #if G1_DETAILED_STATS |
ysr@777 | 4012 | gclog_or_tty->print(" Pushes: %7d Pops: %7d Overflows: %7d Steals %7d (in %d attempts)\n", |
ysr@777 | 4013 | pss.pushes(), |
ysr@777 | 4014 | pss.pops(), |
ysr@777 | 4015 | pss.overflow_pushes(), |
ysr@777 | 4016 | pss.steals(), |
ysr@777 | 4017 | pss.steal_attempts()); |
ysr@777 | 4018 | #endif |
ysr@777 | 4019 | double elapsed = pss.elapsed(); |
ysr@777 | 4020 | double strong_roots = pss.strong_roots_time(); |
ysr@777 | 4021 | double term = pss.term_time(); |
ysr@777 | 4022 | gclog_or_tty->print(" Elapsed: %7.2f ms.\n" |
ysr@777 | 4023 | " Strong roots: %7.2f ms (%6.2f%%)\n" |
ysr@777 | 4024 | " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", |
ysr@777 | 4025 | elapsed * 1000.0, |
ysr@777 | 4026 | strong_roots * 1000.0, (strong_roots*100.0/elapsed), |
ysr@777 | 4027 | term * 1000.0, (term*100.0/elapsed), |
ysr@777 | 4028 | pss.term_attempts()); |
ysr@777 | 4029 | size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste(); |
ysr@777 | 4030 | gclog_or_tty->print(" Waste: %8dK\n" |
ysr@777 | 4031 | " Alloc Buffer: %8dK\n" |
ysr@777 | 4032 | " Undo: %8dK\n", |
ysr@777 | 4033 | (total_waste * HeapWordSize) / K, |
ysr@777 | 4034 | (pss.alloc_buffer_waste() * HeapWordSize) / K, |
ysr@777 | 4035 | (pss.undo_waste() * HeapWordSize) / K); |
ysr@777 | 4036 | } |
ysr@777 | 4037 | |
ysr@777 | 4038 | assert(pss.refs_to_scan() == 0, "Task queue should be empty"); |
ysr@777 | 4039 | assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); |
ysr@777 | 4040 | } |
ysr@777 | 4041 | }; |
ysr@777 | 4042 | |
ysr@777 | 4043 | // *** Common G1 Evacuation Stuff |
ysr@777 | 4044 | |
ysr@777 | 4045 | void |
ysr@777 | 4046 | G1CollectedHeap:: |
ysr@777 | 4047 | g1_process_strong_roots(bool collecting_perm_gen, |
ysr@777 | 4048 | SharedHeap::ScanningOption so, |
ysr@777 | 4049 | OopClosure* scan_non_heap_roots, |
ysr@777 | 4050 | OopsInHeapRegionClosure* scan_rs, |
ysr@777 | 4051 | OopsInHeapRegionClosure* scan_so, |
ysr@777 | 4052 | OopsInGenClosure* scan_perm, |
ysr@777 | 4053 | int worker_i) { |
ysr@777 | 4054 | // First scan the strong roots, including the perm gen. |
ysr@777 | 4055 | double ext_roots_start = os::elapsedTime(); |
ysr@777 | 4056 | double closure_app_time_sec = 0.0; |
ysr@777 | 4057 | |
ysr@777 | 4058 | BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); |
ysr@777 | 4059 | BufferingOopsInGenClosure buf_scan_perm(scan_perm); |
ysr@777 | 4060 | buf_scan_perm.set_generation(perm_gen()); |
ysr@777 | 4061 | |
jrose@1424 | 4062 | // Walk the code cache w/o buffering, because StarTask cannot handle |
jrose@1424 | 4063 | // unaligned oop locations. |
jrose@1424 | 4064 | CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); |
jrose@1424 | 4065 | |
jrose@1424 | 4066 | process_strong_roots(false, // no scoping; this is parallel code |
jrose@1424 | 4067 | collecting_perm_gen, so, |
ysr@777 | 4068 | &buf_scan_non_heap_roots, |
jrose@1424 | 4069 | &eager_scan_code_roots, |
ysr@777 | 4070 | &buf_scan_perm); |
ysr@777 | 4071 | // Finish up any enqueued closure apps. |
ysr@777 | 4072 | buf_scan_non_heap_roots.done(); |
ysr@777 | 4073 | buf_scan_perm.done(); |
ysr@777 | 4074 | double ext_roots_end = os::elapsedTime(); |
ysr@777 | 4075 | g1_policy()->reset_obj_copy_time(worker_i); |
ysr@777 | 4076 | double obj_copy_time_sec = |
ysr@777 | 4077 | buf_scan_non_heap_roots.closure_app_seconds() + |
ysr@777 | 4078 | buf_scan_perm.closure_app_seconds(); |
ysr@777 | 4079 | g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); |
ysr@777 | 4080 | double ext_root_time_ms = |
ysr@777 | 4081 | ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; |
ysr@777 | 4082 | g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); |
ysr@777 | 4083 | |
ysr@777 | 4084 | // Scan strong roots in mark stack. |
ysr@777 | 4085 | if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { |
ysr@777 | 4086 | concurrent_mark()->oops_do(scan_non_heap_roots); |
ysr@777 | 4087 | } |
ysr@777 | 4088 | double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; |
ysr@777 | 4089 | g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); |
ysr@777 | 4090 | |
ysr@777 | 4091 | // XXX What should this be doing in the parallel case? |
ysr@777 | 4092 | g1_policy()->record_collection_pause_end_CH_strong_roots(); |
ysr@777 | 4093 | if (scan_so != NULL) { |
ysr@777 | 4094 | scan_scan_only_set(scan_so, worker_i); |
ysr@777 | 4095 | } |
ysr@777 | 4096 | // Now scan the complement of the collection set. |
ysr@777 | 4097 | if (scan_rs != NULL) { |
ysr@777 | 4098 | g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); |
ysr@777 | 4099 | } |
ysr@777 | 4100 | // Finish with the ref_processor roots. |
ysr@777 | 4101 | if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { |
ysr@777 | 4102 | ref_processor()->oops_do(scan_non_heap_roots); |
ysr@777 | 4103 | } |
ysr@777 | 4104 | g1_policy()->record_collection_pause_end_G1_strong_roots(); |
ysr@777 | 4105 | _process_strong_tasks->all_tasks_completed(); |
ysr@777 | 4106 | } |
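// Editor's note: because the buffering closures batch the actual copying
// work, the "external root scan" time has to be recovered by
// subtraction. Editor's restatement of the accounting above:
//
//   obj_copy_time = buf_scan_non_heap_roots.closure_app_seconds()
//                 + buf_scan_perm.closure_app_seconds();
//   ext_root_time = (ext_roots_end - ext_roots_start) - obj_copy_time;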
ysr@777 | 4107 | |
ysr@777 | 4108 | void |
ysr@777 | 4109 | G1CollectedHeap::scan_scan_only_region(HeapRegion* r, |
ysr@777 | 4110 | OopsInHeapRegionClosure* oc, |
ysr@777 | 4111 | int worker_i) { |
ysr@777 | 4112 | HeapWord* startAddr = r->bottom(); |
ysr@777 | 4113 | HeapWord* endAddr = r->used_region().end(); |
ysr@777 | 4114 | |
ysr@777 | 4115 | oc->set_region(r); |
ysr@777 | 4116 | |
ysr@777 | 4117 | HeapWord* p = r->bottom(); |
ysr@777 | 4118 | HeapWord* t = r->top(); |
ysr@777 | 4119 | guarantee( p == r->next_top_at_mark_start(), "invariant" ); |
ysr@777 | 4120 | while (p < t) { |
ysr@777 | 4121 | oop obj = oop(p); |
ysr@777 | 4122 | p += obj->oop_iterate(oc); |
ysr@777 | 4123 | } |
ysr@777 | 4124 | } |
ysr@777 | 4125 | |
ysr@777 | 4126 | void |
ysr@777 | 4127 | G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, |
ysr@777 | 4128 | int worker_i) { |
ysr@777 | 4129 | double start = os::elapsedTime(); |
ysr@777 | 4130 | |
ysr@777 | 4131 | BufferingOopsInHeapRegionClosure boc(oc); |
ysr@777 | 4132 | |
ysr@777 | 4133 | FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); |
ysr@777 | 4134 | FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); |
ysr@777 | 4135 | |
ysr@777 | 4136 | OopsInHeapRegionClosure *foc; |
ysr@777 | 4137 | if (g1_policy()->should_initiate_conc_mark()) |
ysr@777 | 4138 | foc = &scan_and_mark; |
ysr@777 | 4139 | else |
ysr@777 | 4140 | foc = &scan_only; |
ysr@777 | 4141 | |
ysr@777 | 4142 | HeapRegion* hr; |
ysr@777 | 4143 | int n = 0; |
ysr@777 | 4144 | while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { |
ysr@777 | 4145 | scan_scan_only_region(hr, foc, worker_i); |
ysr@777 | 4146 | ++n; |
ysr@777 | 4147 | } |
ysr@777 | 4148 | boc.done(); |
ysr@777 | 4149 | |
ysr@777 | 4150 | double closure_app_s = boc.closure_app_seconds(); |
ysr@777 | 4151 | g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); |
ysr@777 | 4152 | double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; |
ysr@777 | 4153 | g1_policy()->record_scan_only_time(worker_i, ms, n); |
ysr@777 | 4154 | } |
ysr@777 | 4155 | |
ysr@777 | 4156 | void |
ysr@777 | 4157 | G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, |
ysr@777 | 4158 | OopClosure* non_root_closure) { |
jrose@1424 | 4159 | CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); |
jrose@1424 | 4160 | SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); |
ysr@777 | 4161 | } |
ysr@777 | 4162 | |
ysr@777 | 4163 | |
ysr@777 | 4164 | class SaveMarksClosure: public HeapRegionClosure { |
ysr@777 | 4165 | public: |
ysr@777 | 4166 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 4167 | r->save_marks(); |
ysr@777 | 4168 | return false; |
ysr@777 | 4169 | } |
ysr@777 | 4170 | }; |
ysr@777 | 4171 | |
ysr@777 | 4172 | void G1CollectedHeap::save_marks() { |
ysr@777 | 4173 | if (ParallelGCThreads == 0) { |
ysr@777 | 4174 | SaveMarksClosure sm; |
ysr@777 | 4175 | heap_region_iterate(&sm); |
ysr@777 | 4176 | } |
ysr@777 | 4177 | // We do this even in the parallel case |
ysr@777 | 4178 | perm_gen()->save_marks(); |
ysr@777 | 4179 | } |
ysr@777 | 4180 | |
ysr@777 | 4181 | void G1CollectedHeap::evacuate_collection_set() { |
ysr@777 | 4182 | set_evacuation_failed(false); |
ysr@777 | 4183 | |
ysr@777 | 4184 | g1_rem_set()->prepare_for_oops_into_collection_set_do(); |
ysr@777 | 4185 | concurrent_g1_refine()->set_use_cache(false); |
johnc@1324 | 4186 | concurrent_g1_refine()->clear_hot_cache_claimed_index(); |
johnc@1324 | 4187 | |
ysr@777 | 4188 | int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); |
ysr@777 | 4189 | set_par_threads(n_workers); |
ysr@777 | 4190 | G1ParTask g1_par_task(this, n_workers, _task_queues); |
ysr@777 | 4191 | |
ysr@777 | 4192 | init_for_evac_failure(NULL); |
ysr@777 | 4193 | |
ysr@777 | 4194 | rem_set()->prepare_for_younger_refs_iterate(true); |
iveresov@1051 | 4195 | |
iveresov@1051 | 4196 | assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); |
ysr@777 | 4197 | double start_par = os::elapsedTime(); |
ysr@777 | 4198 | if (ParallelGCThreads > 0) { |
ysr@777 | 4199 | // The individual threads will set their evac-failure closures. |
jrose@1424 | 4200 | StrongRootsScope srs(this); |
ysr@777 | 4201 | workers()->run_task(&g1_par_task); |
ysr@777 | 4202 | } else { |
jrose@1424 | 4203 | StrongRootsScope srs(this); |
ysr@777 | 4204 | g1_par_task.work(0); |
ysr@777 | 4205 | } |
ysr@777 | 4206 | |
ysr@777 | 4207 | double par_time = (os::elapsedTime() - start_par) * 1000.0; |
ysr@777 | 4208 | g1_policy()->record_par_time(par_time); |
ysr@777 | 4209 | set_par_threads(0); |
ysr@777 | 4210 | // Is this the right thing to do here? We don't save marks |
ysr@777 | 4211 | // on individual heap regions when we allocate from |
ysr@777 | 4212 | // them in parallel, so this seems like the correct place for this. |
apetrusenko@980 | 4213 | retire_all_alloc_regions(); |
ysr@777 | 4214 | { |
ysr@777 | 4215 | G1IsAliveClosure is_alive(this); |
ysr@777 | 4216 | G1KeepAliveClosure keep_alive(this); |
ysr@777 | 4217 | JNIHandles::weak_oops_do(&is_alive, &keep_alive); |
ysr@777 | 4218 | } |
apetrusenko@1375 | 4219 | release_gc_alloc_regions(false /* totally */); |
ysr@777 | 4220 | g1_rem_set()->cleanup_after_oops_into_collection_set_do(); |
iveresov@1051 | 4221 | |
johnc@1324 | 4222 | concurrent_g1_refine()->clear_hot_cache(); |
ysr@777 | 4223 | concurrent_g1_refine()->set_use_cache(true); |
ysr@777 | 4224 | |
ysr@777 | 4225 | finalize_for_evac_failure(); |
ysr@777 | 4226 | |
ysr@777 | 4227 | // Must do this before removing self-forwarding pointers, which clears |
ysr@777 | 4228 | // the per-region evac-failure flags. |
ysr@777 | 4229 | concurrent_mark()->complete_marking_in_collection_set(); |
ysr@777 | 4230 | |
ysr@777 | 4231 | if (evacuation_failed()) { |
ysr@777 | 4232 | remove_self_forwarding_pointers(); |
ysr@777 | 4233 | if (PrintGCDetails) { |
ysr@777 | 4234 | gclog_or_tty->print(" (evacuation failed)"); |
ysr@777 | 4235 | } else if (PrintGC) { |
ysr@777 | 4236 | gclog_or_tty->print("--"); |
ysr@777 | 4237 | } |
ysr@777 | 4238 | } |
ysr@777 | 4239 | |
iveresov@1051 | 4240 | if (G1DeferredRSUpdate) { |
iveresov@1051 | 4241 | RedirtyLoggedCardTableEntryFastClosure redirty; |
iveresov@1051 | 4242 | dirty_card_queue_set().set_closure(&redirty); |
iveresov@1051 | 4243 | dirty_card_queue_set().apply_closure_to_all_completed_buffers(); |
iveresov@1546 | 4244 | |
iveresov@1546 | 4245 | DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); |
iveresov@1546 | 4246 | dcq.merge_bufferlists(&dirty_card_queue_set()); |
iveresov@1051 | 4247 | assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); |
iveresov@1051 | 4248 | } |
ysr@777 | 4249 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
ysr@777 | 4250 | } |
ysr@777 | 4251 | |
ysr@777 | 4252 | void G1CollectedHeap::free_region(HeapRegion* hr) { |
ysr@777 | 4253 | size_t pre_used = 0; |
ysr@777 | 4254 | size_t cleared_h_regions = 0; |
ysr@777 | 4255 | size_t freed_regions = 0; |
ysr@777 | 4256 | UncleanRegionList local_list; |
ysr@777 | 4257 | |
ysr@777 | 4258 | HeapWord* start = hr->bottom(); |
ysr@777 | 4259 | HeapWord* end = hr->prev_top_at_mark_start(); |
ysr@777 | 4260 | size_t used_bytes = hr->used(); |
ysr@777 | 4261 | size_t live_bytes = hr->max_live_bytes(); |
ysr@777 | 4262 | if (used_bytes > 0) { |
ysr@777 | 4263 | guarantee( live_bytes <= used_bytes, "invariant" ); |
ysr@777 | 4264 | } else { |
ysr@777 | 4265 | guarantee( live_bytes == 0, "invariant" ); |
ysr@777 | 4266 | } |
ysr@777 | 4267 | |
ysr@777 | 4268 | size_t garbage_bytes = used_bytes - live_bytes; |
ysr@777 | 4269 | if (garbage_bytes > 0) |
ysr@777 | 4270 | g1_policy()->decrease_known_garbage_bytes(garbage_bytes); |
ysr@777 | 4271 | |
ysr@777 | 4272 | free_region_work(hr, pre_used, cleared_h_regions, freed_regions, |
ysr@777 | 4273 | &local_list); |
ysr@777 | 4274 | finish_free_region_work(pre_used, cleared_h_regions, freed_regions, |
ysr@777 | 4275 | &local_list); |
ysr@777 | 4276 | } |
ysr@777 | 4277 | |
ysr@777 | 4278 | void |
ysr@777 | 4279 | G1CollectedHeap::free_region_work(HeapRegion* hr, |
ysr@777 | 4280 | size_t& pre_used, |
ysr@777 | 4281 | size_t& cleared_h_regions, |
ysr@777 | 4282 | size_t& freed_regions, |
ysr@777 | 4283 | UncleanRegionList* list, |
ysr@777 | 4284 | bool par) { |
ysr@777 | 4285 | pre_used += hr->used(); |
ysr@777 | 4286 | if (hr->isHumongous()) { |
ysr@777 | 4287 | assert(hr->startsHumongous(), |
ysr@777 | 4288 | "Only the start of a humongous region should be freed."); |
ysr@777 | 4289 | int ind = _hrs->find(hr); |
ysr@777 | 4290 | assert(ind != -1, "Should have an index."); |
ysr@777 | 4291 | // Clear the start region. |
ysr@777 | 4292 | hr->hr_clear(par, true /*clear_space*/); |
ysr@777 | 4293 | list->insert_before_head(hr); |
ysr@777 | 4294 | cleared_h_regions++; |
ysr@777 | 4295 | freed_regions++; |
ysr@777 | 4296 | // Clear any continued regions. |
ysr@777 | 4297 | ind++; |
ysr@777 | 4298 | while ((size_t)ind < n_regions()) { |
ysr@777 | 4299 | HeapRegion* hrc = _hrs->at(ind); |
ysr@777 | 4300 | if (!hrc->continuesHumongous()) break; |
ysr@777 | 4301 | // Otherwise, it continues the humongous region.
ysr@777 | 4302 | assert(hrc->humongous_start_region() == hr, "Huh?"); |
ysr@777 | 4303 | hrc->hr_clear(par, true /*clear_space*/); |
ysr@777 | 4304 | cleared_h_regions++; |
ysr@777 | 4305 | freed_regions++; |
ysr@777 | 4306 | list->insert_before_head(hrc); |
ysr@777 | 4307 | ind++; |
ysr@777 | 4308 | } |
ysr@777 | 4309 | } else { |
ysr@777 | 4310 | hr->hr_clear(par, true /*clear_space*/); |
ysr@777 | 4311 | list->insert_before_head(hr); |
ysr@777 | 4312 | freed_regions++; |
ysr@777 | 4313 | // If we're using clear2, this should not be enabled. |
ysr@777 | 4314 | // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); |
ysr@777 | 4315 | } |
ysr@777 | 4316 | } |
ysr@777 | 4317 | |
ysr@777 | 4318 | void G1CollectedHeap::finish_free_region_work(size_t pre_used, |
ysr@777 | 4319 | size_t cleared_h_regions, |
ysr@777 | 4320 | size_t freed_regions, |
ysr@777 | 4321 | UncleanRegionList* list) { |
ysr@777 | 4322 | if (list != NULL && list->sz() > 0) { |
ysr@777 | 4323 | prepend_region_list_on_unclean_list(list); |
ysr@777 | 4324 | } |
ysr@777 | 4325 | // Acquire a lock, if we're parallel, to update possibly-shared |
ysr@777 | 4326 | // variables. |
ysr@777 | 4327 | Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL; |
ysr@777 | 4328 | { |
ysr@777 | 4329 | MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4330 | _summary_bytes_used -= pre_used; |
ysr@777 | 4331 | _num_humongous_regions -= (int) cleared_h_regions; |
ysr@777 | 4332 | _free_regions += freed_regions; |
ysr@777 | 4333 | } |
ysr@777 | 4334 | } |
ysr@777 | 4335 | |
ysr@777 | 4336 | |
ysr@777 | 4337 | void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { |
ysr@777 | 4338 | while (list != NULL) { |
ysr@777 | 4339 | guarantee( list->is_young(), "invariant" ); |
ysr@777 | 4340 | |
ysr@777 | 4341 | HeapWord* bottom = list->bottom(); |
ysr@777 | 4342 | HeapWord* end = list->end(); |
ysr@777 | 4343 | MemRegion mr(bottom, end); |
ysr@777 | 4344 | ct_bs->dirty(mr); |
ysr@777 | 4345 | |
ysr@777 | 4346 | list = list->get_next_young_region(); |
ysr@777 | 4347 | } |
ysr@777 | 4348 | } |
ysr@777 | 4349 | |
apetrusenko@1231 | 4350 | |
apetrusenko@1231 | 4351 | class G1ParCleanupCTTask : public AbstractGangTask { |
apetrusenko@1231 | 4352 | CardTableModRefBS* _ct_bs; |
apetrusenko@1231 | 4353 | G1CollectedHeap* _g1h; |
apetrusenko@1375 | 4354 | HeapRegion* volatile _so_head; |
apetrusenko@1375 | 4355 | HeapRegion* volatile _su_head; |
apetrusenko@1231 | 4356 | public: |
apetrusenko@1231 | 4357 | G1ParCleanupCTTask(CardTableModRefBS* ct_bs, |
apetrusenko@1375 | 4358 | G1CollectedHeap* g1h, |
apetrusenko@1375 | 4359 | HeapRegion* scan_only_list, |
apetrusenko@1375 | 4360 | HeapRegion* survivor_list) : |
apetrusenko@1231 | 4361 | AbstractGangTask("G1 Par Cleanup CT Task"), |
apetrusenko@1231 | 4362 | _ct_bs(ct_bs), |
apetrusenko@1375 | 4363 | _g1h(g1h), |
apetrusenko@1375 | 4364 | _so_head(scan_only_list), |
apetrusenko@1375 | 4365 | _su_head(survivor_list) |
apetrusenko@1231 | 4366 | { } |
apetrusenko@1231 | 4367 | |
apetrusenko@1231 | 4368 | void work(int i) { |
apetrusenko@1231 | 4369 | HeapRegion* r; |
apetrusenko@1231 | 4370 | while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
apetrusenko@1231 | 4371 | clear_cards(r); |
apetrusenko@1231 | 4372 | } |
apetrusenko@1375 | 4373 | // Redirty the cards of the scan-only and survivor regions. |
apetrusenko@1375 | 4374 | dirty_list(&this->_so_head); |
apetrusenko@1375 | 4375 | dirty_list(&this->_su_head); |
apetrusenko@1375 | 4376 | } |
apetrusenko@1375 | 4377 | |
apetrusenko@1231 | 4378 | void clear_cards(HeapRegion* r) { |
apetrusenko@1231 | 4379 | // Cards for Survivor and Scan-Only regions will be dirtied later. |
apetrusenko@1231 | 4380 | if (!r->is_scan_only() && !r->is_survivor()) { |
apetrusenko@1231 | 4381 | _ct_bs->clear(MemRegion(r->bottom(), r->end())); |
apetrusenko@1231 | 4382 | } |
apetrusenko@1231 | 4383 | } |
apetrusenko@1375 | 4384 | |
apetrusenko@1375 | 4385 | void dirty_list(HeapRegion* volatile * head_ptr) { |
apetrusenko@1375 | 4386 | HeapRegion* head; |
apetrusenko@1375 | 4387 | do { |
apetrusenko@1375 | 4388 | // Pop region off the list. |
apetrusenko@1375 | 4389 | head = *head_ptr; |
apetrusenko@1375 | 4390 | if (head != NULL) { |
apetrusenko@1375 | 4391 | HeapRegion* r = (HeapRegion*) |
apetrusenko@1375 | 4392 | Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); |
apetrusenko@1375 | 4393 | if (r == head) { |
apetrusenko@1375 | 4394 | assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); |
apetrusenko@1375 | 4395 | _ct_bs->dirty(MemRegion(r->bottom(), r->end())); |
apetrusenko@1375 | 4396 | } |
apetrusenko@1375 | 4397 | } |
apetrusenko@1375 | 4398 | } while (*head_ptr != NULL); |
apetrusenko@1375 | 4399 | } |
apetrusenko@1231 | 4400 | }; |
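// Editor's note: dirty_list() above pops a shared singly-linked list
// lock-free via cmpxchg on the head (safe here because we are at a
// safepoint, so nodes are not recycled and ABA cannot occur).
// Standalone sketch with std::atomic, Node standing in for HeapRegion:
//
//   #include <atomic>
//   struct Node { Node* next; };
//   static std::atomic<Node*> _head;
//   static Node* pop() {
//     Node* h = _head.load();
//     while (h != 0 && !_head.compare_exchange_weak(h, h->next))
//       ;                   // lost the race: h was refreshed, retry
//     return h;             // NULL when the list is empty
//   }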
apetrusenko@1231 | 4401 | |
apetrusenko@1231 | 4402 | |
apetrusenko@1375 | 4403 | #ifndef PRODUCT |
apetrusenko@1375 | 4404 | class G1VerifyCardTableCleanup: public HeapRegionClosure { |
apetrusenko@1375 | 4405 | CardTableModRefBS* _ct_bs; |
apetrusenko@1375 | 4406 | public: |
apetrusenko@1375 | 4407 | G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) |
apetrusenko@1375 | 4408 | : _ct_bs(ct_bs) |
apetrusenko@1375 | 4409 | { } |
apetrusenko@1375 | 4410 | virtual bool doHeapRegion(HeapRegion* r) |
apetrusenko@1375 | 4411 | { |
apetrusenko@1375 | 4412 | MemRegion mr(r->bottom(), r->end()); |
apetrusenko@1375 | 4413 | if (r->is_scan_only() || r->is_survivor()) { |
apetrusenko@1375 | 4414 | _ct_bs->verify_dirty_region(mr); |
apetrusenko@1375 | 4415 | } else { |
apetrusenko@1375 | 4416 | _ct_bs->verify_clean_region(mr); |
apetrusenko@1375 | 4417 | } |
apetrusenko@1375 | 4418 | return false; |
apetrusenko@1375 | 4419 | } |
apetrusenko@1375 | 4420 | }; |
apetrusenko@1375 | 4421 | #endif |
apetrusenko@1375 | 4422 | |
ysr@777 | 4423 | void G1CollectedHeap::cleanUpCardTable() { |
ysr@777 | 4424 | CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); |
ysr@777 | 4425 | double start = os::elapsedTime(); |
ysr@777 | 4426 | |
apetrusenko@1231 | 4427 | // Iterate over the dirty cards region list. |
apetrusenko@1375 | 4428 | G1ParCleanupCTTask cleanup_task(ct_bs, this, |
apetrusenko@1375 | 4429 | _young_list->first_scan_only_region(), |
apetrusenko@1375 | 4430 | _young_list->first_survivor_region()); |
apetrusenko@1231 | 4431 | if (ParallelGCThreads > 0) { |
apetrusenko@1231 | 4432 | set_par_threads(workers()->total_workers()); |
apetrusenko@1231 | 4433 | workers()->run_task(&cleanup_task); |
apetrusenko@1231 | 4434 | set_par_threads(0); |
apetrusenko@1231 | 4435 | } else { |
apetrusenko@1231 | 4436 | while (_dirty_cards_region_list) { |
apetrusenko@1231 | 4437 | HeapRegion* r = _dirty_cards_region_list; |
apetrusenko@1231 | 4438 | cleanup_task.clear_cards(r); |
apetrusenko@1231 | 4439 | _dirty_cards_region_list = r->get_next_dirty_cards_region(); |
apetrusenko@1231 | 4440 | if (_dirty_cards_region_list == r) { |
apetrusenko@1231 | 4441 | // The last region. |
apetrusenko@1231 | 4442 | _dirty_cards_region_list = NULL; |
apetrusenko@1231 | 4443 | } |
apetrusenko@1231 | 4444 | r->set_next_dirty_cards_region(NULL); |
apetrusenko@1231 | 4445 | } |
apetrusenko@1375 | 4446 | // now, redirty the cards of the scan-only and survivor regions |
apetrusenko@1375 | 4447 | // (it seemed faster to do it this way, instead of iterating over |
apetrusenko@1375 | 4448 | // all regions and then clearing / dirtying as appropriate) |
apetrusenko@1375 | 4449 | dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); |
apetrusenko@1375 | 4450 | dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); |
apetrusenko@1375 | 4451 | } |
ysr@777 | 4452 | double elapsed = os::elapsedTime() - start; |
ysr@777 | 4453 | g1_policy()->record_clear_ct_time( elapsed * 1000.0); |
apetrusenko@1375 | 4454 | #ifndef PRODUCT |
apetrusenko@1375 | 4455 | if (G1VerifyCTCleanup || VerifyAfterGC) { |
apetrusenko@1375 | 4456 | G1VerifyCardTableCleanup cleanup_verifier(ct_bs); |
apetrusenko@1375 | 4457 | heap_region_iterate(&cleanup_verifier); |
apetrusenko@1375 | 4458 | } |
apetrusenko@1375 | 4459 | #endif |
ysr@777 | 4460 | } |
ysr@777 | 4461 | |
ysr@777 | 4462 | void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) { |
ysr@777 | 4463 | if (g1_policy()->should_do_collection_pause(word_size)) { |
ysr@777 | 4464 | do_collection_pause(); |
ysr@777 | 4465 | } |
ysr@777 | 4466 | } |
ysr@777 | 4467 | |
ysr@777 | 4468 | void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { |
ysr@777 | 4469 | double young_time_ms = 0.0; |
ysr@777 | 4470 | double non_young_time_ms = 0.0; |
ysr@777 | 4471 | |
ysr@777 | 4472 | G1CollectorPolicy* policy = g1_policy(); |
ysr@777 | 4473 | |
ysr@777 | 4474 | double start_sec = os::elapsedTime(); |
ysr@777 | 4475 | bool non_young = true; |
ysr@777 | 4476 | |
ysr@777 | 4477 | HeapRegion* cur = cs_head; |
ysr@777 | 4478 | int age_bound = -1; |
ysr@777 | 4479 | size_t rs_lengths = 0; |
ysr@777 | 4480 | |
ysr@777 | 4481 | while (cur != NULL) { |
ysr@777 | 4482 | if (non_young) { |
ysr@777 | 4483 | if (cur->is_young()) { |
ysr@777 | 4484 | double end_sec = os::elapsedTime(); |
ysr@777 | 4485 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 4486 | non_young_time_ms += elapsed_ms; |
ysr@777 | 4487 | |
ysr@777 | 4488 | start_sec = os::elapsedTime(); |
ysr@777 | 4489 | non_young = false; |
ysr@777 | 4490 | } |
ysr@777 | 4491 | } else { |
ysr@777 | 4492 | if (!cur->is_on_free_list()) { |
ysr@777 | 4493 | double end_sec = os::elapsedTime(); |
ysr@777 | 4494 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 4495 | young_time_ms += elapsed_ms; |
ysr@777 | 4496 | |
ysr@777 | 4497 | start_sec = os::elapsedTime(); |
ysr@777 | 4498 | non_young = true; |
ysr@777 | 4499 | } |
ysr@777 | 4500 | } |
ysr@777 | 4501 | |
ysr@777 | 4502 | rs_lengths += cur->rem_set()->occupied(); |
ysr@777 | 4503 | |
ysr@777 | 4504 | HeapRegion* next = cur->next_in_collection_set(); |
ysr@777 | 4505 | assert(cur->in_collection_set(), "bad CS"); |
ysr@777 | 4506 | cur->set_next_in_collection_set(NULL); |
ysr@777 | 4507 | cur->set_in_collection_set(false); |
ysr@777 | 4508 | |
ysr@777 | 4509 | if (cur->is_young()) { |
ysr@777 | 4510 | int index = cur->young_index_in_cset(); |
ysr@777 | 4511 | guarantee( index != -1, "invariant" ); |
ysr@777 | 4512 | guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); |
ysr@777 | 4513 | size_t words_survived = _surviving_young_words[index]; |
ysr@777 | 4514 | cur->record_surv_words_in_group(words_survived); |
ysr@777 | 4515 | } else { |
ysr@777 | 4516 | int index = cur->young_index_in_cset(); |
ysr@777 | 4517 | guarantee( index == -1, "invariant" ); |
ysr@777 | 4518 | } |
ysr@777 | 4519 | |
ysr@777 | 4520 | assert( (cur->is_young() && cur->young_index_in_cset() > -1) || |
ysr@777 | 4521 | (!cur->is_young() && cur->young_index_in_cset() == -1), |
ysr@777 | 4522 | "invariant" ); |
ysr@777 | 4523 | |
ysr@777 | 4524 | if (!cur->evacuation_failed()) { |
ysr@777 | 4525 | // The region was successfully evacuated; free it.
ysr@777 | 4526 | assert(!cur->is_empty(), |
ysr@777 | 4527 | "Should not have empty regions in a CS."); |
ysr@777 | 4528 | free_region(cur); |
ysr@777 | 4529 | } else { |
ysr@777 | 4530 | guarantee( !cur->is_scan_only(), "should not be scan only" ); |
ysr@777 | 4531 | cur->uninstall_surv_rate_group(); |
ysr@777 | 4532 | if (cur->is_young()) |
ysr@777 | 4533 | cur->set_young_index_in_cset(-1); |
ysr@777 | 4534 | cur->set_not_young(); |
ysr@777 | 4535 | cur->set_evacuation_failed(false); |
ysr@777 | 4536 | } |
ysr@777 | 4537 | cur = next; |
ysr@777 | 4538 | } |
ysr@777 | 4539 | |
ysr@777 | 4540 | policy->record_max_rs_lengths(rs_lengths); |
ysr@777 | 4541 | policy->cset_regions_freed(); |
ysr@777 | 4542 | |
ysr@777 | 4543 | double end_sec = os::elapsedTime(); |
ysr@777 | 4544 | double elapsed_ms = (end_sec - start_sec) * 1000.0; |
ysr@777 | 4545 | if (non_young) |
ysr@777 | 4546 | non_young_time_ms += elapsed_ms; |
ysr@777 | 4547 | else |
ysr@777 | 4548 | young_time_ms += elapsed_ms; |
ysr@777 | 4549 | |
ysr@777 | 4550 | policy->record_young_free_cset_time_ms(young_time_ms); |
ysr@777 | 4551 | policy->record_non_young_free_cset_time_ms(non_young_time_ms); |
ysr@777 | 4552 | } |
ysr@777 | 4553 | |
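// Take a region off the unclean list and, if the caller asked for a
// zero-filled region, ensure it is zeroed before returning it.
// Returns NULL if the unclean list is empty.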
ysr@777 | 4554 | HeapRegion* |
ysr@777 | 4555 | G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { |
ysr@777 | 4556 | assert(ZF_mon->owned_by_self(), "Precondition"); |
ysr@777 | 4557 | HeapRegion* res = pop_unclean_region_list_locked(); |
ysr@777 | 4558 | if (res != NULL) { |
ysr@777 | 4559 | assert(!res->continuesHumongous() && |
ysr@777 | 4560 | res->zero_fill_state() != HeapRegion::Allocated, |
ysr@777 | 4561 | "Only free regions on unclean list."); |
ysr@777 | 4562 | if (zero_filled) { |
ysr@777 | 4563 | res->ensure_zero_filled_locked(); |
ysr@777 | 4564 | res->set_zero_fill_allocated(); |
ysr@777 | 4565 | } |
ysr@777 | 4566 | } |
ysr@777 | 4567 | return res; |
ysr@777 | 4568 | } |
ysr@777 | 4569 | |
ysr@777 | 4570 | HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { |
ysr@777 | 4571 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
ysr@777 | 4572 | return alloc_region_from_unclean_list_locked(zero_filled); |
ysr@777 | 4573 | } |
ysr@777 | 4574 | |
ysr@777 | 4575 | void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { |
ysr@777 | 4576 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4577 | put_region_on_unclean_list_locked(r); |
ysr@777 | 4578 | if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. |
ysr@777 | 4579 | } |
ysr@777 | 4580 | |
ysr@777 | 4581 | void G1CollectedHeap::set_unclean_regions_coming(bool b) { |
ysr@777 | 4582 | MutexLockerEx x(Cleanup_mon); |
ysr@777 | 4583 | set_unclean_regions_coming_locked(b); |
ysr@777 | 4584 | } |
ysr@777 | 4585 | |
ysr@777 | 4586 | void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { |
ysr@777 | 4587 | assert(Cleanup_mon->owned_by_self(), "Precondition"); |
ysr@777 | 4588 | _unclean_regions_coming = b; |
ysr@777 | 4589 | // Wake up mutator threads that might be waiting for completeCleanup to |
ysr@777 | 4590 | // finish. |
ysr@777 | 4591 | if (!b) Cleanup_mon->notify_all(); |
ysr@777 | 4592 | } |
ysr@777 | 4593 | |
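// Block until completeCleanup has finished, i.e. until no more unclean
// regions are coming.  A caller already holding Cleanup_mon should use
// the _locked variant directly, e.g. (sketch):
//   MutexLockerEx x(Cleanup_mon);
//   wait_for_cleanup_complete_locked();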
ysr@777 | 4594 | void G1CollectedHeap::wait_for_cleanup_complete() { |
ysr@777 | 4595 | MutexLockerEx x(Cleanup_mon); |
ysr@777 | 4596 | wait_for_cleanup_complete_locked(); |
ysr@777 | 4597 | } |
ysr@777 | 4598 | |
ysr@777 | 4599 | void G1CollectedHeap::wait_for_cleanup_complete_locked() { |
ysr@777 | 4600 | assert(Cleanup_mon->owned_by_self(), "precondition"); |
ysr@777 | 4601 | while (_unclean_regions_coming) { |
ysr@777 | 4602 | Cleanup_mon->wait(); |
ysr@777 | 4603 | } |
ysr@777 | 4604 | } |
ysr@777 | 4605 | |
ysr@777 | 4606 | void |
ysr@777 | 4607 | G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { |
ysr@777 | 4608 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4609 | _unclean_region_list.insert_before_head(r); |
ysr@777 | 4610 | } |
ysr@777 | 4611 | |
ysr@777 | 4612 | void |
ysr@777 | 4613 | G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { |
ysr@777 | 4614 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4615 | prepend_region_list_on_unclean_list_locked(list); |
ysr@777 | 4616 | if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. |
ysr@777 | 4617 | } |
ysr@777 | 4618 | |
ysr@777 | 4619 | void |
ysr@777 | 4620 | G1CollectedHeap:: |
ysr@777 | 4621 | prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { |
ysr@777 | 4622 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4623 | _unclean_region_list.prepend_list(list); |
ysr@777 | 4624 | } |
ysr@777 | 4625 | |
ysr@777 | 4626 | HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { |
ysr@777 | 4627 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4628 | HeapRegion* res = _unclean_region_list.pop(); |
ysr@777 | 4629 | if (res != NULL) { |
ysr@777 | 4630 | // Inform ZF thread that there's a new unclean head. |
ysr@777 | 4631 | if (_unclean_region_list.hd() != NULL && should_zf()) |
ysr@777 | 4632 | ZF_mon->notify_all(); |
ysr@777 | 4633 | } |
ysr@777 | 4634 | return res; |
ysr@777 | 4635 | } |
ysr@777 | 4636 | |
ysr@777 | 4637 | HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { |
ysr@777 | 4638 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4639 | return _unclean_region_list.hd(); |
ysr@777 | 4640 | } |
ysr@777 | 4641 | |
ysr@777 | 4642 | |
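// Transfer at most one region per call: if the head of the unclean list
// is fully zero filled, move it to the free list and return true;
// otherwise return false.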
ysr@777 | 4643 | bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { |
ysr@777 | 4644 | assert(ZF_mon->owned_by_self(), "Precondition"); |
ysr@777 | 4645 | HeapRegion* r = peek_unclean_region_list_locked(); |
ysr@777 | 4646 | if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { |
ysr@777 | 4647 | // Result of below must be equal to "r", since we hold the lock. |
ysr@777 | 4648 | (void)pop_unclean_region_list_locked(); |
ysr@777 | 4649 | put_free_region_on_list_locked(r); |
ysr@777 | 4650 | return true; |
ysr@777 | 4651 | } else { |
ysr@777 | 4652 | return false; |
ysr@777 | 4653 | } |
ysr@777 | 4654 | } |
ysr@777 | 4655 | |
ysr@777 | 4656 | bool G1CollectedHeap::move_cleaned_region_to_free_list() { |
ysr@777 | 4657 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4658 | return move_cleaned_region_to_free_list_locked(); |
ysr@777 | 4659 | } |
ysr@777 | 4660 | |
ysr@777 | 4661 | |
ysr@777 | 4662 | void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) { |
ysr@777 | 4663 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4664 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4665 | assert(r->zero_fill_state() == HeapRegion::ZeroFilled, |
ysr@777 | 4666 | "Regions on free list must be zero filled"); |
ysr@777 | 4667 | assert(!r->isHumongous(), "Must not be humongous."); |
ysr@777 | 4668 | assert(r->is_empty(), "Better be empty"); |
ysr@777 | 4669 | assert(!r->is_on_free_list(), |
ysr@777 | 4670 | "Better not already be on free list"); |
ysr@777 | 4671 | assert(!r->is_on_unclean_list(), |
ysr@777 | 4672 | "Better not already be on unclean list"); |
ysr@777 | 4673 | r->set_on_free_list(true); |
ysr@777 | 4674 | r->set_next_on_free_list(_free_region_list); |
ysr@777 | 4675 | _free_region_list = r; |
ysr@777 | 4676 | _free_region_list_size++; |
ysr@777 | 4677 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4678 | } |
ysr@777 | 4679 | |
ysr@777 | 4680 | void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) { |
ysr@777 | 4681 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4682 | put_free_region_on_list_locked(r); |
ysr@777 | 4683 | } |
ysr@777 | 4684 | |
ysr@777 | 4685 | HeapRegion* G1CollectedHeap::pop_free_region_list_locked() { |
ysr@777 | 4686 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4687 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4688 | HeapRegion* res = _free_region_list; |
ysr@777 | 4689 | if (res != NULL) { |
ysr@777 | 4690 | _free_region_list = res->next_from_free_list(); |
ysr@777 | 4691 | _free_region_list_size--; |
ysr@777 | 4692 | res->set_on_free_list(false); |
ysr@777 | 4693 | res->set_next_on_free_list(NULL); |
ysr@777 | 4694 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4695 | } |
ysr@777 | 4696 | return res; |
ysr@777 | 4697 | } |
ysr@777 | 4698 | |
ysr@777 | 4699 | |
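// Allocate a region from either list.  When a zero-filled region is
// required the free list is tried first (its regions are already
// zeroed); otherwise the unclean list is preferred.  At most two passes
// are made over the two sources before giving up and returning NULL.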
ysr@777 | 4700 | HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) { |
ysr@777 | 4701 | // By self, or on behalf of self. |
ysr@777 | 4702 | assert(Heap_lock->is_locked(), "Precondition"); |
ysr@777 | 4703 | HeapRegion* res = NULL; |
ysr@777 | 4704 | bool first = true; |
ysr@777 | 4705 | while (res == NULL) { |
ysr@777 | 4706 | if (zero_filled || !first) { |
ysr@777 | 4707 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4708 | res = pop_free_region_list_locked(); |
ysr@777 | 4709 | if (res != NULL) { |
ysr@777 | 4710 | assert(!res->zero_fill_is_allocated(), |
ysr@777 | 4711 | "No allocated regions on free list."); |
ysr@777 | 4712 | res->set_zero_fill_allocated(); |
ysr@777 | 4713 | } else if (!first) { |
ysr@777 | 4714 | break; // We tried both, time to return NULL. |
ysr@777 | 4715 | } |
ysr@777 | 4716 | } |
ysr@777 | 4717 | |
ysr@777 | 4718 | if (res == NULL) { |
ysr@777 | 4719 | res = alloc_region_from_unclean_list(zero_filled); |
ysr@777 | 4720 | } |
ysr@777 | 4721 | assert(res == NULL || |
ysr@777 | 4722 | !zero_filled || |
ysr@777 | 4723 | res->zero_fill_is_allocated(), |
ysr@777 | 4724 | "We must have allocated the region we're returning"); |
ysr@777 | 4725 | first = false; |
ysr@777 | 4726 | } |
ysr@777 | 4727 | return res; |
ysr@777 | 4728 | } |
ysr@777 | 4729 | |
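// Unlink from both the unclean and free lists any region whose
// zero-fill state shows it has already been handed out for allocation,
// so that it cannot be handed out again.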
ysr@777 | 4730 | void G1CollectedHeap::remove_allocated_regions_from_lists() { |
ysr@777 | 4731 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4732 | { |
ysr@777 | 4733 | HeapRegion* prev = NULL; |
ysr@777 | 4734 | HeapRegion* cur = _unclean_region_list.hd(); |
ysr@777 | 4735 | while (cur != NULL) { |
ysr@777 | 4736 | HeapRegion* next = cur->next_from_unclean_list(); |
ysr@777 | 4737 | if (cur->zero_fill_is_allocated()) { |
ysr@777 | 4738 | // Remove from the list. |
ysr@777 | 4739 | if (prev == NULL) { |
ysr@777 | 4740 | (void)_unclean_region_list.pop(); |
ysr@777 | 4741 | } else { |
ysr@777 | 4742 | _unclean_region_list.delete_after(prev); |
ysr@777 | 4743 | } |
ysr@777 | 4744 | cur->set_on_unclean_list(false); |
ysr@777 | 4745 | cur->set_next_on_unclean_list(NULL); |
ysr@777 | 4746 | } else { |
ysr@777 | 4747 | prev = cur; |
ysr@777 | 4748 | } |
ysr@777 | 4749 | cur = next; |
ysr@777 | 4750 | } |
ysr@777 | 4751 | assert(_unclean_region_list.sz() == unclean_region_list_length(), |
ysr@777 | 4752 | "Inv"); |
ysr@777 | 4753 | } |
ysr@777 | 4754 | |
ysr@777 | 4755 | { |
ysr@777 | 4756 | HeapRegion* prev = NULL; |
ysr@777 | 4757 | HeapRegion* cur = _free_region_list; |
ysr@777 | 4758 | while (cur != NULL) { |
ysr@777 | 4759 | HeapRegion* next = cur->next_from_free_list(); |
ysr@777 | 4760 | if (cur->zero_fill_is_allocated()) { |
ysr@777 | 4761 | // Remove from the list. |
ysr@777 | 4762 | if (prev == NULL) { |
ysr@777 | 4763 | _free_region_list = cur->next_from_free_list(); |
ysr@777 | 4764 | } else { |
ysr@777 | 4765 | prev->set_next_on_free_list(cur->next_from_free_list()); |
ysr@777 | 4766 | } |
ysr@777 | 4767 | cur->set_on_free_list(false); |
ysr@777 | 4768 | cur->set_next_on_free_list(NULL); |
ysr@777 | 4769 | _free_region_list_size--; |
ysr@777 | 4770 | } else { |
ysr@777 | 4771 | prev = cur; |
ysr@777 | 4772 | } |
ysr@777 | 4773 | cur = next; |
ysr@777 | 4774 | } |
ysr@777 | 4775 | assert(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4776 | } |
ysr@777 | 4777 | } |
ysr@777 | 4778 | |
ysr@777 | 4779 | bool G1CollectedHeap::verify_region_lists() { |
ysr@777 | 4780 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4781 | return verify_region_lists_locked(); |
ysr@777 | 4782 | } |
ysr@777 | 4783 | |
ysr@777 | 4784 | bool G1CollectedHeap::verify_region_lists_locked() { |
ysr@777 | 4785 | HeapRegion* unclean = _unclean_region_list.hd(); |
ysr@777 | 4786 | while (unclean != NULL) { |
ysr@777 | 4787 | guarantee(unclean->is_on_unclean_list(), "Well, it is!"); |
ysr@777 | 4788 | guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!"); |
ysr@777 | 4789 | guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
ysr@777 | 4790 | "No Allocated regions on the unclean list.");
ysr@777 | 4791 | unclean = unclean->next_from_unclean_list(); |
ysr@777 | 4792 | } |
ysr@777 | 4793 | guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv"); |
ysr@777 | 4794 | |
ysr@777 | 4795 | HeapRegion* free_r = _free_region_list; |
ysr@777 | 4796 | while (free_r != NULL) { |
ysr@777 | 4797 | assert(free_r->is_on_free_list(), "Well, it is!"); |
ysr@777 | 4798 | assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!"); |
ysr@777 | 4799 | switch (free_r->zero_fill_state()) { |
ysr@777 | 4800 | case HeapRegion::NotZeroFilled: |
ysr@777 | 4801 | case HeapRegion::ZeroFilling: |
ysr@777 | 4802 | guarantee(false, "Should not be on free list."); |
ysr@777 | 4803 | break; |
ysr@777 | 4804 | default: |
ysr@777 | 4805 | // Everything else is possible. |
ysr@777 | 4806 | break; |
ysr@777 | 4807 | } |
ysr@777 | 4808 | free_r = free_r->next_from_free_list(); |
ysr@777 | 4809 | } |
ysr@777 | 4810 | guarantee(_free_region_list_size == free_region_list_length(), "Inv"); |
ysr@777 | 4811 | // None of the checks above fired, so the lists are consistent.
ysr@777 | 4812 | return true; |
ysr@777 | 4813 | } |
ysr@777 | 4814 | |
ysr@777 | 4815 | size_t G1CollectedHeap::free_region_list_length() { |
ysr@777 | 4816 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4817 | size_t len = 0; |
ysr@777 | 4818 | HeapRegion* cur = _free_region_list; |
ysr@777 | 4819 | while (cur != NULL) { |
ysr@777 | 4820 | len++; |
ysr@777 | 4821 | cur = cur->next_from_free_list(); |
ysr@777 | 4822 | } |
ysr@777 | 4823 | return len; |
ysr@777 | 4824 | } |
ysr@777 | 4825 | |
ysr@777 | 4826 | size_t G1CollectedHeap::unclean_region_list_length() { |
ysr@777 | 4827 | assert(ZF_mon->owned_by_self(), "precondition."); |
ysr@777 | 4828 | return _unclean_region_list.length(); |
ysr@777 | 4829 | } |
ysr@777 | 4830 | |
ysr@777 | 4831 | size_t G1CollectedHeap::n_regions() { |
ysr@777 | 4832 | return _hrs->length(); |
ysr@777 | 4833 | } |
ysr@777 | 4834 | |
ysr@777 | 4835 | size_t G1CollectedHeap::max_regions() { |
ysr@777 | 4836 | return |
ysr@777 | 4837 | (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) / |
ysr@777 | 4838 | HeapRegion::GrainBytes; |
ysr@777 | 4839 | } |
ysr@777 | 4840 | |
ysr@777 | 4841 | size_t G1CollectedHeap::free_regions() { |
ysr@777 | 4842 | /* Possibly-expensive assert. |
ysr@777 | 4843 | assert(_free_regions == count_free_regions(), |
ysr@777 | 4844 | "_free_regions is off."); |
ysr@777 | 4845 | */ |
ysr@777 | 4846 | return _free_regions; |
ysr@777 | 4847 | } |
ysr@777 | 4848 | |
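// The concurrent ZF thread should keep running while the stock of
// pre-zeroed regions on the free list is below G1ConcZFMaxRegions.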
ysr@777 | 4849 | bool G1CollectedHeap::should_zf() { |
ysr@777 | 4850 | return _free_region_list_size < (size_t) G1ConcZFMaxRegions; |
ysr@777 | 4851 | } |
ysr@777 | 4852 | |
ysr@777 | 4853 | class RegionCounter: public HeapRegionClosure { |
ysr@777 | 4854 | size_t _n; |
ysr@777 | 4855 | public: |
ysr@777 | 4856 | RegionCounter() : _n(0) {} |
ysr@777 | 4857 | bool doHeapRegion(HeapRegion* r) { |
apetrusenko@1112 | 4858 | if (r->is_empty()) { |
ysr@777 | 4859 | assert(!r->isHumongous(), "H regions should not be empty."); |
ysr@777 | 4860 | _n++; |
ysr@777 | 4861 | } |
ysr@777 | 4862 | return false; |
ysr@777 | 4863 | } |
ysr@777 | 4864 | int res() { return (int) _n; } |
ysr@777 | 4865 | }; |
ysr@777 | 4866 | |
ysr@777 | 4867 | size_t G1CollectedHeap::count_free_regions() { |
ysr@777 | 4868 | RegionCounter rc; |
ysr@777 | 4869 | heap_region_iterate(&rc); |
ysr@777 | 4870 | size_t n = rc.res(); |
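// The closure counts the current allocation region as empty, but it is
// already in use, so exclude it from the free total.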
ysr@777 | 4871 | if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty()) |
ysr@777 | 4872 | n--; |
ysr@777 | 4873 | return n; |
ysr@777 | 4874 | } |
ysr@777 | 4875 | |
ysr@777 | 4876 | size_t G1CollectedHeap::count_free_regions_list() { |
ysr@777 | 4877 | size_t n = 0; |
ysr@777 | 4879 | ZF_mon->lock_without_safepoint_check(); |
ysr@777 | 4880 | HeapRegion* cur = _free_region_list; |
ysr@777 | 4881 | while (cur != NULL) { |
ysr@777 | 4882 | cur = cur->next_from_free_list(); |
ysr@777 | 4883 | n++; |
ysr@777 | 4884 | } |
ysr@777 | 4885 | size_t m = unclean_region_list_length(); |
ysr@777 | 4886 | ZF_mon->unlock(); |
ysr@777 | 4887 | return n + m; |
ysr@777 | 4888 | } |
ysr@777 | 4889 | |
ysr@777 | 4890 | bool G1CollectedHeap::should_set_young_locked() { |
ysr@777 | 4891 | assert(heap_lock_held_for_gc(), |
ysr@777 | 4892 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 4893 | return (g1_policy()->in_young_gc_mode() && |
ysr@777 | 4894 | g1_policy()->should_add_next_region_to_young_list()); |
ysr@777 | 4895 | } |
ysr@777 | 4896 | |
ysr@777 | 4897 | void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { |
ysr@777 | 4898 | assert(heap_lock_held_for_gc(), |
ysr@777 | 4899 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 4900 | _young_list->push_region(hr); |
ysr@777 | 4901 | g1_policy()->set_region_short_lived(hr); |
ysr@777 | 4902 | } |
ysr@777 | 4903 | |
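// Used by check_young_list_empty to verify that no region anywhere in
// the heap is still tagged as young.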
ysr@777 | 4904 | class NoYoungRegionsClosure: public HeapRegionClosure { |
ysr@777 | 4905 | private: |
ysr@777 | 4906 | bool _success; |
ysr@777 | 4907 | public: |
ysr@777 | 4908 | NoYoungRegionsClosure() : _success(true) { } |
ysr@777 | 4909 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 4910 | if (r->is_young()) { |
ysr@777 | 4911 | gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young", |
ysr@777 | 4912 | r->bottom(), r->end()); |
ysr@777 | 4913 | _success = false; |
ysr@777 | 4914 | } |
ysr@777 | 4915 | return false; |
ysr@777 | 4916 | } |
ysr@777 | 4917 | bool success() { return _success; } |
ysr@777 | 4918 | }; |
ysr@777 | 4919 | |
ysr@777 | 4920 | bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, |
ysr@777 | 4921 | bool check_sample) { |
ysr@777 | 4922 | bool ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
ysr@777 | 4925 | if (!ignore_scan_only_list) { |
ysr@777 | 4926 | NoYoungRegionsClosure closure; |
ysr@777 | 4927 | heap_region_iterate(&closure); |
ysr@777 | 4928 | ret = ret && closure.success(); |
ysr@777 | 4929 | } |
ysr@777 | 4930 | |
ysr@777 | 4931 | return ret; |
ysr@777 | 4932 | } |
ysr@777 | 4933 | |
ysr@777 | 4934 | void G1CollectedHeap::empty_young_list() { |
ysr@777 | 4935 | assert(heap_lock_held_for_gc(), |
ysr@777 | 4936 | "the heap lock should already be held by or for this thread"); |
ysr@777 | 4937 | assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode"); |
ysr@777 | 4938 | |
ysr@777 | 4939 | _young_list->empty_list(); |
ysr@777 | 4940 | } |
ysr@777 | 4941 | |
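// Returns true iff no GC alloc region has been allocated into since the
// last save_marks(); a NULL slot trivially satisfies this.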
ysr@777 | 4942 | bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() { |
ysr@777 | 4943 | bool no_allocs = true; |
ysr@777 | 4944 | for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) { |
ysr@777 | 4945 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 4946 | no_allocs = r == NULL || r->saved_mark_at_top(); |
ysr@777 | 4947 | } |
ysr@777 | 4948 | return no_allocs; |
ysr@777 | 4949 | } |
ysr@777 | 4950 | |
apetrusenko@980 | 4951 | void G1CollectedHeap::retire_all_alloc_regions() { |
ysr@777 | 4952 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
ysr@777 | 4953 | HeapRegion* r = _gc_alloc_regions[ap]; |
ysr@777 | 4954 | if (r != NULL) { |
ysr@777 | 4955 | // Check for aliases: distinct purposes may share a region; retire it only once.
ysr@777 | 4956 | bool has_processed_alias = false; |
ysr@777 | 4957 | for (int i = 0; i < ap; ++i) { |
ysr@777 | 4958 | if (_gc_alloc_regions[i] == r) { |
ysr@777 | 4959 | has_processed_alias = true; |
ysr@777 | 4960 | break; |
ysr@777 | 4961 | } |
ysr@777 | 4962 | } |
ysr@777 | 4963 | if (!has_processed_alias) { |
apetrusenko@980 | 4964 | retire_alloc_region(r, false /* par */); |
ysr@777 | 4965 | } |
ysr@777 | 4966 | } |
ysr@777 | 4967 | } |
ysr@777 | 4968 | } |
ysr@777 | 4969 | |
ysr@777 | 4970 | |
ysr@777 | 4971 | // Done at the start of full GC. |
ysr@777 | 4972 | void G1CollectedHeap::tear_down_region_lists() { |
ysr@777 | 4973 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 4974 | while (pop_unclean_region_list_locked() != NULL) ; |
ysr@777 | 4975 | assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
ysr@777 | 4976 | "Postcondition of loop.");
ysr@777 | 4977 | while (pop_free_region_list_locked() != NULL) ; |
ysr@777 | 4978 | assert(_free_region_list == NULL, "Postcondition of loop."); |
ysr@777 | 4979 | if (_free_region_list_size != 0) { |
ysr@777 | 4980 | gclog_or_tty->print_cr("Size is " SIZE_FORMAT ".", _free_region_list_size);
tonyp@1273 | 4981 | print_on(gclog_or_tty, true /* extended */); |
ysr@777 | 4982 | } |
ysr@777 | 4983 | assert(_free_region_list_size == 0, "Postcondition of loop.");
ysr@777 | 4984 | } |
ysr@777 | 4985 | |
ysr@777 | 4986 | |
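// Rebuilds the region lists after a full GC: occupied regions have the
// dead space above top() re-zeroed and are marked zero-fill-allocated,
// while empty regions are put back on the unclean or free list
// according to their zero-fill state.  Also counts the empty regions,
// so the caller can reset _free_regions.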
ysr@777 | 4987 | class RegionResetter: public HeapRegionClosure { |
ysr@777 | 4988 | G1CollectedHeap* _g1; |
ysr@777 | 4989 | int _n; |
ysr@777 | 4990 | public: |
ysr@777 | 4991 | RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {} |
ysr@777 | 4992 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 4993 | if (r->continuesHumongous()) return false; |
ysr@777 | 4994 | if (r->top() > r->bottom()) { |
ysr@777 | 4995 | if (r->top() < r->end()) { |
ysr@777 | 4996 | Copy::fill_to_words(r->top(), |
ysr@777 | 4997 | pointer_delta(r->end(), r->top())); |
ysr@777 | 4998 | } |
ysr@777 | 4999 | r->set_zero_fill_allocated(); |
ysr@777 | 5000 | } else { |
ysr@777 | 5001 | assert(r->is_empty(), "tautology"); |
apetrusenko@1112 | 5002 | _n++; |
apetrusenko@1112 | 5003 | switch (r->zero_fill_state()) { |
ysr@777 | 5004 | case HeapRegion::NotZeroFilled: |
ysr@777 | 5005 | case HeapRegion::ZeroFilling: |
ysr@777 | 5006 | _g1->put_region_on_unclean_list_locked(r); |
ysr@777 | 5007 | break; |
ysr@777 | 5008 | case HeapRegion::Allocated: |
ysr@777 | 5009 | r->set_zero_fill_complete(); |
ysr@777 | 5010 | // no break; go on to put on free list. |
ysr@777 | 5011 | case HeapRegion::ZeroFilled: |
ysr@777 | 5012 | _g1->put_free_region_on_list_locked(r); |
ysr@777 | 5013 | break; |
ysr@777 | 5014 | } |
ysr@777 | 5015 | } |
ysr@777 | 5016 | return false; |
ysr@777 | 5017 | } |
ysr@777 | 5018 | |
ysr@777 | 5019 | int getFreeRegionCount() {return _n;} |
ysr@777 | 5020 | }; |
ysr@777 | 5021 | |
ysr@777 | 5022 | // Done at the end of full GC. |
ysr@777 | 5023 | void G1CollectedHeap::rebuild_region_lists() { |
ysr@777 | 5024 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5025 | // This needs to go at the end of the full GC. |
ysr@777 | 5026 | RegionResetter rs; |
ysr@777 | 5027 | heap_region_iterate(&rs); |
ysr@777 | 5028 | _free_regions = rs.getFreeRegionCount(); |
ysr@777 | 5029 | // Tell the ZF thread it may have work to do. |
ysr@777 | 5030 | if (should_zf()) ZF_mon->notify_all(); |
ysr@777 | 5031 | } |
ysr@777 | 5032 | |
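// Flags every used region as needing zero fill, so that space freed up
// by the full GC's compaction will be re-zeroed before it is reused.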
ysr@777 | 5033 | class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure { |
ysr@777 | 5034 | G1CollectedHeap* _g1; |
ysr@777 | 5035 | int _n; |
ysr@777 | 5036 | public: |
ysr@777 | 5037 | UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {} |
ysr@777 | 5038 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 5039 | if (r->continuesHumongous()) return false; |
ysr@777 | 5040 | if (r->top() > r->bottom()) { |
ysr@777 | 5041 | // There are assertions in "set_zero_fill_needed()" below that |
ysr@777 | 5042 | // require top() == bottom(), so this is technically illegal. |
ysr@777 | 5043 | // We'll skirt the law here, by making that true temporarily. |
ysr@777 | 5044 | DEBUG_ONLY(HeapWord* save_top = r->top(); |
ysr@777 | 5045 | r->set_top(r->bottom())); |
ysr@777 | 5046 | r->set_zero_fill_needed(); |
ysr@777 | 5047 | DEBUG_ONLY(r->set_top(save_top)); |
ysr@777 | 5048 | } |
ysr@777 | 5049 | return false; |
ysr@777 | 5050 | } |
ysr@777 | 5051 | }; |
ysr@777 | 5052 | |
ysr@777 | 5053 | // Done at the start of full GC. |
ysr@777 | 5054 | void G1CollectedHeap::set_used_regions_to_need_zero_fill() { |
ysr@777 | 5055 | MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); |
ysr@777 | 5056 | // This needs to be done at the start of the full GC.
ysr@777 | 5057 | UsedRegionsNeedZeroFillSetter rs; |
ysr@777 | 5058 | heap_region_iterate(&rs); |
ysr@777 | 5059 | } |
ysr@777 | 5060 | |
ysr@777 | 5061 | void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { |
ysr@777 | 5062 | _refine_cte_cl->set_concurrent(concurrent); |
ysr@777 | 5063 | } |
ysr@777 | 5064 | |
ysr@777 | 5065 | #ifndef PRODUCT |
ysr@777 | 5066 | |
ysr@777 | 5067 | class PrintHeapRegionClosure: public HeapRegionClosure { |
ysr@777 | 5068 | public: |
ysr@777 | 5069 | bool doHeapRegion(HeapRegion *r) { |
ysr@777 | 5070 | gclog_or_tty->print("Region: "PTR_FORMAT":", r); |
ysr@777 | 5071 | if (r != NULL) { |
ysr@777 | 5072 | if (r->is_on_free_list()) |
ysr@777 | 5073 | gclog_or_tty->print("Free "); |
ysr@777 | 5074 | if (r->is_young()) |
ysr@777 | 5075 | gclog_or_tty->print("Young "); |
ysr@777 | 5076 | if (r->isHumongous()) |
ysr@777 | 5077 | gclog_or_tty->print("Is Humongous "); |
ysr@777 | 5078 | r->print(); |
ysr@777 | 5079 | } |
ysr@777 | 5080 | return false; |
ysr@777 | 5081 | } |
ysr@777 | 5082 | }; |
ysr@777 | 5083 | |
ysr@777 | 5084 | class SortHeapRegionClosure : public HeapRegionClosure { |
ysr@777 | 5085 | size_t young_regions, free_regions, unclean_regions;
ysr@777 | 5086 | size_t hum_regions, count; |
ysr@777 | 5087 | size_t unaccounted, cur_unclean, cur_alloc; |
ysr@777 | 5088 | size_t total_free; |
ysr@777 | 5089 | HeapRegion* cur; |
ysr@777 | 5090 | public: |
ysr@777 | 5091 | SortHeapRegionClosure(HeapRegion *_cur) : young_regions(0),
ysr@777 | 5092 | free_regions(0), unclean_regions(0),
ysr@777 | 5093 | hum_regions(0), count(0),
ysr@777 | 5094 | unaccounted(0), cur_unclean(0),
ysr@777 | 5095 | cur_alloc(0), total_free(0), cur(_cur)
ysr@777 | 5096 | {}
ysr@777 | 5097 | bool doHeapRegion(HeapRegion *r) { |
ysr@777 | 5098 | count++; |
ysr@777 | 5099 | if (r->is_on_free_list()) free_regions++; |
ysr@777 | 5100 | else if (r->is_on_unclean_list()) unclean_regions++; |
ysr@777 | 5101 | else if (r->isHumongous()) hum_regions++; |
ysr@777 | 5102 | else if (r->is_young()) young_regions++; |
ysr@777 | 5103 | else if (r == cur) cur_alloc++; |
ysr@777 | 5104 | else unaccounted++; |
ysr@777 | 5105 | return false; |
ysr@777 | 5106 | } |
ysr@777 | 5107 | void print() { |
ysr@777 | 5108 | total_free = free_regions + unclean_regions; |
ysr@777 | 5109 | gclog_or_tty->print(SIZE_FORMAT " regions\n", count);
ysr@777 | 5110 | gclog_or_tty->print(SIZE_FORMAT " free: free_list = " SIZE_FORMAT " unclean = " SIZE_FORMAT "\n",
ysr@777 | 5111 | total_free, free_regions, unclean_regions);
ysr@777 | 5112 | gclog_or_tty->print(SIZE_FORMAT " humongous " SIZE_FORMAT " young\n",
ysr@777 | 5113 | hum_regions, young_regions);
ysr@777 | 5114 | gclog_or_tty->print(SIZE_FORMAT " cur_alloc\n", cur_alloc);
ysr@777 | 5115 | gclog_or_tty->print("UHOH unaccounted = " SIZE_FORMAT "\n", unaccounted);
ysr@777 | 5116 | } |
ysr@777 | 5117 | }; |
ysr@777 | 5118 | |
ysr@777 | 5119 | void G1CollectedHeap::print_region_counts() { |
ysr@777 | 5120 | SortHeapRegionClosure sc(_cur_alloc_region); |
ysr@777 | 5121 | PrintHeapRegionClosure cl; |
ysr@777 | 5122 | heap_region_iterate(&cl); |
ysr@777 | 5123 | heap_region_iterate(&sc); |
ysr@777 | 5124 | sc.print(); |
ysr@777 | 5125 | print_region_accounting_info(); |
ysr@777 | 5126 | }
ysr@777 | 5127 | |
ysr@777 | 5128 | bool G1CollectedHeap::regions_accounted_for() { |
ysr@777 | 5129 | // TODO: regions accounting for young/survivor/tenured |
ysr@777 | 5130 | return true; |
ysr@777 | 5131 | } |
ysr@777 | 5132 | |
ysr@777 | 5133 | bool G1CollectedHeap::print_region_accounting_info() { |
ysr@777 | 5134 | gclog_or_tty->print_cr("Free regions: " SIZE_FORMAT " (count: " SIZE_FORMAT " count list " SIZE_FORMAT ")"
ysr@777 | 5135 | " (clean: " SIZE_FORMAT " unclean: " SIZE_FORMAT ").",
ysr@777 | 5136 | free_regions(), count_free_regions(), count_free_regions_list(),
ysr@777 | 5137 | _free_region_list_size, _unclean_region_list.sz());
ysr@777 | 5138 | gclog_or_tty->print_cr("cur_alloc: %d.", |
ysr@777 | 5139 | (_cur_alloc_region == NULL ? 0 : 1)); |
ysr@777 | 5140 | gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions); |
ysr@777 | 5141 | |
ysr@777 | 5142 | // TODO: check regions accounting for young/survivor/tenured |
ysr@777 | 5143 | return true; |
ysr@777 | 5144 | } |
ysr@777 | 5145 | |
ysr@777 | 5146 | bool G1CollectedHeap::is_in_closed_subset(const void* p) const { |
ysr@777 | 5147 | HeapRegion* hr = heap_region_containing(p); |
ysr@777 | 5148 | if (hr == NULL) { |
ysr@777 | 5149 | return is_in_permanent(p); |
ysr@777 | 5150 | } else { |
ysr@777 | 5151 | return hr->is_in(p); |
ysr@777 | 5152 | } |
ysr@777 | 5153 | } |
ysr@1376 | 5154 | #endif // !PRODUCT |
ysr@777 | 5155 | |
ysr@777 | 5156 | void G1CollectedHeap::g1_unimplemented() { |
ysr@777 | 5157 | // Unimplemented(); |
ysr@777 | 5158 | } |