src/share/vm/gc_implementation/g1/heapRegionManager.cpp

author       tschatzl
date         Fri, 10 Oct 2014 15:51:58 +0200
changeset    7257:e7d0505c8a30
parent       7131:d35872270666
child        7535:7ae4e26cb1e0
child        7835:e5406a79ae90
permissions  -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, which previously did not touch that memory at all, so the operating system never actually committed these pages. The fix is to do nothing if the initialization value of the data structures matches the default value of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
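
The idea of the fix, as a minimal sketch (the helper below is illustrative only and not the actual JDK code): pages that have just been committed are already zero-filled by the operating system, so writing zeros over them would needlessly touch them and force the OS to back them with physical memory; the write is only needed when the desired initial value is non-zero.

#include <cstring>
#include <cstddef>

// Hypothetical helper; 'just_committed' means the range was freshly
// committed and is therefore known to be zero-filled by the OS.
static void initialize_range(void* base, size_t size_in_bytes,
                             unsigned char init_value, bool just_committed) {
  if (just_committed && init_value == 0) {
    // The pages already hold the desired value; touching them would only
    // cause the OS to commit physical memory for them.
    return;
  }
  memset(base, init_value, size_in_bytes);
}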

ysr@777 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
tonyp@2963 26 #include "gc_implementation/g1/heapRegion.hpp"
tschatzl@7091 27 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
tschatzl@7050 28 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
tschatzl@7050 30 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 31 #include "memory/allocation.hpp"
ysr@777 32
tschatzl@7091 33 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
tschatzl@7051 34 G1RegionToSpaceMapper* prev_bitmap,
tschatzl@7051 35 G1RegionToSpaceMapper* next_bitmap,
tschatzl@7051 36 G1RegionToSpaceMapper* bot,
tschatzl@7051 37 G1RegionToSpaceMapper* cardtable,
tschatzl@7051 38 G1RegionToSpaceMapper* card_counts) {
tschatzl@7050 39 _allocated_heapregions_length = 0;
tschatzl@7050 40
tschatzl@7051 41 _heap_mapper = heap_storage;
tschatzl@7051 42
tschatzl@7051 43 _prev_bitmap_mapper = prev_bitmap;
tschatzl@7051 44 _next_bitmap_mapper = next_bitmap;
tschatzl@7051 45
tschatzl@7051 46 _bot_mapper = bot;
tschatzl@7051 47 _cardtable_mapper = cardtable;
tschatzl@7051 48
tschatzl@7051 49 _card_counts_mapper = card_counts;
tschatzl@7051 50
tschatzl@7051 51 MemRegion reserved = heap_storage->reserved();
tschatzl@7051 52 _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
tschatzl@7051 53
tschatzl@7051 54 _available_map.resize(_regions.length(), false);
tschatzl@7051 55 _available_map.clear();
tschatzl@7050 56 }
tschatzl@7050 57
tschatzl@7091 58 bool HeapRegionManager::is_available(uint region) const {
tschatzl@7051 59 return _available_map.at(region);
tschatzl@7050 60 }
tschatzl@7050 61
tschatzl@7050 62 #ifdef ASSERT
tschatzl@7091 63 bool HeapRegionManager::is_free(HeapRegion* hr) const {
tschatzl@7050 64 return _free_list.contains(hr);
tschatzl@7050 65 }
tschatzl@7050 66 #endif
tschatzl@7050 67
tschatzl@7091 68 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
sjohanss@7131 69 G1CollectedHeap* g1h = G1CollectedHeap::heap();
sjohanss@7131 70 HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
tschatzl@7050 71 MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
tschatzl@7050 72 assert(reserved().contains(mr), "invariant");
sjohanss@7131 73 return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
tschatzl@7050 74 }
tschatzl@7050 75
tschatzl@7091 76 void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
tschatzl@7050 77 guarantee(num_regions > 0, "Must commit more than zero regions");
tschatzl@7050 78 guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
tschatzl@7050 79
tschatzl@7051 80 _num_committed += (uint)num_regions;
tschatzl@7051 81
tschatzl@7051 82 _heap_mapper->commit_regions(index, num_regions);
tschatzl@7051 83
tschatzl@7051 84 // Also commit auxiliary data
tschatzl@7051 85 _prev_bitmap_mapper->commit_regions(index, num_regions);
tschatzl@7051 86 _next_bitmap_mapper->commit_regions(index, num_regions);
tschatzl@7051 87
tschatzl@7051 88 _bot_mapper->commit_regions(index, num_regions);
tschatzl@7051 89 _cardtable_mapper->commit_regions(index, num_regions);
tschatzl@7051 90
tschatzl@7051 91 _card_counts_mapper->commit_regions(index, num_regions);
tschatzl@7050 92 }
tschatzl@7050 93
tschatzl@7091 94 void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
tschatzl@7051 95 guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
tschatzl@7050 96 guarantee(_num_committed >= num_regions, "pre-condition");
tschatzl@7050 97
tschatzl@7050 98 // Print before uncommitting.
tschatzl@7050 99 if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
tschatzl@7050 100 for (uint i = start; i < start + num_regions; i++) {
tschatzl@7050 101 HeapRegion* hr = at(i);
tschatzl@7050 102 G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
tonyp@2472 103 }
tonyp@2472 104 }
tschatzl@7050 105
tschatzl@7050 106 _num_committed -= (uint)num_regions;
tschatzl@7050 107
tschatzl@7051 108 _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
tschatzl@7051 109 _heap_mapper->uncommit_regions(start, num_regions);
tschatzl@7051 110
tschatzl@7051 111 // Also uncommit auxiliary data
tschatzl@7051 112 _prev_bitmap_mapper->uncommit_regions(start, num_regions);
tschatzl@7051 113 _next_bitmap_mapper->uncommit_regions(start, num_regions);
tschatzl@7051 114
tschatzl@7051 115 _bot_mapper->uncommit_regions(start, num_regions);
tschatzl@7051 116 _cardtable_mapper->uncommit_regions(start, num_regions);
tschatzl@7051 117
tschatzl@7051 118 _card_counts_mapper->uncommit_regions(start, num_regions);
tschatzl@7050 119 }
tschatzl@7050 120
tschatzl@7091 121 void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
tschatzl@7050 122 guarantee(num_regions > 0, "No point in calling this for zero regions");
tschatzl@7050 123 commit_regions(start, num_regions);
tschatzl@7050 124 for (uint i = start; i < start + num_regions; i++) {
tschatzl@7050 125 if (_regions.get_by_index(i) == NULL) {
tschatzl@7050 126 HeapRegion* new_hr = new_heap_region(i);
tschatzl@7050 127 _regions.set_by_index(i, new_hr);
tschatzl@7050 128 _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
tonyp@2472 129 }
tschatzl@7050 130 }
tschatzl@7050 131
tschatzl@7051 132 _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
tschatzl@7050 133
tschatzl@7050 134 for (uint i = start; i < start + num_regions; i++) {
tschatzl@7050 135 assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
tschatzl@7050 136 HeapRegion* hr = at(i);
tschatzl@7050 137 if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
tschatzl@7050 138 G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
tschatzl@7050 139 }
tschatzl@7050 140 HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
tschatzl@7050 141 MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
tschatzl@7050 142
tschatzl@7050 143 hr->initialize(mr);
tschatzl@7050 144 insert_into_free_list(at(i));
tonyp@2472 145 }
tonyp@2472 146 }
tonyp@2472 147
tschatzl@7091 148 uint HeapRegionManager::expand_by(uint num_regions) {
tschatzl@7051 149 return expand_at(0, num_regions);
tonyp@2963 150 }
tonyp@2963 151
tschatzl@7091 152 uint HeapRegionManager::expand_at(uint start, uint num_regions) {
tschatzl@7050 153 if (num_regions == 0) {
tschatzl@7050 154 return 0;
tschatzl@7050 155 }
tonyp@2963 156
tschatzl@7050 157 uint cur = start;
tschatzl@7050 158 uint idx_last_found = 0;
tschatzl@7050 159 uint num_last_found = 0;
tonyp@2963 160
tschatzl@7050 161 uint expanded = 0;
tonyp@2963 162
tschatzl@7050 163 while (expanded < num_regions &&
tschatzl@7050 164 (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
tschatzl@7050 165 uint to_expand = MIN2(num_regions - expanded, num_last_found);
tschatzl@7050 166 make_regions_available(idx_last_found, to_expand);
tschatzl@7050 167 expanded += to_expand;
tschatzl@7050 168 cur = idx_last_found + num_last_found + 1;
tschatzl@7050 169 }
tonyp@2963 170
tschatzl@7050 171 verify_optional();
tschatzl@7050 172 return expanded;
tonyp@2963 173 }
tonyp@2963 174
tschatzl@7091 175 uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
tschatzl@7050 176 uint found = 0;
tschatzl@7050 177 size_t length_found = 0;
tschatzl@7050 178 uint cur = 0;
tschatzl@7050 179
tschatzl@7050 180 while (length_found < num && cur < max_length()) {
tschatzl@7050 181 HeapRegion* hr = _regions.get_by_index(cur);
tschatzl@7050 182 if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
tschatzl@7050 183 // This region is a potential candidate for allocation into.
tschatzl@7050 184 length_found++;
tschatzl@7050 185 } else {
tschatzl@7050 186 // This region is not a candidate. The next region is the next possible one.
tschatzl@7050 187 found = cur + 1;
tschatzl@7050 188 length_found = 0;
tonyp@2963 189 }
tschatzl@7050 190 cur++;
tonyp@2472 191 }
tschatzl@7050 192
tschatzl@7050 193 if (length_found == num) {
tschatzl@7050 194 for (uint i = found; i < (found + num); i++) {
tschatzl@7050 195 HeapRegion* hr = _regions.get_by_index(i);
tschatzl@7050 196 // sanity check
tschatzl@7050 197 guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
tschatzl@7050 198 err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
tschatzl@7050 199 " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
tschatzl@7050 200 }
tschatzl@7050 201 return found;
tschatzl@7050 202 } else {
tschatzl@7091 203 return G1_NO_HRM_INDEX;
tschatzl@7050 204 }
ysr@777 205 }
ysr@777 206
tschatzl@7091 207 HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
tschatzl@7050 208 guarantee(r != NULL, "Start region must be a valid region");
tschatzl@7091 209 guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
tschatzl@7091 210 for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
tschatzl@7050 211 HeapRegion* hr = _regions.get_by_index(i);
tschatzl@7050 212 if (is_available(i)) {
tschatzl@7050 213 return hr;
tschatzl@7050 214 }
tonyp@2963 215 }
tschatzl@7050 216 return NULL;
ysr@777 217 }
ysr@777 218
tschatzl@7091 219 void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
tschatzl@7050 220 uint len = max_length();
ysr@777 221
tschatzl@7050 222 for (uint i = 0; i < len; i++) {
tschatzl@7050 223 if (!is_available(i)) {
tschatzl@7050 224 continue;
ysr@777 225 }
tschatzl@7050 226 guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
tonyp@2963 227 bool res = blk->doHeapRegion(at(i));
ysr@777 228 if (res) {
ysr@777 229 blk->incomplete();
ysr@777 230 return;
ysr@777 231 }
ysr@777 232 }
ysr@777 233 }
ysr@777 234
tschatzl@7091 235 uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
tschatzl@7050 236 guarantee(res_idx != NULL, "checking");
tschatzl@7050 237 guarantee(start_idx <= (max_length() + 1), "checking");
tschatzl@7050 238
tschatzl@7050 239 uint num_regions = 0;
tschatzl@7050 240
tschatzl@7050 241 uint cur = start_idx;
tschatzl@7050 242 while (cur < max_length() && is_available(cur)) {
tschatzl@7050 243 cur++;
tschatzl@7050 244 }
tschatzl@7050 245 if (cur == max_length()) {
tschatzl@7050 246 return num_regions;
tschatzl@7050 247 }
tschatzl@7050 248 *res_idx = cur;
tschatzl@7050 249 while (cur < max_length() && !is_available(cur)) {
tschatzl@7050 250 cur++;
tschatzl@7050 251 }
tschatzl@7050 252 num_regions = cur - *res_idx;
tschatzl@7050 253 #ifdef ASSERT
tschatzl@7050 254 for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
tschatzl@7050 255 assert(!is_available(i), "just checking");
tschatzl@7050 256 }
tschatzl@7050 257 assert(cur == max_length() || num_regions == 0 || is_available(cur),
tschatzl@7050 258 err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
tschatzl@7050 259 #endif
tschatzl@7050 260 return num_regions;
tschatzl@7050 261 }
tschatzl@7050 262
tschatzl@7091 263 uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
tschatzl@7050 264 return num_regions * worker_i / num_workers;
tschatzl@7050 265 }
tschatzl@7050 266
tschatzl@7091 267 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
tschatzl@7050 268 const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
tschatzl@7050 269
tschatzl@7050 270 // Every worker will actually look at all regions, skipping over regions that
tschatzl@7050 271 // are currently not committed.
tschatzl@7050 272 // This also (potentially) iterates over regions newly allocated during GC. This
tschatzl@7050 273 // is no problem except for some extra work.
tschatzl@7050 274 for (uint count = 0; count < _allocated_heapregions_length; count++) {
tschatzl@7050 275 const uint index = (start_index + count) % _allocated_heapregions_length;
tschatzl@7050 276 assert(0 <= index && index < _allocated_heapregions_length, "sanity");
tschatzl@7050 277 // Skip over unavailable regions
tschatzl@7050 278 if (!is_available(index)) {
tschatzl@7050 279 continue;
tschatzl@7050 280 }
tschatzl@7050 281 HeapRegion* r = _regions.get_by_index(index);
tschatzl@7050 282 // We'll ignore "continues humongous" regions (we'll process them
tschatzl@7050 283 // when we come across their corresponding "start humongous"
tschatzl@7050 284 // region) and regions already claimed.
tschatzl@7050 285 if (r->claim_value() == claim_value || r->continuesHumongous()) {
tschatzl@7050 286 continue;
tschatzl@7050 287 }
tschatzl@7050 288 // OK, try to claim it
tschatzl@7050 289 if (!r->claimHeapRegion(claim_value)) {
tschatzl@7050 290 continue;
tschatzl@7050 291 }
tschatzl@7050 292 // Success!
tschatzl@7050 293 if (r->startsHumongous()) {
tschatzl@7050 294 // If the region is "starts humongous" we'll iterate over its
tschatzl@7050 295 // "continues humongous" first; in fact we'll do them
tschatzl@7050 296 // first. The order is important. In one case, calling the
tschatzl@7050 297 // closure on the "starts humongous" region might de-allocate
tschatzl@7050 298 // and clear all its "continues humongous" regions and, as a
tschatzl@7050 299 // result, we might end up processing them twice. So, we'll do
tschatzl@7050 300 // them first (note: most closures will ignore them anyway) and
tschatzl@7050 301 // then we'll do the "starts humongous" region.
tschatzl@7050 302 for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
tschatzl@7050 303 HeapRegion* chr = _regions.get_by_index(ch_index);
tschatzl@7050 304
tschatzl@7050 305 assert(chr->continuesHumongous(), "Must be humongous region");
tschatzl@7050 306 assert(chr->humongous_start_region() == r,
tschatzl@7050 307 err_msg("Must work on humongous continuation of the original start region "
tschatzl@7050 308 PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
tschatzl@7050 309 assert(chr->claim_value() != claim_value,
tschatzl@7050 310 "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
tschatzl@7050 311
tschatzl@7050 312 bool claim_result = chr->claimHeapRegion(claim_value);
tschatzl@7050 313 // We should always be able to claim it; no one else should
tschatzl@7050 314 // be trying to claim this region.
tschatzl@7050 315 guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
tschatzl@7050 316
tschatzl@7050 317 bool res2 = blk->doHeapRegion(chr);
tschatzl@7050 318 if (res2) {
tschatzl@7050 319 return;
tschatzl@7050 320 }
tschatzl@7050 321
tschatzl@7050 322 // Right now, this holds (i.e., no closure that actually
tschatzl@7050 323 // does something with "continues humongous" regions
tschatzl@7050 324 // clears them). We might have to weaken it in the future,
tschatzl@7050 325 // but let's leave these two asserts here for extra safety.
tschatzl@7050 326 assert(chr->continuesHumongous(), "should still be the case");
tschatzl@7050 327 assert(chr->humongous_start_region() == r, "sanity");
tschatzl@7050 328 }
tschatzl@7050 329 }
tschatzl@7050 330
tschatzl@7050 331 bool res = blk->doHeapRegion(r);
tschatzl@7050 332 if (res) {
tschatzl@7050 333 return;
tschatzl@7050 334 }
tschatzl@7050 335 }
tschatzl@7050 336 }
tschatzl@7050 337
tschatzl@7091 338 uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
tonyp@2963 339 assert(length() > 0, "the region sequence should not be empty");
tschatzl@7050 340 assert(length() <= _allocated_heapregions_length, "invariant");
tschatzl@7050 341 assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
brutisso@5074 342 assert(num_regions_to_remove < length(), "We should never remove all regions");
ysr@777 343
tschatzl@7050 344 if (num_regions_to_remove == 0) {
tschatzl@7050 345 return 0;
tschatzl@7050 346 }
tonyp@2963 347
tschatzl@7050 348 uint removed = 0;
tschatzl@7050 349 uint cur = _allocated_heapregions_length - 1;
tschatzl@7050 350 uint idx_last_found = 0;
tschatzl@7050 351 uint num_last_found = 0;
tschatzl@7050 352
tschatzl@7051 353 while ((removed < num_regions_to_remove) &&
tschatzl@7051 354 (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
tschatzl@7050 355 uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
tschatzl@7050 356
tschatzl@7050 357 uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
tschatzl@7050 358
tschatzl@7050 359 cur -= num_last_found;
tschatzl@7050 360 removed += to_remove;
ysr@777 361 }
brutisso@5074 362
tschatzl@7050 363 verify_optional();
tschatzl@7050 364
tschatzl@7050 365 return removed;
ysr@777 366 }
ysr@777 367
tschatzl@7091 368 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
tschatzl@7050 369 guarantee(start_idx < _allocated_heapregions_length, "checking");
tschatzl@7050 370 guarantee(res_idx != NULL, "checking");
tschatzl@7050 371
tschatzl@7050 372 uint num_regions_found = 0;
tschatzl@7050 373
tschatzl@7050 374 jlong cur = start_idx;
tschatzl@7050 375 while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
tschatzl@7050 376 cur--;
tschatzl@7050 377 }
tschatzl@7050 378 if (cur == -1) {
tschatzl@7050 379 return num_regions_found;
tschatzl@7050 380 }
tschatzl@7050 381 jlong old_cur = cur;
tschatzl@7050 382 // cur indexes the first empty region
tschatzl@7050 383 while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
tschatzl@7050 384 cur--;
tschatzl@7050 385 }
tschatzl@7050 386 *res_idx = cur + 1;
tschatzl@7050 387 num_regions_found = old_cur - cur;
tschatzl@7050 388
tschatzl@7050 389 #ifdef ASSERT
tschatzl@7050 390 for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
tschatzl@7050 391 assert(at(i)->is_empty(), "just checking");
tschatzl@7050 392 }
tschatzl@7050 393 #endif
tschatzl@7050 394 return num_regions_found;
tschatzl@7050 395 }
tschatzl@7050 396
tschatzl@7091 397 void HeapRegionManager::verify() {
tschatzl@7050 398 guarantee(length() <= _allocated_heapregions_length,
tonyp@3713 399 err_msg("invariant: _length: %u _allocated_length: %u",
tschatzl@7050 400 length(), _allocated_heapregions_length));
tschatzl@7050 401 guarantee(_allocated_heapregions_length <= max_length(),
tonyp@3713 402 err_msg("invariant: _allocated_length: %u _max_length: %u",
tschatzl@7050 403 _allocated_heapregions_length, max_length()));
tonyp@2963 404
tschatzl@7050 405 bool prev_committed = true;
tschatzl@7050 406 uint num_committed = 0;
tschatzl@5773 407 HeapWord* prev_end = heap_bottom();
tschatzl@7050 408 for (uint i = 0; i < _allocated_heapregions_length; i++) {
tschatzl@7050 409 if (!is_available(i)) {
tschatzl@7050 410 prev_committed = false;
tschatzl@7050 411 continue;
tschatzl@7050 412 }
tschatzl@7050 413 num_committed++;
tschatzl@5773 414 HeapRegion* hr = _regions.get_by_index(i);
tonyp@3713 415 guarantee(hr != NULL, err_msg("invariant: i: %u", i));
tschatzl@7050 416 guarantee(!prev_committed || hr->bottom() == prev_end,
tonyp@3713 417 err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
drchase@6680 418 i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
tschatzl@7091 419 guarantee(hr->hrm_index() == i,
tschatzl@7091 420 err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
tschatzl@7050 421 // Asserts will fire if i is >= _length
tschatzl@7050 422 HeapWord* addr = hr->bottom();
tschatzl@7050 423 guarantee(addr_to_region(addr) == hr, "sanity");
tschatzl@7050 424 // We cannot check whether the region is part of a particular set: at the time
tschatzl@7050 425 // this method may be called, we have only completed allocation of the regions,
tschatzl@7050 426 // but not put into a region set.
tschatzl@7050 427 prev_committed = true;
tonyp@2963 428 if (hr->startsHumongous()) {
tonyp@2963 429 prev_end = hr->orig_end();
tonyp@2963 430 } else {
tonyp@2963 431 prev_end = hr->end();
tonyp@2963 432 }
ysr@777 433 }
tschatzl@7050 434 for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
tschatzl@5773 435 guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
tonyp@2963 436 }
tschatzl@7050 437
tschatzl@7050 438 guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
tschatzl@7050 439 _free_list.verify();
tschatzl@7050 440 }
tschatzl@7050 441
tschatzl@7050 442 #ifndef PRODUCT
tschatzl@7091 443 void HeapRegionManager::verify_optional() {
tschatzl@7050 444 verify();
ysr@777 445 }
tonyp@2963 446 #endif // PRODUCT
tschatzl@7050 447
