Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for Hotspot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp"

// Local to this file.

static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. It
  // allows the assert in GenericGrowableArray() to pass, which checks
  // that a growable array object must be on the C heap if its
  // elements are.
  //
  // Note: the containing object is allocated on the C heap since it
  // is a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}
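
// A minimal sketch of the comma-expression idiom used above, with a
// hypothetical CHeapObj subclass (Example and _arr are illustrative
// names, not part of HotSpot). The left operand of the comma runs
// purely for its side effect; the right operand becomes the actual
// constructor argument.
#if 0
class Example : public CHeapObj {
  GrowableArray<int> _arr;
public:
  Example() :
    // First mark _arr's storage as C-heap allocated, then yield 100
    // as the initial-capacity argument; "true" requests C-heap
    // allocation for the element array as well.
    _arr((ResourceObj::set_allocation_type((address)&_arr,
                                           ResourceObj::C_HEAP), 100),
         true) {}
};
#endif // 0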

// Private methods.

HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //   For all i in [first, cur):
    //        _regions.at(i)->is_empty()
    //     && _regions.at(i) is contiguous with its predecessor, if any
    //     && sumSizes is the sum of the sizes of the regions in the
    //        interval [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;

    // We need to initialize the region(s) we just discovered. This is
    // a bit tricky given that it can happen concurrently with
    // refinement threads refining cards on these regions and
    // potentially wanting to refine the BOT as they are scanning
    // those cards (this can happen shortly after a cleanup; see CR
    // 6991377). So we have to set up the region(s) carefully and in
    // a specific order.

    // Currently, allocs_are_zero_filled() returns false. The zero
    // filling infrastructure will be going away soon (see CR 6977804).
    // So no need to do anything else here.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    assert(!zf, "not supported");

    // This will be the "starts humongous" region.
    HeapRegion* first_hr = _regions.at(first);
    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      first_hr->set_zero_fill_allocated();
    }
    // The header of the new object will be placed at the bottom of
    // the first region.
    HeapWord* new_obj = first_hr->bottom();
    // This will be the new end of the first region in the series that
    // should also match the end of the last region in the series.
    // (Note: sumSizes = "region size" x "number of regions we found").
    HeapWord* new_end = new_obj + sumSizes;
    // This will be the new top of the first region that will reflect
    // this allocation.
    HeapWord* new_top = new_obj + word_size;

    // First, we need to zero the header of the space that we will be
    // allocating. When we update top further down, some refinement
    // threads might try to scan the region. By zeroing the header we
    // ensure that any thread that will try to scan the region will
    // come across the zero klass word and bail out.
    //
    // NOTE: It would not have been correct to have used
    // CollectedHeap::fill_with_object() and make the space look like
    // an int array. The thread that is doing the allocation will
    // later update the object header to a potentially different array
    // type and, for a very short period of time, the klass and length
    // fields will be inconsistent. This could cause a refinement
    // thread to calculate the object size incorrectly.
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

    // We will set up the first region as "starts humongous". This
    // will also update the BOT covering all the regions to reflect
    // that there is a single object that starts at the bottom of the
    // first region.
    first_hr->set_startsHumongous(new_end);

    // Then, if there are any, we will set up the "continues
    // humongous" regions.
    HeapRegion* hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      hr->set_continuesHumongous(first_hr);
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end.
    assert(hr == NULL || hr->end() == new_end, "sanity");

    // Up to this point no concurrent thread would have been able to
    // do any scanning on any region in this series. All the top
    // fields still point to bottom, so the intersection between
    // [bottom,top] and [card_start,card_end] will be empty. Before we
    // update the top fields, we'll do a storestore to make sure that
    // no thread sees the update to top before the zeroing of the
    // object header and the BOT initialization.
    OrderAccess::storestore();

    // Now that the BOT and the object header have been initialized,
    // we can update top of the "starts humongous" region.
    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
           "new_top should be in this region");
    first_hr->set_top(new_top);

    // Now, we will update the top fields of the "continues humongous"
    // regions. Otherwise, these regions would look empty, which would
    // confuse parts of G1. For example, the code that looks for a
    // consecutive run of empty regions would consider them empty and
    // try to re-allocate them. We could extend is_empty() to also
    // check !continuesHumongous(), but it is easier to just update
    // the top fields here.
    hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      if ((i + 1) == cur) {
        // last continues humongous region
        assert(hr->bottom() < new_top && new_top <= hr->end(),
               "new_top should fall on this region");
        hr->set_top(new_top);
      } else {
        // not last one
        assert(new_top > hr->end(), "new_top should be above this region");
        hr->set_top(hr->end());
      }
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end and its top should
    // match new_top.
    assert(hr == NULL ||
           (hr->end() == new_end && hr->top() == new_top), "sanity");

    return new_obj;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}

void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start = 0; // only read after being set when empty_run > 0
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
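
// Sample output (illustrative, not captured from a real run):
//   " 5:3 12:1 [tot = 4]"
// i.e., a run of 3 empty regions starting at index 5, a run of 1
// starting at index 12, and 4 empty regions in total.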

int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}


// Public methods.

void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}

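// free_suffix() returns the length of the run of contiguous empty
// regions at the end of the sequence. Illustrative example (not from
// the source): if the sequence ends [used][empty][empty][empty] and
// each empty region starts where its predecessor ends, the result is 3.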
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}

HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}
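
// A sketch of how a caller might use obj_allocate() (hypothetical
// variable names; the real call sites are in G1CollectedHeap):
#if 0
HeapWord* addr = hrs->obj_allocate(word_size);
if (addr == NULL) {
  // No run of contiguous empty regions was large enough to hold a
  // humongous object of word_size words.
}
#endif // 0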

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence. In all
// other cases a linear search is required to find the index of r.
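
// Illustration only: a hypothetical closure (not part of this file)
// that stops at the first non-empty region. Passing the region it
// stopped at back into iterate_from() hits the fast path described
// above.
#if 0
class FirstNonEmptyClosure : public HeapRegionClosure {
  HeapRegion* _found;
public:
  FirstNonEmptyClosure() : _found(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_empty()) {
      _found = r;
      return true; // returning true terminates the iteration early
    }
    return false;
  }
  HeapRegion* found() { return _found; }
};
#endif // 0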

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
#endif
      j = find(r);
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}

void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region. We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}


class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}