/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
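// As an illustration (assuming a 64-bit VM with a two-word object header
// and 8-byte object alignment, i.e. MinObjAlignment == 1 word): an int
// array header is the two header words plus a 4-byte length field, so
// arrayOopDesc::header_size(T_INT) rounds up to 3 words and
// FillerHeaderSize == align_object_size(3) == 3. Since
// oopDesc::header_size() == 2 > MinObjAlignment, AlignmentReserve == 3,
// which is exactly the room needed for a minimal int-array filler.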
size_t ParGCAllocBuffer::AlignmentReserve;

void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained, shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}

void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
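// Illustrative walk-through (the numbers are made up, not defaults): with
// _allocated == 100000 words, _unused == 10000 and _wasted == 2000,
// wasted_frac == 0.1; if TargetSurvivorRatio == 50 and
// TargetPLABWastePct == 10, then target_refills ==
// (size_t)(0.1 * 50 / 10) == 0, clipped up to 1. Then _used == 88000
// and, with 4 GC workers, the raw sample is 88000 / (1 * 4) == 22000
// words, which is smoothed through _filter, clipped to
// [min_size(), max_size()] and aligned before being latched in
// _desired_plab_sz.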
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: " SIZE_FORMAT ", "
                   "_wasted: " SIZE_FORMAT ", "
                   "_unused: " SIZE_FORMAT ", "
                   "_used : " SIZE_FORMAT,
                   _allocated, _wasted, _unused, _used));

    _allocated = 1;
  }
  double wasted_frac = (double)_unused / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) /
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used / (target_refills * no_of_gc_workers);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  _desired_plab_sz = plab_sz;
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted = 0;
  _unused = 0;
}

#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p "
                      "_retained: %c _retained_filler: [%p,%p)\n",
                      _bottom, _top, _end, _hard_end,
                      "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
       ((size_t)Generation::GenGrain) / HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
       (size_t)Generation::GenGrain);

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
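// Rough picture of the buffer fields (illustrative only):
//
//   _bottom       _top          _end        _hard_end       _true_end
//      |<--filled->|<---free---->|<-reserve->|<--unexposed--->|
//
// _end trails _hard_end by AlignmentReserve words so that a filler
// object always fits when the buffer is retired; _hard_end in turn may
// trail _true_end, and allocate_slow() below exposes the remainder one
// ChunkSizeInWords chunk at a time.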
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}

HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top = _retained_filler.end();
    _hard_end = next_hard_end;
    _end = _hard_end - AlignmentReserve;
    res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support. So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support. So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    // 1) we had a filler object from the original top to hard_end.
    // 2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object. When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration. So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries. One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end - 1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}