Mon, 26 Jan 2009 12:47:21 -0800
6786503: Overflow list performance can be improved
Summary: Avoid overflow list walk in CMS & ParNew when it is unnecessary. Fix a couple of correctness issues, including a C-heap leak, in ParNew at the intersection of promotion failure, work queue overflow and object array chunking. Add stress testing option and related assertion checking.
Reviewed-by: jmasa
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_space.cpp.incl" |
duke@435 | 27 | |
coleenp@548 | 28 | void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } |
coleenp@548 | 29 | void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } |
coleenp@548 | 30 | |
duke@435 | 31 | HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, |
duke@435 | 32 | HeapWord* top_obj) { |
duke@435 | 33 | if (top_obj != NULL) { |
duke@435 | 34 | if (_sp->block_is_obj(top_obj)) { |
duke@435 | 35 | if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { |
duke@435 | 36 | if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { |
duke@435 | 37 | // An arrayOop is starting on the dirty card - since we do exact |
duke@435 | 38 | // store checks for objArrays we are done. |
duke@435 | 39 | } else { |
duke@435 | 40 | // Otherwise, it is possible that the object starting on the dirty |
duke@435 | 41 | // card spans the entire card, and that the store happened on a |
duke@435 | 42 | // later card. Figure out where the object ends. |
duke@435 | 43 | // Use the block_size() method of the space over which |
duke@435 | 44 | // the iteration is being done. That space (e.g. CMS) may have |
duke@435 | 45 | // specific requirements on object sizes which will |
duke@435 | 46 | // be reflected in the block_size() method. |
duke@435 | 47 | top = top_obj + oop(top_obj)->size(); |
duke@435 | 48 | } |
duke@435 | 49 | } |
duke@435 | 50 | } else { |
duke@435 | 51 | top = top_obj; |
duke@435 | 52 | } |
duke@435 | 53 | } else { |
duke@435 | 54 | assert(top == _sp->end(), "only case where top_obj == NULL"); |
duke@435 | 55 | } |
duke@435 | 56 | return top; |
duke@435 | 57 | } |
duke@435 | 58 | |
duke@435 | 59 | void DirtyCardToOopClosure::walk_mem_region(MemRegion mr, |
duke@435 | 60 | HeapWord* bottom, |
duke@435 | 61 | HeapWord* top) { |
duke@435 | 62 | // 1. Blocks may or may not be objects. |
duke@435 | 63 | // 2. Even when a block_is_obj(), it may not entirely |
duke@435 | 64 | // occupy the block if the block quantum is larger than |
duke@435 | 65 | // the object size. |
duke@435 | 66 | // We can and should try to optimize by calling the non-MemRegion |
duke@435 | 67 | // version of oop_iterate() for all but the extremal objects |
duke@435 | 68 | // (for which we need to call the MemRegion version of |
duke@435 | 69 | // oop_iterate()). To be done post-beta. XXX |
duke@435 | 70 | for (; bottom < top; bottom += _sp->block_size(bottom)) { |
duke@435 | 71 | // As in the case of contiguous space above, we'd like to |
duke@435 | 72 | // just use the value returned by oop_iterate to increment the |
duke@435 | 73 | // current pointer; unfortunately, that won't work in CMS because |
duke@435 | 74 | // we'd need an interface change (it seems) to have the space |
duke@435 | 75 | // "adjust the object size" (for instance pad it up to its |
duke@435 | 76 | // block alignment or minimum block size restrictions). XXX |
duke@435 | 77 | if (_sp->block_is_obj(bottom) && |
duke@435 | 78 | !_sp->obj_allocated_since_save_marks(oop(bottom))) { |
duke@435 | 79 | oop(bottom)->oop_iterate(_cl, mr); |
duke@435 | 80 | } |
duke@435 | 81 | } |
duke@435 | 82 | } |
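// --- Editor's sketch -------------------------------------------------------
// A minimal standalone model of the block walk above (not HotSpot code): the
// "heap" is an array of words where each block stores its size in word 0 and
// an is-object flag in word 1. block_size()/block_is_obj() here are toy
// stand-ins for the Space methods of the same names.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

static size_t block_size(const size_t* heap, size_t p)   { return heap[p]; }
static bool   block_is_obj(const size_t* heap, size_t p) { return heap[p + 1] != 0; }

int main() {
  // Three blocks of sizes 4, 3 and 5 words; the middle block is free space.
  size_t heap[12] = {4,1,0,0, 3,0,0, 5,1,0,0,0};
  for (size_t bottom = 0, top = 12; bottom < top;
       bottom += block_size(heap, bottom)) {
    if (block_is_obj(heap, bottom)) {
      printf("visit object block at %zu (size %zu)\n",
             bottom, block_size(heap, bottom));
    }
  }
  return 0;
}
#endif
// ---------------------------------------------------------------------------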
duke@435 | 83 | |
duke@435 | 84 | void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { |
duke@435 | 85 | |
duke@435 | 86 | // Some collectors need to do special things whenever their dirty |
duke@435 | 87 | // cards are processed. For instance, CMS must remember mutator updates |
duke@435 | 88 | // (i.e. dirty cards) so as to re-scan mutated objects. |
duke@435 | 89 | // Such work can be piggy-backed here on dirty card scanning, so as to make |
duke@435 | 90 | // it slightly more efficient than doing a complete non-destructive pre-scan |
duke@435 | 91 | // of the card table. |
duke@435 | 92 | MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure(); |
duke@435 | 93 | if (pCl != NULL) { |
duke@435 | 94 | pCl->do_MemRegion(mr); |
duke@435 | 95 | } |
duke@435 | 96 | |
duke@435 | 97 | HeapWord* bottom = mr.start(); |
duke@435 | 98 | HeapWord* last = mr.last(); |
duke@435 | 99 | HeapWord* top = mr.end(); |
duke@435 | 100 | HeapWord* bottom_obj; |
duke@435 | 101 | HeapWord* top_obj; |
duke@435 | 102 | |
duke@435 | 103 | assert(_precision == CardTableModRefBS::ObjHeadPreciseArray || |
duke@435 | 104 | _precision == CardTableModRefBS::Precise, |
duke@435 | 105 | "Only ones we deal with for now."); |
duke@435 | 106 | |
duke@435 | 107 | assert(_precision != CardTableModRefBS::ObjHeadPreciseArray || |
ysr@777 | 108 | _cl->idempotent() || _last_bottom == NULL || |
duke@435 | 109 | top <= _last_bottom, |
duke@435 | 110 | "Not decreasing"); |
duke@435 | 111 | NOT_PRODUCT(_last_bottom = mr.start()); |
duke@435 | 112 | |
duke@435 | 113 | bottom_obj = _sp->block_start(bottom); |
duke@435 | 114 | top_obj = _sp->block_start(last); |
duke@435 | 115 | |
duke@435 | 116 | assert(bottom_obj <= bottom, "just checking"); |
duke@435 | 117 | assert(top_obj <= top, "just checking"); |
duke@435 | 118 | |
duke@435 | 119 | // Given what we think is the top of the memory region and |
duke@435 | 120 | // the start of the object at the top, get the actual |
duke@435 | 121 | // value of the top. |
duke@435 | 122 | top = get_actual_top(top, top_obj); |
duke@435 | 123 | |
duke@435 | 124 | // If the previous call did some part of this region, don't redo. |
duke@435 | 125 | if (_precision == CardTableModRefBS::ObjHeadPreciseArray && |
duke@435 | 126 | _min_done != NULL && |
duke@435 | 127 | _min_done < top) { |
duke@435 | 128 | top = _min_done; |
duke@435 | 129 | } |
duke@435 | 130 | |
duke@435 | 131 | // Top may have been reset, and in fact may be below bottom, |
duke@435 | 132 | // e.g. the dirty card region is entirely in a now free object |
duke@435 | 133 | // -- something that could happen with a concurrent sweeper. |
duke@435 | 134 | bottom = MIN2(bottom, top); |
duke@435 | 135 | mr = MemRegion(bottom, top); |
duke@435 | 136 | assert(bottom <= top && |
duke@435 | 137 | (_precision != CardTableModRefBS::ObjHeadPreciseArray || |
duke@435 | 138 | _min_done == NULL || |
duke@435 | 139 | top <= _min_done), |
duke@435 | 140 | "overlap!"); |
duke@435 | 141 | |
duke@435 | 142 | // Walk the region if it is not empty; otherwise there is nothing to do. |
duke@435 | 143 | if (!mr.is_empty()) { |
duke@435 | 144 | walk_mem_region(mr, bottom_obj, top); |
duke@435 | 145 | } |
duke@435 | 146 | |
ysr@777 | 147 | // An idempotent closure might be applied in any order, so we don't |
ysr@777 | 148 | // record a _min_done for it. |
ysr@777 | 149 | if (!_cl->idempotent()) { |
ysr@777 | 150 | _min_done = bottom; |
ysr@777 | 151 | } else { |
ysr@777 | 152 | assert(_min_done == _last_explicit_min_done, |
ysr@777 | 153 | "Don't update _min_done for idempotent cl"); |
ysr@777 | 154 | } |
duke@435 | 155 | } |
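// --- Editor's sketch -------------------------------------------------------
// A standalone model (not HotSpot code) of the _min_done clipping above.
// Dirty cards are processed in decreasing address order; min_done remembers
// the lowest address already walked, so when one object spans several dirty
// cards the later (lower) card does not rescan the part already done. The
// fixed object end of 24 and the card bottoms are illustrative assumptions.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

int main() {
  const size_t kNone = (size_t)-1;
  size_t min_done = kNone;
  size_t card_bottoms[2] = {16, 8};   // two dirty cards, highest first
  for (int i = 0; i < 2; i++) {
    size_t bottom = card_bottoms[i];
    size_t top = 24;                  // the spanning object ends at 24
    if (min_done != kNone && min_done < top) top = min_done;  // don't redo
    if (bottom < top) printf("walk [%zu, %zu)\n", bottom, top);
    min_done = bottom;
  }
  return 0;                           // prints: walk [16, 24) then walk [8, 16)
}
#endif
// ---------------------------------------------------------------------------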
duke@435 | 156 | |
duke@435 | 157 | DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl, |
duke@435 | 158 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 159 | HeapWord* boundary) { |
duke@435 | 160 | return new DirtyCardToOopClosure(this, cl, precision, boundary); |
duke@435 | 161 | } |
duke@435 | 162 | |
duke@435 | 163 | HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, |
duke@435 | 164 | HeapWord* top_obj) { |
duke@435 | 165 | if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { |
duke@435 | 166 | if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { |
duke@435 | 167 | if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { |
duke@435 | 168 | // An arrayOop is starting on the dirty card - since we do exact |
duke@435 | 169 | // store checks for objArrays we are done. |
duke@435 | 170 | } else { |
duke@435 | 171 | // Otherwise, it is possible that the object starting on the dirty |
duke@435 | 172 | // card spans the entire card, and that the store happened on a |
duke@435 | 173 | // later card. Figure out where the object ends. |
duke@435 | 174 | assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(), |
duke@435 | 175 | "Block size and object size mismatch"); |
duke@435 | 176 | top = top_obj + oop(top_obj)->size(); |
duke@435 | 177 | } |
duke@435 | 178 | } |
duke@435 | 179 | } else { |
duke@435 | 180 | top = (_sp->toContiguousSpace())->top(); |
duke@435 | 181 | } |
duke@435 | 182 | return top; |
duke@435 | 183 | } |
duke@435 | 184 | |
duke@435 | 185 | void Filtering_DCTOC::walk_mem_region(MemRegion mr, |
duke@435 | 186 | HeapWord* bottom, |
duke@435 | 187 | HeapWord* top) { |
duke@435 | 188 | // Note that this assumption won't hold if we have a concurrent |
duke@435 | 189 | // collector in this space, which may have freed up objects after |
duke@435 | 190 | // they were dirtied and before the stop-the-world GC that is |
duke@435 | 191 | // examining cards here. |
duke@435 | 192 | assert(bottom < top, "ought to be at least one obj on a dirty card."); |
duke@435 | 193 | |
duke@435 | 194 | if (_boundary != NULL) { |
duke@435 | 195 | // We have a boundary outside of which we don't want to look |
duke@435 | 196 | // at objects, so create a filtering closure around the |
duke@435 | 197 | // oop closure before walking the region. |
duke@435 | 198 | FilteringClosure filter(_boundary, _cl); |
duke@435 | 199 | walk_mem_region_with_cl(mr, bottom, top, &filter); |
duke@435 | 200 | } else { |
duke@435 | 201 | // No boundary, simply walk the heap with the oop closure. |
duke@435 | 202 | walk_mem_region_with_cl(mr, bottom, top, _cl); |
duke@435 | 203 | } |
duke@435 | 204 | |
duke@435 | 205 | } |
duke@435 | 206 | |
duke@435 | 207 | // We must replicate this so that the static type of "FilteringClosure" |
duke@435 | 208 | // (see above) is apparent at the oop_iterate calls. |
duke@435 | 209 | #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ |
duke@435 | 210 | void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ |
duke@435 | 211 | HeapWord* bottom, \ |
duke@435 | 212 | HeapWord* top, \ |
duke@435 | 213 | ClosureType* cl) { \ |
duke@435 | 214 | bottom += oop(bottom)->oop_iterate(cl, mr); \ |
duke@435 | 215 | if (bottom < top) { \ |
duke@435 | 216 | HeapWord* next_obj = bottom + oop(bottom)->size(); \ |
duke@435 | 217 | while (next_obj < top) { \ |
duke@435 | 218 | /* Bottom lies entirely below top, so we can call the */ \ |
duke@435 | 219 | /* non-memRegion version of oop_iterate below. */ \ |
duke@435 | 220 | oop(bottom)->oop_iterate(cl); \ |
duke@435 | 221 | bottom = next_obj; \ |
duke@435 | 222 | next_obj = bottom + oop(bottom)->size(); \ |
duke@435 | 223 | } \ |
duke@435 | 224 | /* Last object. */ \ |
duke@435 | 225 | oop(bottom)->oop_iterate(cl, mr); \ |
duke@435 | 226 | } \ |
duke@435 | 227 | } |
duke@435 | 228 | |
duke@435 | 229 | // (There are only two of these, rather than N, because the split is due |
duke@435 | 230 | // only to the introduction of the FilteringClosure, a local part of the |
duke@435 | 231 | // impl of this abstraction.) |
duke@435 | 232 | ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure) |
duke@435 | 233 | ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) |
duke@435 | 234 | |
duke@435 | 235 | DirtyCardToOopClosure* |
duke@435 | 236 | ContiguousSpace::new_dcto_cl(OopClosure* cl, |
duke@435 | 237 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 238 | HeapWord* boundary) { |
duke@435 | 239 | return new ContiguousSpaceDCTOC(this, cl, precision, boundary); |
duke@435 | 240 | } |
duke@435 | 241 | |
jmasa@698 | 242 | void Space::initialize(MemRegion mr, |
jmasa@698 | 243 | bool clear_space, |
jmasa@698 | 244 | bool mangle_space) { |
duke@435 | 245 | HeapWord* bottom = mr.start(); |
duke@435 | 246 | HeapWord* end = mr.end(); |
duke@435 | 247 | assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), |
duke@435 | 248 | "invalid space boundaries"); |
duke@435 | 249 | set_bottom(bottom); |
duke@435 | 250 | set_end(end); |
jmasa@698 | 251 | if (clear_space) clear(mangle_space); |
duke@435 | 252 | } |
duke@435 | 253 | |
jmasa@698 | 254 | void Space::clear(bool mangle_space) { |
jmasa@698 | 255 | if (ZapUnusedHeapArea && mangle_space) { |
jmasa@698 | 256 | mangle_unused_area(); |
jmasa@698 | 257 | } |
duke@435 | 258 | } |
duke@435 | 259 | |
tonyp@791 | 260 | ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL), |
tonyp@791 | 261 | _concurrent_iteration_safe_limit(NULL) { |
jmasa@698 | 262 | _mangler = new GenSpaceMangler(this); |
jmasa@698 | 263 | } |
jmasa@698 | 264 | |
jmasa@698 | 265 | ContiguousSpace::~ContiguousSpace() { |
jmasa@698 | 266 | delete _mangler; |
jmasa@698 | 267 | } |
jmasa@698 | 268 | |
jmasa@698 | 269 | void ContiguousSpace::initialize(MemRegion mr, |
jmasa@698 | 270 | bool clear_space, |
jmasa@698 | 271 | bool mangle_space) |
duke@435 | 272 | { |
jmasa@698 | 273 | CompactibleSpace::initialize(mr, clear_space, mangle_space); |
ysr@782 | 274 | set_concurrent_iteration_safe_limit(top()); |
duke@435 | 275 | } |
duke@435 | 276 | |
jmasa@698 | 277 | void ContiguousSpace::clear(bool mangle_space) { |
duke@435 | 278 | set_top(bottom()); |
duke@435 | 279 | set_saved_mark(); |
tonyp@791 | 280 | CompactibleSpace::clear(mangle_space); |
duke@435 | 281 | } |
duke@435 | 282 | |
duke@435 | 283 | bool Space::is_in(const void* p) const { |
ysr@777 | 284 | HeapWord* b = block_start_const(p); |
duke@435 | 285 | return b != NULL && block_is_obj(b); |
duke@435 | 286 | } |
duke@435 | 287 | |
duke@435 | 288 | bool ContiguousSpace::is_in(const void* p) const { |
duke@435 | 289 | return _bottom <= p && p < _top; |
duke@435 | 290 | } |
duke@435 | 291 | |
duke@435 | 292 | bool ContiguousSpace::is_free_block(const HeapWord* p) const { |
duke@435 | 293 | return p >= _top; |
duke@435 | 294 | } |
duke@435 | 295 | |
jmasa@698 | 296 | void OffsetTableContigSpace::clear(bool mangle_space) { |
jmasa@698 | 297 | ContiguousSpace::clear(mangle_space); |
duke@435 | 298 | _offsets.initialize_threshold(); |
duke@435 | 299 | } |
duke@435 | 300 | |
duke@435 | 301 | void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { |
duke@435 | 302 | Space::set_bottom(new_bottom); |
duke@435 | 303 | _offsets.set_bottom(new_bottom); |
duke@435 | 304 | } |
duke@435 | 305 | |
duke@435 | 306 | void OffsetTableContigSpace::set_end(HeapWord* new_end) { |
duke@435 | 307 | // Space should not advertise an increase in size |
duke@435 | 308 | // until after the underlying offset table has been enlarged. |
duke@435 | 309 | _offsets.resize(pointer_delta(new_end, bottom())); |
duke@435 | 310 | Space::set_end(new_end); |
duke@435 | 311 | } |
duke@435 | 312 | |
jmasa@698 | 313 | #ifndef PRODUCT |
jmasa@698 | 314 | |
jmasa@698 | 315 | void ContiguousSpace::set_top_for_allocations(HeapWord* v) { |
jmasa@698 | 316 | mangler()->set_top_for_allocations(v); |
jmasa@698 | 317 | } |
jmasa@698 | 318 | void ContiguousSpace::set_top_for_allocations() { |
jmasa@698 | 319 | mangler()->set_top_for_allocations(top()); |
jmasa@698 | 320 | } |
jmasa@698 | 321 | void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) { |
jmasa@698 | 322 | mangler()->check_mangled_unused_area(limit); |
duke@435 | 323 | } |
duke@435 | 324 | |
jmasa@698 | 325 | void ContiguousSpace::check_mangled_unused_area_complete() { |
jmasa@698 | 326 | mangler()->check_mangled_unused_area_complete(); |
duke@435 | 327 | } |
duke@435 | 328 | |
jmasa@698 | 329 | // Mangle only the unused space that has not previously |
jmasa@698 | 330 | // been mangled and that has not been allocated since being |
jmasa@698 | 331 | // mangled. |
jmasa@698 | 332 | void ContiguousSpace::mangle_unused_area() { |
jmasa@698 | 333 | mangler()->mangle_unused_area(); |
jmasa@698 | 334 | } |
jmasa@698 | 335 | void ContiguousSpace::mangle_unused_area_complete() { |
jmasa@698 | 336 | mangler()->mangle_unused_area_complete(); |
jmasa@698 | 337 | } |
jmasa@698 | 338 | void ContiguousSpace::mangle_region(MemRegion mr) { |
jmasa@698 | 339 | // Although this method uses SpaceMangler::mangle_region() which |
jmasa@698 | 340 | // is not specific to a space, when the ContiguousSpace version |
jmasa@698 | 341 | // is called, it is always with regard to a space and this |
jmasa@698 | 342 | // bounds checking is appropriate. |
jmasa@698 | 343 | MemRegion space_mr(bottom(), end()); |
jmasa@698 | 344 | assert(space_mr.contains(mr), "Mangling outside space"); |
jmasa@698 | 345 | SpaceMangler::mangle_region(mr); |
jmasa@698 | 346 | } |
jmasa@698 | 347 | #endif // NOT_PRODUCT |
jmasa@698 | 348 | |
jmasa@698 | 349 | void CompactibleSpace::initialize(MemRegion mr, |
jmasa@698 | 350 | bool clear_space, |
jmasa@698 | 351 | bool mangle_space) { |
jmasa@698 | 352 | Space::initialize(mr, clear_space, mangle_space); |
tonyp@791 | 353 | set_compaction_top(bottom()); |
tonyp@791 | 354 | _next_compaction_space = NULL; |
tonyp@791 | 355 | } |
tonyp@791 | 356 | |
tonyp@791 | 357 | void CompactibleSpace::clear(bool mangle_space) { |
tonyp@791 | 358 | Space::clear(mangle_space); |
duke@435 | 359 | _compaction_top = bottom(); |
duke@435 | 360 | } |
duke@435 | 361 | |
duke@435 | 362 | HeapWord* CompactibleSpace::forward(oop q, size_t size, |
duke@435 | 363 | CompactPoint* cp, HeapWord* compact_top) { |
duke@435 | 364 | // q is alive |
duke@435 | 365 | // First check if we should switch compaction space |
duke@435 | 366 | assert(this == cp->space, "'this' should be current compaction space."); |
duke@435 | 367 | size_t compaction_max_size = pointer_delta(end(), compact_top); |
duke@435 | 368 | while (size > compaction_max_size) { |
duke@435 | 369 | // switch to next compaction space |
duke@435 | 370 | cp->space->set_compaction_top(compact_top); |
duke@435 | 371 | cp->space = cp->space->next_compaction_space(); |
duke@435 | 372 | if (cp->space == NULL) { |
duke@435 | 373 | cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); |
duke@435 | 374 | assert(cp->gen != NULL, "compaction must succeed"); |
duke@435 | 375 | cp->space = cp->gen->first_compaction_space(); |
duke@435 | 376 | assert(cp->space != NULL, "generation must have a first compaction space"); |
duke@435 | 377 | } |
duke@435 | 378 | compact_top = cp->space->bottom(); |
duke@435 | 379 | cp->space->set_compaction_top(compact_top); |
duke@435 | 380 | cp->threshold = cp->space->initialize_threshold(); |
duke@435 | 381 | compaction_max_size = pointer_delta(cp->space->end(), compact_top); |
duke@435 | 382 | } |
duke@435 | 383 | |
duke@435 | 384 | // store the forwarding pointer into the mark word |
duke@435 | 385 | if ((HeapWord*)q != compact_top) { |
duke@435 | 386 | q->forward_to(oop(compact_top)); |
duke@435 | 387 | assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); |
duke@435 | 388 | } else { |
duke@435 | 389 | // if the object isn't moving we can just set the mark to the default |
duke@435 | 390 | // mark and handle it specially later on. |
duke@435 | 391 | q->init_mark(); |
duke@435 | 392 | assert(q->forwardee() == NULL, "should be forwarded to NULL"); |
duke@435 | 393 | } |
duke@435 | 394 | |
coleenp@548 | 395 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size)); |
duke@435 | 396 | compact_top += size; |
duke@435 | 397 | |
duke@435 | 398 | // we need to update the offset table so that the beginnings of objects can be |
duke@435 | 399 | // found during scavenge. Note that we are updating the offset table based on |
duke@435 | 400 | // where the object will be once the compaction phase finishes. |
duke@435 | 401 | if (compact_top > cp->threshold) |
duke@435 | 402 | cp->threshold = |
duke@435 | 403 | cp->space->cross_threshold(compact_top - size, compact_top); |
duke@435 | 404 | return compact_top; |
duke@435 | 405 | } |
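// --- Editor's sketch -------------------------------------------------------
// A standalone model (not HotSpot code) of the forwarding step above: each
// live object is forwarded to the current compaction top, which then advances
// by the object's size. An object whose forwardee equals its own address is
// the "isn't moving" case handled specially via init_mark() above.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

struct ToyObj { size_t addr, size, forwardee; };

int main() {
  ToyObj live[3] = {{0, 4, 0}, {10, 3, 0}, {20, 5, 0}};  // dead gaps between
  size_t compact_top = 0;
  for (int i = 0; i < 3; i++) {
    live[i].forwardee = compact_top;     // q->forward_to(oop(compact_top))
    compact_top += live[i].size;
  }
  for (int i = 0; i < 3; i++)
    printf("obj@%zu -> %zu%s\n", live[i].addr, live[i].forwardee,
           live[i].addr == live[i].forwardee ? " (does not move)" : "");
  return 0;
}
#endif
// ---------------------------------------------------------------------------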
duke@435 | 406 | |
duke@435 | 407 | |
duke@435 | 408 | bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, |
duke@435 | 409 | HeapWord* q, size_t deadlength) { |
duke@435 | 410 | if (allowed_deadspace_words >= deadlength) { |
duke@435 | 411 | allowed_deadspace_words -= deadlength; |
jcoomes@916 | 412 | CollectedHeap::fill_with_object(q, deadlength); |
jcoomes@916 | 413 | oop(q)->set_mark(oop(q)->mark()->set_marked()); |
jcoomes@916 | 414 | assert((int) deadlength == oop(q)->size(), "bad filler object size"); |
duke@435 | 415 | // Recall that we required "q == compaction_top". |
duke@435 | 416 | return true; |
duke@435 | 417 | } else { |
duke@435 | 418 | allowed_deadspace_words = 0; |
duke@435 | 419 | return false; |
duke@435 | 420 | } |
duke@435 | 421 | } |
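// --- Editor's sketch -------------------------------------------------------
// A standalone model (not HotSpot code) of the dead-space budget above: short
// runs of dead words are kept in place (filled and marked) while the allowed
// budget lasts; the first run that exceeds the remaining budget zeroes it, so
// all subsequent dead space is compacted away. The run sizes are illustrative.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

static bool insert_deadspace(size_t& allowed, size_t deadlength) {
  if (allowed >= deadlength) { allowed -= deadlength; return true; }
  allowed = 0;
  return false;
}

int main() {
  size_t allowed = 10;
  size_t runs[4] = {3, 4, 5, 2};
  for (int i = 0; i < 4; i++) {
    bool kept = insert_deadspace(allowed, runs[i]);
    printf("dead run of %zu words -> %s (budget left %zu)\n",
           runs[i], kept ? "kept" : "compacted", allowed);
  }
  return 0;
}
#endif
// ---------------------------------------------------------------------------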
duke@435 | 422 | |
duke@435 | 423 | #define block_is_always_obj(q) true |
duke@435 | 424 | #define obj_size(q) oop(q)->size() |
duke@435 | 425 | #define adjust_obj_size(s) s |
duke@435 | 426 | |
duke@435 | 427 | void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { |
duke@435 | 428 | SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); |
duke@435 | 429 | } |
duke@435 | 430 | |
duke@435 | 431 | // Faster object search. |
duke@435 | 432 | void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { |
duke@435 | 433 | SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); |
duke@435 | 434 | } |
duke@435 | 435 | |
duke@435 | 436 | void Space::adjust_pointers() { |
duke@435 | 437 | // adjust all the interior pointers to point at the new locations of objects |
duke@435 | 438 | // Used by MarkSweep::mark_sweep_phase3() |
duke@435 | 439 | |
duke@435 | 440 | // First check to see if there is any work to be done. |
duke@435 | 441 | if (used() == 0) { |
duke@435 | 442 | return; // Nothing to do. |
duke@435 | 443 | } |
duke@435 | 444 | |
duke@435 | 445 | // Otherwise... |
duke@435 | 446 | HeapWord* q = bottom(); |
duke@435 | 447 | HeapWord* t = end(); |
duke@435 | 448 | |
duke@435 | 449 | debug_only(HeapWord* prev_q = NULL); |
duke@435 | 450 | while (q < t) { |
duke@435 | 451 | if (oop(q)->is_gc_marked()) { |
duke@435 | 452 | // q is alive |
duke@435 | 453 | |
coleenp@548 | 454 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); |
duke@435 | 455 | // point all the oops to the new location |
duke@435 | 456 | size_t size = oop(q)->adjust_pointers(); |
coleenp@548 | 457 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); |
duke@435 | 458 | |
duke@435 | 459 | debug_only(prev_q = q); |
coleenp@548 | 460 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); |
duke@435 | 461 | |
duke@435 | 462 | q += size; |
duke@435 | 463 | } else { |
duke@435 | 464 | // q is not a live object. But we're not in a compactible space, |
duke@435 | 465 | // so we don't have live ranges. |
duke@435 | 466 | debug_only(prev_q = q); |
duke@435 | 467 | q += block_size(q); |
duke@435 | 468 | assert(q > prev_q, "we should be moving forward through memory"); |
duke@435 | 469 | } |
duke@435 | 470 | } |
duke@435 | 471 | assert(q == t, "just checking"); |
duke@435 | 472 | } |
duke@435 | 473 | |
duke@435 | 474 | void CompactibleSpace::adjust_pointers() { |
duke@435 | 475 | // Check first if there is any work to do. |
duke@435 | 476 | if (used() == 0) { |
duke@435 | 477 | return; // Nothing to do. |
duke@435 | 478 | } |
duke@435 | 479 | |
duke@435 | 480 | SCAN_AND_ADJUST_POINTERS(adjust_obj_size); |
duke@435 | 481 | } |
duke@435 | 482 | |
duke@435 | 483 | void CompactibleSpace::compact() { |
duke@435 | 484 | SCAN_AND_COMPACT(obj_size); |
duke@435 | 485 | } |
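// --- Editor's sketch -------------------------------------------------------
// The three SCAN_AND_* macros above implement the classic three passes of a
// sliding mark-compact. A standalone model (not HotSpot code): 'ref' holds an
// index into the heap vector, standing in for an interior oop.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <vector>
#include <cstddef>

struct Obj { size_t addr, size; bool live; int ref; size_t fwd; };

int main() {
  std::vector<Obj> h = {{0,4,true,2,0}, {4,3,false,-1,0}, {7,5,true,0,0}};
  // Pass 1 (prepare_for_compaction): compute forwarding addresses.
  size_t top = 0;
  for (auto& o : h) if (o.live) { o.fwd = top; top += o.size; }
  // Pass 2 (adjust_pointers): retarget interior pointers to forwardees.
  for (auto& o : h)
    if (o.live && o.ref >= 0)
      printf("obj@%zu: pointer now targets %zu\n", o.addr, h[o.ref].fwd);
  // Pass 3 (compact): slide object bodies to their forwarding addresses.
  for (auto& o : h) if (o.live) o.addr = o.fwd;
  for (auto& o : h) if (o.live) printf("live obj at %zu\n", o.addr);
  return 0;
}
#endif
// ---------------------------------------------------------------------------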
duke@435 | 486 | |
duke@435 | 487 | void Space::print_short() const { print_short_on(tty); } |
duke@435 | 488 | |
duke@435 | 489 | void Space::print_short_on(outputStream* st) const { |
duke@435 | 490 | st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K, |
duke@435 | 491 | (int) ((double) used() * 100 / capacity())); |
duke@435 | 492 | } |
duke@435 | 493 | |
duke@435 | 494 | void Space::print() const { print_on(tty); } |
duke@435 | 495 | |
duke@435 | 496 | void Space::print_on(outputStream* st) const { |
duke@435 | 497 | print_short_on(st); |
duke@435 | 498 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
duke@435 | 499 | bottom(), end()); |
duke@435 | 500 | } |
duke@435 | 501 | |
duke@435 | 502 | void ContiguousSpace::print_on(outputStream* st) const { |
duke@435 | 503 | print_short_on(st); |
duke@435 | 504 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
duke@435 | 505 | bottom(), top(), end()); |
duke@435 | 506 | } |
duke@435 | 507 | |
duke@435 | 508 | void OffsetTableContigSpace::print_on(outputStream* st) const { |
duke@435 | 509 | print_short_on(st); |
duke@435 | 510 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " |
duke@435 | 511 | INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
duke@435 | 512 | bottom(), top(), _offsets.threshold(), end()); |
duke@435 | 513 | } |
duke@435 | 514 | |
duke@435 | 515 | void ContiguousSpace::verify(bool allow_dirty) const { |
duke@435 | 516 | HeapWord* p = bottom(); |
duke@435 | 517 | HeapWord* t = top(); |
duke@435 | 518 | HeapWord* prev_p = NULL; |
duke@435 | 519 | while (p < t) { |
duke@435 | 520 | oop(p)->verify(); |
duke@435 | 521 | prev_p = p; |
duke@435 | 522 | p += oop(p)->size(); |
duke@435 | 523 | } |
duke@435 | 524 | guarantee(p == top(), "end of last object must match end of space"); |
duke@435 | 525 | if (top() != end()) { |
ysr@777 | 526 | guarantee(top() == block_start_const(end()-1) && |
ysr@777 | 527 | top() == block_start_const(top()), |
duke@435 | 528 | "top should be start of unallocated block, if it exists"); |
duke@435 | 529 | } |
duke@435 | 530 | } |
duke@435 | 531 | |
duke@435 | 532 | void Space::oop_iterate(OopClosure* blk) { |
duke@435 | 533 | ObjectToOopClosure blk2(blk); |
duke@435 | 534 | object_iterate(&blk2); |
duke@435 | 535 | } |
duke@435 | 536 | |
duke@435 | 537 | HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) { |
duke@435 | 538 | guarantee(false, "NYI"); |
duke@435 | 539 | return bottom(); |
duke@435 | 540 | } |
duke@435 | 541 | |
duke@435 | 542 | HeapWord* Space::object_iterate_careful_m(MemRegion mr, |
duke@435 | 543 | ObjectClosureCareful* cl) { |
duke@435 | 544 | guarantee(false, "NYI"); |
duke@435 | 545 | return bottom(); |
duke@435 | 546 | } |
duke@435 | 547 | |
duke@435 | 548 | |
duke@435 | 549 | void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { |
duke@435 | 550 | assert(!mr.is_empty(), "Should be non-empty"); |
duke@435 | 551 | // We use MemRegion(bottom(), end()) rather than used_region() below |
duke@435 | 552 | // because the two are not necessarily equal for some kinds of |
duke@435 | 553 | // spaces, in particular, certain kinds of free list spaces. |
duke@435 | 554 | // We could use the more complicated but more precise: |
duke@435 | 555 | // MemRegion(used_region().start(), round_to(used_region().end(), CardSize)) |
duke@435 | 556 | // but the slight imprecision seems acceptable in the assertion check. |
duke@435 | 557 | assert(MemRegion(bottom(), end()).contains(mr), |
duke@435 | 558 | "Should be within used space"); |
duke@435 | 559 | HeapWord* prev = cl->previous(); // max address from last time |
duke@435 | 560 | if (prev >= mr.end()) { // nothing to do |
duke@435 | 561 | return; |
duke@435 | 562 | } |
duke@435 | 563 | // This assert will not work when we go from cms space to perm |
duke@435 | 564 | // space, and use same closure. Easy fix deferred for later. XXX YSR |
duke@435 | 565 | // assert(prev == NULL || contains(prev), "Should be within space"); |
duke@435 | 566 | |
duke@435 | 567 | bool last_was_obj_array = false; |
duke@435 | 568 | HeapWord *blk_start_addr, *region_start_addr; |
duke@435 | 569 | if (prev > mr.start()) { |
duke@435 | 570 | region_start_addr = prev; |
duke@435 | 571 | blk_start_addr = prev; |
jmasa@953 | 572 | // The previous invocation may have pushed "prev" beyond the |
jmasa@953 | 573 | // last allocated block, yet there may still be blocks |
jmasa@953 | 574 | // in this region due to a particular coalescing policy. |
jmasa@953 | 575 | // Relax the assertion so that the case where the unallocated |
jmasa@953 | 576 | // block is maintained and "prev" is beyond the unallocated |
jmasa@953 | 577 | // block does not cause the assertion to fire. |
jmasa@953 | 578 | assert((BlockOffsetArrayUseUnallocatedBlock && |
jmasa@953 | 579 | (!is_in(prev))) || |
jmasa@953 | 580 | (blk_start_addr == block_start(region_start_addr)), "invariant"); |
duke@435 | 581 | } else { |
duke@435 | 582 | region_start_addr = mr.start(); |
duke@435 | 583 | blk_start_addr = block_start(region_start_addr); |
duke@435 | 584 | } |
duke@435 | 585 | HeapWord* region_end_addr = mr.end(); |
duke@435 | 586 | MemRegion derived_mr(region_start_addr, region_end_addr); |
duke@435 | 587 | while (blk_start_addr < region_end_addr) { |
duke@435 | 588 | const size_t size = block_size(blk_start_addr); |
duke@435 | 589 | if (block_is_obj(blk_start_addr)) { |
duke@435 | 590 | last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); |
duke@435 | 591 | } else { |
duke@435 | 592 | last_was_obj_array = false; |
duke@435 | 593 | } |
duke@435 | 594 | blk_start_addr += size; |
duke@435 | 595 | } |
duke@435 | 596 | if (!last_was_obj_array) { |
duke@435 | 597 | assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), |
duke@435 | 598 | "Should be within (closed) used space"); |
duke@435 | 599 | assert(blk_start_addr > prev, "Invariant"); |
duke@435 | 600 | cl->set_previous(blk_start_addr); // min address for next time |
duke@435 | 601 | } |
duke@435 | 602 | } |
duke@435 | 603 | |
duke@435 | 604 | bool Space::obj_is_alive(const HeapWord* p) const { |
duke@435 | 605 | assert (block_is_obj(p), "The address should point to an object"); |
duke@435 | 606 | return true; |
duke@435 | 607 | } |
duke@435 | 608 | |
duke@435 | 609 | void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { |
duke@435 | 610 | assert(!mr.is_empty(), "Should be non-empty"); |
duke@435 | 611 | assert(used_region().contains(mr), "Should be within used space"); |
duke@435 | 612 | HeapWord* prev = cl->previous(); // max address from last time |
duke@435 | 613 | if (prev >= mr.end()) { // nothing to do |
duke@435 | 614 | return; |
duke@435 | 615 | } |
duke@435 | 616 | // See comment above (in more general method above) in case you |
duke@435 | 617 | // happen to use this method. |
duke@435 | 618 | assert(prev == NULL || is_in_reserved(prev), "Should be within space"); |
duke@435 | 619 | |
duke@435 | 620 | bool last_was_obj_array = false; |
duke@435 | 621 | HeapWord *obj_start_addr, *region_start_addr; |
duke@435 | 622 | if (prev > mr.start()) { |
duke@435 | 623 | region_start_addr = prev; |
duke@435 | 624 | obj_start_addr = prev; |
duke@435 | 625 | assert(obj_start_addr == block_start(region_start_addr), "invariant"); |
duke@435 | 626 | } else { |
duke@435 | 627 | region_start_addr = mr.start(); |
duke@435 | 628 | obj_start_addr = block_start(region_start_addr); |
duke@435 | 629 | } |
duke@435 | 630 | HeapWord* region_end_addr = mr.end(); |
duke@435 | 631 | MemRegion derived_mr(region_start_addr, region_end_addr); |
duke@435 | 632 | while (obj_start_addr < region_end_addr) { |
duke@435 | 633 | oop obj = oop(obj_start_addr); |
duke@435 | 634 | const size_t size = obj->size(); |
duke@435 | 635 | last_was_obj_array = cl->do_object_bm(obj, derived_mr); |
duke@435 | 636 | obj_start_addr += size; |
duke@435 | 637 | } |
duke@435 | 638 | if (!last_was_obj_array) { |
duke@435 | 639 | assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()), |
duke@435 | 640 | "Should be within (closed) used space"); |
duke@435 | 641 | assert(obj_start_addr > prev, "Invariant"); |
duke@435 | 642 | cl->set_previous(obj_start_addr); // min address for next time |
duke@435 | 643 | } |
duke@435 | 644 | } |
duke@435 | 645 | |
duke@435 | 646 | #ifndef SERIALGC |
duke@435 | 647 | #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 648 | \ |
duke@435 | 649 | void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\ |
duke@435 | 650 | HeapWord* obj_addr = mr.start(); \ |
duke@435 | 651 | HeapWord* t = mr.end(); \ |
duke@435 | 652 | while (obj_addr < t) { \ |
duke@435 | 653 | assert(oop(obj_addr)->is_oop(), "Should be an oop"); \ |
duke@435 | 654 | obj_addr += oop(obj_addr)->oop_iterate(blk); \ |
duke@435 | 655 | } \ |
duke@435 | 656 | } |
duke@435 | 657 | |
duke@435 | 658 | ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN) |
duke@435 | 659 | |
duke@435 | 660 | #undef ContigSpace_PAR_OOP_ITERATE_DEFN |
duke@435 | 661 | #endif // SERIALGC |
duke@435 | 662 | |
duke@435 | 663 | void ContiguousSpace::oop_iterate(OopClosure* blk) { |
duke@435 | 664 | if (is_empty()) return; |
duke@435 | 665 | HeapWord* obj_addr = bottom(); |
duke@435 | 666 | HeapWord* t = top(); |
duke@435 | 667 | // Could call object_iterate(), but this is easier. |
duke@435 | 668 | while (obj_addr < t) { |
duke@435 | 669 | obj_addr += oop(obj_addr)->oop_iterate(blk); |
duke@435 | 670 | } |
duke@435 | 671 | } |
duke@435 | 672 | |
duke@435 | 673 | void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) { |
duke@435 | 674 | if (is_empty()) { |
duke@435 | 675 | return; |
duke@435 | 676 | } |
duke@435 | 677 | MemRegion cur = MemRegion(bottom(), top()); |
duke@435 | 678 | mr = mr.intersection(cur); |
duke@435 | 679 | if (mr.is_empty()) { |
duke@435 | 680 | return; |
duke@435 | 681 | } |
duke@435 | 682 | if (mr.equals(cur)) { |
duke@435 | 683 | oop_iterate(blk); |
duke@435 | 684 | return; |
duke@435 | 685 | } |
duke@435 | 686 | assert(mr.end() <= top(), "just took an intersection above"); |
duke@435 | 687 | HeapWord* obj_addr = block_start(mr.start()); |
duke@435 | 688 | HeapWord* t = mr.end(); |
duke@435 | 689 | |
duke@435 | 690 | // Handle first object specially. |
duke@435 | 691 | oop obj = oop(obj_addr); |
duke@435 | 692 | SpaceMemRegionOopsIterClosure smr_blk(blk, mr); |
duke@435 | 693 | obj_addr += obj->oop_iterate(&smr_blk); |
duke@435 | 694 | while (obj_addr < t) { |
duke@435 | 695 | oop obj = oop(obj_addr); |
duke@435 | 696 | assert(obj->is_oop(), "expected an oop"); |
duke@435 | 697 | obj_addr += obj->size(); |
duke@435 | 698 | // If "obj_addr" is not greater than "t", the end of the region, |
duke@435 | 699 | // then the entire object "obj" is within the region. |
duke@435 | 700 | if (obj_addr <= t) { |
duke@435 | 701 | obj->oop_iterate(blk); |
duke@435 | 702 | } else { |
duke@435 | 703 | // "obj" extends beyond end of region |
duke@435 | 704 | obj->oop_iterate(&smr_blk); |
duke@435 | 705 | break; |
duke@435 | 706 | } |
duke@435 | 707 | } |
duke@435 | 708 | } |
duke@435 | 709 | |
duke@435 | 710 | void ContiguousSpace::object_iterate(ObjectClosure* blk) { |
duke@435 | 711 | if (is_empty()) return; |
duke@435 | 712 | WaterMark bm = bottom_mark(); |
duke@435 | 713 | object_iterate_from(bm, blk); |
duke@435 | 714 | } |
duke@435 | 715 | |
jmasa@952 | 716 | // For a contiguous space, object_iterate() and safe_object_iterate() |
jmasa@952 | 717 | // are the same. |
jmasa@952 | 718 | void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { |
jmasa@952 | 719 | object_iterate(blk); |
jmasa@952 | 720 | } |
jmasa@952 | 721 | |
duke@435 | 722 | void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) { |
duke@435 | 723 | assert(mark.space() == this, "Mark does not match space"); |
duke@435 | 724 | HeapWord* p = mark.point(); |
duke@435 | 725 | while (p < top()) { |
duke@435 | 726 | blk->do_object(oop(p)); |
duke@435 | 727 | p += oop(p)->size(); |
duke@435 | 728 | } |
duke@435 | 729 | } |
duke@435 | 730 | |
duke@435 | 731 | HeapWord* |
duke@435 | 732 | ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) { |
duke@435 | 733 | HeapWord * limit = concurrent_iteration_safe_limit(); |
duke@435 | 734 | assert(limit <= top(), "sanity check"); |
duke@435 | 735 | for (HeapWord* p = bottom(); p < limit;) { |
duke@435 | 736 | size_t size = blk->do_object_careful(oop(p)); |
duke@435 | 737 | if (size == 0) { |
duke@435 | 738 | return p; // failed at p |
duke@435 | 739 | } else { |
duke@435 | 740 | p += size; |
duke@435 | 741 | } |
duke@435 | 742 | } |
duke@435 | 743 | return NULL; // all done |
duke@435 | 744 | } |
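// --- Editor's sketch -------------------------------------------------------
// A standalone model (not HotSpot code) of the "careful" protocol above: the
// closure returns an object's size to continue or 0 to abandon the walk, and
// the iterator reports where it stopped. The failure at address 7 and the
// size function are illustrative assumptions.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

static const size_t kNone = (size_t)-1;

static size_t do_object_careful(size_t p) {
  if (p == 7) return 0;       // e.g. an object not yet safely parsable
  return p % 3 + 1;           // pretend sizes vary
}

static size_t iterate_careful(size_t bottom, size_t limit) {
  for (size_t p = bottom; p < limit; ) {
    size_t size = do_object_careful(p);
    if (size == 0) return p;  // failed at p
    p += size;
  }
  return kNone;               // all done
}

int main() {
  size_t failed = iterate_careful(0, 20);
  if (failed != kNone) printf("stopped at %zu\n", failed);   // stopped at 7
  return 0;
}
#endif
// ---------------------------------------------------------------------------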
duke@435 | 745 | |
duke@435 | 746 | #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 747 | \ |
duke@435 | 748 | void ContiguousSpace:: \ |
duke@435 | 749 | oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ |
duke@435 | 750 | HeapWord* t; \ |
duke@435 | 751 | HeapWord* p = saved_mark_word(); \ |
duke@435 | 752 | assert(p != NULL, "expected saved mark"); \ |
duke@435 | 753 | \ |
duke@435 | 754 | const intx interval = PrefetchScanIntervalInBytes; \ |
duke@435 | 755 | do { \ |
duke@435 | 756 | t = top(); \ |
duke@435 | 757 | while (p < t) { \ |
duke@435 | 758 | Prefetch::write(p, interval); \ |
duke@435 | 759 | debug_only(HeapWord* prev = p); \ |
duke@435 | 760 | oop m = oop(p); \ |
duke@435 | 761 | p += m->oop_iterate(blk); \ |
duke@435 | 762 | } \ |
duke@435 | 763 | } while (t < top()); \ |
duke@435 | 764 | \ |
duke@435 | 765 | set_saved_mark_word(p); \ |
duke@435 | 766 | } |
duke@435 | 767 | |
duke@435 | 768 | ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN) |
duke@435 | 769 | |
duke@435 | 770 | #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN |
duke@435 | 771 | |
duke@435 | 772 | // Very general, slow implementation. |
ysr@777 | 773 | HeapWord* ContiguousSpace::block_start_const(const void* p) const { |
duke@435 | 774 | assert(MemRegion(bottom(), end()).contains(p), "p not in space"); |
duke@435 | 775 | if (p >= top()) { |
duke@435 | 776 | return top(); |
duke@435 | 777 | } else { |
duke@435 | 778 | HeapWord* last = bottom(); |
duke@435 | 779 | HeapWord* cur = last; |
duke@435 | 780 | while (cur <= p) { |
duke@435 | 781 | last = cur; |
duke@435 | 782 | cur += oop(cur)->size(); |
duke@435 | 783 | } |
duke@435 | 784 | assert(oop(last)->is_oop(), "Should be an object start"); |
duke@435 | 785 | return last; |
duke@435 | 786 | } |
duke@435 | 787 | } |
duke@435 | 788 | |
duke@435 | 789 | size_t ContiguousSpace::block_size(const HeapWord* p) const { |
duke@435 | 790 | assert(MemRegion(bottom(), end()).contains(p), "p not in space"); |
duke@435 | 791 | HeapWord* current_top = top(); |
duke@435 | 792 | assert(p <= current_top, "p is not a block start"); |
duke@435 | 793 | assert(p == current_top || oop(p)->is_oop(), "p is not a block start"); |
duke@435 | 794 | if (p < current_top) |
duke@435 | 795 | return oop(p)->size(); |
duke@435 | 796 | else { |
duke@435 | 797 | assert(p == current_top, "just checking"); |
duke@435 | 798 | return pointer_delta(end(), (HeapWord*) p); |
duke@435 | 799 | } |
duke@435 | 800 | } |
duke@435 | 801 | |
duke@435 | 802 | // This version requires locking. |
duke@435 | 803 | inline HeapWord* ContiguousSpace::allocate_impl(size_t size, |
duke@435 | 804 | HeapWord* const end_value) { |
duke@435 | 805 | assert(Heap_lock->owned_by_self() || |
duke@435 | 806 | (SafepointSynchronize::is_at_safepoint() && |
duke@435 | 807 | Thread::current()->is_VM_thread()), |
duke@435 | 808 | "not locked"); |
duke@435 | 809 | HeapWord* obj = top(); |
duke@435 | 810 | if (pointer_delta(end_value, obj) >= size) { |
duke@435 | 811 | HeapWord* new_top = obj + size; |
duke@435 | 812 | set_top(new_top); |
duke@435 | 813 | assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); |
duke@435 | 814 | return obj; |
duke@435 | 815 | } else { |
duke@435 | 816 | return NULL; |
duke@435 | 817 | } |
duke@435 | 818 | } |
duke@435 | 819 | |
duke@435 | 820 | // This version is lock-free. |
duke@435 | 821 | inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size, |
duke@435 | 822 | HeapWord* const end_value) { |
duke@435 | 823 | do { |
duke@435 | 824 | HeapWord* obj = top(); |
duke@435 | 825 | if (pointer_delta(end_value, obj) >= size) { |
duke@435 | 826 | HeapWord* new_top = obj + size; |
duke@435 | 827 | HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); |
duke@435 | 828 | // The result can be one of two values: |
duke@435 | 829 | // the old top value: the exchange succeeded |
duke@435 | 830 | // otherwise: the new value of the top is returned. |
duke@435 | 831 | if (result == obj) { |
duke@435 | 832 | assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); |
duke@435 | 833 | return obj; |
duke@435 | 834 | } |
duke@435 | 835 | } else { |
duke@435 | 836 | return NULL; |
duke@435 | 837 | } |
duke@435 | 838 | } while (true); |
duke@435 | 839 | } |
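// --- Editor's sketch -------------------------------------------------------
// The same lock-free bump-pointer idiom as par_allocate_impl above, written
// as a standalone model with std::atomic in place of Atomic::cmpxchg_ptr (an
// anachronism for this codebase, used only to keep the example self-contained;
// the fixed g_end stands in for the space's end()).
#if 0   // illustration only: not part of this file; extract to build and run
#include <atomic>
#include <cstdio>
#include <cstddef>

static std::atomic<size_t> g_top{0};
static const size_t g_end = 1024;           // words

static size_t par_allocate(size_t size) {
  size_t obj = g_top.load();
  while (g_end - obj >= size) {
    // On failure, compare_exchange_weak reloads the current top into 'obj'
    // and we retry, exactly like the do/while loop above.
    if (g_top.compare_exchange_weak(obj, obj + size)) return obj;
  }
  return (size_t)-1;                        // out of space
}

int main() {
  size_t a = par_allocate(16);
  size_t b = par_allocate(8);
  printf("allocated at %zu and %zu; top is now %zu\n", a, b, g_top.load());
  return 0;
}
#endif
// ---------------------------------------------------------------------------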
duke@435 | 840 | |
duke@435 | 841 | // Requires locking. |
duke@435 | 842 | HeapWord* ContiguousSpace::allocate(size_t size) { |
duke@435 | 843 | return allocate_impl(size, end()); |
duke@435 | 844 | } |
duke@435 | 845 | |
duke@435 | 846 | // Lock-free. |
duke@435 | 847 | HeapWord* ContiguousSpace::par_allocate(size_t size) { |
duke@435 | 848 | return par_allocate_impl(size, end()); |
duke@435 | 849 | } |
duke@435 | 850 | |
duke@435 | 851 | void ContiguousSpace::allocate_temporary_filler(int factor) { |
duke@435 | 852 | // Allocate a temporary type array, shrinking the remaining free space by a factor of 'factor' |
duke@435 | 853 | assert(factor >= 0, "just checking"); |
duke@435 | 854 | size_t size = pointer_delta(end(), top()); |
duke@435 | 855 | |
duke@435 | 856 | // if space is full, return |
duke@435 | 857 | if (size == 0) return; |
duke@435 | 858 | |
duke@435 | 859 | if (factor > 0) { |
duke@435 | 860 | size -= size/factor; |
duke@435 | 861 | } |
duke@435 | 862 | size = align_object_size(size); |
duke@435 | 863 | |
duke@435 | 864 | const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT); |
duke@435 | 865 | if (size >= min_int_array_size) { |
duke@435 | 866 | size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint)); |
duke@435 | 867 | // allocate uninitialized int array |
duke@435 | 868 | typeArrayOop t = (typeArrayOop) allocate(size); |
duke@435 | 869 | assert(t != NULL, "allocation should succeed"); |
duke@435 | 870 | t->set_mark(markOopDesc::prototype()); |
duke@435 | 871 | t->set_klass(Universe::intArrayKlassObj()); |
duke@435 | 872 | t->set_length((int)length); |
duke@435 | 873 | } else { |
duke@435 | 874 | assert((int) size == instanceOopDesc::header_size(), |
duke@435 | 875 | "size for smallest fake object doesn't match"); |
duke@435 | 876 | instanceOop obj = (instanceOop) allocate(size); |
duke@435 | 877 | obj->set_mark(markOopDesc::prototype()); |
coleenp@602 | 878 | obj->set_klass_gap(0); |
duke@435 | 879 | obj->set_klass(SystemDictionary::object_klass()); |
duke@435 | 880 | } |
duke@435 | 881 | } |
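// --- Editor's sketch -------------------------------------------------------
// Worked example of the filler sizing above, assuming 8-byte heap words,
// 4-byte jints and a 2-word int-array header (assumed values; the real ones
// come from HeapWordSize and typeArrayOopDesc::header_size(T_INT)). Object
// alignment is ignored for simplicity.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

int main() {
  const size_t kHeapWordSize = 8, kJintSize = 4, kHeaderWords = 2;
  size_t size = 100;                        // free words
  int factor = 4;
  if (factor > 0) size -= size / factor;    // leave 1/4 free: filler = 75 words
  size_t length = (size - kHeaderWords) * (kHeapWordSize / kJintSize);
  printf("int[%zu] filler occupying %zu words\n", length, size);  // int[146], 75
  return 0;
}
#endif
// ---------------------------------------------------------------------------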
duke@435 | 882 | |
jmasa@698 | 883 | void EdenSpace::clear(bool mangle_space) { |
jmasa@698 | 884 | ContiguousSpace::clear(mangle_space); |
duke@435 | 885 | set_soft_end(end()); |
duke@435 | 886 | } |
duke@435 | 887 | |
duke@435 | 888 | // Requires locking. |
duke@435 | 889 | HeapWord* EdenSpace::allocate(size_t size) { |
duke@435 | 890 | return allocate_impl(size, soft_end()); |
duke@435 | 891 | } |
duke@435 | 892 | |
duke@435 | 893 | // Lock-free. |
duke@435 | 894 | HeapWord* EdenSpace::par_allocate(size_t size) { |
duke@435 | 895 | return par_allocate_impl(size, soft_end()); |
duke@435 | 896 | } |
duke@435 | 897 | |
duke@435 | 898 | HeapWord* ConcEdenSpace::par_allocate(size_t size) |
duke@435 | 899 | { |
duke@435 | 900 | do { |
duke@435 | 901 | // The invariant is that top() must be read before end() because |
duke@435 | 902 | // top() can never exceed end(): if an update of _soft_end |
duke@435 | 903 | // occurred between 'end_val = end();' and 'top_val = top();', top() |
duke@435 | 904 | // could grow up to the new end() and the stale condition |
duke@435 | 905 | // 'top_val > end_val' would hold. To enforce this load ordering, |
duke@435 | 906 | // OrderAccess::loadload() is required after the top() read. |
duke@435 | 907 | HeapWord* obj = top(); |
duke@435 | 908 | OrderAccess::loadload(); |
duke@435 | 909 | if (pointer_delta(*soft_end_addr(), obj) >= size) { |
duke@435 | 910 | HeapWord* new_top = obj + size; |
duke@435 | 911 | HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); |
duke@435 | 912 | // The result can be one of two values: |
duke@435 | 913 | // the old top value: the exchange succeeded |
duke@435 | 914 | // otherwise: the new value of the top is returned. |
duke@435 | 915 | if (result == obj) { |
duke@435 | 916 | assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); |
duke@435 | 917 | return obj; |
duke@435 | 918 | } |
duke@435 | 919 | } else { |
duke@435 | 920 | return NULL; |
duke@435 | 921 | } |
duke@435 | 922 | } while (true); |
duke@435 | 923 | } |
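// --- Editor's sketch -------------------------------------------------------
// A standalone model (not HotSpot code) of the load ordering just described:
// top must be read before the (movable) soft end, and the two loads must not
// be reordered. An acquire fence plays the role of OrderAccess::loadload();
// std::atomic is again an anachronism, used only to stay self-contained.
#if 0   // illustration only: not part of this file; extract to build and run
#include <atomic>
#include <cstddef>

static std::atomic<size_t> s_top{0};
static std::atomic<size_t> s_soft_end{512};

static size_t conc_par_allocate(size_t size) {
  for (;;) {
    size_t obj = s_top.load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire);   // "loadload"
    size_t soft = s_soft_end.load(std::memory_order_relaxed);
    if (soft < obj + size) return (size_t)-1;              // won't fit
    if (s_top.compare_exchange_weak(obj, obj + size)) return obj;
    // CAS failed: another thread advanced top; loop and re-read everything.
  }
}

int main() {
  return conc_par_allocate(16) == 0 ? 0 : 1;   // first allocation lands at 0
}
#endif
// ---------------------------------------------------------------------------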
duke@435 | 924 | |
duke@435 | 925 | |
duke@435 | 926 | HeapWord* OffsetTableContigSpace::initialize_threshold() { |
duke@435 | 927 | return _offsets.initialize_threshold(); |
duke@435 | 928 | } |
duke@435 | 929 | |
duke@435 | 930 | HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) { |
duke@435 | 931 | _offsets.alloc_block(start, end); |
duke@435 | 932 | return _offsets.threshold(); |
duke@435 | 933 | } |
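// --- Editor's sketch -------------------------------------------------------
// A standalone model (not HotSpot code) of the threshold protocol above: the
// table hands out the next card boundary as a "threshold", and whenever an
// allocation (or compaction) crosses it the caller reports the block bounds
// so every crossed card records which block covers it. The 8-word cards and
// array sizes are illustrative assumptions.
#if 0   // illustration only: not part of this file; extract to build and run
#include <cstdio>
#include <cstddef>

static const size_t kCardWords = 8;
static size_t bot[16];           // per card: start of the block covering it
static size_t threshold;

static size_t initialize_threshold() { threshold = kCardWords; return threshold; }

static size_t cross_threshold(size_t start, size_t end) {
  while (threshold < end) {
    bot[threshold / kCardWords] = start;
    threshold += kCardWords;
  }
  return threshold;
}

int main() {
  size_t t = initialize_threshold();
  size_t top = 0;
  size_t sizes[3] = {5, 10, 3};  // consecutive blocks: [0,5) [5,15) [15,18)
  for (int i = 0; i < 3; i++) {
    size_t start = top, end = top + sizes[i];
    if (end > t) t = cross_threshold(start, end);
    top = end;
  }
  printf("card 1 covered by block starting at %zu\n", bot[1]);   // prints 5
  return 0;
}
#endif
// ---------------------------------------------------------------------------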
duke@435 | 934 | |
duke@435 | 935 | OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, |
duke@435 | 936 | MemRegion mr) : |
duke@435 | 937 | _offsets(sharedOffsetArray, mr), |
duke@435 | 938 | _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true) |
duke@435 | 939 | { |
duke@435 | 940 | _offsets.set_contig_space(this); |
jmasa@698 | 941 | initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
duke@435 | 942 | } |
duke@435 | 943 | |
duke@435 | 944 | |
duke@435 | 945 | class VerifyOldOopClosure : public OopClosure { |
duke@435 | 946 | public: |
coleenp@548 | 947 | oop _the_obj; |
coleenp@548 | 948 | bool _allow_dirty; |
duke@435 | 949 | void do_oop(oop* p) { |
coleenp@548 | 950 | _the_obj->verify_old_oop(p, _allow_dirty); |
coleenp@548 | 951 | } |
coleenp@548 | 952 | void do_oop(narrowOop* p) { |
coleenp@548 | 953 | _the_obj->verify_old_oop(p, _allow_dirty); |
duke@435 | 954 | } |
duke@435 | 955 | }; |
duke@435 | 956 | |
duke@435 | 957 | #define OBJ_SAMPLE_INTERVAL 0 |
duke@435 | 958 | #define BLOCK_SAMPLE_INTERVAL 100 |
duke@435 | 959 | |
duke@435 | 960 | void OffsetTableContigSpace::verify(bool allow_dirty) const { |
duke@435 | 961 | HeapWord* p = bottom(); |
duke@435 | 962 | HeapWord* prev_p = NULL; |
duke@435 | 963 | VerifyOldOopClosure blk; // Does this do anything? |
coleenp@548 | 964 | blk._allow_dirty = allow_dirty; |
duke@435 | 965 | int objs = 0; |
duke@435 | 966 | int blocks = 0; |
duke@435 | 967 | |
duke@435 | 968 | if (VerifyObjectStartArray) { |
duke@435 | 969 | _offsets.verify(); |
duke@435 | 970 | } |
duke@435 | 971 | |
duke@435 | 972 | while (p < top()) { |
duke@435 | 973 | size_t size = oop(p)->size(); |
duke@435 | 974 | // For a sampling of objects in the space, find it using the |
duke@435 | 975 | // block offset table. |
duke@435 | 976 | if (blocks == BLOCK_SAMPLE_INTERVAL) { |
ysr@777 | 977 | guarantee(p == block_start_const(p + (size/2)), |
ysr@777 | 978 | "check offset computation"); |
duke@435 | 979 | blocks = 0; |
duke@435 | 980 | } else { |
duke@435 | 981 | blocks++; |
duke@435 | 982 | } |
duke@435 | 983 | |
duke@435 | 984 | if (objs == OBJ_SAMPLE_INTERVAL) { |
duke@435 | 985 | oop(p)->verify(); |
coleenp@548 | 986 | blk._the_obj = oop(p); |
duke@435 | 987 | oop(p)->oop_iterate(&blk); |
duke@435 | 988 | objs = 0; |
duke@435 | 989 | } else { |
duke@435 | 990 | objs++; |
duke@435 | 991 | } |
duke@435 | 992 | prev_p = p; |
duke@435 | 993 | p += size; |
duke@435 | 994 | } |
duke@435 | 995 | guarantee(p == top(), "end of last object must match end of space"); |
duke@435 | 996 | } |
duke@435 | 997 | |
duke@435 | 998 | void OffsetTableContigSpace::serialize_block_offset_array_offsets( |
duke@435 | 999 | SerializeOopClosure* soc) { |
duke@435 | 1000 | _offsets.serialize(soc); |
duke@435 | 1001 | } |
duke@435 | 1002 | |
duke@435 | 1003 | |
jcoomes@873 | 1004 | size_t TenuredSpace::allowed_dead_ratio() const { |
duke@435 | 1005 | return MarkSweepDeadRatio; |
duke@435 | 1006 | } |
duke@435 | 1007 | |
duke@435 | 1008 | |
jcoomes@873 | 1009 | size_t ContigPermSpace::allowed_dead_ratio() const { |
duke@435 | 1010 | return PermMarkSweepDeadRatio; |
duke@435 | 1011 | } |