Fri, 23 Mar 2012 11:16:05 -0400
7150058: Allocate symbols from null boot loader to an arena for NMT
Summary: Move symbol allocation to an arena so NMT doesn't have to track them at startup.
Reviewed-by: never, kamg, zgu
duke@435 | 1 | /* |
stefank@2314 | 2 | * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/systemDictionary.hpp" |
stefank@2314 | 27 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 28 | #include "gc_implementation/shared/liveRange.hpp" |
stefank@2314 | 29 | #include "gc_implementation/shared/markSweep.hpp" |
stefank@2314 | 30 | #include "gc_implementation/shared/spaceDecorator.hpp" |
stefank@2314 | 31 | #include "memory/blockOffsetTable.inline.hpp" |
stefank@2314 | 32 | #include "memory/defNewGeneration.hpp" |
stefank@2314 | 33 | #include "memory/genCollectedHeap.hpp" |
stefank@2314 | 34 | #include "memory/space.hpp" |
stefank@2314 | 35 | #include "memory/space.inline.hpp" |
stefank@2314 | 36 | #include "memory/universe.inline.hpp" |
stefank@2314 | 37 | #include "oops/oop.inline.hpp" |
stefank@2314 | 38 | #include "oops/oop.inline2.hpp" |
stefank@2314 | 39 | #include "runtime/java.hpp" |
stefank@2314 | 40 | #include "runtime/safepoint.hpp" |
stefank@2314 | 41 | #include "utilities/copy.hpp" |
stefank@2314 | 42 | #include "utilities/globalDefinitions.hpp" |
duke@435 | 43 | |
coleenp@548 | 44 | void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } |
coleenp@548 | 45 | void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); } |
coleenp@548 | 46 | |
duke@435 | 47 | HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, |
duke@435 | 48 | HeapWord* top_obj) { |
duke@435 | 49 | if (top_obj != NULL) { |
duke@435 | 50 | if (_sp->block_is_obj(top_obj)) { |
duke@435 | 51 | if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { |
duke@435 | 52 | if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { |
duke@435 | 53 | // An arrayOop is starting on the dirty card - since we do exact |
duke@435 | 54 | // store checks for objArrays we are done. |
duke@435 | 55 | } else { |
duke@435 | 56 | // Otherwise, it is possible that the object starting on the dirty |
duke@435 | 57 | // card spans the entire card, and that the store happened on a |
duke@435 | 58 | // later card. Figure out where the object ends. |
duke@435 | 59 | // Use the block_size() method of the space over which |
duke@435 | 60 | // the iteration is being done. That space (e.g. CMS) may have |
duke@435 | 61 | // specific requirements on object sizes which will |
duke@435 | 62 | // be reflected in the block_size() method. |
duke@435 | 63 | top = top_obj + oop(top_obj)->size(); |
duke@435 | 64 | } |
duke@435 | 65 | } |
duke@435 | 66 | } else { |
duke@435 | 67 | top = top_obj; |
duke@435 | 68 | } |
duke@435 | 69 | } else { |
duke@435 | 70 | assert(top == _sp->end(), "only case where top_obj == NULL"); |
duke@435 | 71 | } |
duke@435 | 72 | return top; |
duke@435 | 73 | } |
duke@435 | 74 | |
duke@435 | 75 | void DirtyCardToOopClosure::walk_mem_region(MemRegion mr, |
duke@435 | 76 | HeapWord* bottom, |
duke@435 | 77 | HeapWord* top) { |
duke@435 | 78 | // 1. Blocks may or may not be objects. |
duke@435 | 79 | // 2. Even when block_is_obj() holds, the object may not entirely |
duke@435 | 80 | // occupy the block if the block quantum is larger than |
duke@435 | 81 | // the object size. |
duke@435 | 82 | // We can and should try to optimize by calling the non-MemRegion |
duke@435 | 83 | // version of oop_iterate() for all but the extremal objects |
duke@435 | 84 | // (for which we need to call the MemRegion version of |
duke@435 | 85 | // oop_iterate()). To be done post-beta. XXX |
duke@435 | 86 | for (; bottom < top; bottom += _sp->block_size(bottom)) { |
duke@435 | 87 | // As in the case of contiguous space above, we'd like to |
duke@435 | 88 | // just use the value returned by oop_iterate to increment the |
duke@435 | 89 | // current pointer; unfortunately, that won't work in CMS because |
duke@435 | 90 | // we'd need an interface change (it seems) to have the space |
duke@435 | 91 | // "adjust the object size" (for instance pad it up to its |
duke@435 | 92 | // block alignment or minimum block size restrictions). XXX |
duke@435 | 93 | if (_sp->block_is_obj(bottom) && |
duke@435 | 94 | !_sp->obj_allocated_since_save_marks(oop(bottom))) { |
duke@435 | 95 | oop(bottom)->oop_iterate(_cl, mr); |
duke@435 | 96 | } |
duke@435 | 97 | } |
duke@435 | 98 | } |
duke@435 | 99 | |
ysr@2889 | 100 | // We get called with "mr" representing the dirty region |
ysr@2889 | 101 | // that we want to process. Because of imprecise marking, |
ysr@2889 | 102 | // we may need to extend the incoming "mr" to the right, |
ysr@2889 | 103 | // and scan more. However, because we may already have |
ysr@2889 | 104 | // scanned some of that extended region, we may need to |
ysr@2889 | 105 | // trim its right-end back some so we do not scan what |
ysr@2889 | 106 | // we (or another worker thread) may already have scanned |
ysr@2889 | 107 | // or may be planning to scan. |
duke@435 | 108 | void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { |
duke@435 | 109 | |
duke@435 | 110 | // Some collectors need to do special things whenever their dirty |
duke@435 | 111 | // cards are processed. For instance, CMS must remember mutator updates |
duke@435 | 112 | // (i.e. dirty cards) so as to re-scan mutated objects. |
duke@435 | 113 | // Such work can be piggy-backed here on dirty card scanning, so as to make |
duke@435 | 114 | // it slightly more efficient than doing a complete non-destructive pre-scan |
duke@435 | 115 | // of the card table. |
duke@435 | 116 | MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure(); |
duke@435 | 117 | if (pCl != NULL) { |
duke@435 | 118 | pCl->do_MemRegion(mr); |
duke@435 | 119 | } |
duke@435 | 120 | |
duke@435 | 121 | HeapWord* bottom = mr.start(); |
duke@435 | 122 | HeapWord* last = mr.last(); |
duke@435 | 123 | HeapWord* top = mr.end(); |
duke@435 | 124 | HeapWord* bottom_obj; |
duke@435 | 125 | HeapWord* top_obj; |
duke@435 | 126 | |
duke@435 | 127 | assert(_precision == CardTableModRefBS::ObjHeadPreciseArray || |
duke@435 | 128 | _precision == CardTableModRefBS::Precise, |
duke@435 | 129 | "Only ones we deal with for now."); |
duke@435 | 130 | |
duke@435 | 131 | assert(_precision != CardTableModRefBS::ObjHeadPreciseArray || |
ysr@777 | 132 | _cl->idempotent() || _last_bottom == NULL || |
duke@435 | 133 | top <= _last_bottom, |
duke@435 | 134 | "Not decreasing"); |
duke@435 | 135 | NOT_PRODUCT(_last_bottom = mr.start()); |
duke@435 | 136 | |
duke@435 | 137 | bottom_obj = _sp->block_start(bottom); |
duke@435 | 138 | top_obj = _sp->block_start(last); |
duke@435 | 139 | |
duke@435 | 140 | assert(bottom_obj <= bottom, "just checking"); |
duke@435 | 141 | assert(top_obj <= top, "just checking"); |
duke@435 | 142 | |
duke@435 | 143 | // Given what we think is the top of the memory region and |
duke@435 | 144 | // the start of the object at the top, get the actual |
duke@435 | 145 | // value of the top. |
duke@435 | 146 | top = get_actual_top(top, top_obj); |
duke@435 | 147 | |
duke@435 | 148 | // If the previous call did some part of this region, don't redo. |
duke@435 | 149 | if (_precision == CardTableModRefBS::ObjHeadPreciseArray && |
duke@435 | 150 | _min_done != NULL && |
duke@435 | 151 | _min_done < top) { |
duke@435 | 152 | top = _min_done; |
duke@435 | 153 | } |
duke@435 | 154 | |
duke@435 | 155 | // Top may have been reset, and in fact may be below bottom, |
duke@435 | 156 | // e.g. the dirty card region is entirely in a now free object |
duke@435 | 157 | // -- something that could happen with a concurrent sweeper. |
duke@435 | 158 | bottom = MIN2(bottom, top); |
ysr@2889 | 159 | MemRegion extended_mr = MemRegion(bottom, top); |
duke@435 | 160 | assert(bottom <= top && |
duke@435 | 161 | (_precision != CardTableModRefBS::ObjHeadPreciseArray || |
duke@435 | 162 | _min_done == NULL || |
duke@435 | 163 | top <= _min_done), |
duke@435 | 164 | "overlap!"); |
duke@435 | 165 | |
duke@435 | 166 | // Walk the region if it is not empty; otherwise there is nothing to do. |
ysr@2889 | 167 | if (!extended_mr.is_empty()) { |
ysr@2889 | 168 | walk_mem_region(extended_mr, bottom_obj, top); |
duke@435 | 169 | } |
duke@435 | 170 | |
ysr@777 | 171 | // An idempotent closure might be applied in any order, so we don't |
ysr@777 | 172 | // record a _min_done for it. |
ysr@777 | 173 | if (!_cl->idempotent()) { |
ysr@777 | 174 | _min_done = bottom; |
ysr@777 | 175 | } else { |
ysr@777 | 176 | assert(_min_done == _last_explicit_min_done, |
ysr@777 | 177 | "Don't update _min_done for idempotent cl"); |
ysr@777 | 178 | } |
duke@435 | 179 | } |
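// Worked example of the extend-then-trim logic above (illustrative,
// hypothetical addresses): suppose mr covers one 512-byte card
// [0x1000, 0x1200), a non-array object starting at 0x1180 extends to
// 0x1400, and _min_done is 0x1300. get_actual_top() first pushes top
// out to 0x1400 so the spanning object is covered; the _min_done check
// then trims top back to 0x1300 so the subregion [0x1300, 0x1400)
// already handled by a previous call is not rescanned.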
duke@435 | 180 | |
duke@435 | 181 | DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl, |
duke@435 | 182 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 183 | HeapWord* boundary) { |
duke@435 | 184 | return new DirtyCardToOopClosure(this, cl, precision, boundary); |
duke@435 | 185 | } |
duke@435 | 186 | |
duke@435 | 187 | HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, |
duke@435 | 188 | HeapWord* top_obj) { |
duke@435 | 189 | if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { |
duke@435 | 190 | if (_precision == CardTableModRefBS::ObjHeadPreciseArray) { |
duke@435 | 191 | if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { |
duke@435 | 192 | // An arrayOop is starting on the dirty card - since we do exact |
duke@435 | 193 | // store checks for objArrays we are done. |
duke@435 | 194 | } else { |
duke@435 | 195 | // Otherwise, it is possible that the object starting on the dirty |
duke@435 | 196 | // card spans the entire card, and that the store happened on a |
duke@435 | 197 | // later card. Figure out where the object ends. |
duke@435 | 198 | assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(), |
duke@435 | 199 | "Block size and object size mismatch"); |
duke@435 | 200 | top = top_obj + oop(top_obj)->size(); |
duke@435 | 201 | } |
duke@435 | 202 | } |
duke@435 | 203 | } else { |
duke@435 | 204 | top = (_sp->toContiguousSpace())->top(); |
duke@435 | 205 | } |
duke@435 | 206 | return top; |
duke@435 | 207 | } |
duke@435 | 208 | |
duke@435 | 209 | void Filtering_DCTOC::walk_mem_region(MemRegion mr, |
duke@435 | 210 | HeapWord* bottom, |
duke@435 | 211 | HeapWord* top) { |
duke@435 | 212 | // Note that this assumption won't hold if we have a concurrent |
duke@435 | 213 | // collector in this space, which may have freed up objects after |
duke@435 | 214 | // they were dirtied and before the stop-the-world GC that is |
duke@435 | 215 | // examining cards here. |
duke@435 | 216 | assert(bottom < top, "ought to be at least one obj on a dirty card."); |
duke@435 | 217 | |
duke@435 | 218 | if (_boundary != NULL) { |
duke@435 | 219 | // We have a boundary outside of which we don't want to look |
duke@435 | 220 | // at objects, so create a filtering closure around the |
duke@435 | 221 | // oop closure before walking the region. |
duke@435 | 222 | FilteringClosure filter(_boundary, _cl); |
duke@435 | 223 | walk_mem_region_with_cl(mr, bottom, top, &filter); |
duke@435 | 224 | } else { |
duke@435 | 225 | // No boundary, simply walk the heap with the oop closure. |
duke@435 | 226 | walk_mem_region_with_cl(mr, bottom, top, _cl); |
duke@435 | 227 | } |
duke@435 | 228 | |
duke@435 | 229 | } |
duke@435 | 230 | |
duke@435 | 231 | // We must replicate this so that the static type of "FilteringClosure" |
duke@435 | 232 | // (see above) is apparent at the oop_iterate calls. |
duke@435 | 233 | #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ |
duke@435 | 234 | void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ |
duke@435 | 235 | HeapWord* bottom, \ |
duke@435 | 236 | HeapWord* top, \ |
duke@435 | 237 | ClosureType* cl) { \ |
duke@435 | 238 | bottom += oop(bottom)->oop_iterate(cl, mr); \ |
duke@435 | 239 | if (bottom < top) { \ |
duke@435 | 240 | HeapWord* next_obj = bottom + oop(bottom)->size(); \ |
duke@435 | 241 | while (next_obj < top) { \ |
duke@435 | 242 | /* Bottom lies entirely below top, so we can call the */ \ |
duke@435 | 243 | /* non-memRegion version of oop_iterate below. */ \ |
duke@435 | 244 | oop(bottom)->oop_iterate(cl); \ |
duke@435 | 245 | bottom = next_obj; \ |
duke@435 | 246 | next_obj = bottom + oop(bottom)->size(); \ |
duke@435 | 247 | } \ |
duke@435 | 248 | /* Last object. */ \ |
duke@435 | 249 | oop(bottom)->oop_iterate(cl, mr); \ |
duke@435 | 250 | } \ |
duke@435 | 251 | } |
duke@435 | 252 | |
duke@435 | 253 | // (There are only two of these, rather than N, because the split is due |
duke@435 | 254 | // only to the introduction of the FilteringClosure, a local part of the |
duke@435 | 255 | // impl of this abstraction.) |
duke@435 | 256 | ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure) |
duke@435 | 257 | ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) |
duke@435 | 258 | |
duke@435 | 259 | DirtyCardToOopClosure* |
duke@435 | 260 | ContiguousSpace::new_dcto_cl(OopClosure* cl, |
duke@435 | 261 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 262 | HeapWord* boundary) { |
duke@435 | 263 | return new ContiguousSpaceDCTOC(this, cl, precision, boundary); |
duke@435 | 264 | } |
duke@435 | 265 | |
jmasa@698 | 266 | void Space::initialize(MemRegion mr, |
jmasa@698 | 267 | bool clear_space, |
jmasa@698 | 268 | bool mangle_space) { |
duke@435 | 269 | HeapWord* bottom = mr.start(); |
duke@435 | 270 | HeapWord* end = mr.end(); |
duke@435 | 271 | assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), |
duke@435 | 272 | "invalid space boundaries"); |
duke@435 | 273 | set_bottom(bottom); |
duke@435 | 274 | set_end(end); |
jmasa@698 | 275 | if (clear_space) clear(mangle_space); |
duke@435 | 276 | } |
duke@435 | 277 | |
jmasa@698 | 278 | void Space::clear(bool mangle_space) { |
jmasa@698 | 279 | if (ZapUnusedHeapArea && mangle_space) { |
jmasa@698 | 280 | mangle_unused_area(); |
jmasa@698 | 281 | } |
duke@435 | 282 | } |
duke@435 | 283 | |
tonyp@791 | 284 | ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL), |
tonyp@791 | 285 | _concurrent_iteration_safe_limit(NULL) { |
jmasa@698 | 286 | _mangler = new GenSpaceMangler(this); |
jmasa@698 | 287 | } |
jmasa@698 | 288 | |
jmasa@698 | 289 | ContiguousSpace::~ContiguousSpace() { |
jmasa@698 | 290 | delete _mangler; |
jmasa@698 | 291 | } |
jmasa@698 | 292 | |
jmasa@698 | 293 | void ContiguousSpace::initialize(MemRegion mr, |
jmasa@698 | 294 | bool clear_space, |
jmasa@698 | 295 | bool mangle_space) |
duke@435 | 296 | { |
jmasa@698 | 297 | CompactibleSpace::initialize(mr, clear_space, mangle_space); |
ysr@782 | 298 | set_concurrent_iteration_safe_limit(top()); |
duke@435 | 299 | } |
duke@435 | 300 | |
jmasa@698 | 301 | void ContiguousSpace::clear(bool mangle_space) { |
duke@435 | 302 | set_top(bottom()); |
duke@435 | 303 | set_saved_mark(); |
tonyp@791 | 304 | CompactibleSpace::clear(mangle_space); |
duke@435 | 305 | } |
duke@435 | 306 | |
duke@435 | 307 | bool ContiguousSpace::is_in(const void* p) const { |
duke@435 | 308 | return _bottom <= p && p < _top; |
duke@435 | 309 | } |
duke@435 | 310 | |
duke@435 | 311 | bool ContiguousSpace::is_free_block(const HeapWord* p) const { |
duke@435 | 312 | return p >= _top; |
duke@435 | 313 | } |
duke@435 | 314 | |
jmasa@698 | 315 | void OffsetTableContigSpace::clear(bool mangle_space) { |
jmasa@698 | 316 | ContiguousSpace::clear(mangle_space); |
duke@435 | 317 | _offsets.initialize_threshold(); |
duke@435 | 318 | } |
duke@435 | 319 | |
duke@435 | 320 | void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { |
duke@435 | 321 | Space::set_bottom(new_bottom); |
duke@435 | 322 | _offsets.set_bottom(new_bottom); |
duke@435 | 323 | } |
duke@435 | 324 | |
duke@435 | 325 | void OffsetTableContigSpace::set_end(HeapWord* new_end) { |
duke@435 | 326 | // Space should not advertise an increase in size |
duke@435 | 327 | // until after the underlying offset table has been enlarged. |
duke@435 | 328 | _offsets.resize(pointer_delta(new_end, bottom())); |
duke@435 | 329 | Space::set_end(new_end); |
duke@435 | 330 | } |
duke@435 | 331 | |
jmasa@698 | 332 | #ifndef PRODUCT |
jmasa@698 | 333 | |
jmasa@698 | 334 | void ContiguousSpace::set_top_for_allocations(HeapWord* v) { |
jmasa@698 | 335 | mangler()->set_top_for_allocations(v); |
jmasa@698 | 336 | } |
jmasa@698 | 337 | void ContiguousSpace::set_top_for_allocations() { |
jmasa@698 | 338 | mangler()->set_top_for_allocations(top()); |
jmasa@698 | 339 | } |
jmasa@698 | 340 | void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) { |
jmasa@698 | 341 | mangler()->check_mangled_unused_area(limit); |
duke@435 | 342 | } |
duke@435 | 343 | |
jmasa@698 | 344 | void ContiguousSpace::check_mangled_unused_area_complete() { |
jmasa@698 | 345 | mangler()->check_mangled_unused_area_complete(); |
duke@435 | 346 | } |
duke@435 | 347 | |
jmasa@698 | 348 | // Mangle only the unused space that has not previously |
jmasa@698 | 349 | // been mangled and that has not been allocated since being |
jmasa@698 | 350 | // mangled. |
jmasa@698 | 351 | void ContiguousSpace::mangle_unused_area() { |
jmasa@698 | 352 | mangler()->mangle_unused_area(); |
jmasa@698 | 353 | } |
jmasa@698 | 354 | void ContiguousSpace::mangle_unused_area_complete() { |
jmasa@698 | 355 | mangler()->mangle_unused_area_complete(); |
jmasa@698 | 356 | } |
jmasa@698 | 357 | void ContiguousSpace::mangle_region(MemRegion mr) { |
jmasa@698 | 358 | // Although this method uses SpaceMangler::mangle_region() which |
jmasa@698 | 359 | // is not specific to a space, when the ContiguousSpace version |
jmasa@698 | 360 | // is called, it is always with regard to a space and this |
jmasa@698 | 361 | // bounds checking is appropriate. |
jmasa@698 | 362 | MemRegion space_mr(bottom(), end()); |
jmasa@698 | 363 | assert(space_mr.contains(mr), "Mangling outside space"); |
jmasa@698 | 364 | SpaceMangler::mangle_region(mr); |
jmasa@698 | 365 | } |
jmasa@698 | 366 | #endif // NOT_PRODUCT |
jmasa@698 | 367 | |
jmasa@698 | 368 | void CompactibleSpace::initialize(MemRegion mr, |
jmasa@698 | 369 | bool clear_space, |
jmasa@698 | 370 | bool mangle_space) { |
jmasa@698 | 371 | Space::initialize(mr, clear_space, mangle_space); |
tonyp@791 | 372 | set_compaction_top(bottom()); |
tonyp@791 | 373 | _next_compaction_space = NULL; |
tonyp@791 | 374 | } |
tonyp@791 | 375 | |
tonyp@791 | 376 | void CompactibleSpace::clear(bool mangle_space) { |
tonyp@791 | 377 | Space::clear(mangle_space); |
duke@435 | 378 | _compaction_top = bottom(); |
duke@435 | 379 | } |
duke@435 | 380 | |
duke@435 | 381 | HeapWord* CompactibleSpace::forward(oop q, size_t size, |
duke@435 | 382 | CompactPoint* cp, HeapWord* compact_top) { |
duke@435 | 383 | // q is alive |
duke@435 | 384 | // First check if we should switch compaction space |
duke@435 | 385 | assert(this == cp->space, "'this' should be current compaction space."); |
duke@435 | 386 | size_t compaction_max_size = pointer_delta(end(), compact_top); |
duke@435 | 387 | while (size > compaction_max_size) { |
duke@435 | 388 | // switch to next compaction space |
duke@435 | 389 | cp->space->set_compaction_top(compact_top); |
duke@435 | 390 | cp->space = cp->space->next_compaction_space(); |
duke@435 | 391 | if (cp->space == NULL) { |
duke@435 | 392 | cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); |
duke@435 | 393 | assert(cp->gen != NULL, "compaction must succeed"); |
duke@435 | 394 | cp->space = cp->gen->first_compaction_space(); |
duke@435 | 395 | assert(cp->space != NULL, "generation must have a first compaction space"); |
duke@435 | 396 | } |
duke@435 | 397 | compact_top = cp->space->bottom(); |
duke@435 | 398 | cp->space->set_compaction_top(compact_top); |
duke@435 | 399 | cp->threshold = cp->space->initialize_threshold(); |
duke@435 | 400 | compaction_max_size = pointer_delta(cp->space->end(), compact_top); |
duke@435 | 401 | } |
duke@435 | 402 | |
duke@435 | 403 | // store the forwarding pointer into the mark word |
duke@435 | 404 | if ((HeapWord*)q != compact_top) { |
duke@435 | 405 | q->forward_to(oop(compact_top)); |
duke@435 | 406 | assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); |
duke@435 | 407 | } else { |
duke@435 | 408 | // if the object isn't moving we can just set the mark to the default |
duke@435 | 409 | // mark and handle it specially later on. |
duke@435 | 410 | q->init_mark(); |
duke@435 | 411 | assert(q->forwardee() == NULL, "should be forwarded to NULL"); |
duke@435 | 412 | } |
duke@435 | 413 | |
coleenp@548 | 414 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size)); |
duke@435 | 415 | compact_top += size; |
duke@435 | 416 | |
duke@435 | 417 | // we need to update the offset table so that the beginnings of objects can be |
duke@435 | 418 | // found during scavenge. Note that we are updating the offset table based on |
duke@435 | 419 | // where the object will be once the compaction phase finishes. |
duke@435 | 420 | if (compact_top > cp->threshold) |
duke@435 | 421 | cp->threshold = |
duke@435 | 422 | cp->space->cross_threshold(compact_top - size, compact_top); |
duke@435 | 423 | return compact_top; |
duke@435 | 424 | } |
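// A minimal standalone sketch (not HotSpot code; every name below is
// hypothetical, and sizes are in bytes for simplicity where HotSpot uses
// heap words) of the forwarding pass that forward() implements: live
// objects are assigned consecutive destination addresses, mirroring the
// compact_top bookkeeping, so the later adjust and compact phases can
// look up where each object will move.
struct SketchObj { size_t size; SketchObj* forwardee; };

static void sketch_assign_forwarding(SketchObj** live, size_t n, char* to) {
  for (size_t i = 0; i < n; i++) {
    live[i]->forwardee = (SketchObj*) to; // record destination in the object
    to += live[i]->size;                  // bump the destination cursor
  }
}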
duke@435 | 425 | |
duke@435 | 426 | |
duke@435 | 427 | bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, |
duke@435 | 428 | HeapWord* q, size_t deadlength) { |
duke@435 | 429 | if (allowed_deadspace_words >= deadlength) { |
duke@435 | 430 | allowed_deadspace_words -= deadlength; |
jcoomes@916 | 431 | CollectedHeap::fill_with_object(q, deadlength); |
jcoomes@916 | 432 | oop(q)->set_mark(oop(q)->mark()->set_marked()); |
jcoomes@916 | 433 | assert((int) deadlength == oop(q)->size(), "bad filler object size"); |
duke@435 | 434 | // Recall that we required "q == compaction_top". |
duke@435 | 435 | return true; |
duke@435 | 436 | } else { |
duke@435 | 437 | allowed_deadspace_words = 0; |
duke@435 | 438 | return false; |
duke@435 | 439 | } |
duke@435 | 440 | } |
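// Worked example (illustrative numbers): with allowed_deadspace_words == 10,
// a 4-word dead run is overwritten with a filler object and left in place
// (returns true; the budget drops to 6). A later 12-word dead run exceeds
// the remaining budget, so the budget is zeroed and false is returned;
// from then on compaction slides live objects over dead gaps instead of
// preserving them.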
duke@435 | 441 | |
duke@435 | 442 | #define block_is_always_obj(q) true |
duke@435 | 443 | #define obj_size(q) oop(q)->size() |
duke@435 | 444 | #define adjust_obj_size(s) s |
duke@435 | 445 | |
duke@435 | 446 | void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { |
duke@435 | 447 | SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); |
duke@435 | 448 | } |
duke@435 | 449 | |
duke@435 | 450 | // Faster object search. |
duke@435 | 451 | void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { |
duke@435 | 452 | SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); |
duke@435 | 453 | } |
duke@435 | 454 | |
duke@435 | 455 | void Space::adjust_pointers() { |
duke@435 | 456 | // adjust all the interior pointers to point at the new locations of objects |
duke@435 | 457 | // Used by MarkSweep::mark_sweep_phase3() |
duke@435 | 458 | |
duke@435 | 459 | // First check to see if there is any work to be done. |
duke@435 | 460 | if (used() == 0) { |
duke@435 | 461 | return; // Nothing to do. |
duke@435 | 462 | } |
duke@435 | 463 | |
duke@435 | 464 | // Otherwise... |
duke@435 | 465 | HeapWord* q = bottom(); |
duke@435 | 466 | HeapWord* t = end(); |
duke@435 | 467 | |
duke@435 | 468 | debug_only(HeapWord* prev_q = NULL); |
duke@435 | 469 | while (q < t) { |
duke@435 | 470 | if (oop(q)->is_gc_marked()) { |
duke@435 | 471 | // q is alive |
duke@435 | 472 | |
coleenp@548 | 473 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); |
duke@435 | 474 | // point all the oops to the new location |
duke@435 | 475 | size_t size = oop(q)->adjust_pointers(); |
coleenp@548 | 476 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); |
duke@435 | 477 | |
duke@435 | 478 | debug_only(prev_q = q); |
coleenp@548 | 479 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); |
duke@435 | 480 | |
duke@435 | 481 | q += size; |
duke@435 | 482 | } else { |
duke@435 | 483 | // q is not a live object. But we're not in a compactible space, |
duke@435 | 484 | // so we don't have live ranges. |
duke@435 | 485 | debug_only(prev_q = q); |
duke@435 | 486 | q += block_size(q); |
duke@435 | 487 | assert(q > prev_q, "we should be moving forward through memory"); |
duke@435 | 488 | } |
duke@435 | 489 | } |
duke@435 | 490 | assert(q == t, "just checking"); |
duke@435 | 491 | } |
duke@435 | 492 | |
duke@435 | 493 | void CompactibleSpace::adjust_pointers() { |
duke@435 | 494 | // Check first if there is any work to do. |
duke@435 | 495 | if (used() == 0) { |
duke@435 | 496 | return; // Nothing to do. |
duke@435 | 497 | } |
duke@435 | 498 | |
duke@435 | 499 | SCAN_AND_ADJUST_POINTERS(adjust_obj_size); |
duke@435 | 500 | } |
duke@435 | 501 | |
duke@435 | 502 | void CompactibleSpace::compact() { |
duke@435 | 503 | SCAN_AND_COMPACT(obj_size); |
duke@435 | 504 | } |
duke@435 | 505 | |
duke@435 | 506 | void Space::print_short() const { print_short_on(tty); } |
duke@435 | 507 | |
duke@435 | 508 | void Space::print_short_on(outputStream* st) const { |
duke@435 | 509 | st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K, |
duke@435 | 510 | (int) ((double) used() * 100 / capacity())); |
duke@435 | 511 | } |
duke@435 | 512 | |
duke@435 | 513 | void Space::print() const { print_on(tty); } |
duke@435 | 514 | |
duke@435 | 515 | void Space::print_on(outputStream* st) const { |
duke@435 | 516 | print_short_on(st); |
duke@435 | 517 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
duke@435 | 518 | bottom(), end()); |
duke@435 | 519 | } |
duke@435 | 520 | |
duke@435 | 521 | void ContiguousSpace::print_on(outputStream* st) const { |
duke@435 | 522 | print_short_on(st); |
duke@435 | 523 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
duke@435 | 524 | bottom(), top(), end()); |
duke@435 | 525 | } |
duke@435 | 526 | |
duke@435 | 527 | void OffsetTableContigSpace::print_on(outputStream* st) const { |
duke@435 | 528 | print_short_on(st); |
duke@435 | 529 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " |
duke@435 | 530 | INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
duke@435 | 531 | bottom(), top(), _offsets.threshold(), end()); |
duke@435 | 532 | } |
duke@435 | 533 | |
duke@435 | 534 | void ContiguousSpace::verify(bool allow_dirty) const { |
duke@435 | 535 | HeapWord* p = bottom(); |
duke@435 | 536 | HeapWord* t = top(); |
duke@435 | 537 | HeapWord* prev_p = NULL; |
duke@435 | 538 | while (p < t) { |
duke@435 | 539 | oop(p)->verify(); |
duke@435 | 540 | prev_p = p; |
duke@435 | 541 | p += oop(p)->size(); |
duke@435 | 542 | } |
duke@435 | 543 | guarantee(p == top(), "end of last object must match end of space"); |
duke@435 | 544 | if (top() != end()) { |
ysr@777 | 545 | guarantee(top() == block_start_const(end()-1) && |
ysr@777 | 546 | top() == block_start_const(top()), |
duke@435 | 547 | "top should be start of unallocated block, if it exists"); |
duke@435 | 548 | } |
duke@435 | 549 | } |
duke@435 | 550 | |
duke@435 | 551 | void Space::oop_iterate(OopClosure* blk) { |
duke@435 | 552 | ObjectToOopClosure blk2(blk); |
duke@435 | 553 | object_iterate(&blk2); |
duke@435 | 554 | } |
duke@435 | 555 | |
duke@435 | 556 | HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) { |
duke@435 | 557 | guarantee(false, "NYI"); |
duke@435 | 558 | return bottom(); |
duke@435 | 559 | } |
duke@435 | 560 | |
duke@435 | 561 | HeapWord* Space::object_iterate_careful_m(MemRegion mr, |
duke@435 | 562 | ObjectClosureCareful* cl) { |
duke@435 | 563 | guarantee(false, "NYI"); |
duke@435 | 564 | return bottom(); |
duke@435 | 565 | } |
duke@435 | 566 | |
duke@435 | 567 | |
duke@435 | 568 | void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { |
duke@435 | 569 | assert(!mr.is_empty(), "Should be non-empty"); |
duke@435 | 570 | // We use MemRegion(bottom(), end()) rather than used_region() below |
duke@435 | 571 | // because the two are not necessarily equal for some kinds of |
duke@435 | 572 | // spaces, in particular, certain kinds of free list spaces. |
duke@435 | 573 | // We could use the more complicated but more precise: |
duke@435 | 574 | // MemRegion(used_region().start(), round_to(used_region().end(), CardSize)) |
duke@435 | 575 | // but the slight imprecision seems acceptable in the assertion check. |
duke@435 | 576 | assert(MemRegion(bottom(), end()).contains(mr), |
duke@435 | 577 | "Should be within used space"); |
duke@435 | 578 | HeapWord* prev = cl->previous(); // max address from last time |
duke@435 | 579 | if (prev >= mr.end()) { // nothing to do |
duke@435 | 580 | return; |
duke@435 | 581 | } |
duke@435 | 582 | // This assert will not work when we go from cms space to perm |
duke@435 | 583 | // space, and use same closure. Easy fix deferred for later. XXX YSR |
duke@435 | 584 | // assert(prev == NULL || contains(prev), "Should be within space"); |
duke@435 | 585 | |
duke@435 | 586 | bool last_was_obj_array = false; |
duke@435 | 587 | HeapWord *blk_start_addr, *region_start_addr; |
duke@435 | 588 | if (prev > mr.start()) { |
duke@435 | 589 | region_start_addr = prev; |
duke@435 | 590 | blk_start_addr = prev; |
jmasa@953 | 591 | // The previous invocation may have pushed "prev" beyond the |
jmasa@953 | 592 | // last allocated block, yet there may still be blocks |
jmasa@953 | 593 | // in this region due to a particular coalescing policy. |
jmasa@953 | 594 | // Relax the assertion so that the case where the unallocated |
jmasa@953 | 595 | // block is maintained and "prev" is beyond the unallocated |
jmasa@953 | 596 | // block does not cause the assertion to fire. |
jmasa@953 | 597 | assert((BlockOffsetArrayUseUnallocatedBlock && |
jmasa@953 | 598 | (!is_in(prev))) || |
jmasa@953 | 599 | (blk_start_addr == block_start(region_start_addr)), "invariant"); |
duke@435 | 600 | } else { |
duke@435 | 601 | region_start_addr = mr.start(); |
duke@435 | 602 | blk_start_addr = block_start(region_start_addr); |
duke@435 | 603 | } |
duke@435 | 604 | HeapWord* region_end_addr = mr.end(); |
duke@435 | 605 | MemRegion derived_mr(region_start_addr, region_end_addr); |
duke@435 | 606 | while (blk_start_addr < region_end_addr) { |
duke@435 | 607 | const size_t size = block_size(blk_start_addr); |
duke@435 | 608 | if (block_is_obj(blk_start_addr)) { |
duke@435 | 609 | last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); |
duke@435 | 610 | } else { |
duke@435 | 611 | last_was_obj_array = false; |
duke@435 | 612 | } |
duke@435 | 613 | blk_start_addr += size; |
duke@435 | 614 | } |
duke@435 | 615 | if (!last_was_obj_array) { |
duke@435 | 616 | assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), |
duke@435 | 617 | "Should be within (closed) used space"); |
duke@435 | 618 | assert(blk_start_addr > prev, "Invariant"); |
duke@435 | 619 | cl->set_previous(blk_start_addr); // min address for next time |
duke@435 | 620 | } |
duke@435 | 621 | } |
duke@435 | 622 | |
duke@435 | 623 | bool Space::obj_is_alive(const HeapWord* p) const { |
duke@435 | 624 | assert (block_is_obj(p), "The address should point to an object"); |
duke@435 | 625 | return true; |
duke@435 | 626 | } |
duke@435 | 627 | |
duke@435 | 628 | void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) { |
duke@435 | 629 | assert(!mr.is_empty(), "Should be non-empty"); |
duke@435 | 630 | assert(used_region().contains(mr), "Should be within used space"); |
duke@435 | 631 | HeapWord* prev = cl->previous(); // max address from last time |
duke@435 | 632 | if (prev >= mr.end()) { // nothing to do |
duke@435 | 633 | return; |
duke@435 | 634 | } |
duke@435 | 635 | // See comment above (in more general method above) in case you |
duke@435 | 636 | // happen to use this method. |
duke@435 | 637 | assert(prev == NULL || is_in_reserved(prev), "Should be within space"); |
duke@435 | 638 | |
duke@435 | 639 | bool last_was_obj_array = false; |
duke@435 | 640 | HeapWord *obj_start_addr, *region_start_addr; |
duke@435 | 641 | if (prev > mr.start()) { |
duke@435 | 642 | region_start_addr = prev; |
duke@435 | 643 | obj_start_addr = prev; |
duke@435 | 644 | assert(obj_start_addr == block_start(region_start_addr), "invariant"); |
duke@435 | 645 | } else { |
duke@435 | 646 | region_start_addr = mr.start(); |
duke@435 | 647 | obj_start_addr = block_start(region_start_addr); |
duke@435 | 648 | } |
duke@435 | 649 | HeapWord* region_end_addr = mr.end(); |
duke@435 | 650 | MemRegion derived_mr(region_start_addr, region_end_addr); |
duke@435 | 651 | while (obj_start_addr < region_end_addr) { |
duke@435 | 652 | oop obj = oop(obj_start_addr); |
duke@435 | 653 | const size_t size = obj->size(); |
duke@435 | 654 | last_was_obj_array = cl->do_object_bm(obj, derived_mr); |
duke@435 | 655 | obj_start_addr += size; |
duke@435 | 656 | } |
duke@435 | 657 | if (!last_was_obj_array) { |
duke@435 | 658 | assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()), |
duke@435 | 659 | "Should be within (closed) used space"); |
duke@435 | 660 | assert(obj_start_addr > prev, "Invariant"); |
duke@435 | 661 | cl->set_previous(obj_start_addr); // min address for next time |
duke@435 | 662 | } |
duke@435 | 663 | } |
duke@435 | 664 | |
duke@435 | 665 | #ifndef SERIALGC |
duke@435 | 666 | #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 667 | \ |
duke@435 | 668 | void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\ |
duke@435 | 669 | HeapWord* obj_addr = mr.start(); \ |
duke@435 | 670 | HeapWord* t = mr.end(); \ |
duke@435 | 671 | while (obj_addr < t) { \ |
duke@435 | 672 | assert(oop(obj_addr)->is_oop(), "Should be an oop"); \ |
duke@435 | 673 | obj_addr += oop(obj_addr)->oop_iterate(blk); \ |
duke@435 | 674 | } \ |
duke@435 | 675 | } |
duke@435 | 676 | |
duke@435 | 677 | ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN) |
duke@435 | 678 | |
duke@435 | 679 | #undef ContigSpace_PAR_OOP_ITERATE_DEFN |
duke@435 | 680 | #endif // SERIALGC |
duke@435 | 681 | |
duke@435 | 682 | void ContiguousSpace::oop_iterate(OopClosure* blk) { |
duke@435 | 683 | if (is_empty()) return; |
duke@435 | 684 | HeapWord* obj_addr = bottom(); |
duke@435 | 685 | HeapWord* t = top(); |
duke@435 | 686 | // Could call object_iterate(), but this is easier. |
duke@435 | 687 | while (obj_addr < t) { |
duke@435 | 688 | obj_addr += oop(obj_addr)->oop_iterate(blk); |
duke@435 | 689 | } |
duke@435 | 690 | } |
duke@435 | 691 | |
duke@435 | 692 | void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) { |
duke@435 | 693 | if (is_empty()) { |
duke@435 | 694 | return; |
duke@435 | 695 | } |
duke@435 | 696 | MemRegion cur = MemRegion(bottom(), top()); |
duke@435 | 697 | mr = mr.intersection(cur); |
duke@435 | 698 | if (mr.is_empty()) { |
duke@435 | 699 | return; |
duke@435 | 700 | } |
duke@435 | 701 | if (mr.equals(cur)) { |
duke@435 | 702 | oop_iterate(blk); |
duke@435 | 703 | return; |
duke@435 | 704 | } |
duke@435 | 705 | assert(mr.end() <= top(), "just took an intersection above"); |
duke@435 | 706 | HeapWord* obj_addr = block_start(mr.start()); |
duke@435 | 707 | HeapWord* t = mr.end(); |
duke@435 | 708 | |
duke@435 | 709 | // Handle first object specially. |
duke@435 | 710 | oop obj = oop(obj_addr); |
duke@435 | 711 | SpaceMemRegionOopsIterClosure smr_blk(blk, mr); |
duke@435 | 712 | obj_addr += obj->oop_iterate(&smr_blk); |
duke@435 | 713 | while (obj_addr < t) { |
duke@435 | 714 | oop obj = oop(obj_addr); |
duke@435 | 715 | assert(obj->is_oop(), "expected an oop"); |
duke@435 | 716 | obj_addr += obj->size(); |
duke@435 | 717 | // If "obj_addr" is not greater than top, then the |
duke@435 | 718 | // entire object "obj" is within the region. |
duke@435 | 719 | if (obj_addr <= t) { |
duke@435 | 720 | obj->oop_iterate(blk); |
duke@435 | 721 | } else { |
duke@435 | 722 | // "obj" extends beyond end of region |
duke@435 | 723 | obj->oop_iterate(&smr_blk); |
duke@435 | 724 | break; |
duke@435 | 725 | } |
duke@435 | 726 | }; |
duke@435 | 727 | } |
duke@435 | 728 | |
duke@435 | 729 | void ContiguousSpace::object_iterate(ObjectClosure* blk) { |
duke@435 | 730 | if (is_empty()) return; |
duke@435 | 731 | WaterMark bm = bottom_mark(); |
duke@435 | 732 | object_iterate_from(bm, blk); |
duke@435 | 733 | } |
duke@435 | 734 | |
jmasa@952 | 735 | // For a contiguous space object_iterate() and safe_object_iterate() |
jmasa@952 | 736 | // are the same. |
jmasa@952 | 737 | void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { |
jmasa@952 | 738 | object_iterate(blk); |
jmasa@952 | 739 | } |
jmasa@952 | 740 | |
duke@435 | 741 | void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) { |
duke@435 | 742 | assert(mark.space() == this, "Mark does not match space"); |
duke@435 | 743 | HeapWord* p = mark.point(); |
duke@435 | 744 | while (p < top()) { |
duke@435 | 745 | blk->do_object(oop(p)); |
duke@435 | 746 | p += oop(p)->size(); |
duke@435 | 747 | } |
duke@435 | 748 | } |
duke@435 | 749 | |
duke@435 | 750 | HeapWord* |
duke@435 | 751 | ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) { |
duke@435 | 752 | HeapWord * limit = concurrent_iteration_safe_limit(); |
duke@435 | 753 | assert(limit <= top(), "sanity check"); |
duke@435 | 754 | for (HeapWord* p = bottom(); p < limit;) { |
duke@435 | 755 | size_t size = blk->do_object_careful(oop(p)); |
duke@435 | 756 | if (size == 0) { |
duke@435 | 757 | return p; // failed at p |
duke@435 | 758 | } else { |
duke@435 | 759 | p += size; |
duke@435 | 760 | } |
duke@435 | 761 | } |
duke@435 | 762 | return NULL; // all done |
duke@435 | 763 | } |
duke@435 | 764 | |
duke@435 | 765 | #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 766 | \ |
duke@435 | 767 | void ContiguousSpace:: \ |
duke@435 | 768 | oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ |
duke@435 | 769 | HeapWord* t; \ |
duke@435 | 770 | HeapWord* p = saved_mark_word(); \ |
duke@435 | 771 | assert(p != NULL, "expected saved mark"); \ |
duke@435 | 772 | \ |
duke@435 | 773 | const intx interval = PrefetchScanIntervalInBytes; \ |
duke@435 | 774 | do { \ |
duke@435 | 775 | t = top(); \ |
duke@435 | 776 | while (p < t) { \ |
duke@435 | 777 | Prefetch::write(p, interval); \ |
duke@435 | 778 | debug_only(HeapWord* prev = p); \ |
duke@435 | 779 | oop m = oop(p); \ |
duke@435 | 780 | p += m->oop_iterate(blk); \ |
duke@435 | 781 | } \ |
duke@435 | 782 | } while (t < top()); \ |
duke@435 | 783 | \ |
duke@435 | 784 | set_saved_mark_word(p); \ |
duke@435 | 785 | } |
duke@435 | 786 | |
duke@435 | 787 | ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN) |
duke@435 | 788 | |
duke@435 | 789 | #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN |
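// Note on the do/while in the macro above: applying the closure can itself
// promote objects into this space, advancing top(); re-reading top() after
// each pass ensures newly arrived objects are also scanned before the saved
// mark word is updated.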
duke@435 | 790 | |
duke@435 | 791 | // Very general, slow implementation. |
ysr@777 | 792 | HeapWord* ContiguousSpace::block_start_const(const void* p) const { |
duke@435 | 793 | assert(MemRegion(bottom(), end()).contains(p), "p not in space"); |
duke@435 | 794 | if (p >= top()) { |
duke@435 | 795 | return top(); |
duke@435 | 796 | } else { |
duke@435 | 797 | HeapWord* last = bottom(); |
duke@435 | 798 | HeapWord* cur = last; |
duke@435 | 799 | while (cur <= p) { |
duke@435 | 800 | last = cur; |
duke@435 | 801 | cur += oop(cur)->size(); |
duke@435 | 802 | } |
duke@435 | 803 | assert(oop(last)->is_oop(), "Should be an object start"); |
duke@435 | 804 | return last; |
duke@435 | 805 | } |
duke@435 | 806 | } |
duke@435 | 807 | |
duke@435 | 808 | size_t ContiguousSpace::block_size(const HeapWord* p) const { |
duke@435 | 809 | assert(MemRegion(bottom(), end()).contains(p), "p not in space"); |
duke@435 | 810 | HeapWord* current_top = top(); |
duke@435 | 811 | assert(p <= current_top, "p is not a block start"); |
duke@435 | 812 | assert(p == current_top || oop(p)->is_oop(), "p is not a block start"); |
duke@435 | 813 | if (p < current_top) |
duke@435 | 814 | return oop(p)->size(); |
duke@435 | 815 | else { |
duke@435 | 816 | assert(p == current_top, "just checking"); |
duke@435 | 817 | return pointer_delta(end(), (HeapWord*) p); |
duke@435 | 818 | } |
duke@435 | 819 | } |
duke@435 | 820 | |
duke@435 | 821 | // This version requires locking. |
duke@435 | 822 | inline HeapWord* ContiguousSpace::allocate_impl(size_t size, |
duke@435 | 823 | HeapWord* const end_value) { |
tonyp@2715 | 824 | // In G1 there are places where a GC worker can allocate into a |
tonyp@2715 | 825 | // region using this serial allocation code without being prone to a |
tonyp@2715 | 826 | // race with other GC workers (we ensure that no other GC worker can |
tonyp@2715 | 827 | // access the same region at the same time). So the assert below is |
tonyp@2715 | 828 | // too strong in the case of G1. |
duke@435 | 829 | assert(Heap_lock->owned_by_self() || |
duke@435 | 830 | (SafepointSynchronize::is_at_safepoint() && |
tonyp@2715 | 831 | (Thread::current()->is_VM_thread() || UseG1GC)), |
duke@435 | 832 | "not locked"); |
duke@435 | 833 | HeapWord* obj = top(); |
duke@435 | 834 | if (pointer_delta(end_value, obj) >= size) { |
duke@435 | 835 | HeapWord* new_top = obj + size; |
duke@435 | 836 | set_top(new_top); |
duke@435 | 837 | assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); |
duke@435 | 838 | return obj; |
duke@435 | 839 | } else { |
duke@435 | 840 | return NULL; |
duke@435 | 841 | } |
duke@435 | 842 | } |
duke@435 | 843 | |
duke@435 | 844 | // This version is lock-free. |
duke@435 | 845 | inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size, |
duke@435 | 846 | HeapWord* const end_value) { |
duke@435 | 847 | do { |
duke@435 | 848 | HeapWord* obj = top(); |
duke@435 | 849 | if (pointer_delta(end_value, obj) >= size) { |
duke@435 | 850 | HeapWord* new_top = obj + size; |
duke@435 | 851 | HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); |
duke@435 | 852 | // result can be one of two: |
duke@435 | 853 | // the old top value: the exchange succeeded |
duke@435 | 854 | // otherwise: the new value of the top is returned. |
duke@435 | 855 | if (result == obj) { |
duke@435 | 856 | assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); |
duke@435 | 857 | return obj; |
duke@435 | 858 | } |
duke@435 | 859 | } else { |
duke@435 | 860 | return NULL; |
duke@435 | 861 | } |
duke@435 | 862 | } while (true); |
duke@435 | 863 | } |
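// A minimal standalone sketch of the same lock-free bump-pointer pattern,
// restated with C++11 atomics so it can be read in isolation (hypothetical
// names; not part of this file). compare_exchange_weak refreshes 'obj' with
// the current top on failure, playing the role of re-reading top() above.
#include <atomic>

static char* sketch_par_allocate(std::atomic<char*>* top, char* end,
                                 size_t size) {
  char* obj = top->load();
  while ((size_t)(end - obj) >= size) {
    if (top->compare_exchange_weak(obj, obj + size)) {
      return obj;  // we won the race; [obj, obj + size) is ours
    }
    // CAS failed: 'obj' now holds the fresh top value, so just retry.
  }
  return NULL;  // not enough room left
}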
duke@435 | 864 | |
duke@435 | 865 | // Requires locking. |
duke@435 | 866 | HeapWord* ContiguousSpace::allocate(size_t size) { |
duke@435 | 867 | return allocate_impl(size, end()); |
duke@435 | 868 | } |
duke@435 | 869 | |
duke@435 | 870 | // Lock-free. |
duke@435 | 871 | HeapWord* ContiguousSpace::par_allocate(size_t size) { |
duke@435 | 872 | return par_allocate_impl(size, end()); |
duke@435 | 873 | } |
duke@435 | 874 | |
duke@435 | 875 | void ContiguousSpace::allocate_temporary_filler(int factor) { |
duke@435 | 876 | // allocate a temporary type array, decreasing the free size by the factor 'factor' |
duke@435 | 877 | assert(factor >= 0, "just checking"); |
duke@435 | 878 | size_t size = pointer_delta(end(), top()); |
duke@435 | 879 | |
duke@435 | 880 | // if space is full, return |
duke@435 | 881 | if (size == 0) return; |
duke@435 | 882 | |
duke@435 | 883 | if (factor > 0) { |
duke@435 | 884 | size -= size/factor; |
duke@435 | 885 | } |
duke@435 | 886 | size = align_object_size(size); |
duke@435 | 887 | |
kvn@1926 | 888 | const size_t array_header_size = typeArrayOopDesc::header_size(T_INT); |
kvn@1926 | 889 | if (size >= (size_t)align_object_size(array_header_size)) { |
kvn@1926 | 890 | size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint)); |
duke@435 | 891 | // allocate uninitialized int array |
duke@435 | 892 | typeArrayOop t = (typeArrayOop) allocate(size); |
duke@435 | 893 | assert(t != NULL, "allocation should succeed"); |
duke@435 | 894 | t->set_mark(markOopDesc::prototype()); |
duke@435 | 895 | t->set_klass(Universe::intArrayKlassObj()); |
duke@435 | 896 | t->set_length((int)length); |
duke@435 | 897 | } else { |
kvn@1926 | 898 | assert(size == CollectedHeap::min_fill_size(), |
duke@435 | 899 | "size for smallest fake object doesn't match"); |
duke@435 | 900 | instanceOop obj = (instanceOop) allocate(size); |
duke@435 | 901 | obj->set_mark(markOopDesc::prototype()); |
coleenp@602 | 902 | obj->set_klass_gap(0); |
never@1577 | 903 | obj->set_klass(SystemDictionary::Object_klass()); |
duke@435 | 904 | } |
duke@435 | 905 | } |
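// Worked example for the sizing above (illustrative, 64-bit: HeapWordSize ==
// 8, sizeof(jint) == 4): with 100 free words and factor == 2, size becomes
// 100 - 100/2 = 50 words. Assuming an int-array header of 2 words, the array
// length is (50 - 2) * (8 / 4) = 96 ints, so the filler consumes exactly the
// 50 words that were requested.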
duke@435 | 906 | |
jmasa@698 | 907 | void EdenSpace::clear(bool mangle_space) { |
jmasa@698 | 908 | ContiguousSpace::clear(mangle_space); |
duke@435 | 909 | set_soft_end(end()); |
duke@435 | 910 | } |
duke@435 | 911 | |
duke@435 | 912 | // Requires locking. |
duke@435 | 913 | HeapWord* EdenSpace::allocate(size_t size) { |
duke@435 | 914 | return allocate_impl(size, soft_end()); |
duke@435 | 915 | } |
duke@435 | 916 | |
duke@435 | 917 | // Lock-free. |
duke@435 | 918 | HeapWord* EdenSpace::par_allocate(size_t size) { |
duke@435 | 919 | return par_allocate_impl(size, soft_end()); |
duke@435 | 920 | } |
duke@435 | 921 | |
duke@435 | 922 | HeapWord* ConcEdenSpace::par_allocate(size_t size) |
duke@435 | 923 | { |
duke@435 | 924 | do { |
duke@435 | 925 | // The invariant is that top() must be read before end(): top() can |
duke@435 | 926 | // never be greater than end(), but if _soft_end is updated between |
duke@435 | 927 | // 'end_val = end();' and 'top_val = top();', top() can grow up to |
duke@435 | 928 | // the new end() and the condition 'top_val > end_val' becomes true. |
duke@435 | 929 | // To enforce this loading order, OrderAccess::loadload() is required |
duke@435 | 930 | // after the top() read. |
duke@435 | 931 | HeapWord* obj = top(); |
duke@435 | 932 | OrderAccess::loadload(); |
duke@435 | 933 | if (pointer_delta(*soft_end_addr(), obj) >= size) { |
duke@435 | 934 | HeapWord* new_top = obj + size; |
duke@435 | 935 | HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); |
duke@435 | 936 | // result can be one of two: |
duke@435 | 937 | // the old top value: the exchange succeeded |
duke@435 | 938 | // otherwise: the new value of the top is returned. |
duke@435 | 939 | if (result == obj) { |
duke@435 | 940 | assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); |
duke@435 | 941 | return obj; |
duke@435 | 942 | } |
duke@435 | 943 | } else { |
duke@435 | 944 | return NULL; |
duke@435 | 945 | } |
duke@435 | 946 | } while (true); |
duke@435 | 947 | } |
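// Illustrative interleaving (hypothetical addresses) of the hazard the
// loadload() above prevents: if the soft_end load were hoisted before the
// top load, a thread could read a stale soft_end of 0x2000, then -- after a
// concurrent expansion to 0x3000 lets another thread bump top to 0x2800 --
// read top == 0x2800. pointer_delta(0x2000, 0x2800) underflows the unsigned
// delta, the size check passes spuriously, and the CAS could push top past
// the soft end. Reading top first, with the barrier, rules this out.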
duke@435 | 948 | |
duke@435 | 949 | |
duke@435 | 950 | HeapWord* OffsetTableContigSpace::initialize_threshold() { |
duke@435 | 951 | return _offsets.initialize_threshold(); |
duke@435 | 952 | } |
duke@435 | 953 | |
duke@435 | 954 | HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) { |
duke@435 | 955 | _offsets.alloc_block(start, end); |
duke@435 | 956 | return _offsets.threshold(); |
duke@435 | 957 | } |
duke@435 | 958 | |
duke@435 | 959 | OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, |
duke@435 | 960 | MemRegion mr) : |
duke@435 | 961 | _offsets(sharedOffsetArray, mr), |
duke@435 | 962 | _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true) |
duke@435 | 963 | { |
duke@435 | 964 | _offsets.set_contig_space(this); |
jmasa@698 | 965 | initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
duke@435 | 966 | } |
duke@435 | 967 | |
duke@435 | 968 | |
duke@435 | 969 | class VerifyOldOopClosure : public OopClosure { |
duke@435 | 970 | public: |
coleenp@548 | 971 | oop _the_obj; |
coleenp@548 | 972 | bool _allow_dirty; |
duke@435 | 973 | void do_oop(oop* p) { |
coleenp@548 | 974 | _the_obj->verify_old_oop(p, _allow_dirty); |
coleenp@548 | 975 | } |
coleenp@548 | 976 | void do_oop(narrowOop* p) { |
coleenp@548 | 977 | _the_obj->verify_old_oop(p, _allow_dirty); |
duke@435 | 978 | } |
duke@435 | 979 | }; |
duke@435 | 980 | |
duke@435 | 981 | #define OBJ_SAMPLE_INTERVAL 0 |
duke@435 | 982 | #define BLOCK_SAMPLE_INTERVAL 100 |
duke@435 | 983 | |
duke@435 | 984 | void OffsetTableContigSpace::verify(bool allow_dirty) const { |
duke@435 | 985 | HeapWord* p = bottom(); |
duke@435 | 986 | HeapWord* prev_p = NULL; |
duke@435 | 987 | VerifyOldOopClosure blk; // Does this do anything? |
coleenp@548 | 988 | blk._allow_dirty = allow_dirty; |
duke@435 | 989 | int objs = 0; |
duke@435 | 990 | int blocks = 0; |
duke@435 | 991 | |
duke@435 | 992 | if (VerifyObjectStartArray) { |
duke@435 | 993 | _offsets.verify(); |
duke@435 | 994 | } |
duke@435 | 995 | |
duke@435 | 996 | while (p < top()) { |
duke@435 | 997 | size_t size = oop(p)->size(); |
duke@435 | 998 | // For a sampling of objects in the space, find each one using the |
duke@435 | 999 | // block offset table. |
duke@435 | 1000 | if (blocks == BLOCK_SAMPLE_INTERVAL) { |
ysr@777 | 1001 | guarantee(p == block_start_const(p + (size/2)), |
ysr@777 | 1002 | "check offset computation"); |
duke@435 | 1003 | blocks = 0; |
duke@435 | 1004 | } else { |
duke@435 | 1005 | blocks++; |
duke@435 | 1006 | } |
duke@435 | 1007 | |
duke@435 | 1008 | if (objs == OBJ_SAMPLE_INTERVAL) { |
duke@435 | 1009 | oop(p)->verify(); |
coleenp@548 | 1010 | blk._the_obj = oop(p); |
duke@435 | 1011 | oop(p)->oop_iterate(&blk); |
duke@435 | 1012 | objs = 0; |
duke@435 | 1013 | } else { |
duke@435 | 1014 | objs++; |
duke@435 | 1015 | } |
duke@435 | 1016 | prev_p = p; |
duke@435 | 1017 | p += size; |
duke@435 | 1018 | } |
duke@435 | 1019 | guarantee(p == top(), "end of last object must match end of space"); |
duke@435 | 1020 | } |
duke@435 | 1021 | |
duke@435 | 1022 | void OffsetTableContigSpace::serialize_block_offset_array_offsets( |
duke@435 | 1023 | SerializeOopClosure* soc) { |
duke@435 | 1024 | _offsets.serialize(soc); |
duke@435 | 1025 | } |
duke@435 | 1026 | |
duke@435 | 1027 | |
jcoomes@873 | 1028 | size_t TenuredSpace::allowed_dead_ratio() const { |
duke@435 | 1029 | return MarkSweepDeadRatio; |
duke@435 | 1030 | } |
duke@435 | 1031 | |
duke@435 | 1032 | |
jcoomes@873 | 1033 | size_t ContigPermSpace::allowed_dead_ratio() const { |
duke@435 | 1034 | return PermMarkSweepDeadRatio; |
duke@435 | 1035 | } |