Thu, 21 Aug 2014 13:57:51 -0700
8046070: Class Data Sharing clean up and refactoring
Summary: Cleaned up CDS to be more configurable, maintainable and extensible
Reviewed-by: dholmes, coleenp, acorn, mchung
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
#define SHARE_VM_MEMORY_SPACE_INLINE_HPP

#include "gc_interface/collectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
  /* Compute the new addresses for the live objects and store them in the \
   * mark word.  Used by MarkSweep::mark_sweep_phase2(). \
   */ \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
\
  /* We're sure to be here before any objects are compacted into this \
   * space, so this is a good time to initialize this: \
   */ \
  set_compaction_top(bottom()); \
\
  if (cp->space == NULL) { \
    assert(cp->gen != NULL, "need a generation"); \
    assert(cp->threshold == NULL, "just checking"); \
    assert(cp->gen->first_compaction_space() == this, "just checking"); \
    cp->space = cp->gen->first_compaction_space(); \
    compact_top = cp->space->bottom(); \
    cp->space->set_compaction_top(compact_top); \
    cp->threshold = cp->space->initialize_threshold(); \
  } else { \
    compact_top = cp->space->compaction_top(); \
  } \
\
  /* We allow some amount of garbage towards the bottom of the space, so \
   * we don't start compacting before there is a significant gain to be made. \
   * Occasionally, we want to ensure a full compaction, which is determined \
   * by the MarkSweepAlwaysCompactCount parameter. \
   */ \
  uint invocations = MarkSweep::total_invocations(); \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
  size_t allowed_deadspace = 0; \
  if (skip_dead) { \
    const size_t ratio = allowed_dead_ratio(); \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
  } \
\
  HeapWord* q = bottom(); \
  HeapWord* t = scan_limit(); \
\
  HeapWord*  end_of_live = q;    /* One byte beyond the last byte of the \
                                    last live object. */ \
  HeapWord*  first_dead = end(); /* The first dead object. */ \
  LiveRange* liveRange  = NULL;  /* The current live range, recorded in the \
                                    first header of preceding free area. */ \
  _first_dead = first_dead; \
\
  const intx interval = PrefetchScanIntervalInBytes; \
\
  while (q < t) { \
    assert(!block_is_obj(q) || \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
           oop(q)->mark()->has_bias_pattern(), \
           "these are the only valid states during a mark sweep"); \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
      /* prefetch beyond q */ \
      Prefetch::write(q, interval); \
      size_t size = block_size(q); \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
      q += size; \
      end_of_live = q; \
    } else { \
      /* run over all the contiguous dead objects */ \
      HeapWord* end = q; \
      do { \
        /* prefetch beyond end */ \
        Prefetch::write(end, interval); \
        end += block_size(end); \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked())); \
\
      /* see if we might want to pretend this object is alive so that \
       * we don't have to compact quite as often. \
       */ \
      if (allowed_deadspace > 0 && q == compact_top) { \
        size_t sz = pointer_delta(end, q); \
        if (insert_deadspace(allowed_deadspace, q, sz)) { \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
          q = end; \
          end_of_live = end; \
          continue; \
        } \
      } \
\
      /* otherwise, it really is a free region. */ \
\
      /* for the previous LiveRange, record the end of the live objects. */ \
      if (liveRange) { \
        liveRange->set_end(q); \
      } \
\
      /* record the current LiveRange object. \
       * liveRange->start() is overlaid on the mark word. \
       */ \
      liveRange = (LiveRange*)q; \
      liveRange->set_start(end); \
      liveRange->set_end(end); \
\
      /* see if this is the first dead region. */ \
      if (q < first_dead) { \
        first_dead = q; \
      } \
\
      /* move on to the next object */ \
      q = end; \
    } \
  } \
\
  assert(q == t, "just checking"); \
  if (liveRange != NULL) { \
    liveRange->set_end(q); \
  } \
  _end_of_live = end_of_live; \
  if (end_of_live < first_dead) { \
    first_dead = end_of_live; \
  } \
  _first_dead = first_dead; \
\
  /* save the compaction_top of the compaction space. */ \
  cp->space->set_compaction_top(compact_top); \
}
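
// A minimal usage sketch (illustrative only; modeled on how the concrete
// space classes in space.cpp instantiate this macro). scan_limit,
// block_is_obj and block_size are *names* of member functions that are
// textually substituted into the body, which is why this is a macro rather
// than a virtual call:
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }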

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
  /* Adjust all the interior pointers to point at the new locations of the \
   * objects.  Used by MarkSweep::mark_sweep_phase3(). */ \
\
  HeapWord* q = bottom(); \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
\
  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
\
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    /* we have a chunk of the space which hasn't moved and we've \
     * reinitialized the mark word during the previous pass, so we can't \
     * use is_gc_marked for the traversal. */ \
    HeapWord* end = _first_dead; \
\
    while (q < end) { \
      /* I originally tried to conjoin "block_start(q) == q" to the \
       * assertion below, but that doesn't work, because you can't \
       * accurately traverse previous objects to get to the current one \
       * after their pointers have been updated, until the actual \
       * compaction is done.  dld, 4/00 */ \
      assert(block_is_obj(q), \
             "should be at block boundaries, and should be looking at objs"); \
\
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
\
      q += size; \
    } \
\
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ This is funky.  Using this to read the previously written \
       * LiveRange.  See also use below. */ \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
\
  const intx interval = PrefetchScanIntervalInBytes; \
\
  debug_only(HeapWord* prev_q = NULL); \
  while (q < t) { \
    /* prefetch beyond q */ \
    Prefetch::write(q, interval); \
    if (oop(q)->is_gc_marked()) { \
      /* q is alive */ \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
      debug_only(prev_q = q); \
      q += size; \
    } else { \
      /* q is not a live object, so its mark should point at the next \
       * live object */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } \
  } \
\
  assert(q == t, "just checking"); \
}
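
// Illustrative usage sketch (an assumption about the call site, modeled on
// CompactibleSpace::adjust_pointers() in space.cpp; adjust_obj_size names a
// member function substituted for the macro parameter):
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) {
//       return;   // nothing to do
//     }
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }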

#define SCAN_AND_COMPACT(obj_size) { \
  /* Copy all live objects to their new location. \
   * Used by MarkSweep::mark_sweep_phase4(). */ \
\
  HeapWord* q = bottom(); \
  HeapWord* const t = _end_of_live; \
  debug_only(HeapWord* prev_q = NULL); \
\
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    debug_only( \
    /* we have a chunk of the space which hasn't moved and we've reinitialized \
     * the mark word during the previous pass, so we can't use is_gc_marked \
     * for the traversal. */ \
    HeapWord* const end = _first_dead; \
\
    while (q < end) { \
      size_t size = obj_size(q); \
      assert(!oop(q)->is_gc_marked(), \
             "should be unmarked (special dense prefix handling)"); \
      debug_only(prev_q = q); \
      q += size; \
    } \
    ) /* debug_only */ \
\
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ Funky */ \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
\
  const intx scan_interval = PrefetchScanIntervalInBytes; \
  const intx copy_interval = PrefetchCopyIntervalInBytes; \
  while (q < t) { \
    if (!oop(q)->is_gc_marked()) { \
      /* mark is pointer to next marked oop */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } else { \
      /* prefetch beyond q */ \
      Prefetch::read(q, scan_interval); \
\
      /* size and destination */ \
      size_t size = obj_size(q); \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
      /* prefetch beyond compaction_top */ \
      Prefetch::write(compaction_top, copy_interval); \
\
      /* copy object and reinit its mark */ \
      assert(q != compaction_top, "everything in this pass should be moving"); \
      Copy::aligned_conjoint_words(q, compaction_top, size); \
      oop(compaction_top)->init_mark(); \
      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
\
      debug_only(prev_q = q); \
      q += size; \
    } \
  } \
\
  /* Let's remember if we were empty before we did the compaction. */ \
  bool was_empty = used_region().is_empty(); \
  /* Reset space after compaction is complete */ \
  reset_after_compaction(); \
  /* We do this clear, below, since it has overloaded meanings for some */ \
  /* space subtypes.  For example, OffsetTableContigSpace's that were */ \
  /* compacted into will have had their offset table thresholds updated */ \
  /* continuously, but those that weren't need to have their thresholds */ \
  /* re-initialized.  Also mangles unused area for debugging. */ \
  if (used_region().is_empty()) { \
    if (!was_empty) clear(SpaceDecorator::Mangle); \
  } else { \
    if (ZapUnusedHeapArea) mangle_unused_area(); \
  } \
}
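
// Illustrative usage sketch (an assumption modeled on
// ContiguousSpace::compact() in space.cpp; obj_size is the member function
// substituted for the macro parameter):
//
//   void ContiguousSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }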

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement that we keep "_offsets" up to date with
// allocations, we sequentialize these with a lock.  Therefore, this is
// best used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future some lock-free manner of keeping the
  // coordination could be adopted.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
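
// A hypothetical sketch of the lock-free coordination alluded to above (an
// assumption, not the current implementation): the top pointer could be
// bumped with a CAS, with the offset table update made thread-safe
// separately.  Atomic::cmpxchg_ptr returns the old value, so the CAS
// succeeded only if it returned the value we read:
//
//   HeapWord* obj;
//   do {
//     obj = top();
//     if (pointer_delta(end(), obj) < size) return NULL;  // out of space
//   } while ((HeapWord*)Atomic::cmpxchg_ptr(obj + size, top_addr(), obj) != obj);
//   _offsets.alloc_block(obj, size);  // would itself need to be thread-safe
//   return obj;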

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

#endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP