--- a/src/share/vm/memory/space.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/memory/space.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -28,12 +28,279 @@
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
 
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
 }
 
+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
+  /* Compute the new addresses for the live objects and store it in the mark \
+   * Used by universe::mark_sweep_phase2() \
+   */ \
+  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
+ \
+  /* We're sure to be here before any objects are compacted into this \
+   * space, so this is a good time to initialize this: \
+   */ \
+  set_compaction_top(bottom()); \
+ \
+  if (cp->space == NULL) { \
+    assert(cp->gen != NULL, "need a generation"); \
+    assert(cp->threshold == NULL, "just checking"); \
+    assert(cp->gen->first_compaction_space() == this, "just checking"); \
+    cp->space = cp->gen->first_compaction_space(); \
+    compact_top = cp->space->bottom(); \
+    cp->space->set_compaction_top(compact_top); \
+    cp->threshold = cp->space->initialize_threshold(); \
+  } else { \
+    compact_top = cp->space->compaction_top(); \
+  } \
+ \
+  /* We allow some amount of garbage towards the bottom of the space, so \
+   * we don't start compacting before there is a significant gain to be made.\
+   * Occasionally, we want to ensure a full compaction, which is determined \
+   * by the MarkSweepAlwaysCompactCount parameter. \
+   */ \
+  uint invocations = MarkSweep::total_invocations(); \
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
+ \
+  size_t allowed_deadspace = 0; \
+  if (skip_dead) { \
+    const size_t ratio = allowed_dead_ratio(); \
+    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
+  } \
+ \
+  HeapWord* q = bottom(); \
+  HeapWord* t = scan_limit(); \
+ \
+  HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
+                               live object. */ \
+  HeapWord* first_dead = end();/* The first dead object. */ \
+  LiveRange* liveRange = NULL; /* The current live range, recorded in the \
+                                  first header of preceding free area. */ \
+  _first_dead = first_dead; \
+ \
+  const intx interval = PrefetchScanIntervalInBytes; \
+ \
+  while (q < t) { \
+    assert(!block_is_obj(q) || \
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
+           oop(q)->mark()->has_bias_pattern(), \
+           "these are the only valid states during a mark sweep"); \
+    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
+      /* prefetch beyond q */ \
+      Prefetch::write(q, interval); \
+      size_t size = block_size(q); \
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
+      q += size; \
+      end_of_live = q; \
+    } else { \
+      /* run over all the contiguous dead objects */ \
+      HeapWord* end = q; \
+      do { \
+        /* prefetch beyond end */ \
+        Prefetch::write(end, interval); \
+        end += block_size(end); \
+      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
+ \
+      /* see if we might want to pretend this object is alive so that \
+       * we don't have to compact quite as often. \
+       */ \
+      if (allowed_deadspace > 0 && q == compact_top) { \
+        size_t sz = pointer_delta(end, q); \
+        if (insert_deadspace(allowed_deadspace, q, sz)) { \
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
+          q = end; \
+          end_of_live = end; \
+          continue; \
+        } \
+      } \
+ \
+      /* otherwise, it really is a free region. */ \
+ \
+      /* for the previous LiveRange, record the end of the live objects. */ \
+      if (liveRange) { \
+        liveRange->set_end(q); \
+      } \
+ \
+      /* record the current LiveRange object. \
+       * liveRange->start() is overlaid on the mark word. \
+       */ \
+      liveRange = (LiveRange*)q; \
+      liveRange->set_start(end); \
+      liveRange->set_end(end); \
+ \
+      /* see if this is the first dead region. */ \
+      if (q < first_dead) { \
+        first_dead = q; \
+      } \
+ \
+      /* move on to the next object */ \
+      q = end; \
+    } \
+  } \
+ \
+  assert(q == t, "just checking"); \
+  if (liveRange != NULL) { \
+    liveRange->set_end(q); \
+  } \
+  _end_of_live = end_of_live; \
+  if (end_of_live < first_dead) { \
+    first_dead = end_of_live; \
+  } \
+  _first_dead = first_dead; \
+ \
+  /* save the compaction_top of the compaction space. */ \
+  cp->space->set_compaction_top(compact_top); \
+}
+
+#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
+  /* adjust all the interior pointers to point at the new locations of objects \
+   * Used by MarkSweep::mark_sweep_phase3() */ \
+ \
+  HeapWord* q = bottom(); \
+  HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
+ \
+  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
+ \
+  if (q < t && _first_dead > q && \
+      !oop(q)->is_gc_marked()) { \
+    /* we have a chunk of the space which hasn't moved and we've \
+     * reinitialized the mark word during the previous pass, so we can't \
+     * use is_gc_marked for the traversal. */ \
+    HeapWord* end = _first_dead; \
+ \
+    while (q < end) { \
+      /* I originally tried to conjoin "block_start(q) == q" to the \
+       * assertion below, but that doesn't work, because you can't \
+       * accurately traverse previous objects to get to the current one \
+       * after their pointers have been \
+       * updated, until the actual compaction is done. dld, 4/00 */ \
+      assert(block_is_obj(q), \
+             "should be at block boundaries, and should be looking at objs"); \
+ \
+      /* point all the oops to the new location */ \
+      size_t size = oop(q)->adjust_pointers(); \
+      size = adjust_obj_size(size); \
+ \
+      q += size; \
+    } \
+ \
+    if (_first_dead == t) { \
+      q = t; \
+    } else { \
+      /* $$$ This is funky. Using this to read the previously written \
+       * LiveRange. See also use below. */ \
+      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
+    } \
+  } \
+ \
+  const intx interval = PrefetchScanIntervalInBytes; \
+ \
+  debug_only(HeapWord* prev_q = NULL); \
+  while (q < t) { \
+    /* prefetch beyond q */ \
+    Prefetch::write(q, interval); \
+    if (oop(q)->is_gc_marked()) { \
+      /* q is alive */ \
+      /* point all the oops to the new location */ \
+      size_t size = oop(q)->adjust_pointers(); \
+      size = adjust_obj_size(size); \
+      debug_only(prev_q = q); \
+      q += size; \
+    } else { \
+      /* q is not a live object, so its mark should point at the next \
+       * live object */ \
+      debug_only(prev_q = q); \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+      assert(q > prev_q, "we should be moving forward through memory"); \
+    } \
+  } \
+ \
+  assert(q == t, "just checking"); \
+}
+
+#define SCAN_AND_COMPACT(obj_size) { \
+  /* Copy all live objects to their new location \
+   * Used by MarkSweep::mark_sweep_phase4() */ \
+ \
+  HeapWord* q = bottom(); \
+  HeapWord* const t = _end_of_live; \
+  debug_only(HeapWord* prev_q = NULL); \
+ \
+  if (q < t && _first_dead > q && \
+      !oop(q)->is_gc_marked()) { \
+    debug_only( \
+    /* we have a chunk of the space which hasn't moved and we've reinitialized \
+     * the mark word during the previous pass, so we can't use is_gc_marked for \
+     * the traversal. */ \
+    HeapWord* const end = _first_dead; \
+ \
+    while (q < end) { \
+      size_t size = obj_size(q); \
+      assert(!oop(q)->is_gc_marked(), \
+             "should be unmarked (special dense prefix handling)"); \
+      debug_only(prev_q = q); \
+      q += size; \
+    } \
+    ) /* debug_only */ \
+ \
+    if (_first_dead == t) { \
+      q = t; \
+    } else { \
+      /* $$$ Funky */ \
+      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
+    } \
+  } \
+ \
+  const intx scan_interval = PrefetchScanIntervalInBytes; \
+  const intx copy_interval = PrefetchCopyIntervalInBytes; \
+  while (q < t) { \
+    if (!oop(q)->is_gc_marked()) { \
+      /* mark is pointer to next marked oop */ \
+      debug_only(prev_q = q); \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+      assert(q > prev_q, "we should be moving forward through memory"); \
+    } else { \
+      /* prefetch beyond q */ \
+      Prefetch::read(q, scan_interval); \
+ \
+      /* size and destination */ \
+      size_t size = obj_size(q); \
+      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
+ \
+      /* prefetch beyond compaction_top */ \
+      Prefetch::write(compaction_top, copy_interval); \
+ \
+      /* copy object and reinit its mark */ \
+      assert(q != compaction_top, "everything in this pass should be moving"); \
+      Copy::aligned_conjoint_words(q, compaction_top, size); \
+      oop(compaction_top)->init_mark(); \
+      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+ \
+      debug_only(prev_q = q); \
+      q += size; \
+    } \
+  } \
+ \
+  /* Let's remember if we were empty before we did the compaction. */ \
+  bool was_empty = used_region().is_empty(); \
+  /* Reset space after compaction is complete */ \
+  reset_after_compaction(); \
+  /* We do this clear, below, since it has overloaded meanings for some */ \
+  /* space subtypes. For example, OffsetTableContigSpace's that were */ \
+  /* compacted into will have had their offset table thresholds updated */ \
+  /* continuously, but those that weren't need to have their thresholds */ \
+  /* re-initialized. Also mangles unused area for debugging. */ \
+  if (used_region().is_empty()) { \
+    if (!was_empty) clear(SpaceDecorator::Mangle); \
+  } else { \
+    if (ZapUnusedHeapArea) mangle_unused_area(); \
+  } \
+}
+
 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {