--- a/src/share/vm/memory/space.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/memory/space.hpp	Thu May 08 15:37:17 2014 +0200
@@ -33,24 +33,8 @@
 #include "memory/watermark.hpp"
 #include "oops/markOop.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/prefetch.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/workgroup.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // A space is an abstraction for the "storage units" backing
 // up the generation abstraction. It includes specific
@@ -512,272 +496,6 @@
                       size_t word_len);
 };
 
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2()                                   \
-   */                                                                        \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
-                                                                             \
-  /* We're sure to be here before any objects are compacted into this        \
-   * space, so this is a good time to initialize this:                       \
-   */                                                                        \
-  set_compaction_top(bottom());                                              \
-                                                                             \
-  if (cp->space == NULL) {                                                   \
-    assert(cp->gen != NULL, "need a generation");                            \
-    assert(cp->threshold == NULL, "just checking");                          \
-    assert(cp->gen->first_compaction_space() == this, "just checking");      \
-    cp->space = cp->gen->first_compaction_space();                           \
-    compact_top = cp->space->bottom();                                       \
-    cp->space->set_compaction_top(compact_top);                              \
-    cp->threshold = cp->space->initialize_threshold();                       \
-  } else {                                                                   \
-    compact_top = cp->space->compaction_top();                               \
-  }                                                                          \
-                                                                             \
-  /* We allow some amount of garbage towards the bottom of the space, so     \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined  \
-   * by the MarkSweepAlwaysCompactCount parameter.                           \
-   */                                                                        \
-  uint invocations = MarkSweep::total_invocations();                         \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
-                                                                             \
-  size_t allowed_deadspace = 0;                                              \
-  if (skip_dead) {                                                           \
-    const size_t ratio = allowed_dead_ratio();                               \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
-  }                                                                          \
-                                                                             \
-  HeapWord* q = bottom();                                                    \
-  HeapWord* t = scan_limit();                                                \
-                                                                             \
-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
-                                   live object. */                           \
-  HeapWord*  first_dead = end();/* The first dead object. */                 \
-  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
-                                   first header of preceding free area. */   \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  const intx interval = PrefetchScanIntervalInBytes;                         \
-                                                                             \
-  while (q < t) {                                                            \
-    assert(!block_is_obj(q) ||                                               \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
-           oop(q)->mark()->has_bias_pattern(),                               \
-           "these are the only valid states during a mark sweep");           \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
-      /* prefetch beyond q */                                                \
-      Prefetch::write(q, interval);                                          \
-      size_t size = block_size(q);                                           \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
-      q += size;                                                             \
-      end_of_live = q;                                                       \
-    } else {                                                                 \
-      /* run over all the contiguous dead objects */                         \
-      HeapWord* end = q;                                                     \
-      do {                                                                   \
-        /* prefetch beyond end */                                            \
-        Prefetch::write(end, interval);                                      \
-        end += block_size(end);                                              \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
-                                                                             \
-      /* see if we might want to pretend this object is alive so that        \
-       * we don't have to compact quite as often.                            \
-       */                                                                    \
-      if (allowed_deadspace > 0 && q == compact_top) {                       \
-        size_t sz = pointer_delta(end, q);                                   \
-        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
-          q = end;                                                           \
-          end_of_live = end;                                                 \
-          continue;                                                          \
-        }                                                                    \
-      }                                                                      \
-                                                                             \
-      /* otherwise, it really is a free region. */                           \
-                                                                             \
-      /* for the previous LiveRange, record the end of the live objects. */  \
-      if (liveRange) {                                                       \
-        liveRange->set_end(q);                                               \
-      }                                                                      \
-                                                                             \
-      /* record the current LiveRange object.                                \
-       * liveRange->start() is overlaid on the mark word.                    \
-       */                                                                    \
-      liveRange = (LiveRange*)q;                                             \
-      liveRange->set_start(end);                                             \
-      liveRange->set_end(end);                                               \
-                                                                             \
-      /* see if this is the first dead region. */                            \
-      if (q < first_dead) {                                                  \
-        first_dead = q;                                                      \
-      }                                                                      \
-                                                                             \
-      /* move on to the next object */                                       \
-      q = end;                                                               \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  assert(q == t, "just checking");                                           \
-  if (liveRange != NULL) {                                                   \
-    liveRange->set_end(q);                                                   \
-  }                                                                          \
-  _end_of_live = end_of_live;                                                \
-  if (end_of_live < first_dead) {                                            \
-    first_dead = end_of_live;                                                \
-  }                                                                          \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  /* save the compaction_top of the compaction space. */                     \
-  cp->space->set_compaction_top(compact_top);                                \
-}
-
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                          \
-  /* adjust all the interior pointers to point at the new locations of objects\
-   * Used by MarkSweep::mark_sweep_phase3() */                               \
-                                                                             \
-  HeapWord* q = bottom();                                                    \
-  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */\
-                                                                             \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");              \
-                                                                             \
-  if (q < t && _first_dead > q &&                                            \
-      !oop(q)->is_gc_marked()) {                                             \
-    /* we have a chunk of the space which hasn't moved and we've             \
-     * reinitialized the mark word during the previous pass, so we can't     \
-     * use is_gc_marked for the traversal. */                                \
-    HeapWord* end = _first_dead;                                             \
-                                                                             \
-    while (q < end) {                                                        \
-      /* I originally tried to conjoin "block_start(q) == q" to the          \
-       * assertion below, but that doesn't work, because you can't           \
-       * accurately traverse previous objects to get to the current one      \
-       * after their pointers have been                                      \
-       * updated, until the actual compaction is done.  dld, 4/00 */         \
-      assert(block_is_obj(q),                                                \
-             "should be at block boundaries, and should be looking at objs");\
-                                                                             \
-      /* point all the oops to the new location */                           \
-      size_t size = oop(q)->adjust_pointers();                               \
-      size = adjust_obj_size(size);                                          \
-                                                                             \
-      q += size;                                                             \
-    }                                                                        \
-                                                                             \
-    if (_first_dead == t) {                                                  \
-      q = t;                                                                 \
-    } else {                                                                 \
-      /* $$$ This is funky.  Using this to read the previously written       \
-       * LiveRange.  See also use below. */                                  \
-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();             \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  const intx interval = PrefetchScanIntervalInBytes;                         \
-                                                                             \
-  debug_only(HeapWord* prev_q = NULL);                                       \
-  while (q < t) {                                                            \
-    /* prefetch beyond q */                                                  \
-    Prefetch::write(q, interval);                                            \
-    if (oop(q)->is_gc_marked()) {                                            \
-      /* q is alive */                                                       \
-      /* point all the oops to the new location */                           \
-      size_t size = oop(q)->adjust_pointers();                               \
-      size = adjust_obj_size(size);                                          \
-      debug_only(prev_q = q);                                                \
-      q += size;                                                             \
-    } else {                                                                 \
-      /* q is not a live object, so its mark should point at the next        \
-       * live object */                                                      \
-      debug_only(prev_q = q);                                                \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
-      assert(q > prev_q, "we should be moving forward through memory");      \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  assert(q == t, "just checking");                                           \
-}
-
-#define SCAN_AND_COMPACT(obj_size) {                                         \
-  /* Copy all live objects to their new location                             \
-   * Used by MarkSweep::mark_sweep_phase4() */                               \
-                                                                             \
-  HeapWord* q = bottom();                                                    \
-  HeapWord* const t = _end_of_live;                                          \
-  debug_only(HeapWord* prev_q = NULL);                                       \
-                                                                             \
-  if (q < t && _first_dead > q &&                                            \
-      !oop(q)->is_gc_marked()) {                                             \
-    debug_only(                                                              \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized \
-     * the mark word during the previous pass, so we can't use is_gc_marked for \
-     * the traversal. */                                                     \
-    HeapWord* const end = _first_dead;                                       \
-                                                                             \
-    while (q < end) {                                                        \
-      size_t size = obj_size(q);                                             \
-      assert(!oop(q)->is_gc_marked(),                                        \
-             "should be unmarked (special dense prefix handling)");          \
-      debug_only(prev_q = q);                                                \
-      q += size;                                                             \
-    }                                                                        \
-    )  /* debug_only */                                                      \
-                                                                             \
-    if (_first_dead == t) {                                                  \
-      q = t;                                                                 \
-    } else {                                                                 \
-      /* $$$ Funky */                                                        \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  const intx scan_interval = PrefetchScanIntervalInBytes;                    \
-  const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
-  while (q < t) {                                                            \
-    if (!oop(q)->is_gc_marked()) {                                           \
-      /* mark is pointer to next marked oop */                               \
-      debug_only(prev_q = q);                                                \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
-      assert(q > prev_q, "we should be moving forward through memory");      \
-    } else {                                                                 \
-      /* prefetch beyond q */                                                \
-      Prefetch::read(q, scan_interval);                                      \
-                                                                             \
-      /* size and destination */                                             \
-      size_t size = obj_size(q);                                             \
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
-                                                                             \
-      /* prefetch beyond compaction_top */                                   \
-      Prefetch::write(compaction_top, copy_interval);                        \
-                                                                             \
-      /* copy object and reinit its mark */                                  \
-      assert(q != compaction_top, "everything in this pass should be moving");\
-      Copy::aligned_conjoint_words(q, compaction_top, size);                 \
-      oop(compaction_top)->init_mark();                                      \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
-                                                                             \
-      debug_only(prev_q = q);                                                \
-      q += size;                                                             \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  /* Let's remember if we were empty before we did the compaction. */        \
-  bool was_empty = used_region().is_empty();                                 \
-  /* Reset space after compaction is complete */                             \
-  reset_after_compaction();                                                  \
-  /* We do this clear, below, since it has overloaded meanings for some */   \
-  /* space subtypes.  For example, OffsetTableContigSpace's that were   */   \
-  /* compacted into will have had their offset table thresholds updated */   \
-  /* continuously, but those that weren't need to have their thresholds */   \
-  /* re-initialized.  Also mangles unused area for debugging.           */   \
-  if (used_region().is_empty()) {                                            \
-    if (!was_empty) clear(SpaceDecorator::Mangle);                           \
-  } else {                                                                   \
-    if (ZapUnusedHeapArea) mangle_unused_area();                             \
-  }                                                                          \
-}
-
 class GenSpaceMangler;
 
 // A space in which the free area is contiguous. It therefore supports
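The three macros removed above implement the LISP2-style sliding compaction behind the serial mark-sweep phases 2-4: the forward pass computes each survivor's new address, the adjust pass retargets interior oops at those addresses before anything moves, and the compact pass slides the survivors down. What follows is a minimal standalone sketch of the same three passes over a toy heap of one-slot objects; every name in it is illustrative, and it deliberately omits what the real macros do (dense-prefix handling, prefetching, mark-word tricks).

// toy_lisp2.cpp -- minimal sketch of the forward/adjust/compact passes.
// Not HotSpot code; a toy analogue of the three macros above.
#include <cassert>
#include <cstdio>
#include <vector>

struct Obj {
  bool live;      // stands in for the GC mark
  int  ref;       // one "oop field": index of another object, or -1
  int  forwardee; // new index, computed by the forwarding pass
};

int main() {
  // A heap with dead holes between live objects (live: 0, 3, 5).
  std::vector<Obj> heap = {
    {true, 3, -1}, {false, -1, -1}, {false, -1, -1},
    {true, 0, -1}, {false, -1, -1}, {true, 3, -1},
  };

  // Pass 1 (cf. SCAN_AND_FORWARD): assign each survivor its new address.
  int compact_top = 0;
  for (Obj& o : heap) {
    if (o.live) o.forwardee = compact_top++;
  }

  // Pass 2 (cf. SCAN_AND_ADJUST_POINTERS): retarget interior pointers while
  // the old addresses are still valid -- nothing has moved yet.
  for (Obj& o : heap) {
    if (o.live && o.ref >= 0) o.ref = heap[o.ref].forwardee;
  }

  // Pass 3 (cf. SCAN_AND_COMPACT): slide survivors down.  Unlike the real
  // code, which skips the non-moving dense prefix, the toy "copies" an
  // object onto itself when forwardee == q.
  for (size_t q = 0; q < heap.size(); ++q) {
    if (heap[q].live) {
      assert(heap[q].forwardee <= (int)q); // sliding only moves objects down
      heap[heap[q].forwardee] = heap[q];
    }
  }
  heap.resize(compact_top);

  for (size_t i = 0; i < heap.size(); ++i) {
    std::printf("obj %zu: ref -> %d\n", i, heap[i].ref);
  }
  return 0;
}

The ordering is the load-bearing part: pass 2 must finish while the old addresses are still intact, which is why the real adjust pass can still walk dead ranges through the forwarding information left in the mark words.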
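The dead-space allowance at the top of SCAN_AND_FORWARD tolerates some garbage near the bottom of the space so compaction is skipped until it pays off, except on every MarkSweepAlwaysCompactCount-th invocation, when skip_dead is false and a full compaction is forced. A worked instance of the arithmetic, with made-up numbers (the real inputs are capacity() and allowed_dead_ratio(); HeapWordSize is 8 on a 64-bit VM):

// deadspace.cpp -- the allowance computation, with hypothetical inputs.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t capacity_bytes = 64u * 1024 * 1024; // hypothetical capacity()
  const size_t ratio          = 5;                 // hypothetical allowed_dead_ratio(), in percent
  const size_t heap_word_size = 8;                 // HeapWordSize on 64-bit

  // Same expression as the macro: (capacity() * ratio / 100) / HeapWordSize.
  size_t allowed_deadspace = (capacity_bytes * ratio / 100) / heap_word_size;
  std::printf("up to %zu dead words (%zu bytes) tolerated before compacting\n",
              allowed_deadspace, allowed_deadspace * heap_word_size);
  return 0;
}

With these numbers, roughly 3.2 MB of dead space at the bottom of a 64 MB space is left in place rather than compacted away.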
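The decode_pointer() reads in the adjust and compact passes work because SCAN_AND_FORWARD stamps a LiveRange over each dead gap: the record's start field overlays the first dead object's mark word and points at the next run of live objects, so a later pass leaps the whole gap in one step. A sketch of that overlay, with illustrative types rather than HotSpot's markOop machinery:

// liverange.cpp -- sketch of the mark-word overlay; types are illustrative.
#include <cstdint>

// Overlaid on the first words of a dead gap; _start aliases the dead
// object's mark word.  _end is patched later, when the next gap is found.
struct LiveRange {
  uintptr_t* _start;  // first word of the next live run
  uintptr_t* _end;    // one past its last word
};

int main() {
  uintptr_t heap[8] = {0};
  uintptr_t* gap       = &heap[2];  // first dead word of a gap
  uintptr_t* next_live = &heap[5];  // next marked object after the gap

  // Stamp the gap the way SCAN_AND_FORWARD does; later passes read this
  // back (cf. mark()->decode_pointer()) to skip straight to next_live.
  LiveRange* lr = reinterpret_cast<LiveRange*>(gap);
  lr->_start = next_live;
  lr->_end   = next_live;
  return lr->_start == &heap[5] ? 0 : 1;
}

This type-punning is safe in the collector only because dead objects are, by definition, unreachable: nothing else will ever read those mark words again.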