--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Aug 20 23:05:04 2008 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Aug 26 14:54:48 2008 -0700
@@ -3650,6 +3650,7 @@
   CompactibleFreeListSpace* _cms_space;
   CompactibleFreeListSpace* _perm_space;
   HeapWord*     _global_finger;
+  HeapWord*     _restart_addr;
 
   // Exposed here for yielding support
   Mutex* const _bit_map_lock;
@@ -3680,7 +3681,7 @@
     _term.set_task(this);
     assert(_cms_space->bottom() < _perm_space->bottom(),
            "Finger incorrectly initialized below");
-    _global_finger = _cms_space->bottom();
+    _restart_addr = _global_finger = _cms_space->bottom();
   }
 
 
@@ -3698,6 +3699,10 @@
   bool result() { return _result; }
 
   void reset(HeapWord* ra) {
+    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
+    assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
+    assert(ra             <  _perm_space->end(), "ra too large");
+    _restart_addr = _global_finger = ra;
     _term.reset_for_reuse();
   }
 
@@ -3842,16 +3847,24 @@
   int n_tasks = pst->n_tasks();
   // We allow that there may be no tasks to do here because
   // we are restarting after a stack overflow.
-  assert(pst->valid() || n_tasks == 0, "Uninitializd use?");
+  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
   int nth_task = 0;
 
-  HeapWord* start = sp->bottom();
+  HeapWord* aligned_start = sp->bottom();
+  if (sp->used_region().contains(_restart_addr)) {
+    // Align down to a card boundary for the start of 0th task
+    // for this space.
+    aligned_start =
+      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
+                                 CardTableModRefBS::card_size);
+  }
+
   size_t chunk_size = sp->marking_task_size();
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth task in this space,
     // compute the chunk that it corresponds to:
-    MemRegion span = MemRegion(start + nth_task*chunk_size,
-                               start + (nth_task+1)*chunk_size);
+    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
+                               aligned_start + (nth_task+1)*chunk_size);
     // Try and bump the global finger via a CAS;
     // note that we need to do the global finger bump
     // _before_ taking the intersection below, because
@@ -3866,26 +3879,40 @@
     // beyond the "top" address of the space.
     span = span.intersection(sp->used_region());
     if (!span.is_empty()) {  // Non-null task
-      // We want to skip the first object because
-      // the protocol is to scan any object in its entirety
-      // that _starts_ in this span; a fortiori, any
-      // object starting in an earlier span is scanned
-      // as part of an earlier claimed task.
-      // Below we use the "careful" version of block_start
-      // so we do not try to navigate uninitialized objects.
-      HeapWord* prev_obj = sp->block_start_careful(span.start());
-      // Below we use a variant of block_size that uses the
-      // Printezis bits to avoid waiting for allocated
-      // objects to become initialized/parsable.
-      while (prev_obj < span.start()) {
-        size_t sz = sp->block_size_no_stall(prev_obj, _collector);
-        if (sz > 0) {
-          prev_obj += sz;
+      HeapWord* prev_obj;
+      assert(!span.contains(_restart_addr) || nth_task == 0,
+             "Inconsistency");
+      if (nth_task == 0) {
+        // For the 0th task, we'll not need to compute a block_start.
+        if (span.contains(_restart_addr)) {
+          // In the case of a restart because of stack overflow,
+          // we might additionally skip a chunk prefix.
+          prev_obj = _restart_addr;
         } else {
-          // In this case we may end up doing a bit of redundant
-          // scanning, but that appears unavoidable, short of
-          // locking the free list locks; see bug 6324141.
-          break;
+          prev_obj = span.start();
+        }
+      } else {
+        // We want to skip the first object because
+        // the protocol is to scan any object in its entirety
+        // that _starts_ in this span; a fortiori, any
+        // object starting in an earlier span is scanned
+        // as part of an earlier claimed task.
+        // Below we use the "careful" version of block_start
+        // so we do not try to navigate uninitialized objects.
+        prev_obj = sp->block_start_careful(span.start());
+        // Below we use a variant of block_size that uses the
+        // Printezis bits to avoid waiting for allocated
+        // objects to become initialized/parsable.
+        while (prev_obj < span.start()) {
+          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
+          if (sz > 0) {
+            prev_obj += sz;
+          } else {
+            // In this case we may end up doing a bit of redundant
+            // scanning, but that appears unavoidable, short of
+            // locking the free list locks; see bug 6324141.
+            break;
+          }
         }
       }
       if (prev_obj < span.end()) {
@@ -3938,12 +3965,14 @@
   void handle_stack_overflow(HeapWord* lost);
 };
 
-// Grey object rescan during work stealing phase --
-// the salient assumption here is that stolen oops must
-// always be initialized, so we do not need to check for
-// uninitialized objects before scanning here.
+// Grey object scanning during work stealing phase --
+// the salient assumption here is that any references
+// that are in these stolen objects being scanned must
+// already have been initialized (else they would not have
+// been published), so we do not need to check for
+// uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -4001,7 +4030,7 @@
 // in CMSCollector's _restart_address.
 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
+  // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded
@@ -6554,7 +6583,7 @@
     if (obj != NULL) {
       // Ignore mark word because this could be an already marked oop
       // that may be chained at the end of the overflow list.
-      assert(obj->is_oop(), "expected an oop");
+      assert(obj->is_oop(true), "expected an oop");
       HeapWord* addr = (HeapWord*)obj;
       if (_span.contains(addr) &&
           !_bit_map->isMarked(addr)) {
@@ -7289,6 +7318,8 @@
   _should_remember_klasses(collector->should_unload_classes())
 { }
 
+// Assumes thread-safe access by callers, who are
+// responsible for mutual exclusion.
 void CMSCollector::lower_restart_addr(HeapWord* low) {
   assert(_span.contains(low), "Out of bounds addr");
   if (_restart_addr == NULL) {
@@ -7314,7 +7345,7 @@
 // in CMSCollector's _restart_address.
 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
+  // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded
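For reference, a minimal standalone sketch (not part of the patch above) of the card-boundary alignment that the restart path relies on: the restart address is rounded down to a multiple of the card size so that the 0th marking task's chunk begins on a card boundary, while the restart address itself is used as the scan start within that chunk. The align_size_down helper below mirrors the power-of-two align-down used in HotSpot; the card size and heap address are assumed stand-in values, not taken from the source.

// Illustrative sketch only; kCardSize and restart are hypothetical
// stand-ins for CardTableModRefBS::card_size and _restart_addr.
#include <cstdint>
#include <cstdio>

// Power-of-two align-down, analogous to HotSpot's align_size_down():
// clear the low-order bits of the value.
static uintptr_t align_size_down(uintptr_t value, uintptr_t alignment) {
  return value & ~(alignment - 1);
}

int main() {
  const uintptr_t kCardSize = 512;            // assumed card size in bytes
  uintptr_t restart = 0x00007f3a1234567aULL;  // hypothetical heap address

  // Base of task 0's chunk: restart address aligned down to a card boundary.
  uintptr_t aligned_start = align_size_down(restart, kCardSize);

  std::printf("restart       = 0x%016llx\n", (unsigned long long)restart);
  std::printf("aligned_start = 0x%016llx\n", (unsigned long long)aligned_start);
  return 0;
}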