1.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Mon Aug 02 12:51:43 2010 -0700 1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Thu Jul 22 10:27:41 2010 -0400 1.3 @@ -27,7 +27,6 @@ 1.4 1.5 PSPromotionManager** PSPromotionManager::_manager_array = NULL; 1.6 OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; 1.7 -OopTaskQueueSet* PSPromotionManager::_stack_array_breadth = NULL; 1.8 PSOldGen* PSPromotionManager::_old_gen = NULL; 1.9 MutableSpace* PSPromotionManager::_young_space = NULL; 1.10 1.11 @@ -42,23 +41,14 @@ 1.12 _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 ); 1.13 guarantee(_manager_array != NULL, "Could not initialize promotion manager"); 1.14 1.15 - if (UseDepthFirstScavengeOrder) { 1.16 - _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); 1.17 - guarantee(_stack_array_depth != NULL, "Count not initialize promotion manager"); 1.18 - } else { 1.19 - _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads); 1.20 - guarantee(_stack_array_breadth != NULL, "Count not initialize promotion manager"); 1.21 - } 1.22 + _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); 1.23 + guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager"); 1.24 1.25 // Create and register the PSPromotionManager(s) for the worker threads. 
1.26 for(uint i=0; i<ParallelGCThreads; i++) { 1.27 _manager_array[i] = new PSPromotionManager(); 1.28 guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager"); 1.29 - if (UseDepthFirstScavengeOrder) { 1.30 - stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth()); 1.31 - } else { 1.32 - stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth()); 1.33 - } 1.34 + stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth()); 1.35 } 1.36 1.37 // The VMThread gets its own PSPromotionManager, which is not available 1.38 @@ -93,11 +83,7 @@ 1.39 TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats()); 1.40 for (uint i = 0; i < ParallelGCThreads + 1; i++) { 1.41 PSPromotionManager* manager = manager_array(i); 1.42 - if (UseDepthFirstScavengeOrder) { 1.43 - assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); 1.44 - } else { 1.45 - assert(manager->claimed_stack_breadth()->is_empty(), "should be empty"); 1.46 - } 1.47 + assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); 1.48 manager->flush_labs(); 1.49 } 1.50 } 1.51 @@ -105,10 +91,8 @@ 1.52 #if TASKQUEUE_STATS 1.53 void 1.54 PSPromotionManager::print_taskqueue_stats(uint i) const { 1.55 - const TaskQueueStats& stats = depth_first() ? 1.56 - _claimed_stack_depth.stats : _claimed_stack_breadth.stats; 1.57 tty->print("%3u ", i); 1.58 - stats.print(); 1.59 + _claimed_stack_depth.stats.print(); 1.60 tty->cr(); 1.61 } 1.62 1.63 @@ -128,8 +112,7 @@ 1.64 1.65 void 1.66 PSPromotionManager::print_stats() { 1.67 - const bool df = UseDepthFirstScavengeOrder; 1.68 - tty->print_cr("== GC Task Stats (%s-First), GC %3d", df ? 
"Depth" : "Breadth", 1.69 + tty->print_cr("== GC Tasks Stats, GC %3d", 1.70 Universe::heap()->total_collections()); 1.71 1.72 tty->print("thr "); TaskQueueStats::print_header(1); tty->cr(); 1.73 @@ -147,9 +130,7 @@ 1.74 1.75 void 1.76 PSPromotionManager::reset_stats() { 1.77 - TaskQueueStats& stats = depth_first() ? 1.78 - claimed_stack_depth()->stats : claimed_stack_breadth()->stats; 1.79 - stats.reset(); 1.80 + claimed_stack_depth()->stats.reset(); 1.81 _masked_pushes = _masked_steals = 0; 1.82 _arrays_chunked = _array_chunks_processed = 0; 1.83 } 1.84 @@ -158,19 +139,13 @@ 1.85 PSPromotionManager::PSPromotionManager() { 1.86 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 1.87 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 1.88 - _depth_first = UseDepthFirstScavengeOrder; 1.89 1.90 // We set the old lab's start array. 1.91 _old_lab.set_start_array(old_gen()->start_array()); 1.92 1.93 uint queue_size; 1.94 - if (depth_first()) { 1.95 - claimed_stack_depth()->initialize(); 1.96 - queue_size = claimed_stack_depth()->max_elems(); 1.97 - } else { 1.98 - claimed_stack_breadth()->initialize(); 1.99 - queue_size = claimed_stack_breadth()->max_elems(); 1.100 - } 1.101 + claimed_stack_depth()->initialize(); 1.102 + queue_size = claimed_stack_depth()->max_elems(); 1.103 1.104 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0); 1.105 if (_totally_drain) { 1.106 @@ -205,14 +180,11 @@ 1.107 _old_lab.initialize(MemRegion(lab_base, (size_t)0)); 1.108 _old_gen_is_full = false; 1.109 1.110 - _prefetch_queue.clear(); 1.111 - 1.112 TASKQUEUE_STATS_ONLY(reset_stats()); 1.113 } 1.114 1.115 1.116 void PSPromotionManager::drain_stacks_depth(bool totally_drain) { 1.117 - assert(depth_first(), "invariant"); 1.118 assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant"); 1.119 totally_drain = totally_drain || _totally_drain; 1.120 1.121 @@ -250,50 +222,6 @@ 1.122 assert(tq->overflow_empty(), "Sanity"); 1.123 
} 1.124 1.125 -void PSPromotionManager::drain_stacks_breadth(bool totally_drain) { 1.126 - assert(!depth_first(), "invariant"); 1.127 - assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant"); 1.128 - totally_drain = totally_drain || _totally_drain; 1.129 - 1.130 -#ifdef ASSERT 1.131 - ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 1.132 - assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 1.133 - MutableSpace* to_space = heap->young_gen()->to_space(); 1.134 - MutableSpace* old_space = heap->old_gen()->object_space(); 1.135 - MutableSpace* perm_space = heap->perm_gen()->object_space(); 1.136 -#endif /* ASSERT */ 1.137 - 1.138 - OverflowTaskQueue<oop>* const tq = claimed_stack_breadth(); 1.139 - do { 1.140 - oop obj; 1.141 - 1.142 - // Drain overflow stack first, so other threads can steal from 1.143 - // claimed stack while we work. 1.144 - while (tq->pop_overflow(obj)) { 1.145 - obj->copy_contents(this); 1.146 - } 1.147 - 1.148 - if (totally_drain) { 1.149 - while (tq->pop_local(obj)) { 1.150 - obj->copy_contents(this); 1.151 - } 1.152 - } else { 1.153 - while (tq->size() > _target_stack_size && tq->pop_local(obj)) { 1.154 - obj->copy_contents(this); 1.155 - } 1.156 - } 1.157 - 1.158 - // If we could not find any other work, flush the prefetch queue 1.159 - if (tq->is_empty()) { 1.160 - flush_prefetch_queue(); 1.161 - } 1.162 - } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); 1.163 - 1.164 - assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); 1.165 - assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); 1.166 - assert(tq->overflow_empty(), "Sanity"); 1.167 -} 1.168 - 1.169 void PSPromotionManager::flush_labs() { 1.170 assert(stacks_empty(), "Attempt to flush lab with live stack"); 1.171 1.172 @@ -319,7 +247,7 @@ 1.173 // performance. 
1.174 // 1.175 1.176 -oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) { 1.177 +oop PSPromotionManager::copy_to_survivor_space(oop o) { 1.178 assert(PSScavenge::should_scavenge(&o), "Sanity"); 1.179 1.180 oop new_obj = NULL; 1.181 @@ -423,24 +351,20 @@ 1.182 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); 1.183 } 1.184 1.185 - if (depth_first) { 1.186 - // Do the size comparison first with new_obj_size, which we 1.187 - // already have. Hopefully, only a few objects are larger than 1.188 - // _min_array_size_for_chunking, and most of them will be arrays. 1.189 - // So, the is->objArray() test would be very infrequent. 1.190 - if (new_obj_size > _min_array_size_for_chunking && 1.191 - new_obj->is_objArray() && 1.192 - PSChunkLargeArrays) { 1.193 - // we'll chunk it 1.194 - oop* const masked_o = mask_chunked_array_oop(o); 1.195 - push_depth(masked_o); 1.196 - TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); 1.197 - } else { 1.198 - // we'll just push its contents 1.199 - new_obj->push_contents(this); 1.200 - } 1.201 + // Do the size comparison first with new_obj_size, which we 1.202 + // already have. Hopefully, only a few objects are larger than 1.203 + // _min_array_size_for_chunking, and most of them will be arrays. 1.204 + // So, the is->objArray() test would be very infrequent. 1.205 + if (new_obj_size > _min_array_size_for_chunking && 1.206 + new_obj->is_objArray() && 1.207 + PSChunkLargeArrays) { 1.208 + // we'll chunk it 1.209 + oop* const masked_o = mask_chunked_array_oop(o); 1.210 + push_depth(masked_o); 1.211 + TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); 1.212 } else { 1.213 - push_breadth(new_obj); 1.214 + // we'll just push its contents 1.215 + new_obj->push_contents(this); 1.216 } 1.217 } else { 1.218 // We lost, someone else "owns" this object 1.219 @@ -537,13 +461,7 @@ 1.220 // We won any races, we "own" this object. 
1.221 assert(obj == obj->forwardee(), "Sanity"); 1.222 1.223 - if (depth_first()) { 1.224 - obj->push_contents(this); 1.225 - } else { 1.226 - // Don't bother incrementing the age, just push 1.227 - // onto the claimed_stack.. 1.228 - push_breadth(obj); 1.229 - } 1.230 + obj->push_contents(this); 1.231 1.232 // Save the mark if needed 1.233 PSScavenge::oop_promotion_failed(obj, obj_mark);