--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Wed Mar 25 15:50:17 2015 +0100
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Fri Dec 19 09:21:06 2014 +0100
@@ -38,6 +38,7 @@
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
+    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false), _scanner(g1h, rp),
     _strong_roots_time(0), _term_time(0) {
   _scanner.set_par_scan_thread_state(this);
@@ -59,6 +60,12 @@
 
   _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
 
+  _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
+  // The dest for Young is used when the objects are aged enough to
+  // need to be moved to the next space.
+  _dest[InCSetState::Young] = InCSetState::Old;
+  _dest[InCSetState::Old] = InCSetState::Old;
+
   _start = os::elapsedTime();
 }
 
@@ -150,52 +157,94 @@
   } while (!_refs->is_empty());
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
+HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
+                                                      InCSetState* dest,
+                                                      size_t word_sz,
+                                                      AllocationContext_t const context) {
+  assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
+  assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+
+  // Right now we only have two types of regions (young / old) so
+  // let's keep the logic here simple. We can generalize it when necessary.
+  if (dest->is_young()) {
+    HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
+                                                          word_sz, context);
+    if (obj_ptr == NULL) {
+      return NULL;
+    }
+    // Make sure that we won't attempt to copy any other objects out
+    // of a survivor region (given that apparently we cannot allocate
+    // any new ones) to avoid coming into this slow path.
+    _tenuring_threshold = 0;
+    dest->set_old();
+    return obj_ptr;
+  } else {
+    assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+    // no other space to try.
+    return NULL;
+  }
+}
+
+InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
+  if (state.is_young()) {
+    age = !m->has_displaced_mark_helper() ? m->age()
+                                          : m->displaced_mark_helper()->age();
+    if (age < _tenuring_threshold) {
+      return state;
+    }
+  }
+  return dest(state);
+}
+
+oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
+                                                 oop const old,
                                                  markOop const old_mark) {
-  size_t word_sz = old->size();
-  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+  const size_t word_sz = old->size();
+  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
-  int young_index = from_region->young_index_in_cset()+1;
+  const int young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1h->g1_policy();
-  uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
-                                                   : old_mark->age();
-  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
-                                                             word_sz);
-  AllocationContext_t context = from_region->allocation_context();
-  HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
+  const AllocationContext_t context = from_region->allocation_context();
+
+  uint age = 0;
+  InCSetState dest_state = next_state(state, old_mark, age);
+  HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
+
+  // PLAB allocations should succeed most of the time, so we'll
+  // normally check against NULL once and that's it.
+  if (obj_ptr == NULL) {
+    obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+    if (obj_ptr == NULL) {
+      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
+      if (obj_ptr == NULL) {
+        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
+        return _g1h->handle_evacuation_failure_par(this, old);
+      }
+    }
+  }
+
+  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
 #ifndef PRODUCT
   // Should this evacuation fail?
   if (_g1h->evacuation_should_fail()) {
-    if (obj_ptr != NULL) {
-      _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
-      obj_ptr = NULL;
-    }
+    // Doing this after all the allocation attempts also tests the
+    // undo_allocation() method too.
+    _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+    return _g1h->handle_evacuation_failure_par(this, old);
  }
 #endif // !PRODUCT
 
-  if (obj_ptr == NULL) {
-    // This will either forward-to-self, or detect that someone else has
-    // installed a forwarding pointer.
-    return _g1h->handle_evacuation_failure_par(this, old);
-  }
-
-  oop obj = oop(obj_ptr);
-
   // We're going to allocate linearly, so might as well prefetch ahead.
   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
 
-  oop forward_ptr = old->forward_to_atomic(obj);
+  const oop obj = oop(obj_ptr);
+  const oop forward_ptr = old->forward_to_atomic(obj);
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
 
-    // alloc_purpose is just a hint to allocate() above, recheck the type of region
-    // we actually allocated from and update alloc_purpose accordingly
-    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
-    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
-    if (g1p->track_object_age(alloc_purpose)) {
+    if (dest_state.is_young()) {
       if (age < markOopDesc::max_age) {
         age++;
       }
@@ -215,13 +264,19 @@
     }
 
     if (G1StringDedup::is_enabled()) {
-      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
-                                             to_region->is_young(),
+      const bool is_from_young = state.is_young();
+      const bool is_to_young = dest_state.is_young();
+      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+             "sanity");
+      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+             "sanity");
+      G1StringDedup::enqueue_from_evacuation(is_from_young,
+                                             is_to_young,
                                              queue_num(),
                                              obj);
     }
 
-    size_t* surv_young_words = surviving_young_words();
+    size_t* const surv_young_words = surviving_young_words();
     surv_young_words[young_index] += word_sz;
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -232,14 +287,13 @@
       oop* old_p = set_partial_array_mask(old);
       push_on_queue(old_p);
     } else {
-      // No point in using the slower heap_region_containing() method,
-      // given that we know obj is in the heap.
-      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+      _scanner.set_region(to_region);
       obj->oop_iterate_backwards(&_scanner);
     }
+    return obj;
   } else {
-    _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
-    obj = forward_ptr;
+    _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+    return forward_ptr;
  }
-  return obj;
 }