//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
// Copies (or promotes) object o on behalf of a scavenging GC worker and
// returns the forwarded copy.  MT-safe: multiple workers may race to copy
// the same object; exactly one wins the cas_forward_to() below and the
// losers return the winner's forwardee.
//
// Template parameter promote_immediately: when true, the young-generation
// (to-space) allocation attempt is skipped entirely and the object goes
// straight to the tenured allocation path below.
//
// On allocation failure in the old generation this returns
// oop_promotion_failed(o, test_mark) instead of a copy.
//
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()", but applied to the mark word we
  // sampled above, so the decision and the CAS below use the same value.
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    if (!promote_immediately) {
      // Find the object's age, MT safe.  If the mark is displaced (e.g. by a
      // lock), the age lives in the displaced header, not in the mark itself.
      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          // Objects larger than half a PLAB would waste too much of a fresh
          // lab, so they are CAS-allocated directly out of to-space.
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              // To-space is exhausted; remember that so later objects skip
              // straight to the tenured path.
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      // Test hook: deliberately fail promotion to exercise the
      // promotion-failure paths in non-product builds.
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if(lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj before attempting to install the forwarding pointer, so a
    // winning CAS publishes a fully copied object.
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it: push a masked pointer so the popper knows to
        // process this large array in pieces rather than all at once.
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!  unallocate_object() is
      // given our dead copy; new_obj must keep pointing at it until then.
      new_obj = o->forwardee();
    }
  } else {
    // Object was already forwarded when we sampled the mark.
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  // NOTE(review): guarded by DEBUG, while HotSpot's usual non-product guard
  // is #ifndef PRODUCT / #ifdef ASSERT -- confirm DEBUG is actually defined
  // in debug builds, otherwise this TraceScavenge output never compiles in.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}