Fri, 16 Jul 2010 21:33:21 -0700
6962947: shared TaskQueue statistics
Reviewed-by: tonyp, ysr
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parNewGeneration.cpp.incl"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       GrowableArray<oop>** overflow_stack_set_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stack_set_[thread_num_]),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif
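
// The pragma pair above silences MSVC warning C4355, which fires whenever
// `this` escapes a constructor's initializer list. The pattern is safe here
// because the closures merely store the pointer; they do not call back into
// the partially constructed ParScanThreadState. A minimal standalone sketch
// of the situation (hypothetical types, not part of this file):
#if 0
struct Closure {
  void* _owner;
  explicit Closure(void* owner) : _owner(owner) {}  // only stores the pointer
};
struct Holder {
  Closure _cl;
  Holder() : _cl(this) {}  // C4355: 'this' used in base member initializer list
};
#endif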

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
    note_push();
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
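
// Worked example of the chunking above, assuming ParGCArrayScanChunk == 50:
// when a 1000-element array is promoted, the promoting thread sets the old
// copy's length to 0 and pushes the old copy (see copy_to_survivor_space_*()
// below). The first pop scans elements [0, 50), sets the old copy's length
// to 50, and re-pushes it; later pops scan [50, 100), [100, 150), and so on.
// Once no more than 2 * 50 elements remain, the final pass scans the entire
// tail (the test combines the last partial chunk with a full one) and
// restores the true length of 1000, so the stale cursor never survives a
// promotion failure.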


void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        note_pop();
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}
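
// Usage note: a max_size of 0 (as used by ParEvacuateFollowersClosure below)
// drains the local queue completely, while a positive max_size merely trims
// it back to that bound, leaving some entries in the shared queue where idle
// threads can steal them.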

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  GrowableArray<oop>* of_stack = overflow_stack();
  uint num_overflow_elems = of_stack->length();
  uint num_take_elems     = MIN2(MIN2((queue->max_elems() - queue->size())/4,
                                      (juint)ParGCDesiredObjsFromOverflowList),
                                 num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}
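
// Worked example of the transfer size above (values assumed, not mandated):
// with a queue capacity (max_elems) of 16384, a current queue size of 384,
// ParGCDesiredObjsFromOverflowList at its default of 20, and 1000 oops on
// the overflow stack,
//   num_take_elems = MIN2(MIN2((16384 - 384) / 4, 20u), 1000u) = 20,
// i.e. we move at most a quarter of the free queue space, capped by the flag
// and by what is actually available.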

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // The fast-path PLAB allocation failed; if the request is small enough,
  // retire this buffer and try to allocate a new one.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}
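
// Worked example of the waste test above (values assumed): with
// ParallelGCBufferWastePct at its default of 10 and a 4096-word PLAB, a
// request of fewer than ~410 words satisfies word_sz * 100 < 10 * 4096, so
// the buffer is retired and refilled; anything larger is allocated directly
// out of to-space. This bounds the space abandoned at the tail of each
// buffer to roughly ParallelGCBufferWastePct percent.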


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                          _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        GrowableArray<oop>**    overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);
  inline ParScanThreadState& thread_state(int i);
  int pushes() { return _pushes; }
  int pops()   { return _pops; }
  int steals() { return _steals; }
  void reset(bool promotion_failed);
  void flush();
private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
  // statistics
  int _pushes;
  int _pops;
  int _steals;
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  GrowableArray<oop>** overflow_stack_set_,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term),
    _pushes(0), _pops(0), _steals(0)
{
  assert(num_threads > 0, "sanity check!");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stack_set_, desired_plab_sz, term);
  }
}
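
// The constructor above uses placement new to build each ParScanThreadState
// directly inside the raw block that ResourceArray reserved, so no default
// constructor, copying, or per-state heap allocation is needed. A standalone
// sketch of the idiom (hypothetical Widget type, not part of this file):
#if 0
#include <new>
struct Widget { int _id; explicit Widget(int id) : _id(id) {} };
void build_in_place(void* raw, int n) {
  Widget* base = static_cast<Widget*>(raw);
  for (int i = 0; i < n; ++i) {
    new (base + i) Widget(i);  // construct element i in caller-owned storage
  }
}
#endif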

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}


void ParScanThreadStateSet::reset(bool promotion_failed)
{
  _term.reset_for_reuse();
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);

    // Flush stats related to work queue activity (push/pop/steal).
    // This could conceivably become a bottleneck; if so, we'll put the
    // stats gathering under the flag.
    if (PAR_STATS_ENABLED) {
      _pushes += par_scan_state.pushes();
      _pops   += par_scan_state.pops();
      _steals += par_scan_state.steals();
      if (ParallelGCVerbose) {
        gclog_or_tty->print("Thread %d complete:\n"
                            "  Pushes: %7d    Pops: %7d    Steals %7d (in %d attempts)\n",
                            i, par_scan_state.pushes(), par_scan_state.pops(),
                            par_scan_state.steals(), par_scan_state.steal_attempts());
        if (par_scan_state.overflow_pushes() > 0 ||
            par_scan_state.overflow_refills() > 0) {
          gclog_or_tty->print("  Overflow pushes: %7d    "
                              "Overflow refills: %7d for %d objs.\n",
                              par_scan_state.overflow_pushes(),
                              par_scan_state.overflow_refills(),
                              par_scan_state.overflow_refill_objs());
        }

        double elapsed = par_scan_state.elapsed();
        double strong_roots = par_scan_state.strong_roots_time();
        double term = par_scan_state.term_time();
        gclog_or_tty->print(
                            "  Elapsed: %7.2f ms.\n"
                            "    Strong roots: %7.2f ms (%6.2f%%)\n"
                            "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
                            elapsed * 1000.0,
                            strong_roots * 1000.0, (strong_roots*100.0/elapsed),
                            term * 1000.0, (term*100.0/elapsed),
                            par_scan_state.term_attempts());
      }
    }
  }
  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work; attempt to steal from other threads.

    // Attempt to steal work from another thread's queue.
    par_scan_state()->note_steal_attempt();
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      par_scan_state()->note_steal();
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      par_scan_state()->note_push();
      // If the steal succeeded, loop around and drain again.
      continue;

      // Otherwise, try the global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}
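
// The loop above is the standard work-steal-terminate pattern: (1) drain
// local work, (2) try to steal from another queue, (3) try the shared
// overflow list, (4) offer termination, looping back if the terminator sees
// that some thread has produced new work. A schematic of the control flow
// (sketch only; the helper names here are hypothetical):
#if 0
while (true) {
  drain_local_queue();                                    // step 1
  if (try_steal(&task)) { push_local(task); continue; }   // step 2
  if (refill_from_overflow_list()) continue;              // step 3
  if (terminator->offer_termination()) break;             // step 4: all idle
}
#endif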

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,   // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray<oop>*, ParallelGCThreads);
  guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure");
  for (uint i = 0; i < ParallelGCThreads; i++) {
    if (ParGCUseLocalOverflow) {
      _overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
      guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure.");
    } else {
      _overflow_stacks[i] = NULL;
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(_generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}
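
// Sketch of what compute_tenuring_threshold() does with that survivor
// capacity (behavior as understood from ageTable, stated here as an
// assumption): the desired survivor occupancy is the capacity scaled by
// TargetSurvivorRatio (50% by default, hence the comment above). The age
// table is then walked from the youngest age upward, accumulating the words
// surviving at each age; the first age at which the running total exceeds
// the target becomes the tenuring threshold, capped by MaxTenuringThreshold.
// Objects that reach that age are promoted on the next scavenge.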

// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
  ParScanThreadStateSet thread_state_set(workers->total_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  int n_workers = workers->total_workers();
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(promotion_failed());

  if (PAR_STATS_ENABLED && ParallelGCVerbose) {
    gclog_or_tty->print("Thread totals:\n"
                        "  Pushes: %7d    Pops: %7d    Steals %7d (sum = %7d).\n",
                        thread_state_set.pushes(), thread_state_set.pops(),
                        thread_state_set.steals(),
                        thread_state_set.pops()+thread_state_set.steals());
  }
  assert(thread_state_set.pushes() == thread_state_set.pops()
                                    + thread_state_set.steals(),
         "Or else the queues are leaky.");

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(HandlePromotionFailure,
      "Should only be here if promotion failure handling is on");
    if (_promo_failure_scan_stack != NULL) {
      // Can be non-null because of reference processing.
      // Free stack with its elements.
      delete _promo_failure_scan_stack;
      _promo_failure_scan_stack = NULL;
    }
    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  update_time_of_last_gc(os::javaTimeMillis());

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}

static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits,
// if necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}
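
// ClaimedForwardPtr (0x4, above) is a sentinel that can never be a real heap
// address, so a reader can always tell a claim from a published forwardee.
// Schematic of the writer side of this protocol, as used by
// copy_to_survivor_space_avoiding_promotion_undo() below (sketch only;
// promote_somehow() is a hypothetical stand-in for the copy step):
#if 0
oop claim_then_copy(oop old, size_t sz, markOop m) {
  oop prev = old->forward_to_atomic(ClaimedForwardPtr);  // CAS the claim in
  if (prev != NULL) {
    return real_forwardee(old);  // lost the race; may spin past the sentinel
  }
  oop copy = promote_somehow(old, sz, m);  // hypothetical copy step
  old->forward_to(copy);                   // publish the real forwardee
  return copy;
}
#endif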
duke@435 | 988 | |
duke@435 | 989 | #ifdef ASSERT |
duke@435 | 990 | bool ParNewGeneration::is_legal_forward_ptr(oop p) { |
duke@435 | 991 | return |
duke@435 | 992 | (_avoid_promotion_undo && p == ClaimedForwardPtr) |
duke@435 | 993 | || Universe::heap()->is_in_reserved(p); |
duke@435 | 994 | } |
duke@435 | 995 | #endif |
duke@435 | 996 | |
duke@435 | 997 | void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { |
duke@435 | 998 | if ((m != markOopDesc::prototype()) && |
duke@435 | 999 | (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) { |
duke@435 | 1000 | MutexLocker ml(ParGCRareEvent_lock); |
duke@435 | 1001 | DefNewGeneration::preserve_mark_if_necessary(obj, m); |
duke@435 | 1002 | } |
duke@435 | 1003 | } |
duke@435 | 1004 | |
duke@435 | 1005 | // Multiple GC threads may try to promote an object. If the object |
duke@435 | 1006 | // is successfully promoted, a forwarding pointer will be installed in |
duke@435 | 1007 | // the object in the young generation. This method claims the right |
duke@435 | 1008 | // to install the forwarding pointer before it copies the object, |
duke@435 | 1009 | // thus avoiding the need to undo the copy as in |
duke@435 | 1010 | // copy_to_survivor_space_avoiding_with_undo. |
duke@435 | 1011 | |
duke@435 | 1012 | oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo( |
duke@435 | 1013 | ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { |
duke@435 | 1014 | // In the sequential version, this assert also says that the object is |
duke@435 | 1015 | // not forwarded. That might not be the case here. It is the case that |
duke@435 | 1016 | // the caller observed it to be not forwarded at some time in the past. |
duke@435 | 1017 | assert(is_in_reserved(old), "shouldn't be scavenging this oop"); |
duke@435 | 1018 | |
duke@435 | 1019 | // The sequential code read "old->age()" below. That doesn't work here, |
duke@435 | 1020 | // since the age is in the mark word, and that might be overwritten with |
duke@435 | 1021 | // a forwarding pointer by a parallel thread. So we must save the mark |
duke@435 | 1022 | // word in a local and then analyze it. |
duke@435 | 1023 | oopDesc dummyOld; |
duke@435 | 1024 | dummyOld.set_mark(m); |
duke@435 | 1025 | assert(!dummyOld.is_forwarded(), |
duke@435 | 1026 | "should not be called with forwarding pointer mark word."); |
duke@435 | 1027 | |
duke@435 | 1028 | oop new_obj = NULL; |
duke@435 | 1029 | oop forward_ptr; |
duke@435 | 1030 | |
duke@435 | 1031 | // Try allocating obj in to-space (unless too old) |
duke@435 | 1032 | if (dummyOld.age() < tenuring_threshold()) { |
duke@435 | 1033 | new_obj = (oop)par_scan_state->alloc_in_to_space(sz); |
duke@435 | 1034 | if (new_obj == NULL) { |
duke@435 | 1035 | set_survivor_overflow(true); |
duke@435 | 1036 | } |
duke@435 | 1037 | } |
duke@435 | 1038 | |
duke@435 | 1039 | if (new_obj == NULL) { |
duke@435 | 1040 | // Either to-space is full or we decided to promote |
duke@435 | 1041 | // try allocating obj tenured |
duke@435 | 1042 | |
duke@435 | 1043 | // Attempt to install a null forwarding pointer (atomically), |
duke@435 | 1044 | // to claim the right to install the real forwarding pointer. |
duke@435 | 1045 | forward_ptr = old->forward_to_atomic(ClaimedForwardPtr); |
duke@435 | 1046 | if (forward_ptr != NULL) { |
duke@435 | 1047 | // someone else beat us to it. |
duke@435 | 1048 | return real_forwardee(old); |
duke@435 | 1049 | } |
duke@435 | 1050 | |
duke@435 | 1051 | new_obj = _next_gen->par_promote(par_scan_state->thread_num(), |
duke@435 | 1052 | old, m, sz); |
duke@435 | 1053 | |
duke@435 | 1054 | if (new_obj == NULL) { |
duke@435 | 1055 | if (!HandlePromotionFailure) { |
duke@435 | 1056 | // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag |
duke@435 | 1057 | // is incorrectly set. In any case, its seriously wrong to be here! |
duke@435 | 1058 | vm_exit_out_of_memory(sz*wordSize, "promotion"); |
duke@435 | 1059 | } |
duke@435 | 1060 | // promotion failed, forward to self |
duke@435 | 1061 | _promotion_failed = true; |
duke@435 | 1062 | new_obj = old; |
duke@435 | 1063 | |
duke@435 | 1064 | preserve_mark_if_necessary(old, m); |
ysr@1580 | 1065 | // Log the size of the maiden promotion failure |
ysr@1580 | 1066 | par_scan_state->log_promotion_failure(sz); |
duke@435 | 1067 | } |
duke@435 | 1068 | |
duke@435 | 1069 | old->forward_to(new_obj); |
duke@435 | 1070 | forward_ptr = NULL; |
duke@435 | 1071 | } else { |
duke@435 | 1072 | // Is in to-space; do copying ourselves. |
duke@435 | 1073 | Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); |
duke@435 | 1074 | forward_ptr = old->forward_to_atomic(new_obj); |
duke@435 | 1075 | // Restore the mark word copied above. |
duke@435 | 1076 | new_obj->set_mark(m); |
duke@435 | 1077 | // Increment age if obj still in new generation |
duke@435 | 1078 | new_obj->incr_age(); |
duke@435 | 1079 | par_scan_state->age_table()->add(new_obj, sz); |
duke@435 | 1080 | } |
duke@435 | 1081 | assert(new_obj != NULL, "just checking"); |
duke@435 | 1082 | |
duke@435 | 1083 | if (forward_ptr == NULL) { |
duke@435 | 1084 | oop obj_to_push = new_obj; |
duke@435 | 1085 | if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { |
duke@435 | 1086 | // Length field used as index of next element to be scanned. |
duke@435 | 1087 | // Real length can be obtained from real_forwardee() |
duke@435 | 1088 | arrayOop(old)->set_length(0); |
duke@435 | 1089 | obj_to_push = old; |
duke@435 | 1090 | assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, |
duke@435 | 1091 | "push forwarded object"); |
duke@435 | 1092 | } |
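// [Editorial sketch, not part of this changeset] The length-field trick
// above turns the stale from-space copy into a scan cursor for chunked
// ("grey boundary") scanning of large object arrays. The partial-scan
// code (not shown in this excerpt) pops `old`, scans one chunk of
// roughly ParGCArrayScanChunk elements, advances the cursor, and
// re-pushes `old` if elements remain:
//
//   int start = arrayOop(old)->length();                  // next index
//   int end   = arrayOop(real_forwardee(old))->length();  // real length
//   if (end - start > (int)ParGCArrayScanChunk) {
//     end = start + (int)ParGCArrayScanChunk;
//     arrayOop(old)->set_length(end);   // advance the grey boundary
//     /* re-push old on the work queue */
//   }
//   /* scan elements [start, end) of the forwardee */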
duke@435 | 1093 | // Push it on one of the queues of to-be-scanned objects. |
ysr@969 | 1094 | bool simulate_overflow = false; |
ysr@969 | 1095 | NOT_PRODUCT( |
ysr@969 | 1096 | if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { |
ysr@969 | 1097 | // simulate a stack overflow |
ysr@969 | 1098 | simulate_overflow = true; |
ysr@969 | 1099 | } |
ysr@969 | 1100 | ) |
ysr@969 | 1101 | if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { |
duke@435 | 1102 | // Add stats for overflow pushes. |
duke@435 | 1103 | if (Verbose && PrintGCDetails) { |
duke@435 | 1104 | gclog_or_tty->print("queue overflow!\n"); |
duke@435 | 1105 | } |
ysr@969 | 1106 | push_on_overflow_list(old, par_scan_state); |
duke@435 | 1107 | par_scan_state->note_overflow_push(); |
duke@435 | 1108 | } |
duke@435 | 1109 | par_scan_state->note_push(); |
duke@435 | 1110 | |
duke@435 | 1111 | return new_obj; |
duke@435 | 1112 | } |
duke@435 | 1113 | |
duke@435 | 1114 | // Oops. Someone beat us to it. Undo the allocation. Where did we |
duke@435 | 1115 | // allocate it? |
duke@435 | 1116 | if (is_in_reserved(new_obj)) { |
duke@435 | 1117 | // Must be in to_space. |
duke@435 | 1118 | assert(to()->is_in_reserved(new_obj), "Checking"); |
duke@435 | 1119 | if (forward_ptr == ClaimedForwardPtr) { |
duke@435 | 1120 | // Wait to get the real forwarding pointer value. |
duke@435 | 1121 | forward_ptr = real_forwardee(old); |
duke@435 | 1122 | } |
duke@435 | 1123 | par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); |
duke@435 | 1124 | } |
duke@435 | 1125 | |
duke@435 | 1126 | return forward_ptr; |
duke@435 | 1127 | } |
duke@435 | 1128 | |
duke@435 | 1129 | |
duke@435 | 1130 | // Multiple GC threads may try to promote the same object. If two |
duke@435 | 1131 | // or more GC threads copy the object, only one wins the race to install |
duke@435 | 1132 | // the forwarding pointer. The other threads have to undo their copy. |
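// [Editorial sketch, not part of this changeset] The race resolves
// through one atomic installation of the forwarding pointer; each
// copier does, in effect:
//
//   oop my_copy = /* allocate and copy old */;
//   oop winner  = old->forward_to_atomic(my_copy);
//   if (winner == NULL) {
//     // we won: my_copy is the official new location of old
//   } else {
//     // we lost: undo our allocation and use `winner` instead
//   }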
duke@435 | 1133 | |
duke@435 | 1134 | oop ParNewGeneration::copy_to_survivor_space_with_undo( |
duke@435 | 1135 | ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { |
duke@435 | 1136 | |
duke@435 | 1137 | // In the sequential version, this assert also says that the object is |
duke@435 | 1138 | // not forwarded. That might not be the case here. It is the case that |
duke@435 | 1139 | // the caller observed it to be not forwarded at some time in the past. |
duke@435 | 1140 | assert(is_in_reserved(old), "shouldn't be scavenging this oop"); |
duke@435 | 1141 | |
duke@435 | 1142 | // The sequential code read "old->age()" below. That doesn't work here, |
duke@435 | 1143 | // since the age is in the mark word, and that might be overwritten with |
duke@435 | 1144 | // a forwarding pointer by a parallel thread. So we must save the mark |
duke@435 | 1145 | // word here, install it in a local oopDesc, and then analyze it. |
duke@435 | 1146 | oopDesc dummyOld; |
duke@435 | 1147 | dummyOld.set_mark(m); |
duke@435 | 1148 | assert(!dummyOld.is_forwarded(), |
duke@435 | 1149 | "should not be called with forwarding pointer mark word."); |
duke@435 | 1150 | |
duke@435 | 1151 | bool failed_to_promote = false; |
duke@435 | 1152 | oop new_obj = NULL; |
duke@435 | 1153 | oop forward_ptr; |
duke@435 | 1154 | |
duke@435 | 1155 | // Try allocating obj in to-space (unless too old) |
duke@435 | 1156 | if (dummyOld.age() < tenuring_threshold()) { |
duke@435 | 1157 | new_obj = (oop)par_scan_state->alloc_in_to_space(sz); |
duke@435 | 1158 | if (new_obj == NULL) { |
duke@435 | 1159 | set_survivor_overflow(true); |
duke@435 | 1160 | } |
duke@435 | 1161 | } |
duke@435 | 1162 | |
duke@435 | 1163 | if (new_obj == NULL) { |
duke@435 | 1164 | // Either to-space is full or we decided to promote: |
duke@435 | 1165 | // try allocating obj tenured. |
duke@435 | 1166 | new_obj = _next_gen->par_promote(par_scan_state->thread_num(), |
duke@435 | 1167 | old, m, sz); |
duke@435 | 1168 | |
duke@435 | 1169 | if (new_obj == NULL) { |
duke@435 | 1170 | if (!HandlePromotionFailure) { |
duke@435 | 1171 | // A failed promotion likely means the MaxLiveObjectEvacuationRatio |
duke@435 | 1172 | // flag is incorrectly set. In any case, it's seriously wrong to be |
duke@435 | 1173 | // here! |
duke@435 | 1174 | vm_exit_out_of_memory(sz*wordSize, "promotion"); |
duke@435 | 1175 | } |
duke@435 | 1176 | // promotion failed, forward to self |
duke@435 | 1177 | forward_ptr = old->forward_to_atomic(old); |
duke@435 | 1178 | new_obj = old; |
duke@435 | 1179 | |
duke@435 | 1180 | if (forward_ptr != NULL) { |
duke@435 | 1181 | return forward_ptr; // someone else succeeded |
duke@435 | 1182 | } |
duke@435 | 1183 | |
duke@435 | 1184 | _promotion_failed = true; |
duke@435 | 1185 | failed_to_promote = true; |
duke@435 | 1186 | |
duke@435 | 1187 | preserve_mark_if_necessary(old, m); |
ysr@1580 | 1188 | // Log the size of the first (maiden) promotion failure. |
ysr@1580 | 1189 | par_scan_state->log_promotion_failure(sz); |
duke@435 | 1190 | } |
duke@435 | 1191 | } else { |
duke@435 | 1192 | // Is in to-space; do copying ourselves. |
duke@435 | 1193 | Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); |
duke@435 | 1194 | // Restore the mark word copied above. |
duke@435 | 1195 | new_obj->set_mark(m); |
duke@435 | 1196 | // Increment age if new_obj still in new generation |
duke@435 | 1197 | new_obj->incr_age(); |
duke@435 | 1198 | par_scan_state->age_table()->add(new_obj, sz); |
duke@435 | 1199 | } |
duke@435 | 1200 | assert(new_obj != NULL, "just checking"); |
duke@435 | 1201 | |
duke@435 | 1202 | // Now attempt to install the forwarding pointer (atomically). |
duke@435 | 1203 | // The mark word was saved (in m) before being overwritten with the |
duke@435 | 1204 | // forwarding ptr, which is why it could be restored in the copy above. |
duke@435 | 1205 | if (!failed_to_promote) { |
duke@435 | 1206 | forward_ptr = old->forward_to_atomic(new_obj); |
duke@435 | 1207 | } |
duke@435 | 1208 | |
duke@435 | 1209 | if (forward_ptr == NULL) { |
duke@435 | 1210 | oop obj_to_push = new_obj; |
duke@435 | 1211 | if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { |
duke@435 | 1212 | // Length field used as index of next element to be scanned. |
duke@435 | 1213 | // Real length can be obtained from real_forwardee() |
duke@435 | 1214 | arrayOop(old)->set_length(0); |
duke@435 | 1215 | obj_to_push = old; |
duke@435 | 1216 | assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push, |
duke@435 | 1217 | "push forwarded object"); |
duke@435 | 1218 | } |
duke@435 | 1219 | // Push it on one of the queues of to-be-scanned objects. |
ysr@969 | 1220 | bool simulate_overflow = false; |
ysr@969 | 1221 | NOT_PRODUCT( |
ysr@969 | 1222 | if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) { |
ysr@969 | 1223 | // simulate a stack overflow |
ysr@969 | 1224 | simulate_overflow = true; |
ysr@969 | 1225 | } |
ysr@969 | 1226 | ) |
ysr@969 | 1227 | if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { |
duke@435 | 1228 | // Add stats for overflow pushes. |
ysr@969 | 1229 | push_on_overflow_list(old, par_scan_state); |
duke@435 | 1230 | par_scan_state->note_overflow_push(); |
duke@435 | 1231 | } |
duke@435 | 1232 | par_scan_state->note_push(); |
duke@435 | 1233 | |
duke@435 | 1234 | return new_obj; |
duke@435 | 1235 | } |
duke@435 | 1236 | |
duke@435 | 1237 | // Oops. Someone beat us to it. Undo the allocation. Where did we |
duke@435 | 1238 | // allocate it? |
duke@435 | 1239 | if (is_in_reserved(new_obj)) { |
duke@435 | 1240 | // Must be in to_space. |
duke@435 | 1241 | assert(to()->is_in_reserved(new_obj), "Checking"); |
duke@435 | 1242 | par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); |
duke@435 | 1243 | } else { |
duke@435 | 1244 | assert(!_avoid_promotion_undo, "Should not be here if avoiding."); |
duke@435 | 1245 | _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(), |
duke@435 | 1246 | (HeapWord*)new_obj, sz); |
duke@435 | 1247 | } |
duke@435 | 1248 | |
duke@435 | 1249 | return forward_ptr; |
duke@435 | 1250 | } |
duke@435 | 1251 | |
ysr@969 | 1252 | #ifndef PRODUCT |
ysr@969 | 1253 | // It's OK to call this multi-threaded; the worst thing |
ysr@969 | 1254 | // that can happen is that we'll get a bunch of closely |
ysr@969 | 1255 | // spaced simulated overflows, but that's OK, in fact |
ysr@969 | 1256 | // probably good as it would exercise the overflow code |
ysr@969 | 1257 | // under contention. |
ysr@969 | 1258 | bool ParNewGeneration::should_simulate_overflow() { |
ysr@969 | 1259 | if (_overflow_counter-- <= 0) { // just being defensive |
ysr@969 | 1260 | _overflow_counter = ParGCWorkQueueOverflowInterval; |
ysr@969 | 1261 | return true; |
ysr@969 | 1262 | } else { |
ysr@969 | 1263 | return false; |
ysr@969 | 1264 | } |
ysr@969 | 1265 | } |
ysr@969 | 1266 | #endif |
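// [Editorial note, not part of this changeset] In a non-product build
// the simulation above is driven by the two flags used in this file,
// e.g. (illustrative command line):
//
//   -XX:+ParGCWorkQueueOverflowALot -XX:ParGCWorkQueueOverflowInterval=1000
//
// which forces roughly one simulated overflow per 1000 queue pushes as
// _overflow_counter counts down and is reset to the interval.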
ysr@969 | 1267 | |
ysr@1114 | 1268 | // In case we are using compressed oops, we need to be careful. |
ysr@1114 | 1269 | // If the object being pushed is an object array, then its length |
ysr@1114 | 1270 | // field keeps track of the "grey boundary" at which the next |
ysr@1114 | 1271 | // incremental scan will be done (see ParGCArrayScanChunk). |
ysr@1114 | 1272 | // When using compressed oops, this length field is kept in the |
ysr@1114 | 1273 | // lower 32 bits of the erstwhile klass word and cannot be used |
ysr@1114 | 1274 | // for the overflow chaining pointer (OCP below). As such the OCP |
ysr@1114 | 1275 | // would itself need to be compressed into the top 32-bits in this |
ysr@1114 | 1276 | // case. Unfortunately, see below, in the event that we have a |
ysr@1114 | 1277 | // promotion failure, the node to be pushed on the list can be |
ysr@1114 | 1278 | // outside of the Java heap, so the heap-based pointer compression |
ysr@1114 | 1279 | // would not work (we would have potential aliasing between C-heap |
ysr@1114 | 1280 | // and Java-heap pointers). For this reason, when using compressed |
ysr@1114 | 1281 | // oops, we simply use a worker-thread-local, non-shared overflow |
ysr@1114 | 1282 | // list in the form of a growable array, with a slightly different |
ysr@1114 | 1283 | // overflow stack draining strategy. If/when we start using fat |
ysr@1114 | 1284 | // stacks here, we can go back to using (fat) pointer chains |
ysr@1114 | 1285 | // (although some performance comparisons would be useful since |
ysr@1114 | 1286 | // single global lists have their own performance disadvantages |
ysr@1114 | 1287 | // as we were made painfully aware not long ago, see 6786503). |
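// [Editorial sketch, not part of this changeset] The two representations
// selected by ParGCUseLocalOverflow below, side by side. With klass-word
// chaining, the single global list threads through the otherwise-unused
// klass words:
//
//   _overflow_list -> [obj A | klass = obj B] -> [obj B | klass = NULL]
//
// while the worker-local alternative is just a per-thread growable
// array, drained without any CAS:
//
//   GrowableArray<oop>* s = par_scan_state->overflow_stack();
//   s->push(from_space_obj);   // cf. push_on_overflow_stack()
//   oop o = s->pop();          // cf. take_from_overflow_stack()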
ysr@969 | 1288 | #define BUSY (oop(0x1aff1aff)) |
ysr@969 | 1289 | void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) { |
ysr@1114 | 1290 | assert(is_in_reserved(from_space_obj), "Should be from this generation"); |
ysr@1130 | 1291 | if (ParGCUseLocalOverflow) { |
ysr@1114 | 1292 | // In the case of compressed oops, we use a private, not-shared |
ysr@1114 | 1293 | // overflow stack. |
ysr@1114 | 1294 | par_scan_state->push_on_overflow_stack(from_space_obj); |
ysr@1114 | 1295 | } else { |
ysr@1130 | 1296 | assert(!UseCompressedOops, "Error"); |
ysr@1114 | 1297 | // if the object has been forwarded to itself, then we cannot |
ysr@1114 | 1298 | // use the klass pointer for the linked list. Instead we have |
ysr@1114 | 1299 | // to allocate an oopDesc in the C-Heap and use that for the linked list. |
ysr@1114 | 1300 | // XXX This is horribly inefficient when a promotion failure occurs |
ysr@1114 | 1301 | // and should be fixed. XXX FIX ME !!! |
ysr@969 | 1302 | #ifndef PRODUCT |
ysr@1114 | 1303 | Atomic::inc_ptr(&_num_par_pushes); |
ysr@1114 | 1304 | assert(_num_par_pushes > 0, "Tautology"); |
ysr@969 | 1305 | #endif |
ysr@1114 | 1306 | if (from_space_obj->forwardee() == from_space_obj) { |
ysr@1114 | 1307 | oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1); |
ysr@1114 | 1308 | listhead->forward_to(from_space_obj); |
ysr@1114 | 1309 | from_space_obj = listhead; |
ysr@1114 | 1310 | } |
ysr@1114 | 1311 | oop observed_overflow_list = _overflow_list; |
ysr@1114 | 1312 | oop cur_overflow_list; |
ysr@1114 | 1313 | do { |
ysr@1114 | 1314 | cur_overflow_list = observed_overflow_list; |
ysr@1114 | 1315 | if (cur_overflow_list != BUSY) { |
ysr@1114 | 1316 | from_space_obj->set_klass_to_list_ptr(cur_overflow_list); |
ysr@1114 | 1317 | } else { |
ysr@1114 | 1318 | from_space_obj->set_klass_to_list_ptr(NULL); |
ysr@1114 | 1319 | } |
ysr@1114 | 1320 | observed_overflow_list = |
ysr@1114 | 1321 | (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list); |
ysr@1114 | 1322 | } while (cur_overflow_list != observed_overflow_list); |
duke@435 | 1323 | } |
duke@435 | 1324 | } |
duke@435 | 1325 | |
ysr@1114 | 1326 | bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) { |
ysr@1114 | 1327 | bool res; |
ysr@1114 | 1328 | |
ysr@1130 | 1329 | if (ParGCUseLocalOverflow) { |
ysr@1114 | 1330 | res = par_scan_state->take_from_overflow_stack(); |
ysr@1114 | 1331 | } else { |
ysr@1130 | 1332 | assert(!UseCompressedOops, "Error"); |
ysr@1114 | 1333 | res = take_from_overflow_list_work(par_scan_state); |
ysr@1114 | 1334 | } |
ysr@1114 | 1335 | return res; |
ysr@1114 | 1336 | } |
ysr@1114 | 1337 | |
ysr@1114 | 1338 | |
ysr@969 | 1339 | // *NOTE*: The overflow list manipulation code here and |
ysr@969 | 1340 | // in CMSCollector:: are very similar in shape, |
ysr@969 | 1341 | // except that in the CMS case we thread the objects |
ysr@969 | 1342 | // directly into the list via their mark word, and do |
ysr@969 | 1343 | // not need to deal with special cases below related |
ysr@969 | 1344 | // to chunking of object arrays and promotion failure |
ysr@969 | 1345 | // handling. |
ysr@969 | 1346 | // CR 6797058 has been filed to attempt consolidation of |
ysr@969 | 1347 | // the common code. |
ysr@969 | 1348 | // Because of the common code, if you make any changes in |
ysr@969 | 1349 | // the code below, please check the CMS version to see if |
ysr@969 | 1350 | // similar changes might be needed. |
ysr@969 | 1351 | // See CMSCollector::par_take_from_overflow_list() for |
ysr@969 | 1352 | // more extensive documentation comments. |
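// [Editorial sketch, not part of this changeset] The shape of the claim
// protocol implemented below, with BUSY marking the list head as
// "being trimmed":
//
//   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
//   // prefix == NULL: list was empty -- restore NULL over our BUSY
//   // prefix == BUSY: another thread is trimming -- sleep and retry
//   // otherwise: detach up to objsFromOverflow items from prefix,
//   //   CAS the remaining suffix back over BUSY (or splice it onto
//   //   whatever was pushed meanwhile), then drain the prefix into
//   //   this thread's work queue.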
ysr@1114 | 1353 | bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) { |
duke@435 | 1354 | ObjToScanQueue* work_q = par_scan_state->work_queue(); |
duke@435 | 1355 | // How many to take? |
ysr@1114 | 1356 | size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
ysr@969 | 1357 | (size_t)ParGCDesiredObjsFromOverflowList); |
duke@435 | 1358 | |
ysr@1114 | 1359 | assert(par_scan_state->overflow_stack() == NULL, "Error"); |
ysr@1130 | 1360 | assert(!UseCompressedOops, "Error"); |
duke@435 | 1361 | if (_overflow_list == NULL) return false; |
duke@435 | 1362 | |
duke@435 | 1363 | // Otherwise, there was something there; try claiming the list. |
ysr@969 | 1364 | oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); |
ysr@969 | 1365 | // Trim off a prefix of at most objsFromOverflow items |
ysr@969 | 1366 | Thread* tid = Thread::current(); |
ysr@969 | 1367 | size_t spin_count = (size_t)ParallelGCThreads; |
ysr@969 | 1368 | size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100); |
ysr@969 | 1369 | for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) { |
ysr@969 | 1370 | // someone grabbed it before we did ... |
ysr@969 | 1371 | // ... we spin for a short while... |
ysr@969 | 1372 | os::sleep(tid, sleep_time_millis, false); |
ysr@969 | 1373 | if (_overflow_list == NULL) { |
ysr@969 | 1374 | // nothing left to take |
ysr@969 | 1375 | return false; |
ysr@969 | 1376 | } else if (_overflow_list != BUSY) { |
ysr@969 | 1377 | // try and grab the prefix |
ysr@969 | 1378 | prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list); |
ysr@969 | 1379 | } |
duke@435 | 1380 | } |
ysr@969 | 1381 | if (prefix == NULL || prefix == BUSY) { |
ysr@969 | 1382 | // Nothing to take or waited long enough |
ysr@969 | 1383 | if (prefix == NULL) { |
ysr@969 | 1384 | // Write back the NULL, in case we overwrote it with BUSY above |
ysr@969 | 1385 | // and it is still that value. |
ysr@969 | 1386 | (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); |
ysr@969 | 1387 | } |
ysr@969 | 1388 | return false; |
ysr@969 | 1389 | } |
ysr@969 | 1390 | assert(prefix != NULL && prefix != BUSY, "Error"); |
ysr@969 | 1391 | size_t i = 1; |
duke@435 | 1392 | oop cur = prefix; |
coleenp@602 | 1393 | while (i < objsFromOverflow && cur->klass_or_null() != NULL) { |
duke@435 | 1394 | i++; cur = oop(cur->klass()); |
duke@435 | 1395 | } |
duke@435 | 1396 | |
duke@435 | 1397 | // Reattach remaining (suffix) to overflow list |
ysr@969 | 1398 | if (cur->klass_or_null() == NULL) { |
ysr@969 | 1399 | // Write back the NULL in lieu of the BUSY we wrote |
ysr@969 | 1400 | // above, if it is still the same value. |
ysr@969 | 1401 | if (_overflow_list == BUSY) { |
ysr@969 | 1402 | (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); |
duke@435 | 1403 | } |
ysr@969 | 1404 | } else { |
ysr@969 | 1405 | assert(cur->klass_or_null() != BUSY, "Error"); |
ysr@969 | 1406 | oop suffix = oop(cur->klass()); // suffix will be put back on global list |
ysr@969 | 1407 | cur->set_klass_to_list_ptr(NULL); // break off suffix |
ysr@969 | 1408 | // It's possible that the list is still in the empty (BUSY) state |
ysr@969 | 1409 | // we left it in a short while ago; in that case we may be |
ysr@969 | 1410 | // able to place back the suffix. |
ysr@969 | 1411 | oop observed_overflow_list = _overflow_list; |
ysr@969 | 1412 | oop cur_overflow_list = observed_overflow_list; |
ysr@969 | 1413 | bool attached = false; |
ysr@969 | 1414 | while (observed_overflow_list == BUSY || observed_overflow_list == NULL) { |
ysr@969 | 1415 | observed_overflow_list = |
ysr@969 | 1416 | (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); |
ysr@969 | 1417 | if (cur_overflow_list == observed_overflow_list) { |
ysr@969 | 1418 | attached = true; |
ysr@969 | 1419 | break; |
ysr@969 | 1420 | } else cur_overflow_list = observed_overflow_list; |
ysr@969 | 1421 | } |
ysr@969 | 1422 | if (!attached) { |
ysr@969 | 1423 | // Too bad, someone else got in between; we'll need to do a splice. |
ysr@969 | 1424 | // Find the last item of suffix list |
ysr@969 | 1425 | oop last = suffix; |
ysr@969 | 1426 | while (last->klass_or_null() != NULL) { |
ysr@969 | 1427 | last = oop(last->klass()); |
ysr@969 | 1428 | } |
ysr@969 | 1429 | // Atomically prepend suffix to current overflow list |
ysr@969 | 1430 | observed_overflow_list = _overflow_list; |
ysr@969 | 1431 | do { |
ysr@969 | 1432 | cur_overflow_list = observed_overflow_list; |
ysr@969 | 1433 | if (cur_overflow_list != BUSY) { |
ysr@969 | 1434 | // Do the splice ... |
ysr@969 | 1435 | last->set_klass_to_list_ptr(cur_overflow_list); |
ysr@969 | 1436 | } else { // cur_overflow_list == BUSY |
ysr@969 | 1437 | last->set_klass_to_list_ptr(NULL); |
ysr@969 | 1438 | } |
ysr@969 | 1439 | observed_overflow_list = |
ysr@969 | 1440 | (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list); |
ysr@969 | 1441 | } while (cur_overflow_list != observed_overflow_list); |
duke@435 | 1442 | } |
duke@435 | 1443 | } |
duke@435 | 1444 | |
duke@435 | 1445 | // Push objects on prefix list onto this thread's work queue |
ysr@969 | 1446 | assert(prefix != NULL && prefix != BUSY, "program logic"); |
duke@435 | 1447 | cur = prefix; |
ysr@969 | 1448 | ssize_t n = 0; |
duke@435 | 1449 | while (cur != NULL) { |
duke@435 | 1450 | oop obj_to_push = cur->forwardee(); |
ysr@889 | 1451 | oop next = oop(cur->klass_or_null()); |
duke@435 | 1452 | cur->set_klass(obj_to_push->klass()); |
ysr@969 | 1453 | // This may be an array object that is self-forwarded. In that case, the list pointer |
ysr@969 | 1454 | // space, cur, is not in the Java heap, but rather in the C-heap and should be freed. |
ysr@969 | 1455 | if (!is_in_reserved(cur)) { |
ysr@969 | 1456 | // This can become a scaling bottleneck when there is work queue overflow coincident |
ysr@969 | 1457 | // with promotion failure. |
ysr@969 | 1458 | oopDesc* f = cur; |
ysr@969 | 1459 | FREE_C_HEAP_ARRAY(oopDesc, f); |
ysr@969 | 1460 | } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { |
ysr@969 | 1461 | assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); |
duke@435 | 1462 | obj_to_push = cur; |
duke@435 | 1463 | } |
ysr@969 | 1464 | bool ok = work_q->push(obj_to_push); |
ysr@969 | 1465 | assert(ok, "Should have succeeded"); |
duke@435 | 1466 | cur = next; |
duke@435 | 1467 | n++; |
duke@435 | 1468 | } |
duke@435 | 1469 | par_scan_state->note_overflow_refill(n); |
ysr@969 | 1470 | #ifndef PRODUCT |
ysr@969 | 1471 | assert(_num_par_pushes >= n, "Too many pops?"); |
ysr@969 | 1472 | Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes); |
ysr@969 | 1473 | #endif |
duke@435 | 1474 | return true; |
duke@435 | 1475 | } |
ysr@969 | 1476 | #undef BUSY |
duke@435 | 1477 | |
duke@435 | 1478 | void ParNewGeneration::ref_processor_init() |
duke@435 | 1479 | { |
duke@435 | 1480 | if (_ref_processor == NULL) { |
duke@435 | 1481 | // Allocate and initialize a reference processor |
duke@435 | 1482 | _ref_processor = ReferenceProcessor::create_ref_processor( |
duke@435 | 1483 | _reserved, // span |
duke@435 | 1484 | refs_discovery_is_atomic(), // atomic_discovery |
duke@435 | 1485 | refs_discovery_is_mt(), // mt_discovery |
duke@435 | 1486 | NULL, // is_alive_non_header |
duke@435 | 1487 | ParallelGCThreads, |
duke@435 | 1488 | ParallelRefProcEnabled); |
duke@435 | 1489 | } |
duke@435 | 1490 | } |
duke@435 | 1491 | |
duke@435 | 1492 | const char* ParNewGeneration::name() const { |
duke@435 | 1493 | return "par new generation"; |
duke@435 | 1494 | } |