Mon, 03 Aug 2009 12:59:30 -0700
6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
ysr@777 | 1 | /* |
xdono@1279 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | inline size_t G1RemSet::n_workers() { |
ysr@777 | 26 | if (_g1->workers() != NULL) { |
ysr@777 | 27 | return _g1->workers()->total_workers(); |
ysr@777 | 28 | } else { |
ysr@777 | 29 | return 1; |
ysr@777 | 30 | } |
ysr@777 | 31 | } |
ysr@777 | 32 | |
// Serial (non-virtual) write-barrier entry point: delegates to the
// parallel variant using worker id 0.
template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
  par_write_ref_nv(from, p, 0 /* tid */);
}
ysr@777 | 36 | |
ysr@777 | 37 | inline bool HRInto_G1RemSet::self_forwarded(oop obj) { |
ysr@777 | 38 | bool result = (obj->is_forwarded() && (obj->forwardee()== obj)); |
ysr@777 | 39 | return result; |
ysr@777 | 40 | } |
ysr@777 | 41 | |
// Record the reference stored at location p (which lives in region "from")
// if it crosses into a different region: either push it onto the calling
// worker's deferred-reference buffer (during the collection pause, for
// references into the collection set) or add it directly to the destination
// region's remembered set. "tid" is the worker id used to index the
// per-worker _new_refs buffers.
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop: check alignment and that the oop lies
  // within the reserved heap. (A full is_oop() would race with concurrent
  // mutation of the object.)
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT
  assert(from == NULL || from->is_in_reserved(p),
         "p is not in from");
  HeapRegion* to = _g1->heap_region_containing(obj);
  // Only cross-region references are interesting.
  // The test below could be optimized by applying a bit op to to and from.
  if (to != NULL && from != NULL && from != to) {
    // There is a tricky infinite loop if we keep pushing
    // self forwarding pointers onto our _new_refs list.
    // The _par_traversal_in_progress flag is true during the collection pause,
    // false during the evacuation failure handling.
    if (_par_traversal_in_progress &&
        to->in_collection_set() && !self_forwarded(obj)) {
      _new_refs[tid]->push((void*)p);
      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
#if G1_REM_SET_LOGGING
      gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
                             " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
                             p, obj,
                             to->bottom(), to->end());
#endif
      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
      to->rem_set()->add_reference(p, tid);
    }
  }
}
apetrusenko@1061 | 87 | |
// Closure body: forward the reference at p into the remembered set,
// tagged with this closure's worker id so per-worker buffers can be used.
// _from must have been set (via the closure's setup) before iteration.
template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
  assert(_from != NULL, "from region must be non-NULL");
  _rs->par_write_ref(_from, p, _worker_i);
}