Mon, 03 Aug 2009 12:59:30 -0700
6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 inline size_t G1RemSet::n_workers() {
26 if (_g1->workers() != NULL) {
27 return _g1->workers()->total_workers();
28 } else {
29 return 1;
30 }
31 }
33 template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
34 par_write_ref_nv(from, p, 0);
35 }
37 inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
38 bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
39 return result;
40 }
// Record that the region containing *p now holds a reference into the
// region containing p's referent. 'from' is the region holding p (may be
// NULL); 'tid' is the calling worker's id, used to select that worker's
// private _new_refs buffer and remset bucket so no cross-thread
// synchronization is needed here.
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop: alignment and heap-containment checks
  // only, since a racing mutator/GC thread may be mutating the object.
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT
  assert(from == NULL || from->is_in_reserved(p),
         "p is not in from");
  HeapRegion* to = _g1->heap_region_containing(obj);
  // The test below could be optimized by applying a bit op to to and from.
  // Only cross-region references are interesting; same-region (and NULL)
  // references never need a remembered-set entry.
  if (to != NULL && from != NULL && from != to) {
    // There is a tricky infinite loop if we keep pushing
    // self forwarding pointers onto our _new_refs list.
    // The _par_traversal_in_progress flag is true during the collection pause,
    // false during the evacuation failure handling.
    if (_par_traversal_in_progress &&
        to->in_collection_set() && !self_forwarded(obj)) {
      _new_refs[tid]->push((void*)p);
      // Deferred updates to the Cset are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
#if G1_REM_SET_LOGGING
      gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
                             " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
                             p, obj,
                             to->bottom(), to->end());
#endif
      // Immediate update: add the reference to the target region's own
      // "into" remembered set, in this worker's bucket.
      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
      to->rem_set()->add_reference(p, tid);
    }
  }
}
88 template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
89 assert(_from != NULL, "from region must be non-NULL");
90 _rs->par_write_ref(_from, p, _worker_i);
91 }