Fri, 18 Feb 2011 10:07:34 -0800
7020042: G1: Partially remove fix for 6994628
Summary: Disable reference discovery and processing during concurrent marking by disabling fix for 6994628.
Reviewed-by: tonyp, ysr
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
28 #include "gc_implementation/g1/g1RemSet.hpp"
29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
30 #include "oops/oop.inline.hpp"
32 inline size_t G1RemSet::n_workers() {
33 if (_g1->workers() != NULL) {
34 return _g1->workers()->total_workers();
35 } else {
36 return 1;
37 }
38 }
40 template <class T>
41 inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
42 par_write_ref(from, p, 0);
43 }
// Records the (possibly compressed) reference at address p in the
// remembered set of the region containing the referenced object,
// unless the reference is intra-region or points outside the heap.
//   from - the region containing the field p (may be NULL)
//   p    - address of the reference field being recorded
//   tid  - id of the worker thread doing the update; forwarded to
//          the target region's remembered set add_reference()
template <class T>
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop: check alignment and that the
  // referent lies within the reserved heap.
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(from == NULL || from->is_in_reserved(p), "p is not in from");

  // Only cross-region references are recorded; 'to' is NULL when the
  // referent is not in a heap region (e.g. a NULL reference).
  HeapRegion* to = _g1->heap_region_containing(obj);
  if (to != NULL && from != to) {
#if G1_REM_SET_LOGGING
    gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
                           " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
                           p, obj,
                           to->bottom(), to->end());
#endif
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, tid);
  }
}
79 template <class T>
80 inline void UpdateRSOopClosure::do_oop_work(T* p) {
81 assert(_from != NULL, "from region must be non-NULL");
82 _rs->par_write_ref(_from, p, _worker_i);
83 }
85 template <class T>
86 inline void UpdateRSetImmediate::do_oop_work(T* p) {
87 assert(_from->is_in_reserved(p), "paranoia");
88 T heap_oop = oopDesc::load_heap_oop(p);
89 if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
90 _g1_rem_set->par_write_ref(_from, p, 0);
91 }
92 }
// Closure body: for each reference field p, either push the reference
// onto the current worker's scan queue (when it points into the
// collection set during RSet updating in an evacuation pause) or record
// it directly in the remembered set of the referenced region.
template <class T>
inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop: check alignment and that the
  // referent lies within the reserved heap.
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");

  // Only cross-region references are of interest; 'to' is NULL when the
  // referent is not in a heap region (e.g. a NULL reference).
  HeapRegion* to = _g1->heap_region_containing(obj);
  if (to != NULL && _from != to) {
    // The _record_refs_into_cset flag is true during the RSet
    // updating part of an evacuation pause. It is false at all
    // other times:
    // * rebuilding the remembered sets after a full GC
    // * during concurrent refinement.
    // * updating the remembered sets of regions in the collection
    //   set in the event of an evacuation failure (when deferred
    //   updates are enabled).

    if (_record_refs_into_cset && to->in_collection_set()) {
      // We are recording references that point into the collection
      // set and this particular reference does exactly that...
      // If the referenced object has already been forwarded
      // to itself, we are handling an evacuation failure and
      // we have already visited/tried to copy this object
      // there is no need to retry.
      if (!self_forwarded(obj)) {
        assert(_push_ref_cl != NULL, "should not be null");
        // Push the reference in the refs queue of the G1ParScanThreadState
        // instance for this worker thread.
        _push_ref_cl->do_oop(p);
      }

      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
      // We either don't care about pushing references that point into the
      // collection set (i.e. we're not during an evacuation pause) _or_
      // the reference doesn't point into the collection set. Either way
      // we add the reference directly to the RSet of the region containing
      // the referenced object.
      _g1_rem_set->par_write_ref(_from, p, _worker_i);
    }
  }
}
156 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP