Thu, 07 Apr 2011 09:53:20 -0700
7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
1 /*
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
28 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.hpp"
30 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
33 #include "utilities/taskqueue.hpp"
35 // Inline functions for G1CollectedHeap
37 inline HeapRegion*
38 G1CollectedHeap::heap_region_containing(const void* addr) const {
39 HeapRegion* hr = _hrs->addr_to_region(addr);
40 // hr can be null if addr in perm_gen
41 if (hr != NULL && hr->continuesHumongous()) {
42 hr = hr->humongous_start_region();
43 }
44 return hr;
45 }
47 inline HeapRegion*
48 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
49 assert(_g1_reserved.contains(addr), "invariant");
50 size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
51 >> HeapRegion::LogOfHRGrainBytes;
53 HeapRegion* res = _hrs->at(index);
54 assert(res == _hrs->addr_to_region(addr), "sanity");
55 return res;
56 }
58 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
59 HeapRegion* r = _hrs->addr_to_region(obj);
60 return r != NULL && r->in_collection_set();
61 }
63 inline HeapWord*
64 G1CollectedHeap::attempt_allocation(size_t word_size,
65 unsigned int* gc_count_before_ret) {
66 assert_heap_not_locked_and_not_at_safepoint();
67 assert(!isHumongous(word_size), "attempt_allocation() should not "
68 "be called for humongous allocation requests");
70 HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
71 false /* bot_updates */);
72 if (result == NULL) {
73 result = attempt_allocation_slow(word_size, gc_count_before_ret);
74 }
75 assert_heap_not_locked();
76 if (result != NULL) {
77 dirty_young_block(result, word_size);
78 }
79 return result;
80 }
82 // It dirties the cards that cover the block so that so that the post
83 // write barrier never queues anything when updating objects on this
84 // block. It is assumed (and in fact we assert) that the block
85 // belongs to a young region.
86 inline void
87 G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
88 assert_heap_not_locked();
90 // Assign the containing region to containing_hr so that we don't
91 // have to keep calling heap_region_containing_raw() in the
92 // asserts below.
93 DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
94 assert(containing_hr != NULL && start != NULL && word_size > 0,
95 "pre-condition");
96 assert(containing_hr->is_in(start), "it should contain start");
97 assert(containing_hr->is_young(), "it should be young");
98 assert(!containing_hr->isHumongous(), "it should not be humongous");
100 HeapWord* end = start + word_size;
101 assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
103 MemRegion mr(start, end);
104 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
105 }
107 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
108 return _task_queues->queue(i);
109 }
111 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
112 return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
113 }
115 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
116 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
117 }
119 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP