Thu, 07 Apr 2011 09:53:20 -0700
7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP
28 #include "gc_implementation/g1/ptrQueue.hpp"
29 #include "memory/allocation.hpp"
31 class FreeIdSet;
33 // A closure class for processing card table entries. Note that we don't
34 // require these closure objects to be stack-allocated.
35 class CardTableEntryClosure: public CHeapObj {
36 public:
37 // Process the card whose card table entry is "card_ptr". If returns
38 // "false", terminate the iteration early.
// "worker_i" identifies the worker performing the processing;
// presumably a parallel worker id, 0 in the serial case -- confirm
// against callers.
39 virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
40 };
42 // A ptrQueue whose elements are pointers to dirty card table entries
// (jbyte*), as consumed by CardTableEntryClosure::do_card_ptr -- not
// oops, despite the generic void** storage in the PtrQueue base.
43 class DirtyCardQueue: public PtrQueue {
44 public:
45 DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) :
46 // Dirty card queues are always active, so we create them with their
47 // active field set to true.
48 PtrQueue(qset_, perm, true /* active */) { }
50 // Apply the closure to all elements, and reset the index to make the
51 // buffer empty. If a closure application returns "false", return
52 // "false" immediately, halting the iteration. If "consume" is true,
53 // deletes processed entries from logs.
// NOTE(review): "worker_i" is size_t here but int in
// apply_closure_to_buffer and CardTableEntryClosure::do_card_ptr;
// confirm the intended type and unify.
54 bool apply_closure(CardTableEntryClosure* cl,
55 bool consume = true,
56 size_t worker_i = 0);
58 // Apply the closure to all elements of "buf", down to "index"
59 // (inclusive.) If returns "false", then a closure application returned
60 // "false", and we return immediately. If "consume" is true, entries are
61 // set to NULL as they are processed, so they will not be processed again
62 // later.
63 static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
64 void** buf, size_t index, size_t sz,
65 bool consume = true,
66 int worker_i = 0);
// Raw access to the current buffer and index (inherited PtrQueue state).
67 void **get_buf() { return _buf;}
68 void set_buf(void **buf) {_buf = buf;}
69 size_t get_index() { return _index;}
// Drop any association with a buffer, leaving the queue empty. Note
// that the dropped buffer is not freed here; the caller owns it.
70 void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
71 };
// The set of all dirty card queues: owns the list of completed buffers
// and the single registered closure used to process them.
75 class DirtyCardQueueSet: public PtrQueueSet {
// The one closure applied to completed buffers; see set_closure().
76 CardTableEntryClosure* _closure;
// A queue shared among threads that lack their own dirty card queue --
// presumably non-Java threads; confirm against enqueue callers.
78 DirtyCardQueue _shared_dirty_card_queue;
80 // Override.
// Called when a mutator thread must process a buffer itself (e.g. the
// completed-buffer limit is reached); returns whether processing
// completed.
81 bool mut_process_buffer(void** buf);
83 // Protected by the _cbl_mon.
// Pool of parallel worker ids claimable by threads doing card
// processing; see num_par_ids().
84 FreeIdSet* _free_ids;
86 // The number of completed buffers processed by mutator and rs thread,
87 // respectively.
88 jint _processed_buffers_mut;
89 jint _processed_buffers_rs_thread;
91 public:
92 DirtyCardQueueSet(bool notify_when_complete = true);
// Second-phase initialization; parameters are forwarded to the
// PtrQueueSet base (monitor/locks guarding the completed-buffer list
// and free list, plus processing/limit thresholds).
94 void initialize(Monitor* cbl_mon, Mutex* fl_lock,
95 int process_completed_threshold,
96 int max_completed_queue,
97 Mutex* lock, PtrQueueSet* fl_owner = NULL);
99 // The number of parallel ids that can be claimed to allow collector or
100 // mutator threads to do card-processing work.
101 static size_t num_par_ids();
// Invoked when thread "t" attempts to enqueue on a full buffer;
// see PtrQueue's zero-index handling protocol.
103 static void handle_zero_index_for_thread(JavaThread* t);
105 // Register "blk" as "the closure" for all queues. Only one such closure
106 // is allowed. The "apply_closure_to_completed_buffer" method will apply
107 // this closure to a completed buffer, and "iterate_closure_all_threads"
108 // applies it to partially-filled buffers (the latter should only be done
109 // with the world stopped).
110 void set_closure(CardTableEntryClosure* closure);
112 // If there is a registered closure for buffers, apply it to all entries
113 // in all currently-active buffers. This should only be applied at a
114 // safepoint. (Currently must not be called in parallel; this should
115 // change in the future.) If "consume" is true, processed entries are
116 // discarded.
117 void iterate_closure_all_threads(bool consume = true,
118 size_t worker_i = 0);
120 // If there exists some completed buffer, pop it, then apply the
121 // registered closure to all its elements, nulling out those elements
122 // processed. If all elements are processed, returns "true". If no
123 // completed buffers exist, returns false. If a completed buffer exists,
124 // but is only partially completed before a "yield" happens, the
125 // partially completed buffer (with its processed elements set to NULL)
126 // is returned to the completed buffer set, and this call returns false.
127 bool apply_closure_to_completed_buffer(int worker_i = 0,
128 int stop_at = 0,
129 bool during_pause = false);
131 // If there exists some completed buffer, pop it, then apply the
132 // specified closure to all its elements, nulling out those elements
133 // processed. If all elements are processed, returns "true". If no
134 // completed buffers exist, returns false. If a completed buffer exists,
135 // but is only partially completed before a "yield" happens, the
136 // partially completed buffer (with its processed elements set to NULL)
137 // is returned to the completed buffer set, and this call returns false.
138 bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
139 int worker_i = 0,
140 int stop_at = 0,
141 bool during_pause = false);
143 // Helper routine for the above.
144 bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
145 int worker_i,
146 BufferNode* nd);
// Pop a completed buffer, or NULL -- presumably when fewer than
// "stop_at" completed buffers remain; confirm in the .cpp.
148 BufferNode* get_completed_buffer(int stop_at);
150 // Applies the current closure to all completed buffers,
151 // non-consumptively.
152 void apply_closure_to_all_completed_buffers();
154 DirtyCardQueue* shared_dirty_card_queue() {
155 return &_shared_dirty_card_queue;
156 }
158 // Deallocate any completed log buffers
159 void clear();
161 // If a full collection is happening, reset partial logs, and ignore
162 // completed ones: the full collection will make them all irrelevant.
163 void abandon_logs();
165 // If any threads have partial logs, add them to the global list of logs.
166 void concatenate_logs();
// Reset the completed-buffer count (inherited from PtrQueueSet).
167 void clear_n_completed_buffers() { _n_completed_buffers = 0;}
// Statistics accessors for the counters above.
169 jint processed_buffers_mut() {
170 return _processed_buffers_mut;
171 }
172 jint processed_buffers_rs_thread() {
173 return _processed_buffers_rs_thread;
174 }
176 };
178 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP