Wed, 19 Jan 2011 09:35:17 -0500
7011379: G1: overly long concurrent marking cycles
Summary: This changeset introduces filtering of SATB buffers at the point when they are about to be enqueued. If this filtering clears enough entries on each buffer, the buffer can then be re-used and not enqueued. This cuts down the number of SATB buffers that need to be processed by the concurrent marking threads.
Reviewed-by: johnc, ysr
1 /*
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
27 #include "gc_implementation/g1/satbQueue.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/sharedHeap.hpp"
30 #include "runtime/mutexLocker.hpp"
31 #include "runtime/thread.hpp"
33 // This method removes entries from an SATB buffer that will not be
34 // useful to the concurrent marking threads. An entry is removed if it
35 // satisfies one of the following conditions:
36 //
37 // * it points to an object outside the G1 heap (G1's concurrent
38 // marking only visits objects inside the G1 heap),
39 // * it points to an object that has been allocated since marking
40 // started (according to SATB those objects do not need to be
41 // visited during marking), or
42 // * it points to an object that has already been marked (no need to
43 // process it again).
44 //
45 // The rest of the entries will be retained and are compacted towards
46 // the top of the buffer. If with this filtering we clear a large
47 // enough chunk of the buffer we can re-use it (instead of enqueueing
48 // it) and we can just allow the mutator to carry on executing.
50 bool ObjPtrQueue::should_enqueue_buffer() {
51 assert(_lock == NULL || _lock->owned_by_self(),
52 "we should have taken the lock before calling this");
54 // A value of 0 means "don't filter SATB buffers".
55 if (G1SATBBufferEnqueueingThresholdPercent == 0) {
56 return true;
57 }
59 G1CollectedHeap* g1h = G1CollectedHeap::heap();
61 // This method should only be called if there is a non-NULL buffer
62 // that is full.
63 assert(_index == 0, "pre-condition");
64 assert(_buf != NULL, "pre-condition");
66 void** buf = _buf;
67 size_t sz = _sz;
69 // Used for sanity checking at the end of the loop.
70 debug_only(size_t entries = 0; size_t retained = 0;)
72 size_t i = sz;
73 size_t new_index = sz;
75 // Given that we are expecting _index == 0, we could have changed
76 // the loop condition to (i > 0). But we are using _index for
77 // generality.
78 while (i > _index) {
79 assert(i > 0, "we should have at least one more entry to process");
80 i -= oopSize;
81 debug_only(entries += 1;)
82 oop* p = (oop*) &buf[byte_index_to_index((int) i)];
83 oop obj = *p;
84 // NULL the entry so that unused parts of the buffer contain NULLs
85 // at the end. If we are going to retain it we will copy it to its
86 // final place. If we have retained all entries we have visited so
87 // far, we'll just end up copying it to the same place.
88 *p = NULL;
90 bool retain = g1h->is_obj_ill(obj);
91 if (retain) {
92 assert(new_index > 0, "we should not have already filled up the buffer");
93 new_index -= oopSize;
94 assert(new_index >= i,
95 "new_index should never be below i, as we alwaysr compact 'up'");
96 oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
97 assert(new_p >= p, "the destination location should never be below "
98 "the source as we always compact 'up'");
99 assert(*new_p == NULL,
100 "we should have already cleared the destination location");
101 *new_p = obj;
102 debug_only(retained += 1;)
103 }
104 }
105 size_t entries_calc = (sz - _index) / oopSize;
106 assert(entries == entries_calc, "the number of entries we counted "
107 "should match the number of entries we calculated");
108 size_t retained_calc = (sz - new_index) / oopSize;
109 assert(retained == retained_calc, "the number of retained entries we counted "
110 "should match the number of retained entries we calculated");
111 size_t perc = retained_calc * 100 / entries_calc;
112 bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
113 _index = new_index;
115 return should_enqueue;
116 }
118 void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
119 if (_buf != NULL) {
120 apply_closure_to_buffer(cl, _buf, _index, _sz);
121 _index = _sz;
122 }
123 }
125 void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
126 void** buf, size_t index, size_t sz) {
127 if (cl == NULL) return;
128 for (size_t i = index; i < sz; i += oopSize) {
129 oop obj = (oop)buf[byte_index_to_index((int)i)];
130 // There can be NULL entries because of destructors.
131 if (obj != NULL) {
132 cl->do_object(obj);
133 }
134 }
135 }
#ifdef ASSERT
// Debug-only sanity check: every live entry in the buffer must be a
// non-NULL, well-formed oop.
void ObjPtrQueue::verify_oops_in_buffer() {
  if (_buf == NULL) {
    return;
  }
  for (size_t byte_idx = _index; byte_idx < _sz; byte_idx += oopSize) {
    oop entry = (oop) _buf[byte_index_to_index((int) byte_idx)];
    assert(entry != NULL && entry->is_oop(true /* ignore mark word */),
           "Not an oop");
  }
}
#endif
148 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
149 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
150 #endif // _MSC_VER
// Construct an empty SATB queue set. The serial and parallel
// closures are registered later via set_closure()/set_par_closure(),
// and the shared queue's lock is installed in initialize(). The
// shared queue is created as permanent (perm == true).
SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(),
  _closure(NULL), _par_closures(NULL),
  _shared_satb_queue(this, true /*perm*/)
{}
// Set up the queue set: forward the monitor/lock parameters to the
// base class (the trailing -1 is presumably an "unbounded" completed
// queue limit — confirm against PtrQueueSet::initialize), install the
// lock protecting the shared SATB queue, and allocate the per-worker
// closure array when running with parallel GC threads.
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                  int process_completed_threshold,
                                  Mutex* lock) {
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
  _shared_satb_queue.set_lock(lock);
  if (ParallelGCThreads > 0) {
    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
  }
}
170 void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
171 DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
172 t->satb_mark_queue().handle_zero_index();
173 }
#ifdef ASSERT
// Debug aid: print the queue set's active flag, the expected value,
// and the active flag of each Java thread's SATB queue, to help
// diagnose active-flag mismatches.
void SATBMarkQueueSet::dump_active_values(JavaThread* first,
                                          bool expected_active) {
  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
  gclog_or_tty->print_cr(" SATB queue set: active is %s",
                         (is_active()) ? "TRUE" : "FALSE");
  gclog_or_tty->print_cr(" expected_active is %s",
                         (expected_active) ? "TRUE" : "FALSE");
  for (JavaThread* curr = first; curr != NULL; curr = curr->next()) {
    bool curr_active = curr->satb_mark_queue().is_active();
    gclog_or_tty->print_cr(" thread %s, active is %s",
                           curr->name(), (curr_active) ? "TRUE" : "FALSE");
  }
}
#endif // ASSERT
// Set the active flag of the SATB queue set, and of every Java
// thread's SATB queue, to 'b'. Runs at a safepoint so the thread
// list and the queues are stable while we walk them.
//
// 'expected_active' is the value all active flags are expected to
// currently hold; in debug builds a mismatch dumps all the flags
// and fails a guarantee.
void SATBMarkQueueSet::set_active_all_threads(bool b,
                                              bool expected_active) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  JavaThread* first = Threads::first();

#ifdef ASSERT
  if (_all_active != expected_active) {
    dump_active_values(first, expected_active);

    // I leave this here as a guarantee, instead of an assert, so
    // that it will still be compiled in if we choose to uncomment
    // the #ifdef ASSERT in a product build. The whole block is
    // within an #ifdef ASSERT so the guarantee will not be compiled
    // in a product build anyway.
    guarantee(false,
              "SATB queue set has an unexpected active value");
  }
#endif // ASSERT
  _all_active = b;

  for (JavaThread* t = first; t; t = t->next()) {
#ifdef ASSERT
    // Each per-thread flag must also match 'expected_active' before
    // it is flipped.
    bool active = t->satb_mark_queue().is_active();
    if (active != expected_active) {
      dump_active_values(first, expected_active);

      // I leave this here as a guarantee, instead of an assert, so
      // that it will still be compiled in if we choose to uncomment
      // the #ifdef ASSERT in a product build. The whole block is
      // within an #ifdef ASSERT so the guarantee will not be compiled
      // in a product build anyway.
      guarantee(false,
                "thread has an unexpected active value in its SATB queue");
    }
#endif // ASSERT
    t->satb_mark_queue().set_active(b);
  }
}
// Register the closure used for serial SATB buffer processing.
void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
  _closure = closure;
}
// Register the closure used by parallel worker 'i'. Only valid when
// running with parallel GC threads (the closure array is allocated
// in initialize() in that case).
void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
  assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
  _par_closures[i] = par_closure;
}
239 void SATBMarkQueueSet::iterate_closure_all_threads() {
240 for(JavaThread* t = Threads::first(); t; t = t->next()) {
241 t->satb_mark_queue().apply_closure(_closure);
242 }
243 shared_satb_queue()->apply_closure(_closure);
244 }
246 void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
247 SharedHeap* sh = SharedHeap::heap();
248 int parity = sh->strong_roots_parity();
250 for(JavaThread* t = Threads::first(); t; t = t->next()) {
251 if (t->claim_oops_do(true, parity)) {
252 t->satb_mark_queue().apply_closure(_par_closures[worker]);
253 }
254 }
255 // We'll have worker 0 do this one.
256 if (worker == 0) {
257 shared_satb_queue()->apply_closure(_par_closures[0]);
258 }
259 }
// Pop one completed buffer off the shared list (under _cbl_mon) and
// apply the appropriate closure to it: the per-worker closure when
// 'par' is true, the serial closure otherwise. Returns true if a
// buffer was processed, false if the list was empty.
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                              int worker) {
  BufferNode* nd = NULL;
  {
    // Detach the head node inside the critical section; the closure
    // is applied after the lock is released.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    if (_completed_buffers_head != NULL) {
      nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
      _n_completed_buffers--;
      if (_n_completed_buffers == 0) _process_completed = false;
    }
  }
  ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    // Process the whole buffer (byte offsets 0 to _sz).
    ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
    deallocate_buffer(buf);
    return true;
  } else {
    return false;
  }
}
// Discard all accumulated SATB data: free every completed buffer and
// reset each Java thread's queue as well as the shared queue.
// Presumably called when a concurrent marking cycle is aborted
// (judging by the name — confirm against callers).
void SATBMarkQueueSet::abandon_partial_marking() {
  BufferNode* buffers_to_delete = NULL;
  {
    // Unlink the entire completed-buffer list while holding the
    // lock; the deallocation happens outside the critical section.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  // So we can safely manipulate these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().reset();
  }
  shared_satb_queue()->reset();
}