src/share/vm/gc_implementation/g1/satbQueue.cpp

Thu, 22 Sep 2011 10:57:37 -0700

author
johnc
date
Thu, 22 Sep 2011 10:57:37 -0700
changeset 3175
4dfb2df418f2
parent 2469
7e37af9d69ef
child 3416
2ace1c4ee8da
permissions
-rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

ysr@777 1 /*
tonyp@2469 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
tonyp@2469 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 27 #include "gc_implementation/g1/satbQueue.hpp"
stefank@2314 28 #include "memory/allocation.inline.hpp"
stefank@2314 29 #include "memory/sharedHeap.hpp"
stefank@2314 30 #include "runtime/mutexLocker.hpp"
stefank@2314 31 #include "runtime/thread.hpp"
johnc@3175 32 #include "runtime/vmThread.hpp"
ysr@777 33
tonyp@2469 34 // This method removes entries from an SATB buffer that will not be
tonyp@2469 35 // useful to the concurrent marking threads. An entry is removed if it
tonyp@2469 36 // satisfies one of the following conditions:
tonyp@2469 37 //
tonyp@2469 38 // * it points to an object outside the G1 heap (G1's concurrent
tonyp@2469 39 // marking only visits objects inside the G1 heap),
tonyp@2469 40 // * it points to an object that has been allocated since marking
tonyp@2469 41 // started (according to SATB those objects do not need to be
tonyp@2469 42 // visited during marking), or
tonyp@2469 43 // * it points to an object that has already been marked (no need to
tonyp@2469 44 // process it again).
tonyp@2469 45 //
tonyp@2469 46 // The rest of the entries will be retained and are compacted towards
tonyp@2469 47 // the top of the buffer. If with this filtering we clear a large
tonyp@2469 48 // enough chunk of the buffer we can re-use it (instead of enqueueing
tonyp@2469 49 // it) and we can just allow the mutator to carry on executing.
tonyp@2469 50
tonyp@2469 51 bool ObjPtrQueue::should_enqueue_buffer() {
tonyp@2469 52 assert(_lock == NULL || _lock->owned_by_self(),
tonyp@2469 53 "we should have taken the lock before calling this");
tonyp@2469 54
tonyp@2469 55 // A value of 0 means "don't filter SATB buffers".
tonyp@2469 56 if (G1SATBBufferEnqueueingThresholdPercent == 0) {
tonyp@2469 57 return true;
tonyp@2469 58 }
tonyp@2469 59
tonyp@2469 60 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2469 61
tonyp@2469 62 // This method should only be called if there is a non-NULL buffer
tonyp@2469 63 // that is full.
tonyp@2469 64 assert(_index == 0, "pre-condition");
tonyp@2469 65 assert(_buf != NULL, "pre-condition");
tonyp@2469 66
tonyp@2469 67 void** buf = _buf;
tonyp@2469 68 size_t sz = _sz;
tonyp@2469 69
tonyp@2469 70 // Used for sanity checking at the end of the loop.
tonyp@2469 71 debug_only(size_t entries = 0; size_t retained = 0;)
tonyp@2469 72
tonyp@2469 73 size_t i = sz;
tonyp@2469 74 size_t new_index = sz;
tonyp@2469 75
tonyp@2469 76 // Given that we are expecting _index == 0, we could have changed
tonyp@2469 77 // the loop condition to (i > 0). But we are using _index for
tonyp@2469 78 // generality.
tonyp@2469 79 while (i > _index) {
tonyp@2469 80 assert(i > 0, "we should have at least one more entry to process");
tonyp@2469 81 i -= oopSize;
tonyp@2469 82 debug_only(entries += 1;)
tonyp@2469 83 oop* p = (oop*) &buf[byte_index_to_index((int) i)];
tonyp@2469 84 oop obj = *p;
tonyp@2469 85 // NULL the entry so that unused parts of the buffer contain NULLs
tonyp@2469 86 // at the end. If we are going to retain it we will copy it to its
tonyp@2469 87 // final place. If we have retained all entries we have visited so
tonyp@2469 88 // far, we'll just end up copying it to the same place.
tonyp@2469 89 *p = NULL;
tonyp@2469 90
tonyp@2469 91 bool retain = g1h->is_obj_ill(obj);
tonyp@2469 92 if (retain) {
tonyp@2469 93 assert(new_index > 0, "we should not have already filled up the buffer");
tonyp@2469 94 new_index -= oopSize;
tonyp@2469 95 assert(new_index >= i,
tonyp@2469 96 "new_index should never be below i, as we alwaysr compact 'up'");
tonyp@2469 97 oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
tonyp@2469 98 assert(new_p >= p, "the destination location should never be below "
tonyp@2469 99 "the source as we always compact 'up'");
tonyp@2469 100 assert(*new_p == NULL,
tonyp@2469 101 "we should have already cleared the destination location");
tonyp@2469 102 *new_p = obj;
tonyp@2469 103 debug_only(retained += 1;)
tonyp@2469 104 }
tonyp@2469 105 }
tonyp@2469 106 size_t entries_calc = (sz - _index) / oopSize;
tonyp@2469 107 assert(entries == entries_calc, "the number of entries we counted "
tonyp@2469 108 "should match the number of entries we calculated");
tonyp@2469 109 size_t retained_calc = (sz - new_index) / oopSize;
tonyp@2469 110 assert(retained == retained_calc, "the number of retained entries we counted "
tonyp@2469 111 "should match the number of retained entries we calculated");
tonyp@2469 112 size_t perc = retained_calc * 100 / entries_calc;
tonyp@2469 113 bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
tonyp@2469 114 _index = new_index;
tonyp@2469 115
tonyp@2469 116 return should_enqueue;
tonyp@2469 117 }
tonyp@2469 118
ysr@777 119 void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
ysr@777 120 if (_buf != NULL) {
ysr@777 121 apply_closure_to_buffer(cl, _buf, _index, _sz);
ysr@777 122 _index = _sz;
ysr@777 123 }
ysr@777 124 }
ysr@777 125
ysr@777 126 void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
ysr@777 127 void** buf, size_t index, size_t sz) {
ysr@777 128 if (cl == NULL) return;
ysr@777 129 for (size_t i = index; i < sz; i += oopSize) {
ysr@777 130 oop obj = (oop)buf[byte_index_to_index((int)i)];
ysr@777 131 // There can be NULL entries because of destructors.
ysr@777 132 if (obj != NULL) {
ysr@777 133 cl->do_object(obj);
ysr@777 134 }
ysr@777 135 }
ysr@777 136 }
ysr@1280 137
#ifdef ASSERT
// Debug-only sanity check: every active entry in the buffer (byte
// offsets [_index, _sz)) must be a non-NULL, well-formed oop. The
// mark word is deliberately ignored by passing true to is_oop.
void ObjPtrQueue::verify_oops_in_buffer() {
  if (_buf == NULL) return;
  for (size_t i = _index; i < _sz; i += oopSize) {
    oop obj = (oop)_buf[byte_index_to_index((int)i)];
    assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
           "Not an oop");
  }
}
#endif
ysr@1280 148
ysr@777 149 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 150 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 151 #endif // _MSC_VER
ysr@777 152
ysr@777 153
// Construct an SATB mark queue set with no (serial or parallel)
// closures registered yet; the embedded shared queue is created as
// permanent (see the /*perm*/ argument).
SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(),
  _closure(NULL), _par_closures(NULL),
  _shared_satb_queue(this, true /*perm*/)
{}
ysr@777 159
// Set up the queue set: forward the monitor/lock/threshold to the base
// PtrQueueSet, give the shared queue its lock, and, when running with
// parallel GC threads, allocate one closure slot per worker.
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                  int process_completed_threshold,
                                  Mutex* lock) {
  // The final -1 is the max_completed_queue argument; presumably it
  // means "no bound" — confirm against PtrQueueSet::initialize.
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
  _shared_satb_queue.set_lock(lock);
  if (ParallelGCThreads > 0) {
    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
  }
}
ysr@777 169
ysr@777 170
// Called when thread 't' fills its SATB queue (index reached zero);
// verifies the buffer contents in debug builds, then lets the queue
// enqueue/refresh its buffer.
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
  t->satb_mark_queue().handle_zero_index();
}
ysr@777 175
#ifdef ASSERT
// Debug aid: print the active flag of the queue set, the expected
// value, and the active flag of every Java thread's SATB queue,
// starting the walk at 'first'. Used just before the guarantees in
// set_active_all_threads fire.
void SATBMarkQueueSet::dump_active_values(JavaThread* first,
                                          bool expected_active) {
  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
  gclog_or_tty->print_cr(" SATB queue set: active is %s",
                         (is_active()) ? "TRUE" : "FALSE");
  gclog_or_tty->print_cr(" expected_active is %s",
                         (expected_active) ? "TRUE" : "FALSE");
  for (JavaThread* t = first; t; t = t->next()) {
    bool active = t->satb_mark_queue().is_active();
    gclog_or_tty->print_cr("  thread %s, active is %s",
                           t->name(), (active) ? "TRUE" : "FALSE");
  }
}
#endif // ASSERT
tonyp@1752 191
// Set the active flag of the queue set and of every Java thread's SATB
// queue to 'b'. Must be called at a safepoint so the thread list and
// the per-thread queues cannot change underneath us. In debug builds,
// 'expected_active' is checked against the current flag of the set and
// of each thread's queue; a mismatch dumps all values and fails.
void SATBMarkQueueSet::set_active_all_threads(bool b,
                                              bool expected_active) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  JavaThread* first = Threads::first();

#ifdef ASSERT
  if (_all_active != expected_active) {
    dump_active_values(first, expected_active);

    // I leave this here as a guarantee, instead of an assert, so
    // that it will still be compiled in if we choose to uncomment
    // the #ifdef ASSERT in a product build. The whole block is
    // within an #ifdef ASSERT so the guarantee will not be compiled
    // in a product build anyway.
    guarantee(false,
              "SATB queue set has an unexpected active value");
  }
#endif // ASSERT
  _all_active = b;

  for (JavaThread* t = first; t; t = t->next()) {
#ifdef ASSERT
    bool active = t->satb_mark_queue().is_active();
    if (active != expected_active) {
      dump_active_values(first, expected_active);

      // I leave this here as a guarantee, instead of an assert, so
      // that it will still be compiled in if we choose to uncomment
      // the #ifdef ASSERT in a product build. The whole block is
      // within an #ifdef ASSERT so the guarantee will not be compiled
      // in a product build anyway.
      guarantee(false,
                "thread has an unexpected active value in its SATB queue");
    }
#endif // ASSERT
    t->satb_mark_queue().set_active(b);
  }
}
ysr@777 230
// Register the closure used by the serial iteration paths
// (iterate_closure_all_threads and the non-parallel completed-buffer
// processing).
void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
  _closure = closure;
}
ysr@777 234
// Register the closure used by parallel worker 'i'. Only valid when
// running with parallel GC threads (the slot array was allocated in
// initialize()).
void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
  assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
  _par_closures[i] = par_closure;
}
ysr@777 239
ysr@777 240 void SATBMarkQueueSet::iterate_closure_all_threads() {
ysr@777 241 for(JavaThread* t = Threads::first(); t; t = t->next()) {
ysr@777 242 t->satb_mark_queue().apply_closure(_closure);
ysr@777 243 }
ysr@777 244 shared_satb_queue()->apply_closure(_closure);
ysr@777 245 }
ysr@777 246
// Parallel variant: each worker walks the thread list and applies its
// per-worker closure to the SATB queue of every thread it manages to
// claim (claim_oops_do uses the strong-roots parity to ensure each
// thread's queue is processed by exactly one worker).
void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
  SharedHeap* sh = SharedHeap::heap();
  int parity = sh->strong_roots_parity();

  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    if (t->claim_oops_do(true, parity)) {
      t->satb_mark_queue().apply_closure(_par_closures[worker]);
    }
  }

  // We also need to claim the VMThread so that its parity is updated
  // otherwise the next call to Thread::possibly_parallel_oops_do inside
  // a StrongRootsScope might skip the VMThread because it has a stale
  // parity that matches the parity set by the StrongRootsScope
  //
  // Whichever worker succeeds in claiming the VMThread gets to do
  // the shared queue.

  VMThread* vmt = VMThread::vm_thread();
  if (vmt->claim_oops_do(true, parity)) {
    shared_satb_queue()->apply_closure(_par_closures[worker]);
  }
}
ysr@777 270
ysr@777 271 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
ysr@777 272 int worker) {
iveresov@1546 273 BufferNode* nd = NULL;
ysr@777 274 {
ysr@777 275 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
ysr@777 276 if (_completed_buffers_head != NULL) {
ysr@777 277 nd = _completed_buffers_head;
iveresov@1546 278 _completed_buffers_head = nd->next();
ysr@777 279 if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
ysr@777 280 _n_completed_buffers--;
ysr@777 281 if (_n_completed_buffers == 0) _process_completed = false;
ysr@777 282 }
ysr@777 283 }
ysr@777 284 ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
ysr@777 285 if (nd != NULL) {
iveresov@1546 286 void **buf = BufferNode::make_buffer_from_node(nd);
iveresov@1546 287 ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
iveresov@1546 288 deallocate_buffer(buf);
ysr@777 289 return true;
ysr@777 290 } else {
ysr@777 291 return false;
ysr@777 292 }
ysr@777 293 }
ysr@777 294
// Discard all SATB data accumulated so far: unlink the entire
// completed-buffer list (under _cbl_mon), deallocate the buffers
// outside the lock, and reset every thread's queue plus the shared
// queue. Must run at a safepoint so thread queues can be touched
// without their owners racing us.
void SATBMarkQueueSet::abandon_partial_marking() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    // Detach the whole list onto a local chain so deallocation can
    // happen after the lock is released.
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  // So we can safely manipulate these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().reset();
  }
  shared_satb_queue()->reset();
}

mercurial