src/share/vm/gc_implementation/g1/satbQueue.cpp

Tue, 21 Aug 2012 14:10:39 -0700

author
johnc
date
Tue, 21 Aug 2012 14:10:39 -0700
changeset 3998
7383557659bd
parent 3900
d2a62e0f25eb
child 4037
da91efe96a93
permissions
-rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso

ysr@777 1 /*
tonyp@3416 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
tonyp@2469 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 27 #include "gc_implementation/g1/satbQueue.hpp"
stefank@2314 28 #include "memory/allocation.inline.hpp"
stefank@2314 29 #include "memory/sharedHeap.hpp"
stefank@2314 30 #include "runtime/mutexLocker.hpp"
stefank@2314 31 #include "runtime/thread.hpp"
johnc@3175 32 #include "runtime/vmThread.hpp"
ysr@777 33
tonyp@3416 34 void ObjPtrQueue::flush() {
tonyp@3416 35 // The buffer might contain refs into the CSet. We have to filter it
tonyp@3416 36 // first before we flush it, otherwise we might end up with an
tonyp@3416 37 // enqueued buffer with refs into the CSet which breaks our invariants.
tonyp@3416 38 filter();
tonyp@3416 39 PtrQueue::flush();
tonyp@3416 40 }
tonyp@3416 41
tonyp@2469 42 // This method removes entries from an SATB buffer that will not be
tonyp@2469 43 // useful to the concurrent marking threads. An entry is removed if it
tonyp@2469 44 // satisfies one of the following conditions:
tonyp@2469 45 //
tonyp@2469 46 // * it points to an object outside the G1 heap (G1's concurrent
tonyp@2469 47 // marking only visits objects inside the G1 heap),
tonyp@2469 48 // * it points to an object that has been allocated since marking
tonyp@2469 49 // started (according to SATB those objects do not need to be
tonyp@2469 50 // visited during marking), or
tonyp@2469 51 // * it points to an object that has already been marked (no need to
tonyp@2469 52 // process it again).
tonyp@2469 53 //
tonyp@2469 54 // The rest of the entries will be retained and are compacted towards
tonyp@3416 55 // the top of the buffer. Note that, because we do not allow old
tonyp@3416 56 // regions in the CSet during marking, all objects on the CSet regions
tonyp@3416 57 // are young (eden or survivors) and therefore implicitly live. So any
tonyp@3416 58 // references into the CSet will be removed during filtering.
tonyp@2469 59
tonyp@3416 60 void ObjPtrQueue::filter() {
tonyp@2469 61 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2469 62 void** buf = _buf;
tonyp@2469 63 size_t sz = _sz;
tonyp@2469 64
tonyp@3416 65 if (buf == NULL) {
tonyp@3416 66 // nothing to do
tonyp@3416 67 return;
tonyp@3416 68 }
tonyp@3416 69
tonyp@2469 70 // Used for sanity checking at the end of the loop.
tonyp@2469 71 debug_only(size_t entries = 0; size_t retained = 0;)
tonyp@2469 72
tonyp@2469 73 size_t i = sz;
tonyp@2469 74 size_t new_index = sz;
tonyp@2469 75
tonyp@2469 76 while (i > _index) {
tonyp@2469 77 assert(i > 0, "we should have at least one more entry to process");
tonyp@2469 78 i -= oopSize;
tonyp@2469 79 debug_only(entries += 1;)
tonyp@2469 80 oop* p = (oop*) &buf[byte_index_to_index((int) i)];
tonyp@2469 81 oop obj = *p;
tonyp@2469 82 // NULL the entry so that unused parts of the buffer contain NULLs
tonyp@2469 83 // at the end. If we are going to retain it we will copy it to its
tonyp@2469 84 // final place. If we have retained all entries we have visited so
tonyp@2469 85 // far, we'll just end up copying it to the same place.
tonyp@2469 86 *p = NULL;
tonyp@2469 87
tonyp@2469 88 bool retain = g1h->is_obj_ill(obj);
tonyp@2469 89 if (retain) {
tonyp@2469 90 assert(new_index > 0, "we should not have already filled up the buffer");
tonyp@2469 91 new_index -= oopSize;
tonyp@2469 92 assert(new_index >= i,
tonyp@2469 93 "new_index should never be below i, as we alwaysr compact 'up'");
tonyp@2469 94 oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
tonyp@2469 95 assert(new_p >= p, "the destination location should never be below "
tonyp@2469 96 "the source as we always compact 'up'");
tonyp@2469 97 assert(*new_p == NULL,
tonyp@2469 98 "we should have already cleared the destination location");
tonyp@2469 99 *new_p = obj;
tonyp@2469 100 debug_only(retained += 1;)
tonyp@2469 101 }
tonyp@2469 102 }
tonyp@3416 103
tonyp@3416 104 #ifdef ASSERT
tonyp@2469 105 size_t entries_calc = (sz - _index) / oopSize;
tonyp@2469 106 assert(entries == entries_calc, "the number of entries we counted "
tonyp@2469 107 "should match the number of entries we calculated");
tonyp@2469 108 size_t retained_calc = (sz - new_index) / oopSize;
tonyp@2469 109 assert(retained == retained_calc, "the number of retained entries we counted "
tonyp@2469 110 "should match the number of retained entries we calculated");
tonyp@3416 111 #endif // ASSERT
tonyp@3416 112
tonyp@3416 113 _index = new_index;
tonyp@3416 114 }
tonyp@3416 115
tonyp@3416 116 // This method will first apply the above filtering to the buffer. If
tonyp@3416 117 // post-filtering a large enough chunk of the buffer has been cleared
tonyp@3416 118 // we can re-use the buffer (instead of enqueueing it) and we can just
tonyp@3416 119 // allow the mutator to carry on executing using the same buffer
tonyp@3416 120 // instead of replacing it.
tonyp@3416 121
tonyp@3416 122 bool ObjPtrQueue::should_enqueue_buffer() {
tonyp@3416 123 assert(_lock == NULL || _lock->owned_by_self(),
tonyp@3416 124 "we should have taken the lock before calling this");
tonyp@3416 125
tonyp@3416 126 // Even if G1SATBBufferEnqueueingThresholdPercent == 0 we have to
tonyp@3416 127 // filter the buffer given that this will remove any references into
tonyp@3416 128 // the CSet as we currently assume that no such refs will appear in
tonyp@3416 129 // enqueued buffers.
tonyp@3416 130
tonyp@3416 131 // This method should only be called if there is a non-NULL buffer
tonyp@3416 132 // that is full.
tonyp@3416 133 assert(_index == 0, "pre-condition");
tonyp@3416 134 assert(_buf != NULL, "pre-condition");
tonyp@3416 135
tonyp@3416 136 filter();
tonyp@3416 137
tonyp@3416 138 size_t sz = _sz;
tonyp@3416 139 size_t all_entries = sz / oopSize;
tonyp@3416 140 size_t retained_entries = (sz - _index) / oopSize;
tonyp@3416 141 size_t perc = retained_entries * 100 / all_entries;
tonyp@2469 142 bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
tonyp@2469 143 return should_enqueue;
tonyp@2469 144 }
tonyp@2469 145
ysr@777 146 void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
ysr@777 147 if (_buf != NULL) {
ysr@777 148 apply_closure_to_buffer(cl, _buf, _index, _sz);
tonyp@3416 149 }
tonyp@3416 150 }
tonyp@3416 151
tonyp@3416 152 void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
tonyp@3416 153 if (_buf != NULL) {
tonyp@3416 154 apply_closure_to_buffer(cl, _buf, _index, _sz);
ysr@777 155 _index = _sz;
ysr@777 156 }
ysr@777 157 }
ysr@777 158
ysr@777 159 void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
ysr@777 160 void** buf, size_t index, size_t sz) {
ysr@777 161 if (cl == NULL) return;
ysr@777 162 for (size_t i = index; i < sz; i += oopSize) {
ysr@777 163 oop obj = (oop)buf[byte_index_to_index((int)i)];
ysr@777 164 // There can be NULL entries because of destructors.
ysr@777 165 if (obj != NULL) {
ysr@777 166 cl->do_object(obj);
ysr@777 167 }
ysr@777 168 }
ysr@777 169 }
ysr@1280 170
tonyp@3416 171 #ifndef PRODUCT
tonyp@3416 172 // Helpful for debugging
tonyp@3416 173
tonyp@3416 174 void ObjPtrQueue::print(const char* name) {
tonyp@3416 175 print(name, _buf, _index, _sz);
tonyp@3416 176 }
tonyp@3416 177
tonyp@3416 178 void ObjPtrQueue::print(const char* name,
tonyp@3416 179 void** buf, size_t index, size_t sz) {
tonyp@3416 180 gclog_or_tty->print_cr(" SATB BUFFER [%s] buf: "PTR_FORMAT" "
tonyp@3416 181 "index: "SIZE_FORMAT" sz: "SIZE_FORMAT,
tonyp@3416 182 name, buf, index, sz);
tonyp@3416 183 }
tonyp@3416 184 #endif // PRODUCT
tonyp@3416 185
#ifdef ASSERT
void ObjPtrQueue::verify_oops_in_buffer() {
  // Debug-only sanity check: every active entry must be a valid,
  // non-NULL oop (the mark word is ignored during marking).
  if (_buf == NULL) {
    return;
  }
  for (size_t byte_pos = _index; byte_pos < _sz; byte_pos += oopSize) {
    oop obj = (oop) _buf[byte_index_to_index((int) byte_pos)];
    assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
           "Not an oop");
  }
}
#endif
ysr@1280 196
ysr@777 197 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 198 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 199 #endif // _MSC_VER
ysr@777 200
ysr@777 201 SATBMarkQueueSet::SATBMarkQueueSet() :
tonyp@3416 202 PtrQueueSet(), _closure(NULL), _par_closures(NULL),
tonyp@3416 203 _shared_satb_queue(this, true /*perm*/) { }
ysr@777 204
ysr@777 205 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
iveresov@1546 206 int process_completed_threshold,
ysr@777 207 Mutex* lock) {
iveresov@1546 208 PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
ysr@777 209 _shared_satb_queue.set_lock(lock);
ysr@777 210 if (ParallelGCThreads > 0) {
zgu@3900 211 _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
ysr@777 212 }
ysr@777 213 }
ysr@777 214
ysr@777 215 void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
ysr@1280 216 DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
ysr@777 217 t->satb_mark_queue().handle_zero_index();
ysr@777 218 }
ysr@777 219
#ifdef ASSERT
// Dump the active flag of the queue set and of every Java thread's
// SATB queue, alongside the value we expected; used just before a
// guarantee failure to aid diagnosis.
void SATBMarkQueueSet::dump_active_values(JavaThread* first,
                                          bool expected_active) {
  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
  gclog_or_tty->print_cr(" SATB queue set: active is %s",
                         is_active() ? "TRUE" : "FALSE");
  gclog_or_tty->print_cr(" expected_active is %s",
                         expected_active ? "TRUE" : "FALSE");
  for (JavaThread* jt = first; jt != NULL; jt = jt->next()) {
    bool queue_active = jt->satb_mark_queue().is_active();
    gclog_or_tty->print_cr(" thread %s, active is %s",
                           jt->name(), queue_active ? "TRUE" : "FALSE");
  }
}
#endif // ASSERT
tonyp@1752 235
tonyp@1752 236 void SATBMarkQueueSet::set_active_all_threads(bool b,
tonyp@1752 237 bool expected_active) {
tonyp@1752 238 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
tonyp@1752 239 JavaThread* first = Threads::first();
tonyp@1752 240
tonyp@1752 241 #ifdef ASSERT
tonyp@1752 242 if (_all_active != expected_active) {
tonyp@1752 243 dump_active_values(first, expected_active);
tonyp@1752 244
tonyp@1752 245 // I leave this here as a guarantee, instead of an assert, so
tonyp@1752 246 // that it will still be compiled in if we choose to uncomment
tonyp@1752 247 // the #ifdef ASSERT in a product build. The whole block is
tonyp@1752 248 // within an #ifdef ASSERT so the guarantee will not be compiled
tonyp@1752 249 // in a product build anyway.
tonyp@1752 250 guarantee(false,
tonyp@1752 251 "SATB queue set has an unexpected active value");
tonyp@1752 252 }
tonyp@1752 253 #endif // ASSERT
ysr@777 254 _all_active = b;
tonyp@1752 255
tonyp@1752 256 for (JavaThread* t = first; t; t = t->next()) {
tonyp@1752 257 #ifdef ASSERT
tonyp@1752 258 bool active = t->satb_mark_queue().is_active();
tonyp@1752 259 if (active != expected_active) {
tonyp@1752 260 dump_active_values(first, expected_active);
tonyp@1752 261
tonyp@1752 262 // I leave this here as a guarantee, instead of an assert, so
tonyp@1752 263 // that it will still be compiled in if we choose to uncomment
tonyp@1752 264 // the #ifdef ASSERT in a product build. The whole block is
tonyp@1752 265 // within an #ifdef ASSERT so the guarantee will not be compiled
tonyp@1752 266 // in a product build anyway.
tonyp@1752 267 guarantee(false,
tonyp@1752 268 "thread has an unexpected active value in its SATB queue");
tonyp@1752 269 }
tonyp@1752 270 #endif // ASSERT
ysr@777 271 t->satb_mark_queue().set_active(b);
ysr@777 272 }
ysr@777 273 }
ysr@777 274
tonyp@3416 275 void SATBMarkQueueSet::filter_thread_buffers() {
tonyp@3416 276 for(JavaThread* t = Threads::first(); t; t = t->next()) {
tonyp@3416 277 t->satb_mark_queue().filter();
tonyp@3416 278 }
tonyp@3416 279 shared_satb_queue()->filter();
tonyp@3416 280 }
tonyp@3416 281
ysr@777 282 void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
ysr@777 283 _closure = closure;
ysr@777 284 }
ysr@777 285
ysr@777 286 void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
ysr@777 287 assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
ysr@777 288 _par_closures[i] = par_closure;
ysr@777 289 }
ysr@777 290
ysr@777 291 void SATBMarkQueueSet::iterate_closure_all_threads() {
ysr@777 292 for(JavaThread* t = Threads::first(); t; t = t->next()) {
tonyp@3416 293 t->satb_mark_queue().apply_closure_and_empty(_closure);
ysr@777 294 }
tonyp@3416 295 shared_satb_queue()->apply_closure_and_empty(_closure);
ysr@777 296 }
ysr@777 297
ysr@777 298 void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
ysr@777 299 SharedHeap* sh = SharedHeap::heap();
ysr@777 300 int parity = sh->strong_roots_parity();
ysr@777 301
ysr@777 302 for(JavaThread* t = Threads::first(); t; t = t->next()) {
ysr@777 303 if (t->claim_oops_do(true, parity)) {
tonyp@3416 304 t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
ysr@777 305 }
ysr@777 306 }
johnc@3175 307
johnc@3175 308 // We also need to claim the VMThread so that its parity is updated
johnc@3175 309 // otherwise the next call to Thread::possibly_parallel_oops_do inside
johnc@3175 310 // a StrongRootsScope might skip the VMThread because it has a stale
johnc@3175 311 // parity that matches the parity set by the StrongRootsScope
johnc@3175 312 //
johnc@3175 313 // Whichever worker succeeds in claiming the VMThread gets to do
johnc@3175 314 // the shared queue.
johnc@3175 315
johnc@3175 316 VMThread* vmt = VMThread::vm_thread();
johnc@3175 317 if (vmt->claim_oops_do(true, parity)) {
tonyp@3416 318 shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
ysr@777 319 }
ysr@777 320 }
ysr@777 321
ysr@777 322 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
ysr@777 323 int worker) {
iveresov@1546 324 BufferNode* nd = NULL;
ysr@777 325 {
ysr@777 326 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
ysr@777 327 if (_completed_buffers_head != NULL) {
ysr@777 328 nd = _completed_buffers_head;
iveresov@1546 329 _completed_buffers_head = nd->next();
ysr@777 330 if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
ysr@777 331 _n_completed_buffers--;
ysr@777 332 if (_n_completed_buffers == 0) _process_completed = false;
ysr@777 333 }
ysr@777 334 }
ysr@777 335 ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
ysr@777 336 if (nd != NULL) {
iveresov@1546 337 void **buf = BufferNode::make_buffer_from_node(nd);
iveresov@1546 338 ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
iveresov@1546 339 deallocate_buffer(buf);
ysr@777 340 return true;
ysr@777 341 } else {
ysr@777 342 return false;
ysr@777 343 }
ysr@777 344 }
ysr@777 345
tonyp@3416 346 void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) {
tonyp@3416 347 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
tonyp@3416 348 assert(cl != NULL, "pre-condition");
tonyp@3416 349
tonyp@3416 350 BufferNode* nd = _completed_buffers_head;
tonyp@3416 351 while (nd != NULL) {
tonyp@3416 352 void** buf = BufferNode::make_buffer_from_node(nd);
tonyp@3416 353 ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
tonyp@3416 354 nd = nd->next();
tonyp@3416 355 }
tonyp@3416 356 }
tonyp@3416 357
tonyp@3416 358 void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) {
tonyp@3416 359 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
tonyp@3416 360 assert(cl != NULL, "pre-condition");
tonyp@3416 361
tonyp@3416 362 for (JavaThread* t = Threads::first(); t; t = t->next()) {
tonyp@3416 363 t->satb_mark_queue().apply_closure(cl);
tonyp@3416 364 }
tonyp@3416 365 shared_satb_queue()->apply_closure(cl);
tonyp@3416 366 }
tonyp@3416 367
tonyp@3416 368 #ifndef PRODUCT
tonyp@3416 369 // Helpful for debugging
tonyp@3416 370
tonyp@3416 371 #define SATB_PRINTER_BUFFER_SIZE 256
tonyp@3416 372
tonyp@3416 373 void SATBMarkQueueSet::print_all(const char* msg) {
tonyp@3416 374 char buffer[SATB_PRINTER_BUFFER_SIZE];
tonyp@3416 375 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
tonyp@3416 376
tonyp@3416 377 gclog_or_tty->cr();
tonyp@3416 378 gclog_or_tty->print_cr("SATB BUFFERS [%s]", msg);
tonyp@3416 379
tonyp@3416 380 BufferNode* nd = _completed_buffers_head;
tonyp@3416 381 int i = 0;
tonyp@3416 382 while (nd != NULL) {
tonyp@3416 383 void** buf = BufferNode::make_buffer_from_node(nd);
tonyp@3416 384 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
tonyp@3416 385 ObjPtrQueue::print(buffer, buf, 0, _sz);
tonyp@3416 386 nd = nd->next();
tonyp@3416 387 i += 1;
tonyp@3416 388 }
tonyp@3416 389
tonyp@3416 390 for (JavaThread* t = Threads::first(); t; t = t->next()) {
tonyp@3416 391 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
tonyp@3416 392 t->satb_mark_queue().print(buffer);
tonyp@3416 393 }
tonyp@3416 394
tonyp@3416 395 shared_satb_queue()->print("Shared");
tonyp@3416 396
tonyp@3416 397 gclog_or_tty->cr();
tonyp@3416 398 }
tonyp@3416 399 #endif // PRODUCT
tonyp@3416 400
ysr@777 401 void SATBMarkQueueSet::abandon_partial_marking() {
iveresov@1546 402 BufferNode* buffers_to_delete = NULL;
ysr@777 403 {
ysr@777 404 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
ysr@777 405 while (_completed_buffers_head != NULL) {
iveresov@1546 406 BufferNode* nd = _completed_buffers_head;
iveresov@1546 407 _completed_buffers_head = nd->next();
iveresov@1546 408 nd->set_next(buffers_to_delete);
ysr@777 409 buffers_to_delete = nd;
ysr@777 410 }
ysr@777 411 _completed_buffers_tail = NULL;
ysr@777 412 _n_completed_buffers = 0;
ysr@1280 413 DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
ysr@777 414 }
ysr@777 415 while (buffers_to_delete != NULL) {
iveresov@1546 416 BufferNode* nd = buffers_to_delete;
iveresov@1546 417 buffers_to_delete = nd->next();
iveresov@1546 418 deallocate_buffer(BufferNode::make_buffer_from_node(nd));
ysr@777 419 }
ysr@777 420 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
ysr@777 421 // So we can safely manipulate these queues.
ysr@777 422 for (JavaThread* t = Threads::first(); t; t = t->next()) {
ysr@777 423 t->satb_mark_queue().reset();
ysr@777 424 }
tonyp@3416 425 shared_satb_queue()->reset();
ysr@777 426 }

mercurial