Fri, 10 Oct 2014 15:51:58 +0200
8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, because those benchmarks do not touch that memory at all, so the operating system never actually commits the pages. The fix: if the initialization value of a data structure matches the default value of freshly committed memory (zero), do nothing.
Reviewed-by: jwilhelm, brutisso
ysr@777 | 1 | /* |
drchase@6680 | 2 | * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
tonyp@2469 | 26 | #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
stefank@2314 | 27 | #include "gc_implementation/g1/satbQueue.hpp" |
stefank@2314 | 28 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 29 | #include "memory/sharedHeap.hpp" |
coleenp@4037 | 30 | #include "oops/oop.inline.hpp" |
stefank@2314 | 31 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 32 | #include "runtime/thread.hpp" |
johnc@3175 | 33 | #include "runtime/vmThread.hpp" |
ysr@777 | 34 | |
drchase@6680 | 35 | PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
drchase@6680 | 36 | |
tonyp@3416 | 37 | void ObjPtrQueue::flush() { |
tonyp@3416 | 38 | // The buffer might contain refs into the CSet. We have to filter it |
tonyp@3416 | 39 | // first before we flush it, otherwise we might end up with an |
tonyp@3416 | 40 | // enqueued buffer with refs into the CSet which breaks our invariants. |
tonyp@3416 | 41 | filter(); |
tonyp@3416 | 42 | PtrQueue::flush(); |
tonyp@3416 | 43 | } |
tonyp@3416 | 44 | |
tonyp@2469 | 45 | // This method removes entries from an SATB buffer that will not be |
tonyp@2469 | 46 | // useful to the concurrent marking threads. An entry is removed if it |
tonyp@2469 | 47 | // satisfies one of the following conditions: |
tonyp@2469 | 48 | // |
tonyp@2469 | 49 | // * it points to an object outside the G1 heap (G1's concurrent |
tonyp@2469 | 50 | // marking only visits objects inside the G1 heap), |
tonyp@2469 | 51 | // * it points to an object that has been allocated since marking |
tonyp@2469 | 52 | // started (according to SATB those objects do not need to be |
tonyp@2469 | 53 | // visited during marking), or |
tonyp@2469 | 54 | // * it points to an object that has already been marked (no need to |
tonyp@2469 | 55 | // process it again). |
tonyp@2469 | 56 | // |
tonyp@2469 | 57 | // The rest of the entries will be retained and are compacted towards |
tonyp@3416 | 58 | // the top of the buffer. Note that, because we do not allow old |
tonyp@3416 | 59 | // regions in the CSet during marking, all objects on the CSet regions |
tonyp@3416 | 60 | // are young (eden or survivors) and therefore implicitly live. So any |
tonyp@3416 | 61 | // references into the CSet will be removed during filtering. |
tonyp@2469 | 62 | |
tonyp@3416 | 63 | void ObjPtrQueue::filter() { |
tonyp@2469 | 64 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@2469 | 65 | void** buf = _buf; |
tonyp@2469 | 66 | size_t sz = _sz; |
tonyp@2469 | 67 | |
tonyp@3416 | 68 | if (buf == NULL) { |
tonyp@3416 | 69 | // nothing to do |
tonyp@3416 | 70 | return; |
tonyp@3416 | 71 | } |
tonyp@3416 | 72 | |
tonyp@2469 | 73 | // Used for sanity checking at the end of the loop. |
tonyp@2469 | 74 | debug_only(size_t entries = 0; size_t retained = 0;) |
tonyp@2469 | 75 | |
tonyp@2469 | 76 | size_t i = sz; |
tonyp@2469 | 77 | size_t new_index = sz; |
tonyp@2469 | 78 | |
tonyp@2469 | 79 | while (i > _index) { |
tonyp@2469 | 80 | assert(i > 0, "we should have at least one more entry to process"); |
tonyp@2469 | 81 | i -= oopSize; |
tonyp@2469 | 82 | debug_only(entries += 1;) |
tonyp@2469 | 83 | oop* p = (oop*) &buf[byte_index_to_index((int) i)]; |
tonyp@2469 | 84 | oop obj = *p; |
tonyp@2469 | 85 | // NULL the entry so that unused parts of the buffer contain NULLs |
tonyp@2469 | 86 | // at the end. If we are going to retain it we will copy it to its |
tonyp@2469 | 87 | // final place. If we have retained all entries we have visited so |
tonyp@2469 | 88 | // far, we'll just end up copying it to the same place. |
tonyp@2469 | 89 | *p = NULL; |
tonyp@2469 | 90 | |
tonyp@2469 | 91 | bool retain = g1h->is_obj_ill(obj); |
tonyp@2469 | 92 | if (retain) { |
tonyp@2469 | 93 | assert(new_index > 0, "we should not have already filled up the buffer"); |
tonyp@2469 | 94 | new_index -= oopSize; |
tonyp@2469 | 95 | assert(new_index >= i, |
tonyp@2469 | 96 | "new_index should never be below i, as we alwaysr compact 'up'"); |
tonyp@2469 | 97 | oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)]; |
tonyp@2469 | 98 | assert(new_p >= p, "the destination location should never be below " |
tonyp@2469 | 99 | "the source as we always compact 'up'"); |
tonyp@2469 | 100 | assert(*new_p == NULL, |
tonyp@2469 | 101 | "we should have already cleared the destination location"); |
tonyp@2469 | 102 | *new_p = obj; |
tonyp@2469 | 103 | debug_only(retained += 1;) |
tonyp@2469 | 104 | } |
tonyp@2469 | 105 | } |
tonyp@3416 | 106 | |
tonyp@3416 | 107 | #ifdef ASSERT |
tonyp@2469 | 108 | size_t entries_calc = (sz - _index) / oopSize; |
tonyp@2469 | 109 | assert(entries == entries_calc, "the number of entries we counted " |
tonyp@2469 | 110 | "should match the number of entries we calculated"); |
tonyp@2469 | 111 | size_t retained_calc = (sz - new_index) / oopSize; |
tonyp@2469 | 112 | assert(retained == retained_calc, "the number of retained entries we counted " |
tonyp@2469 | 113 | "should match the number of retained entries we calculated"); |
tonyp@3416 | 114 | #endif // ASSERT |
tonyp@3416 | 115 | |
tonyp@3416 | 116 | _index = new_index; |
tonyp@3416 | 117 | } |
tonyp@3416 | 118 | |
tonyp@3416 | 119 | // This method will first apply the above filtering to the buffer. If |
tonyp@3416 | 120 | // post-filtering a large enough chunk of the buffer has been cleared |
tonyp@3416 | 121 | // we can re-use the buffer (instead of enqueueing it) and we can just |
tonyp@3416 | 122 | // allow the mutator to carry on executing using the same buffer |
tonyp@3416 | 123 | // instead of replacing it. |
tonyp@3416 | 124 | |
tonyp@3416 | 125 | bool ObjPtrQueue::should_enqueue_buffer() { |
tonyp@3416 | 126 | assert(_lock == NULL || _lock->owned_by_self(), |
tonyp@3416 | 127 | "we should have taken the lock before calling this"); |
tonyp@3416 | 128 | |
tonyp@3416 | 129 | // Even if G1SATBBufferEnqueueingThresholdPercent == 0 we have to |
tonyp@3416 | 130 | // filter the buffer given that this will remove any references into |
tonyp@3416 | 131 | // the CSet as we currently assume that no such refs will appear in |
tonyp@3416 | 132 | // enqueued buffers. |
tonyp@3416 | 133 | |
tonyp@3416 | 134 | // This method should only be called if there is a non-NULL buffer |
tonyp@3416 | 135 | // that is full. |
tonyp@3416 | 136 | assert(_index == 0, "pre-condition"); |
tonyp@3416 | 137 | assert(_buf != NULL, "pre-condition"); |
tonyp@3416 | 138 | |
tonyp@3416 | 139 | filter(); |
tonyp@3416 | 140 | |
tonyp@3416 | 141 | size_t sz = _sz; |
tonyp@3416 | 142 | size_t all_entries = sz / oopSize; |
tonyp@3416 | 143 | size_t retained_entries = (sz - _index) / oopSize; |
tonyp@3416 | 144 | size_t perc = retained_entries * 100 / all_entries; |
tonyp@2469 | 145 | bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent; |
tonyp@2469 | 146 | return should_enqueue; |
tonyp@2469 | 147 | } |
tonyp@2469 | 148 | |
ysr@777 | 149 | void ObjPtrQueue::apply_closure(ObjectClosure* cl) { |
ysr@777 | 150 | if (_buf != NULL) { |
ysr@777 | 151 | apply_closure_to_buffer(cl, _buf, _index, _sz); |
tonyp@3416 | 152 | } |
tonyp@3416 | 153 | } |
tonyp@3416 | 154 | |
tonyp@3416 | 155 | void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) { |
tonyp@3416 | 156 | if (_buf != NULL) { |
tonyp@3416 | 157 | apply_closure_to_buffer(cl, _buf, _index, _sz); |
ysr@777 | 158 | _index = _sz; |
ysr@777 | 159 | } |
ysr@777 | 160 | } |
ysr@777 | 161 | |
ysr@777 | 162 | void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl, |
ysr@777 | 163 | void** buf, size_t index, size_t sz) { |
ysr@777 | 164 | if (cl == NULL) return; |
ysr@777 | 165 | for (size_t i = index; i < sz; i += oopSize) { |
ysr@777 | 166 | oop obj = (oop)buf[byte_index_to_index((int)i)]; |
ysr@777 | 167 | // There can be NULL entries because of destructors. |
ysr@777 | 168 | if (obj != NULL) { |
ysr@777 | 169 | cl->do_object(obj); |
ysr@777 | 170 | } |
ysr@777 | 171 | } |
ysr@777 | 172 | } |
ysr@1280 | 173 | |
#ifndef PRODUCT
// Helpful for debugging

// Print this queue's own buffer state under the given label.
void ObjPtrQueue::print(const char* name) {
  print(name, _buf, _index, _sz);
}

// Print a one-line summary of an arbitrary SATB buffer: its address,
// the byte index of the first active entry, and the buffer size in bytes.
void ObjPtrQueue::print(const char* name,
                        void** buf, size_t index, size_t sz) {
  gclog_or_tty->print_cr(" SATB BUFFER [%s] buf: "PTR_FORMAT" "
                         "index: "SIZE_FORMAT" sz: "SIZE_FORMAT,
                         name, buf, index, sz);
}
#endif // PRODUCT
tonyp@3416 | 188 | |
#ifdef ASSERT
// Debug-only sanity check: every active entry ([_index, _sz)) in the
// buffer must be a non-NULL, well-formed oop (mark word ignored).
void ObjPtrQueue::verify_oops_in_buffer() {
  if (_buf == NULL) return;
  for (size_t i = _index; i < _sz; i += oopSize) {
    oop obj = (oop)_buf[byte_index_to_index((int)i)];
    assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
           "Not an oop");
  }
}
#endif
ysr@1280 | 199 | |
ysr@777 | 200 | #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away |
ysr@777 | 201 | #pragma warning( disable:4355 ) // 'this' : used in base member initializer list |
ysr@777 | 202 | #endif // _MSC_VER |
ysr@777 | 203 | |
// Construct an empty queue set: no closures registered yet, and a shared
// ("permanent") queue owned by this set. Real setup happens in initialize().
SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(), _closure(NULL), _par_closures(NULL),
  _shared_satb_queue(this, true /*perm*/) { }
ysr@777 | 207 | |
// Second-phase setup: configure the underlying PtrQueueSet, attach the
// lock protecting the shared queue, and allocate the per-worker closure
// array when parallel GC workers are in use.
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                  int process_completed_threshold,
                                  Mutex* lock) {
  // The -1 is the max_completed_queue argument — presumably "no limit";
  // see PtrQueueSet::initialize for the exact semantics.
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
  _shared_satb_queue.set_lock(lock);
  if (ParallelGCThreads > 0) {
    // One closure slot per parallel GC worker (filled via set_par_closure()).
    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC);
  }
}
ysr@777 | 217 | |
// Handle a Java thread's SATB queue whose index has reached zero
// (i.e. its buffer has filled up).
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  // Debug builds verify the buffer holds only valid oops before processing.
  DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
  t->satb_mark_queue().handle_zero_index();
}
ysr@777 | 222 | |
#ifdef ASSERT
// Debug aid: dump the expected SATB active state followed by the actual
// state of the queue set, every Java thread's queue, and the shared queue.
void SATBMarkQueueSet::dump_active_states(bool expected_active) {
  gclog_or_tty->print_cr("Expected SATB active state: %s",
                         expected_active ? "ACTIVE" : "INACTIVE");
  gclog_or_tty->print_cr("Actual SATB active states:");
  gclog_or_tty->print_cr("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    gclog_or_tty->print_cr("  Thread \"%s\" queue: %s", t->name(),
                           t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
  }
  gclog_or_tty->print_cr("  Shared queue: %s",
                         shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
}

// Debug-only check that the queue set, all per-thread queues, and the
// shared queue agree with expected_active. On any mismatch, dump all
// states and fail with a guarantee.
void SATBMarkQueueSet::verify_active_states(bool expected_active) {
  // Verify queue set state
  if (is_active() != expected_active) {
    dump_active_states(expected_active);
    guarantee(false, "SATB queue set has an unexpected active state");
  }

  // Verify thread queue states
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    if (t->satb_mark_queue().is_active() != expected_active) {
      dump_active_states(expected_active);
      guarantee(false, "Thread SATB queue has an unexpected active state");
    }
  }

  // Verify shared queue state
  if (shared_satb_queue()->is_active() != expected_active) {
    dump_active_states(expected_active);
    guarantee(false, "Shared SATB queue has an unexpected active state");
  }
}
#endif // ASSERT
tonyp@1752 | 259 | |
// Flip the activation state of the whole SATB machinery: the set-wide
// flag, every Java thread's queue, and the shared queue. Must run at a
// safepoint; debug builds first verify every queue currently holds
// expected_active.
void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
#ifdef ASSERT
  verify_active_states(expected_active);
#endif // ASSERT
  _all_active = active;
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().set_active(active);
  }
  shared_satb_queue()->set_active(active);
}
ysr@777 | 271 | |
tonyp@3416 | 272 | void SATBMarkQueueSet::filter_thread_buffers() { |
tonyp@3416 | 273 | for(JavaThread* t = Threads::first(); t; t = t->next()) { |
tonyp@3416 | 274 | t->satb_mark_queue().filter(); |
tonyp@3416 | 275 | } |
tonyp@3416 | 276 | shared_satb_queue()->filter(); |
tonyp@3416 | 277 | } |
tonyp@3416 | 278 | |
// Register the closure applied to completed buffers in the serial case.
void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
  _closure = closure;
}
ysr@777 | 282 | |
// Register the closure used by parallel GC worker i; requires that
// initialize() allocated the per-worker closure array.
void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
  assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
  _par_closures[i] = par_closure;
}
ysr@777 | 287 | |
// Claim one buffer from the completed-buffer list and apply the registered
// closure (the worker's own closure when par is true, the serial one
// otherwise) to its entire contents, then deallocate it.
// Returns true if a buffer was claimed and processed, false if the list
// was empty.
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                              uint worker) {
  BufferNode* nd = NULL;
  {
    // Pop the head of the completed-buffer list under _cbl_mon; the buffer
    // itself is processed outside the lock.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    if (_completed_buffers_head != NULL) {
      nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
      _n_completed_buffers--;
      if (_n_completed_buffers == 0) _process_completed = false;
    }
  }
  ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    // The whole buffer (from byte index 0 up to _sz) is processed.
    ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
    deallocate_buffer(buf);
    return true;
  } else {
    return false;
  }
}
ysr@777 | 311 | |
tonyp@3416 | 312 | void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) { |
tonyp@3416 | 313 | assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
tonyp@3416 | 314 | assert(cl != NULL, "pre-condition"); |
tonyp@3416 | 315 | |
tonyp@3416 | 316 | BufferNode* nd = _completed_buffers_head; |
tonyp@3416 | 317 | while (nd != NULL) { |
tonyp@3416 | 318 | void** buf = BufferNode::make_buffer_from_node(nd); |
tonyp@3416 | 319 | ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz); |
tonyp@3416 | 320 | nd = nd->next(); |
tonyp@3416 | 321 | } |
tonyp@3416 | 322 | } |
tonyp@3416 | 323 | |
tonyp@3416 | 324 | void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) { |
tonyp@3416 | 325 | assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
tonyp@3416 | 326 | assert(cl != NULL, "pre-condition"); |
tonyp@3416 | 327 | |
tonyp@3416 | 328 | for (JavaThread* t = Threads::first(); t; t = t->next()) { |
tonyp@3416 | 329 | t->satb_mark_queue().apply_closure(cl); |
tonyp@3416 | 330 | } |
tonyp@3416 | 331 | shared_satb_queue()->apply_closure(cl); |
tonyp@3416 | 332 | } |
tonyp@3416 | 333 | |
#ifndef PRODUCT
// Helpful for debugging

#define SATB_PRINTER_BUFFER_SIZE 256

// Dump every SATB buffer in the system under the given tag: each enqueued
// (completed) buffer, each Java thread's queue, and the shared queue.
// Must run at a safepoint.
void SATBMarkQueueSet::print_all(const char* msg) {
  char buffer[SATB_PRINTER_BUFFER_SIZE];
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");

  gclog_or_tty->cr();
  gclog_or_tty->print_cr("SATB BUFFERS [%s]", msg);

  // Walk the completed-buffer list, labeling each buffer with its position.
  BufferNode* nd = _completed_buffers_head;
  int i = 0;
  while (nd != NULL) {
    void** buf = BufferNode::make_buffer_from_node(nd);
    jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
    ObjPtrQueue::print(buffer, buf, 0, _sz);
    nd = nd->next();
    i += 1;
  }

  // Per-thread queues, labeled with the owning thread's name.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
    t->satb_mark_queue().print(buffer);
  }

  shared_satb_queue()->print("Shared");

  gclog_or_tty->cr();
}
#endif // PRODUCT
tonyp@3416 | 366 | |
// Discard all SATB data accumulated so far: deallocate every completed
// buffer and reset every Java thread's queue plus the shared queue.
// NOTE(review): presumably called when concurrent marking is aborted —
// confirm against callers.
void SATBMarkQueueSet::abandon_partial_marking() {
  BufferNode* buffers_to_delete = NULL;
  {
    // Detach the entire completed-buffer list while holding _cbl_mon;
    // the actual deallocation happens below, outside the lock.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      // Prepend onto the local to-delete list.
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  // Deallocate the detached buffers without holding the lock.
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  // So we can safely manipulate these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().reset();
  }
  shared_satb_queue()->reset();
}