src/share/vm/gc_implementation/g1/satbQueue.cpp

author:      kbarrett
date:        Wed, 22 Apr 2015 14:06:49 -0400
changeset:   7833:0f8f1250fed5
parent:      7832:b5d14ef905b5
child:       7834:399885e13e90
permissions: -rw-r--r--

8078023: verify_no_cset_oops found reclaimed humongous object in SATB buffer
Summary: Removed no longer valid checking of SATB buffers
Reviewed-by: jmasa, pliden

ysr@777 1 /*
kbarrett@7831 2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
tonyp@2469 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 27 #include "gc_implementation/g1/satbQueue.hpp"
stefank@2314 28 #include "memory/allocation.inline.hpp"
stefank@2314 29 #include "memory/sharedHeap.hpp"
coleenp@4037 30 #include "oops/oop.inline.hpp"
stefank@2314 31 #include "runtime/mutexLocker.hpp"
stefank@2314 32 #include "runtime/thread.hpp"
johnc@3175 33 #include "runtime/vmThread.hpp"
ysr@777 34
drchase@6680 35 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
drchase@6680 36
tonyp@3416 37 void ObjPtrQueue::flush() {
kbarrett@7831 38 // Filter now to possibly save work later. If filtering empties the
kbarrett@7831 39 // buffer then flush_impl can deallocate the buffer.
tonyp@3416 40 filter();
tschatzl@7445 41 flush_impl();
tonyp@3416 42 }
tonyp@3416 43
kbarrett@7831 44 // Return true if a SATB buffer entry refers to an object that
kbarrett@7831 45 // requires marking.
tonyp@2469 46 //
kbarrett@7831 47 // The entry must point into the G1 heap. In particular, it must not
kbarrett@7831 48 // be a NULL pointer. NULL pointers are pre-filtered and never
kbarrett@7831 49 // inserted into a SATB buffer.
tonyp@2469 50 //
kbarrett@7831 51 // An entry that is below the NTAMS pointer for the containing heap
kbarrett@7831 52 // region requires marking. Such an entry must point to a valid object.
kbarrett@7831 53 //
kbarrett@7831 54 // An entry that is at least the NTAMS pointer for the containing heap
kbarrett@7831 55 // region might be any of the following, none of which should be marked.
kbarrett@7831 56 //
kbarrett@7831 57 // * A reference to an object allocated since marking started.
kbarrett@7831 58 // According to SATB, such objects are implicitly kept live and do
kbarrett@7831 59 // not need to be dealt with via SATB buffer processing.
kbarrett@7831 60 //
kbarrett@7831 61 // * A reference to a young generation object. Young objects are
kbarrett@7831 62 // handled separately and are not marked by concurrent marking.
kbarrett@7831 63 //
kbarrett@7831 64 // * A stale reference to a young generation object. If a young
kbarrett@7831 65 // generation object reference is recorded and not filtered out
kbarrett@7831 66 // before being moved by a young collection, the reference becomes
kbarrett@7831 67 // stale.
kbarrett@7831 68 //
kbarrett@7831 69 // * A stale reference to an eagerly reclaimed humongous object. If a
kbarrett@7831 70 // humongous object is recorded and then reclaimed, the reference
kbarrett@7831 71 // becomes stale.
kbarrett@7831 72 //
kbarrett@7831 73 // The stale reference cases are implicitly handled by the NTAMS
kbarrett@7831 74 // comparison. Because of the possibility of stale references, buffer
kbarrett@7831 75 // processing must be somewhat circumspect and not assume entries
kbarrett@7831 76 // in an unfiltered buffer refer to valid objects.
kbarrett@7831 77
kbarrett@7831 78 inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
kbarrett@7831 79 // Includes rejection of NULL pointers.
kbarrett@7831 80 assert(heap->is_in_reserved(entry),
kbarrett@7831 81 err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));
kbarrett@7831 82
kbarrett@7831 83 HeapRegion* region = heap->heap_region_containing_raw(entry);
kbarrett@7831 84 assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
kbarrett@7831 85 if (entry >= region->next_top_at_mark_start()) {
kbarrett@7831 86 return false;
kbarrett@7831 87 }
kbarrett@7831 88
kbarrett@7831 89 assert(((oop)entry)->is_oop(true /* ignore mark word */),
kbarrett@7831 90 err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));
kbarrett@7831 91
kbarrett@7831 92 return true;
kbarrett@7831 93 }
kbarrett@7831 94
kbarrett@7831 95 // This method removes entries from a SATB buffer that will not be
kbarrett@7831 96 // useful to the concurrent marking threads. Entries are retained if
kbarrett@7831 97 // they require marking and are not already marked. Retained entries
kbarrett@7831 98 // are compacted toward the top of the buffer.
tonyp@2469 99
tonyp@3416 100 void ObjPtrQueue::filter() {
tonyp@2469 101 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2469 102 void** buf = _buf;
tonyp@2469 103 size_t sz = _sz;
tonyp@2469 104
tonyp@3416 105 if (buf == NULL) {
tonyp@3416 106 // nothing to do
tonyp@3416 107 return;
tonyp@3416 108 }
tonyp@3416 109
tonyp@2469 110 // Used for sanity checking at the end of the loop.
tonyp@2469 111 debug_only(size_t entries = 0; size_t retained = 0;)
tonyp@2469 112
tonyp@2469 113 size_t i = sz;
tonyp@2469 114 size_t new_index = sz;
tonyp@2469 115
tonyp@2469 116 while (i > _index) {
tonyp@2469 117 assert(i > 0, "we should have at least one more entry to process");
tonyp@2469 118 i -= oopSize;
tonyp@2469 119 debug_only(entries += 1;)
kbarrett@7831 120 void** p = &buf[byte_index_to_index((int) i)];
kbarrett@7831 121 void* entry = *p;
tonyp@2469 122 // NULL the entry so that unused parts of the buffer contain NULLs
tonyp@2469 123 // at the end. If we are going to retain it we will copy it to its
tonyp@2469 124 // final place. If we have retained all entries we have visited so
tonyp@2469 125 // far, we'll just end up copying it to the same place.
tonyp@2469 126 *p = NULL;
tonyp@2469 127
kbarrett@7831 128 if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
tonyp@2469 129 assert(new_index > 0, "we should not have already filled up the buffer");
tonyp@2469 130 new_index -= oopSize;
tonyp@2469 131 assert(new_index >= i,
tonyp@2469 132 "new_index should never be below i, as we always compact 'up'");
kbarrett@7831 133 void** new_p = &buf[byte_index_to_index((int) new_index)];
tonyp@2469 134 assert(new_p >= p, "the destination location should never be below "
tonyp@2469 135 "the source as we always compact 'up'");
tonyp@2469 136 assert(*new_p == NULL,
tonyp@2469 137 "we should have already cleared the destination location");
kbarrett@7831 138 *new_p = entry;
tonyp@2469 139 debug_only(retained += 1;)
tonyp@2469 140 }
tonyp@2469 141 }
tonyp@3416 142
tonyp@3416 143 #ifdef ASSERT
tonyp@2469 144 size_t entries_calc = (sz - _index) / oopSize;
tonyp@2469 145 assert(entries == entries_calc, "the number of entries we counted "
tonyp@2469 146 "should match the number of entries we calculated");
tonyp@2469 147 size_t retained_calc = (sz - new_index) / oopSize;
tonyp@2469 148 assert(retained == retained_calc, "the number of retained entries we counted "
tonyp@2469 149 "should match the number of retained entries we calculated");
tonyp@3416 150 #endif // ASSERT
tonyp@3416 151
tonyp@3416 152 _index = new_index;
tonyp@3416 153 }
tonyp@3416 154
tonyp@3416 155 // This method first applies the above filtering to the buffer. If,
tonyp@3416 156 // after filtering, a large enough portion of the buffer has been
tonyp@3416 157 // cleared, the buffer is re-used rather than enqueued, and the
tonyp@3416 158 // mutator carries on executing with the same buffer instead of
tonyp@3416 159 // being given a replacement.
tonyp@3416 160
tonyp@3416 161 bool ObjPtrQueue::should_enqueue_buffer() {
tonyp@3416 162 assert(_lock == NULL || _lock->owned_by_self(),
tonyp@3416 163 "we should have taken the lock before calling this");
tonyp@3416 164
tonyp@3416 165 // Even if G1SATBBufferEnqueueingThresholdPercent == 0 we have to
tonyp@3416 166 // filter the buffer, because filtering removes any references into
tonyp@3416 167 // the CSet; we currently assume that no such refs will appear in
tonyp@3416 168 // enqueued buffers.
tonyp@3416 169
tonyp@3416 170 // This method should only be called if there is a non-NULL buffer
tonyp@3416 171 // that is full.
tonyp@3416 172 assert(_index == 0, "pre-condition");
tonyp@3416 173 assert(_buf != NULL, "pre-condition");
tonyp@3416 174
tonyp@3416 175 filter();
tonyp@3416 176
tonyp@3416 177 size_t sz = _sz;
tonyp@3416 178 size_t all_entries = sz / oopSize;
tonyp@3416 179 size_t retained_entries = (sz - _index) / oopSize;
tonyp@3416 180 size_t perc = retained_entries * 100 / all_entries;
tonyp@2469 181 bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
tonyp@2469 182 return should_enqueue;
tonyp@2469 183 }
tonyp@2469 184
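// Apply the closure to every entry currently in this queue's buffer, then
// reset the index so the buffer is logically empty.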
tonyp@3416 185 void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
tonyp@3416 186 if (_buf != NULL) {
tonyp@3416 187 apply_closure_to_buffer(cl, _buf, _index, _sz);
ysr@777 188 _index = _sz;
ysr@777 189 }
ysr@777 190 }
ysr@777 191
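// Static helper: apply the closure to the buffer entries between index and
// sz, skipping any NULL entries.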
ysr@777 192 void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
ysr@777 193 void** buf, size_t index, size_t sz) {
ysr@777 194 if (cl == NULL) return;
ysr@777 195 for (size_t i = index; i < sz; i += oopSize) {
ysr@777 196 oop obj = (oop)buf[byte_index_to_index((int)i)];
ysr@777 197 // There can be NULL entries because of destructors.
ysr@777 198 if (obj != NULL) {
ysr@777 199 cl->do_object(obj);
ysr@777 200 }
ysr@777 201 }
ysr@777 202 }
ysr@1280 203
tonyp@3416 204 #ifndef PRODUCT
tonyp@3416 205 // Helpful for debugging
tonyp@3416 206
tonyp@3416 207 void ObjPtrQueue::print(const char* name) {
tonyp@3416 208 print(name, _buf, _index, _sz);
tonyp@3416 209 }
tonyp@3416 210
tonyp@3416 211 void ObjPtrQueue::print(const char* name,
tonyp@3416 212 void** buf, size_t index, size_t sz) {
tonyp@3416 213 gclog_or_tty->print_cr(" SATB BUFFER [%s] buf: "PTR_FORMAT" "
tonyp@3416 214 "index: "SIZE_FORMAT" sz: "SIZE_FORMAT,
tonyp@3416 215 name, buf, index, sz);
tonyp@3416 216 }
tonyp@3416 217 #endif // PRODUCT
tonyp@3416 218
ysr@777 219 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 220 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 221 #endif // _MSC_VER
ysr@777 222
ysr@777 223 SATBMarkQueueSet::SATBMarkQueueSet() :
kbarrett@7832 224 PtrQueueSet(),
tonyp@3416 225 _shared_satb_queue(this, true /*perm*/) { }
ysr@777 226
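// Initialize the underlying PtrQueueSet and associate the shared SATB queue
// with its lock.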
ysr@777 227 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
iveresov@1546 228 int process_completed_threshold,
ysr@777 229 Mutex* lock) {
iveresov@1546 230 PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
ysr@777 231 _shared_satb_queue.set_lock(lock);
ysr@777 232 }
ysr@777 233
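// Called when a thread's SATB queue has run out of buffer space (its index
// has reached zero); delegate to the queue's own handling.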
ysr@777 234 void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
ysr@777 235 t->satb_mark_queue().handle_zero_index();
ysr@777 236 }
ysr@777 237
tonyp@1752 238 #ifdef ASSERT
pliden@6396 239 void SATBMarkQueueSet::dump_active_states(bool expected_active) {
pliden@6396 240 gclog_or_tty->print_cr("Expected SATB active state: %s",
pliden@6396 241 expected_active ? "ACTIVE" : "INACTIVE");
pliden@6396 242 gclog_or_tty->print_cr("Actual SATB active states:");
pliden@6396 243 gclog_or_tty->print_cr(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
pliden@6396 244 for (JavaThread* t = Threads::first(); t; t = t->next()) {
pliden@6396 245 gclog_or_tty->print_cr(" Thread \"%s\" queue: %s", t->name(),
pliden@6396 246 t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
pliden@6396 247 }
pliden@6396 248 gclog_or_tty->print_cr(" Shared queue: %s",
pliden@6396 249 shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
pliden@6396 250 }
pliden@6396 251
pliden@6396 252 void SATBMarkQueueSet::verify_active_states(bool expected_active) {
pliden@6396 253 // Verify queue set state
pliden@6396 254 if (is_active() != expected_active) {
pliden@6396 255 dump_active_states(expected_active);
pliden@6396 256 guarantee(false, "SATB queue set has an unexpected active state");
pliden@6396 257 }
pliden@6396 258
pliden@6396 259 // Verify thread queue states
pliden@6396 260 for (JavaThread* t = Threads::first(); t; t = t->next()) {
pliden@6396 261 if (t->satb_mark_queue().is_active() != expected_active) {
pliden@6396 262 dump_active_states(expected_active);
pliden@6396 263 guarantee(false, "Thread SATB queue has an unexpected active state");
pliden@6396 264 }
pliden@6396 265 }
pliden@6396 266
pliden@6396 267 // Verify shared queue state
pliden@6396 268 if (shared_satb_queue()->is_active() != expected_active) {
pliden@6396 269 dump_active_states(expected_active);
pliden@6396 270 guarantee(false, "Shared SATB queue has an unexpected active state");
tonyp@1752 271 }
tonyp@1752 272 }
tonyp@1752 273 #endif // ASSERT
tonyp@1752 274
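// At a safepoint, verify the expected activation state (debug builds only),
// then set the new active state on the queue set, every Java thread's queue,
// and the shared queue.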
pliden@6396 275 void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
tonyp@1752 276 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
tonyp@1752 277 #ifdef ASSERT
pliden@6396 278 verify_active_states(expected_active);
pliden@6396 279 #endif // ASSERT
pliden@6396 280 _all_active = active;
pliden@6396 281 for (JavaThread* t = Threads::first(); t; t = t->next()) {
pliden@6396 282 t->satb_mark_queue().set_active(active);
tonyp@1752 283 }
pliden@6396 284 shared_satb_queue()->set_active(active);
ysr@777 285 }
ysr@777 286
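// Filter the SATB buffer of every Java thread and the shared queue.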
tonyp@3416 287 void SATBMarkQueueSet::filter_thread_buffers() {
tonyp@3416 288 for(JavaThread* t = Threads::first(); t; t = t->next()) {
tonyp@3416 289 t->satb_mark_queue().filter();
tonyp@3416 290 }
tonyp@3416 291 shared_satb_queue()->filter();
tonyp@3416 292 }
tonyp@3416 293
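// Remove one buffer from the completed-buffer list, apply the closure to its
// entries, deallocate it, and return true. Return false if the list is empty.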
kbarrett@7832 294 bool SATBMarkQueueSet::apply_closure_to_completed_buffer(ObjectClosure* cl) {
iveresov@1546 295 BufferNode* nd = NULL;
ysr@777 296 {
ysr@777 297 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
ysr@777 298 if (_completed_buffers_head != NULL) {
ysr@777 299 nd = _completed_buffers_head;
iveresov@1546 300 _completed_buffers_head = nd->next();
ysr@777 301 if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
ysr@777 302 _n_completed_buffers--;
ysr@777 303 if (_n_completed_buffers == 0) _process_completed = false;
ysr@777 304 }
ysr@777 305 }
ysr@777 306 if (nd != NULL) {
iveresov@1546 307 void **buf = BufferNode::make_buffer_from_node(nd);
iveresov@1546 308 ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
iveresov@1546 309 deallocate_buffer(buf);
ysr@777 310 return true;
ysr@777 311 } else {
ysr@777 312 return false;
ysr@777 313 }
ysr@777 314 }
ysr@777 315
tonyp@3416 316 #ifndef PRODUCT
tonyp@3416 317 // Helpful for debugging
tonyp@3416 318
tonyp@3416 319 #define SATB_PRINTER_BUFFER_SIZE 256
tonyp@3416 320
tonyp@3416 321 void SATBMarkQueueSet::print_all(const char* msg) {
tonyp@3416 322 char buffer[SATB_PRINTER_BUFFER_SIZE];
tonyp@3416 323 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
tonyp@3416 324
tonyp@3416 325 gclog_or_tty->cr();
tonyp@3416 326 gclog_or_tty->print_cr("SATB BUFFERS [%s]", msg);
tonyp@3416 327
tonyp@3416 328 BufferNode* nd = _completed_buffers_head;
tonyp@3416 329 int i = 0;
tonyp@3416 330 while (nd != NULL) {
tonyp@3416 331 void** buf = BufferNode::make_buffer_from_node(nd);
tonyp@3416 332 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
tonyp@3416 333 ObjPtrQueue::print(buffer, buf, 0, _sz);
tonyp@3416 334 nd = nd->next();
tonyp@3416 335 i += 1;
tonyp@3416 336 }
tonyp@3416 337
tonyp@3416 338 for (JavaThread* t = Threads::first(); t; t = t->next()) {
tonyp@3416 339 jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
tonyp@3416 340 t->satb_mark_queue().print(buffer);
tonyp@3416 341 }
tonyp@3416 342
tonyp@3416 343 shared_satb_queue()->print("Shared");
tonyp@3416 344
tonyp@3416 345 gclog_or_tty->cr();
tonyp@3416 346 }
tonyp@3416 347 #endif // PRODUCT
tonyp@3416 348
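// Discard all completed buffers and reset every Java thread's queue and the
// shared queue; used when the partially completed marking is abandoned.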
ysr@777 349 void SATBMarkQueueSet::abandon_partial_marking() {
iveresov@1546 350 BufferNode* buffers_to_delete = NULL;
ysr@777 351 {
ysr@777 352 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
ysr@777 353 while (_completed_buffers_head != NULL) {
iveresov@1546 354 BufferNode* nd = _completed_buffers_head;
iveresov@1546 355 _completed_buffers_head = nd->next();
iveresov@1546 356 nd->set_next(buffers_to_delete);
ysr@777 357 buffers_to_delete = nd;
ysr@777 358 }
ysr@777 359 _completed_buffers_tail = NULL;
ysr@777 360 _n_completed_buffers = 0;
ysr@1280 361 DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
ysr@777 362 }
ysr@777 363 while (buffers_to_delete != NULL) {
iveresov@1546 364 BufferNode* nd = buffers_to_delete;
iveresov@1546 365 buffers_to_delete = nd->next();
iveresov@1546 366 deallocate_buffer(BufferNode::make_buffer_from_node(nd));
ysr@777 367 }
ysr@777 368 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
ysr@777 369 // So we can safely manipulate these queues.
ysr@777 370 for (JavaThread* t = Threads::first(); t; t = t->next()) {
ysr@777 371 t->satb_mark_queue().reset();
ysr@777 372 }
tonyp@3416 373 shared_satb_queue()->reset();
ysr@777 374 }
