src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp

author       johnc
date         Tue, 21 Aug 2012 14:10:39 -0700
changeset    3998:7383557659bd
parent       3156:f08d439fab8c
child        4299:f34d701e952e
permissions  -rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
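
One convention worth keeping in mind when reading the file below: DirtyCardQueue indices and sizes are byte offsets into the buffer (hence the "i += oopSize" strides in the loops), so any count of pending cards has to divide the accumulated byte totals by oopSize. The sketch below illustrates that arithmetic only; it is not this changeset's diff, and the accessors dirty_card_queue_set(), buffer_size() and completed_buffers_num() are assumed here for illustration.

// Illustrative sketch only, not the actual patch: derive a pending-card
// count from byte-sized queue totals by dividing by oopSize.
size_t pending_card_num_sketch() {
  size_t partial_bytes = 0;
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    partial_bytes += t->dirty_card_queue().size();  // per-thread partial buffers, in bytes
  }
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();  // assumed accessor
  size_t completed_bytes = dcqs.buffer_size() * dcqs.completed_buffers_num();  // assumed accessors
  return (completed_bytes + partial_bytes) / oopSize;  // bytes -> card entries
}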

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                   bool consume,
                                   size_t worker_i) {
  bool res = true;
  if (_buf != NULL) {
    res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                  consume,
                                  (int) worker_i);
    if (res && consume) _index = _sz;
  }
  return res;
}

bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                             void** buf,
                                             size_t index, size_t sz,
                                             bool consume,
                                             int worker_i) {
  if (cl == NULL) return true;
  for (size_t i = index; i < sz; i += oopSize) {
    int ind = byte_index_to_index((int)i);
    jbyte* card_ptr = (jbyte*)buf[ind];
    if (card_ptr != NULL) {
      // Set the entry to null, so we don't do it again (via the test
      // above) if we reconsider this buffer.
      if (consume) buf[ind] = NULL;
      if (!cl->do_card_ptr(card_ptr, worker_i)) return false;
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _closure(NULL),
  _shared_dirty_card_queue(this, true /*perm*/),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

// Determines how many mutator threads can process the buffers in parallel.
size_t DirtyCardQueueSet::num_par_ids() {
  return os::processor_count();
}

void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                   int process_completed_threshold,
                                   int max_completed_queue,
                                   Mutex* lock, PtrQueueSet* fl_owner) {
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
                          max_completed_queue, fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
}

void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->dirty_card_queue().handle_zero_index();
}

void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
  _closure = closure;
}

void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
                                                    size_t worker_i) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    bool b = t->dirty_card_queue().apply_closure(_closure, consume);
    guarantee(b, "Should not be interrupted.");
  }
  bool b = shared_dirty_card_queue()->apply_closure(_closure,
                                                    consume,
                                                    worker_i);
  guarantee(b, "Should not be interrupted.");
}

// Processes a full buffer on behalf of a mutator thread (instead of
// enqueueing it), claiming a parallel worker id from _free_ids if the
// thread does not already hold one.
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {

  // Used to determine if we had already claimed a par_id
  // before entering this method.
  bool already_claimed = false;

  // We grab the current JavaThread.
  JavaThread* thread = JavaThread::current();

  // We get the number of any par_id that this thread
  // might have already claimed.
  int worker_i = thread->get_claimed_par_id();

  // If worker_i is not -1 then the thread has already claimed
  // a par_id. We make note of it using the already_claimed value.
  if (worker_i != -1) {
    already_claimed = true;
  } else {

    // Otherwise we need to claim a par id
    worker_i = _free_ids->claim_par_id();

    // And store the par_id value in the thread
    thread->set_claimed_par_id(worker_i);
  }

  bool b = false;
  if (worker_i != -1) {
    b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                _sz, true, worker_i);
    if (b) Atomic::inc(&_processed_buffers_mut);

    // If we had not claimed an id before entering the method
    // then we must release the id.
    if (!already_claimed) {

      // we release the id
      _free_ids->release_par_id(worker_i);

      // and set the claimed_id in the thread to -1
      thread->set_claimed_par_id(-1);
    }
  }
  return b;
}


// Removes and returns the head of the completed-buffer list, or NULL if
// the list currently holds no more than stop_at buffers.
BufferNode*
DirtyCardQueueSet::get_completed_buffer(int stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if ((int)_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

  if (_completed_buffers_head != NULL) {
    nd = _completed_buffers_head;
    _completed_buffers_head = nd->next();
    if (_completed_buffers_head == NULL)
      _completed_buffers_tail = NULL;
    _n_completed_buffers--;
    assert(_n_completed_buffers >= 0, "Invariant");
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
  return nd;
}

bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
                                         int worker_i,
                                         BufferNode* nd) {
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    size_t index = nd->index();
    bool b =
      DirtyCardQueue::apply_closure_to_buffer(cl, buf,
                                              index, _sz,
                                              true, worker_i);
    if (b) {
      deallocate_buffer(buf);
      return true; // In normal case, go on to next buffer.
    } else {
      enqueue_complete_buffer(buf, index);
      return false;
    }
  } else {
    return false;
  }
}

bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          int worker_i,
                                                          int stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  bool res = apply_closure_to_completed_buffer_helper(cl, worker_i, nd);
  if (res) Atomic::inc(&_processed_buffers_rs_thread);
  return res;
}

bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
                                                          int stop_at,
                                                          bool during_pause) {
  return apply_closure_to_completed_buffer(_closure, worker_i,
                                           stop_at, during_pause);
}

void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
  BufferNode* nd = _completed_buffers_head;
  while (nd != NULL) {
    bool b =
      DirtyCardQueue::apply_closure_to_buffer(_closure,
                                              BufferNode::make_buffer_from_node(nd),
                                              0, _sz, false);
    guarantee(b, "Should not stop early.");
    nd = nd->next();
  }
}

// Deallocates any completed log buffers
void DirtyCardQueueSet::clear() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    debug_only(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }

}

void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  clear();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->dirty_card_queue().reset();
  }
  shared_dirty_card_queue()->reset();
}


void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to
  // the global list of logs. Temporarily turn off the limit on the number
  // of outstanding buffers.
  int save_max_completed_queue = _max_completed_queue;
  _max_completed_queue = max_jint;
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    DirtyCardQueue& dcq = t->dirty_card_queue();
    if (dcq.size() != 0) {
      void **buf = t->dirty_card_queue().get_buf();
      // We must NULL out the unused entries, then enqueue.
      for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) {
        buf[PtrQueue::byte_index_to_index((int)i)] = NULL;
      }
      enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
      dcq.reinitialize();
    }
  }
  if (_shared_dirty_card_queue.size() != 0) {
    enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
                            _shared_dirty_card_queue.get_index());
    _shared_dirty_card_queue.reinitialize();
  }
  // Restore the completed buffer queue limit.
  _max_completed_queue = save_max_completed_queue;
}
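
For orientation, here is a minimal usage sketch (not part of this file or changeset; the function name and threshold handling are illustrative) showing how a concurrent refinement worker could drain the completed-buffer list through the three-argument apply_closure_to_completed_buffer() overload above, which keeps returning true until the backlog drops to stop_at.

// Illustrative only: repeatedly apply the closure registered via set_closure()
// to completed buffers until no more than stop_at of them remain queued.
void drain_completed_buffers(DirtyCardQueueSet* dcqs, int worker_i, int stop_at) {
  while (dcqs->apply_closure_to_completed_buffer(worker_i, stop_at,
                                                 false /* during_pause */)) {
    // Each true return means one buffer was fully processed and deallocated.
  }
}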
