Tue, 26 Jan 2010 16:52:29 -0800
6920090: G1: Disable ReduceInitialCardMarks at least until 6920109 is fixed
Summary: G1 now answers "no" to the query can_elide_initializing_store_barrier() in the product build. A debug flag allows alternate behaviour in debug builds.
Reviewed-by: iveresov, tonyp
ysr@777 | 1 | /* |
xdono@1279 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | # include "incls/_precompiled.incl" |
ysr@777 | 26 | # include "incls/_dirtyCardQueue.cpp.incl" |
ysr@777 | 27 | |
ysr@777 | 28 | bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl, |
ysr@777 | 29 | bool consume, |
ysr@777 | 30 | size_t worker_i) { |
ysr@777 | 31 | bool res = true; |
ysr@777 | 32 | if (_buf != NULL) { |
ysr@777 | 33 | res = apply_closure_to_buffer(cl, _buf, _index, _sz, |
ysr@777 | 34 | consume, |
ysr@777 | 35 | (int) worker_i); |
ysr@777 | 36 | if (res && consume) _index = _sz; |
ysr@777 | 37 | } |
ysr@777 | 38 | return res; |
ysr@777 | 39 | } |
ysr@777 | 40 | |
ysr@777 | 41 | bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl, |
ysr@777 | 42 | void** buf, |
ysr@777 | 43 | size_t index, size_t sz, |
ysr@777 | 44 | bool consume, |
ysr@777 | 45 | int worker_i) { |
ysr@777 | 46 | if (cl == NULL) return true; |
ysr@777 | 47 | for (size_t i = index; i < sz; i += oopSize) { |
ysr@777 | 48 | int ind = byte_index_to_index((int)i); |
ysr@777 | 49 | jbyte* card_ptr = (jbyte*)buf[ind]; |
ysr@777 | 50 | if (card_ptr != NULL) { |
ysr@777 | 51 | // Set the entry to null, so we don't do it again (via the test |
ysr@777 | 52 | // above) if we reconsider this buffer. |
ysr@777 | 53 | if (consume) buf[ind] = NULL; |
ysr@777 | 54 | if (!cl->do_card_ptr(card_ptr, worker_i)) return false; |
ysr@777 | 55 | } |
ysr@777 | 56 | } |
ysr@777 | 57 | return true; |
ysr@777 | 58 | } |
ysr@777 | 59 | |
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

// Construct the queue set.  The shared queue is created as "permanent"
// (it lives for the lifetime of the set); the closure and the free-id set
// are installed later, via set_closure() and initialize() respectively.
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _closure(NULL),
  _shared_dirty_card_queue(this, true /*perm*/),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  // Dirty card queues are active from the moment the set is created.
  _all_active = true;
}
ysr@777 | 73 | |
// Determines how many mutator threads can process the buffers in parallel.
// This also sizes the FreeIdSet allocated in initialize(), from which
// mut_process_buffer() claims its worker ids.
size_t DirtyCardQueueSet::num_par_ids() {
  return os::processor_count();
}
ysr@777 | 78 | |
// One-time set-up: forwards the monitor/locks and thresholds to the
// underlying PtrQueueSet, sizes buffers from G1UpdateBufferSize, gives the
// shared queue its lock, and creates the set of par ids that mutator
// threads claim while processing buffers (see mut_process_buffer()).
void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                   int process_completed_threshold,
                                   int max_completed_queue,
                                   Mutex* lock, PtrQueueSet* fl_owner) {
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
                          max_completed_queue, fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
}
ysr@777 | 89 | |
// Invoked when thread t's dirty card queue index reaches zero (the buffer
// is full); delegates to the queue's own zero-index handler.
void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->dirty_card_queue().handle_zero_index();
}
ysr@777 | 93 | |
// Install the closure that the apply_closure_* methods below run over
// each logged card-table entry.
void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
  _closure = closure;
}
ysr@777 | 97 | |
ysr@777 | 98 | void DirtyCardQueueSet::iterate_closure_all_threads(bool consume, |
ysr@777 | 99 | size_t worker_i) { |
ysr@777 | 100 | assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
ysr@777 | 101 | for(JavaThread* t = Threads::first(); t; t = t->next()) { |
ysr@777 | 102 | bool b = t->dirty_card_queue().apply_closure(_closure, consume); |
ysr@777 | 103 | guarantee(b, "Should not be interrupted."); |
ysr@777 | 104 | } |
ysr@777 | 105 | bool b = shared_dirty_card_queue()->apply_closure(_closure, |
ysr@777 | 106 | consume, |
ysr@777 | 107 | worker_i); |
ysr@777 | 108 | guarantee(b, "Should not be interrupted."); |
ysr@777 | 109 | } |
ysr@777 | 110 | |
// Process one completed buffer on behalf of a mutator thread.  The thread
// must hold a "par id" while running the closure; ids are claimed from
// _free_ids and cached in the thread so a recursive entry reuses the same
// id instead of claiming a second one.  Returns true iff the buffer was
// fully processed.
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {

  // Used to determine if we had already claimed a par_id
  // before entering this method.
  bool already_claimed = false;

  // We grab the current JavaThread.
  JavaThread* thread = JavaThread::current();

  // We get the number of any par_id that this thread
  // might have already claimed.
  int worker_i = thread->get_claimed_par_id();

  // If worker_i is not -1 then the thread has already claimed
  // a par_id. We make note of it using the already_claimed value
  if (worker_i != -1) {
    already_claimed = true;
  } else {

    // Otherwise we need to claim a par id
    worker_i = _free_ids->claim_par_id();

    // And store the par_id value in the thread
    thread->set_claimed_par_id(worker_i);
  }

  bool b = false;
  if (worker_i != -1) {
    // Consume the whole buffer (offsets 0 to _sz).
    b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                _sz, true, worker_i);
    if (b) Atomic::inc(&_processed_buffers_mut);

    // If we had not claimed an id before entering the method
    // then we must release the id here.
    if (!already_claimed) {

      // we release the id
      _free_ids->release_par_id(worker_i);

      // and set the claimed_id in the thread to -1
      thread->set_claimed_par_id(-1);
    }
  }
  return b;
}
ysr@777 | 156 | |
iveresov@1546 | 157 | |
// Remove and return the head of the completed-buffer list, or NULL if the
// list currently holds no more than "stop_at" buffers.  Holds _cbl_mon for
// the duration.  Note the side effect on the NULL path: _process_completed
// is cleared as well.
BufferNode*
DirtyCardQueueSet::get_completed_buffer(int stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  // Leave up to "stop_at" buffers on the list for other consumers.
  if ((int)_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

  if (_completed_buffers_head != NULL) {
    // Unlink the head node, maintaining the tail pointer and the count.
    nd = _completed_buffers_head;
    _completed_buffers_head = nd->next();
    if (_completed_buffers_head == NULL)
      _completed_buffers_tail = NULL;
    _n_completed_buffers--;
    assert(_n_completed_buffers >= 0, "Invariant");
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
  return nd;
}
ysr@777 | 179 | |
ysr@777 | 180 | bool DirtyCardQueueSet:: |
ysr@777 | 181 | apply_closure_to_completed_buffer_helper(int worker_i, |
iveresov@1546 | 182 | BufferNode* nd) { |
ysr@777 | 183 | if (nd != NULL) { |
iveresov@1546 | 184 | void **buf = BufferNode::make_buffer_from_node(nd); |
iveresov@1546 | 185 | size_t index = nd->index(); |
ysr@777 | 186 | bool b = |
iveresov@1546 | 187 | DirtyCardQueue::apply_closure_to_buffer(_closure, buf, |
iveresov@1546 | 188 | index, _sz, |
ysr@777 | 189 | true, worker_i); |
ysr@777 | 190 | if (b) { |
ysr@777 | 191 | deallocate_buffer(buf); |
ysr@777 | 192 | return true; // In normal case, go on to next buffer. |
ysr@777 | 193 | } else { |
iveresov@1546 | 194 | enqueue_complete_buffer(buf, index); |
ysr@777 | 195 | return false; |
ysr@777 | 196 | } |
ysr@777 | 197 | } else { |
ysr@777 | 198 | return false; |
ysr@777 | 199 | } |
ysr@777 | 200 | } |
ysr@777 | 201 | |
// Take one completed buffer (leaving at least "stop_at" behind on the
// list) and run the registered closure over it.  Returns true iff a
// buffer was obtained and fully processed; the RS-thread counter is
// bumped on success.
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
                                                          int stop_at,
                                                          bool during_pause)
{
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  bool res = apply_closure_to_completed_buffer_helper(worker_i, nd);
  if (res) Atomic::inc(&_processed_buffers_rs_thread);
  return res;
}
ysr@777 | 212 | |
ysr@777 | 213 | void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() { |
iveresov@1546 | 214 | BufferNode* nd = _completed_buffers_head; |
ysr@777 | 215 | while (nd != NULL) { |
ysr@777 | 216 | bool b = |
iveresov@1546 | 217 | DirtyCardQueue::apply_closure_to_buffer(_closure, |
iveresov@1546 | 218 | BufferNode::make_buffer_from_node(nd), |
iveresov@1546 | 219 | 0, _sz, false); |
ysr@777 | 220 | guarantee(b, "Should not stop early."); |
iveresov@1546 | 221 | nd = nd->next(); |
ysr@777 | 222 | } |
ysr@777 | 223 | } |
ysr@777 | 224 | |
// Discard all logged dirty cards: the completed-buffer list as well as the
// per-thread and shared partial buffers.  Must run at a safepoint, so no
// mutator is logging concurrently.
void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  BufferNode* buffers_to_delete = NULL;
  {
    // Detach the entire completed list while holding the lock...
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    debug_only(assert_completed_buffer_list_len_correct_locked());
  }
  // ...then deallocate the buffers outside the lock.
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->dirty_card_queue().reset();
  }
  shared_dirty_card_queue()->reset();
}
ysr@777 | 252 | |
ysr@777 | 253 | |
ysr@777 | 254 | void DirtyCardQueueSet::concatenate_logs() { |
ysr@777 | 255 | // Iterate over all the threads, if we find a partial log add it to |
ysr@777 | 256 | // the global list of logs. Temporarily turn off the limit on the number |
ysr@777 | 257 | // of outstanding buffers. |
ysr@777 | 258 | int save_max_completed_queue = _max_completed_queue; |
ysr@777 | 259 | _max_completed_queue = max_jint; |
ysr@777 | 260 | assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); |
ysr@777 | 261 | for (JavaThread* t = Threads::first(); t; t = t->next()) { |
ysr@777 | 262 | DirtyCardQueue& dcq = t->dirty_card_queue(); |
ysr@777 | 263 | if (dcq.size() != 0) { |
ysr@777 | 264 | void **buf = t->dirty_card_queue().get_buf(); |
ysr@777 | 265 | // We must NULL out the unused entries, then enqueue. |
ysr@777 | 266 | for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) { |
ysr@777 | 267 | buf[PtrQueue::byte_index_to_index((int)i)] = NULL; |
ysr@777 | 268 | } |
ysr@777 | 269 | enqueue_complete_buffer(dcq.get_buf(), dcq.get_index()); |
ysr@777 | 270 | dcq.reinitialize(); |
ysr@777 | 271 | } |
ysr@777 | 272 | } |
ysr@777 | 273 | if (_shared_dirty_card_queue.size() != 0) { |
ysr@777 | 274 | enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(), |
ysr@777 | 275 | _shared_dirty_card_queue.get_index()); |
ysr@777 | 276 | _shared_dirty_card_queue.reinitialize(); |
ysr@777 | 277 | } |
ysr@777 | 278 | // Restore the completed buffer queue limit. |
ysr@777 | 279 | _max_completed_queue = save_max_completed_queue; |
ysr@777 | 280 | } |