Mon, 11 May 2009 16:30:56 -0700
6484957: G1: parallel concurrent refinement
6826318: G1: remove traversal-based refinement code
Summary: Removed traversal-based refinement code as it's no longer used. Made the concurrent refinement (queue-based) parallel.
Reviewed-by: tonyp
ysr@777 | 1 | /* |
xdono@905 | 2 | * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | # include "incls/_precompiled.incl" |
ysr@777 | 26 | # include "incls/_ptrQueue.cpp.incl" |
ysr@777 | 27 | |
// Construct a queue attached to "qset_".  "perm" marks a permanent queue:
// flush() will not release a permanent queue's buffer (see PtrQueue::flush).
// The queue starts inactive with no buffer; _lock is left unset here and is
// expected to be installed elsewhere before locking_enqueue_completed_buffer
// is used.
PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
  _qset(qset_), _buf(NULL), _index(0), _active(false),
  _perm(perm), _lock(NULL)
{}
ysr@777 | 32 | |
iveresov@876 | 33 | void PtrQueue::flush() { |
ysr@777 | 34 | if (!_perm && _buf != NULL) { |
ysr@777 | 35 | if (_index == _sz) { |
ysr@777 | 36 | // No work to do. |
ysr@777 | 37 | qset()->deallocate_buffer(_buf); |
ysr@777 | 38 | } else { |
ysr@777 | 39 | // We must NULL out the unused entries, then enqueue. |
ysr@777 | 40 | for (size_t i = 0; i < _index; i += oopSize) { |
ysr@777 | 41 | _buf[byte_index_to_index((int)i)] = NULL; |
ysr@777 | 42 | } |
ysr@777 | 43 | qset()->enqueue_complete_buffer(_buf); |
ysr@777 | 44 | } |
iveresov@876 | 45 | _buf = NULL; |
iveresov@876 | 46 | _index = 0; |
ysr@777 | 47 | } |
ysr@777 | 48 | } |
ysr@777 | 49 | |
ysr@777 | 50 | |
ysr@777 | 51 | static int byte_index_to_index(int ind) { |
ysr@777 | 52 | assert((ind % oopSize) == 0, "Invariant."); |
ysr@777 | 53 | return ind / oopSize; |
ysr@777 | 54 | } |
ysr@777 | 55 | |
ysr@777 | 56 | static int index_to_byte_index(int byte_ind) { |
ysr@777 | 57 | return byte_ind * oopSize; |
ysr@777 | 58 | } |
ysr@777 | 59 | |
// Add "ptr" to the queue, assuming the queue is already active.
// _index is a byte offset that counts DOWN from _sz: slots are filled from
// the top of the buffer toward slot 0, and _index == 0 means the buffer is
// full (or absent).
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  // No room (or no buffer yet): ask for one.  A loop, because the code
  // re-checks _index after each call rather than assuming
  // handle_zero_index() left room.
  while (_index == 0) {
    handle_zero_index();
  }
  assert(_index > 0, "postcondition");

  // Claim the next lower slot and store the pointer there.
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
ysr@777 | 73 | |
// Enqueue "buf" on the qset's completed-buffer list.  The caller must hold
// this queue's _lock; it is dropped across the enqueue (which takes the
// qset's own monitor — presumably to avoid holding both locks at once) and
// retaken before returning.
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");
  _lock->unlock();
  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}
ysr@777 | 82 | |
ysr@777 | 83 | |
// Construct an empty qset: no completed buffers, no free buffers.
// The monitors (_cbl_mon, _fl_lock), the buffer size, and the processing
// threshold start unset and are expected to be installed later (see
// set_buffer_size and set_process_completed_threshold).
// By default the set owns its own buffer free list (_fl_owner == this);
// allocate_buffer/deallocate_buffer always go through _fl_owner, so
// several sets can be pointed at one shared owner.
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}
ysr@777 | 97 | |
ysr@777 | 98 | void** PtrQueueSet::allocate_buffer() { |
ysr@777 | 99 | assert(_sz > 0, "Didn't set a buffer size."); |
iveresov@1051 | 100 | MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag); |
iveresov@1051 | 101 | if (_fl_owner->_buf_free_list != NULL) { |
iveresov@1051 | 102 | void** res = _fl_owner->_buf_free_list; |
iveresov@1051 | 103 | _fl_owner->_buf_free_list = (void**)_fl_owner->_buf_free_list[0]; |
iveresov@1051 | 104 | _fl_owner->_buf_free_list_sz--; |
ysr@777 | 105 | // Just override the next pointer with NULL, just in case we scan this part |
ysr@777 | 106 | // of the buffer. |
ysr@777 | 107 | res[0] = NULL; |
ysr@777 | 108 | return res; |
ysr@777 | 109 | } else { |
ysr@777 | 110 | return NEW_C_HEAP_ARRAY(void*, _sz); |
ysr@777 | 111 | } |
ysr@777 | 112 | } |
ysr@777 | 113 | |
// Return "buf" to the free list owned by _fl_owner (which may be shared
// between several qsets).  Word 0 of the buffer is reused as the
// free-list link, matching allocate_buffer's unlink.
void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  buf[0] = (void*)_fl_owner->_buf_free_list;
  _fl_owner->_buf_free_list = buf;
  _fl_owner->_buf_free_list_sz++;
}
ysr@777 | 121 | |
ysr@777 | 122 | void PtrQueueSet::reduce_free_list() { |
ysr@777 | 123 | // For now we'll adopt the strategy of deleting half. |
ysr@777 | 124 | MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 125 | size_t n = _buf_free_list_sz / 2; |
ysr@777 | 126 | while (n > 0) { |
ysr@777 | 127 | assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong."); |
ysr@777 | 128 | void** head = _buf_free_list; |
ysr@777 | 129 | _buf_free_list = (void**)_buf_free_list[0]; |
ysr@777 | 130 | FREE_C_HEAP_ARRAY(void*,head); |
ysr@777 | 131 | n--; |
ysr@777 | 132 | } |
ysr@777 | 133 | } |
ysr@777 | 134 | |
// Append "buf" to the shared list of completed buffers.
// "index" is recorded on the list node alongside the buffer (presumably
// the byte index at which unprocessed entries start — confirm against the
// header's declaration/defaults).
// A Java thread that may not ignore the cap on the completed-buffer list
// first tries to process the buffer itself; GC/VM threads always enqueue.
// Once the list length reaches _process_completed_threshold,
// _process_completed is raised and waiters on _cbl_mon may be notified.
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index, bool ignore_max_completed) {
  // I use explicit locking here because there's a bailout in the middle.
  _cbl_mon->lock_without_safepoint_check();

  // Only Java threads are ever asked to respect the cap; every other
  // caller must either pass ignore_max_completed or be at a safepoint.
  Thread* thread = Thread::current();
  assert( ignore_max_completed ||
          thread->is_Java_thread() ||
          SafepointSynchronize::is_at_safepoint(),
          "invariant" );
  ignore_max_completed = ignore_max_completed || !thread->is_Java_thread();

  if (!ignore_max_completed && _max_completed_queue > 0 &&
      _n_completed_buffers >= (size_t) _max_completed_queue) {
    // The list is at capacity: drop the monitor and try to have this
    // (mutator) thread process the buffer directly instead of queueing.
    _cbl_mon->unlock();
    bool b = mut_process_buffer(buf);
    if (b) {
      // Processed in place; recycle the buffer and we're done.
      deallocate_buffer(buf);
      return;
    }

    // Otherwise, go ahead and enqueue the buffer.  Must reacquire the lock.
    _cbl_mon->lock_without_safepoint_check();
  }

  // Here we still hold the _cbl_mon.
  // Append a node for this buffer at the tail of the list.
  CompletedBufferNode* cbn = new CompletedBufferNode;
  cbn->buf = buf;
  cbn->next = NULL;
  cbn->index = index;
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->next = cbn;
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  // Notify only on the threshold crossing, guarded by _process_completed.
  if (!_process_completed &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify_all();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
  _cbl_mon->unlock();
}
ysr@777 | 183 | |
ysr@777 | 184 | int PtrQueueSet::completed_buffers_list_length() { |
ysr@777 | 185 | int n = 0; |
ysr@777 | 186 | CompletedBufferNode* cbn = _completed_buffers_head; |
ysr@777 | 187 | while (cbn != NULL) { |
ysr@777 | 188 | n++; |
ysr@777 | 189 | cbn = cbn->next; |
ysr@777 | 190 | } |
ysr@777 | 191 | return n; |
ysr@777 | 192 | } |
ysr@777 | 193 | |
// Check that the cached completed-buffer count matches the actual list,
// taking _cbl_mon first.  For callers that do not already hold the monitor.
void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}
ysr@777 | 198 | |
// Check that _n_completed_buffers matches the actual length of the
// completed-buffer list.  The caller must hold _cbl_mon.
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee((size_t)completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}
ysr@777 | 203 | |
// Set the capacity of future buffers.  "sz" is given in entries (words);
// _sz stores it in bytes, matching the byte-based _index arithmetic used
// by the queues.  Must be called exactly once, with a positive size,
// before any buffer is allocated (allocate_buffer asserts _sz > 0).
void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
ysr@777 | 208 | |
// Set the completed-buffer count at which _process_completed is raised and
// waiters on _cbl_mon may be notified (see enqueue_complete_buffer).
void PtrQueueSet::set_process_completed_threshold(size_t sz) {
  _process_completed_threshold = sz;
}
iveresov@1051 | 212 | |
// Merge lists of buffers. Notify waiting threads if the length of the list
// exceeds threshold. The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    // Destination list is empty: adopt src's head and tail wholesale
    // (both NULL if src is empty too, which stays well-formed).
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well formedness");
    if (src->_completed_buffers_head != NULL) {
      // Splice src's non-empty list onto our tail.
      _completed_buffers_tail->next = src->_completed_buffers_head;
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  // Leave the source set empty.
  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  // Head and tail must be both NULL (empty) or both non-NULL.
  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
         "Sanity");

  // Same trigger as enqueue_complete_buffer: raise the flag and
  // (optionally) notify once the merged length reaches the threshold.
  if (!_process_completed &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify_all();
  }
}
iveresov@1051 | 247 | |
iveresov@1051 | 248 | // Merge free lists of the two queues. The free list of the source |
iveresov@1051 | 249 | // queue is emptied as a result. The queues must share the same |
iveresov@1051 | 250 | // mutex that guards free lists. |
iveresov@1051 | 251 | void PtrQueueSet::merge_freelists(PtrQueueSet* src) { |
iveresov@1051 | 252 | assert(_fl_lock == src->_fl_lock, "Should share the same lock"); |
iveresov@1051 | 253 | MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag); |
iveresov@1051 | 254 | if (_buf_free_list != NULL) { |
iveresov@1051 | 255 | void **p = _buf_free_list; |
iveresov@1051 | 256 | while (*p != NULL) { |
iveresov@1051 | 257 | p = (void**)*p; |
iveresov@1051 | 258 | } |
iveresov@1051 | 259 | *p = src->_buf_free_list; |
iveresov@1051 | 260 | } else { |
iveresov@1051 | 261 | _buf_free_list = src->_buf_free_list; |
iveresov@1051 | 262 | } |
iveresov@1051 | 263 | _buf_free_list_sz += src->_buf_free_list_sz; |
iveresov@1051 | 264 | src->_buf_free_list = NULL; |
iveresov@1051 | 265 | src->_buf_free_list_sz = 0; |
iveresov@1051 | 266 | } |