Wed, 16 Dec 2009 15:12:51 -0800
6862387: tune concurrent refinement further
Summary: Reworked concurrent refinement: thread activation, feedback-based threshold adjustment, and other miscellaneous fixes.
Reviewed-by: apetrusenko, tonyp
ysr@777 | 1 | /* |
xdono@1279 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | # include "incls/_precompiled.incl" |
ysr@777 | 26 | # include "incls/_ptrQueue.cpp.incl" |
ysr@777 | 27 | |
// Construct a queue attached to the given qset. The backing buffer is
// allocated lazily on first enqueue (see handle_zero_index()); a "perm"
// queue is exempt from flush() (see PtrQueue::flush).
PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
  _qset(qset_), _buf(NULL), _index(0), _active(false),
  _perm(perm), _lock(NULL)
{}
ysr@777 | 32 | |
// Hand the current buffer back to the qset: deallocate it if it holds no
// entries, otherwise NULL the unused slots and enqueue it for processing.
// No-op for permanent queues or when no buffer has been allocated yet.
void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // _index counts down from _sz (see enqueue_known_active), so
      // _index == _sz means the buffer is empty.
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      // Live entries occupy byte offsets [_index, _sz); offsets
      // [0, _index) are unused and must not look like valid pointers
      // to whoever consumes the completed buffer.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    // The queue no longer owns a buffer.
    _buf = NULL;
    _index = 0;
  }
}
ysr@777 | 49 | |
ysr@777 | 50 | |
// Convert a byte offset within a buffer into a void** element index.
// The offset must be oop-aligned.
static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}
ysr@777 | 55 | |
// Inverse of byte_index_to_index: element index -> byte offset.
// NOTE(review): appears unused in this file; being file-static it cannot
// be referenced elsewhere — candidate for removal.
static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}
ysr@777 | 59 | |
// Add ptr to the queue, assuming the queue is already active.
// _index is a byte offset counting DOWN from _sz toward 0; index 0 means
// the buffer is full (or absent) and must be handed off before storing.
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  // handle_zero_index() installs a fresh or recycled buffer and resets
  // _index to _sz; loop until we actually have room.
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
ysr@777 | 73 | |
// Enqueue buf on the qset's completed-buffer list while temporarily
// releasing _lock (enqueue_complete_buffer acquires its own monitor;
// presumably holding both would violate lock ordering — TODO confirm
// against the Mutex rank rules).
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");
  _lock->unlock();
  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}
ysr@777 | 82 | |
ysr@777 | 83 | |
// Construct an empty queue set. Monitors, buffer size, and thresholds
// all start NULL/zero here; presumably they are configured afterwards by
// an initialize()/setter step declared in the header — TODO confirm.
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  // By default this set owns its own buffer free list; all free-list
  // operations below go through _fl_owner.
  _fl_owner = this;
}
ysr@777 | 97 | |
ysr@777 | 98 | void** PtrQueueSet::allocate_buffer() { |
ysr@777 | 99 | assert(_sz > 0, "Didn't set a buffer size."); |
iveresov@1051 | 100 | MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag); |
iveresov@1051 | 101 | if (_fl_owner->_buf_free_list != NULL) { |
iveresov@1546 | 102 | void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list); |
iveresov@1546 | 103 | _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next(); |
iveresov@1051 | 104 | _fl_owner->_buf_free_list_sz--; |
ysr@777 | 105 | return res; |
ysr@777 | 106 | } else { |
iveresov@1546 | 107 | // Allocate space for the BufferNode in front of the buffer. |
iveresov@1546 | 108 | char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size()); |
iveresov@1546 | 109 | return BufferNode::make_buffer_from_block(b); |
ysr@777 | 110 | } |
ysr@777 | 111 | } |
ysr@777 | 112 | |
ysr@777 | 113 | void PtrQueueSet::deallocate_buffer(void** buf) { |
ysr@777 | 114 | assert(_sz > 0, "Didn't set a buffer size."); |
iveresov@1051 | 115 | MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag); |
iveresov@1546 | 116 | BufferNode *node = BufferNode::make_node_from_buffer(buf); |
iveresov@1546 | 117 | node->set_next(_fl_owner->_buf_free_list); |
iveresov@1546 | 118 | _fl_owner->_buf_free_list = node; |
iveresov@1051 | 119 | _fl_owner->_buf_free_list_sz++; |
ysr@777 | 120 | } |
ysr@777 | 121 | |
ysr@777 | 122 | void PtrQueueSet::reduce_free_list() { |
iveresov@1546 | 123 | assert(_fl_owner == this, "Free list reduction is allowed only for the owner"); |
ysr@777 | 124 | // For now we'll adopt the strategy of deleting half. |
ysr@777 | 125 | MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 126 | size_t n = _buf_free_list_sz / 2; |
ysr@777 | 127 | while (n > 0) { |
ysr@777 | 128 | assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong."); |
iveresov@1546 | 129 | void* b = BufferNode::make_block_from_node(_buf_free_list); |
iveresov@1546 | 130 | _buf_free_list = _buf_free_list->next(); |
iveresov@1546 | 131 | FREE_C_HEAP_ARRAY(char, b); |
johnc@1519 | 132 | _buf_free_list_sz --; |
ysr@777 | 133 | n--; |
ysr@777 | 134 | } |
ysr@777 | 135 | } |
ysr@777 | 136 | |
// Called when _index has reached 0, i.e. the current buffer (if any) is
// full. The full buffer is handed off — via the lock-protected path when
// _lock is set, otherwise to the qset, which may let this thread process
// the buffer in place and recycle it — and a usable buffer with
// _index == _sz is left installed in either case.
void PtrQueue::handle_zero_index() {
  assert(0 == _index, "Precondition.");
  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (_lock) {
      locking_enqueue_completed_buffer(_buf);
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
ysr@777 | 159 | |
iveresov@1546 | 160 | bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) { |
iveresov@1546 | 161 | if (Thread::current()->is_Java_thread()) { |
iveresov@1546 | 162 | // We don't lock. It is fine to be epsilon-precise here. |
iveresov@1546 | 163 | if (_max_completed_queue == 0 || _max_completed_queue > 0 && |
iveresov@1546 | 164 | _n_completed_buffers >= _max_completed_queue + _completed_queue_padding) { |
iveresov@1546 | 165 | bool b = mut_process_buffer(buf); |
iveresov@1546 | 166 | if (b) { |
iveresov@1546 | 167 | // True here means that the buffer hasn't been deallocated and the caller may reuse it. |
iveresov@1546 | 168 | return true; |
iveresov@1546 | 169 | } |
iveresov@1546 | 170 | } |
iveresov@1546 | 171 | } |
iveresov@1546 | 172 | // The buffer will be enqueued. The caller will have to get a new one. |
iveresov@1546 | 173 | enqueue_complete_buffer(buf); |
iveresov@1546 | 174 | return false; |
iveresov@1546 | 175 | } |
ysr@777 | 176 | |
// Append buf (with its fill index) to the completed-buffer list. If the
// number of completed buffers reaches the processing threshold, set the
// _process_completed flag and (optionally) wake a waiting processing
// thread.
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  // A negative threshold disables this notification path entirely.
  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}
ysr@777 | 199 | |
ysr@777 | 200 | int PtrQueueSet::completed_buffers_list_length() { |
ysr@777 | 201 | int n = 0; |
iveresov@1546 | 202 | BufferNode* cbn = _completed_buffers_head; |
ysr@777 | 203 | while (cbn != NULL) { |
ysr@777 | 204 | n++; |
iveresov@1546 | 205 | cbn = cbn->next(); |
ysr@777 | 206 | } |
ysr@777 | 207 | return n; |
ysr@777 | 208 | } |
ysr@777 | 209 | |
// Locking wrapper: acquire _cbl_mon, then verify the cached list length.
void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}
ysr@777 | 214 | |
// Verify that the cached _n_completed_buffers matches the actual list
// length. Caller must hold _cbl_mon.
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}
ysr@777 | 219 | |
// Set the buffer capacity. The argument is in oop-sized entries; _sz is
// stored as a byte count. May be called only once, with a positive size.
void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
ysr@777 | 224 | |
iveresov@1546 | 225 | // Merge lists of buffers. Notify the processing threads. |
iveresov@1546 | 226 | // The source queue is emptied as a result. The queues |
iveresov@1051 | 227 | // must share the monitor. |
iveresov@1051 | 228 | void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) { |
iveresov@1051 | 229 | assert(_cbl_mon == src->_cbl_mon, "Should share the same lock"); |
iveresov@1051 | 230 | MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); |
iveresov@1051 | 231 | if (_completed_buffers_tail == NULL) { |
iveresov@1051 | 232 | assert(_completed_buffers_head == NULL, "Well-formedness"); |
iveresov@1051 | 233 | _completed_buffers_head = src->_completed_buffers_head; |
iveresov@1051 | 234 | _completed_buffers_tail = src->_completed_buffers_tail; |
iveresov@1051 | 235 | } else { |
iveresov@1051 | 236 | assert(_completed_buffers_head != NULL, "Well formedness"); |
iveresov@1051 | 237 | if (src->_completed_buffers_head != NULL) { |
iveresov@1546 | 238 | _completed_buffers_tail->set_next(src->_completed_buffers_head); |
iveresov@1051 | 239 | _completed_buffers_tail = src->_completed_buffers_tail; |
iveresov@1051 | 240 | } |
iveresov@1051 | 241 | } |
iveresov@1051 | 242 | _n_completed_buffers += src->_n_completed_buffers; |
iveresov@1051 | 243 | |
iveresov@1051 | 244 | src->_n_completed_buffers = 0; |
iveresov@1051 | 245 | src->_completed_buffers_head = NULL; |
iveresov@1051 | 246 | src->_completed_buffers_tail = NULL; |
iveresov@1051 | 247 | |
iveresov@1051 | 248 | assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL || |
iveresov@1051 | 249 | _completed_buffers_head != NULL && _completed_buffers_tail != NULL, |
iveresov@1051 | 250 | "Sanity"); |
iveresov@1546 | 251 | } |
iveresov@1051 | 252 | |
// Set _process_completed and wake a waiting processing thread if enough
// completed buffers have accumulated, or if the completed queue is
// unbounded (_max_completed_queue == 0 — presumably because mutators
// never help process in that configuration; see
// process_or_enqueue_complete_buffer — TODO confirm).
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}