src/share/vm/gc_implementation/g1/ptrQueue.cpp

author:      johnc
date:        Sat, 06 Oct 2012 01:17:44 -0700
changeset:   4173 8a5ea0a9ccc4
parent:      3900 d2a62e0f25eb
child:       4153 b9a9ed0f8eeb
permissions: -rw-r--r--

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters, etc. to unsigned, and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>
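
As a rough illustration of the type change the summary describes (the
identifiers below are hypothetical, not taken from the actual patch):

    // Before: task numbers carried as signed ints.
    //   void do_marking_step(int task_num);
    // After: unsigned, renamed to match the other collectors.
    //   void do_marking_step(uint worker_id);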

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
  _qset(qset), _buf(NULL), _index(0), _active(active),
  _perm(perm), _lock(NULL)
{}

void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}


static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}

static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}
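
// Worked example of the helpers above (a sketch, assuming a VM where
// oopSize == 8): a queue with _sz == 256 bytes holds 256 / 8 == 32
// entries, so byte_index_to_index(240) == 30 and
// index_to_byte_index(30) == 240. _index is kept in byte units and
// counts down from _sz as entries are added.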

void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}
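
// Sketch of the locking choreography above (illustrative only): HotSpot
// mutexes are ranked, and taking DirtyCardQ_CBL_mon while still holding a
// same-rank lock such as Shared_DirtyCardQ_lock would trip the rank check,
// hence the sequence:
//
//   _lock->unlock();                        // drop the same-rank lock
//   qset()->enqueue_complete_buffer(buf);   // acquires _cbl_mon internally
//   _lock->lock_without_safepoint_check();  // restore for the caller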


PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  if (_fl_owner->_buf_free_list != NULL) {
    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
    _fl_owner->_buf_free_list_sz--;
    return res;
  } else {
    // Allocate space for the BufferNode in front of the buffer.
    char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC);
    return BufferNode::make_buffer_from_block(b);
  }
}
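
// Illustrative layout of a freshly allocated block (a sketch; the exact
// padding is whatever BufferNode::aligned_size() yields on the platform):
//
//   b --> +--------------------------+------------------------------+
//         | BufferNode (aligned)     | _sz bytes of void* entries   |
//         +--------------------------+------------------------------+
//         make_buffer_from_block(b) returns --^
//
// make_node_from_buffer() and make_block_from_node() invert this mapping,
// which is how deallocate_buffer() and reduce_free_list() below recover
// the node and the raw block from a bare buffer pointer.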

void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode *node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  while (n > 0) {
    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
    void* b = BufferNode::make_block_from_node(_buf_free_list);
    _buf_free_list = _buf_free_list->next();
    FREE_C_HEAP_ARRAY(char, b, mtGC);
    _buf_free_list_sz--;
    n--;
  }
}

void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and
      // may be being manipulated by more than one worker thread
      // during a pause. Since the enqueuing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index) and multiple threads can call into this
      // routine for the same buffer. This will cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueuing the completed buffer)
      // the thread that acquires _lock will skip this code,
      // preventing the subsequent multiple enqueue, and
      // install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

      // While the current thread was enqueuing the buffer another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return so that the current
      // thread doesn't overwrite the buffer allocated by the other thread,
      // potentially losing some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
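
// Race sketch for the "claim" protocol above (threads A and B sharing the
// queue through _lock; illustrative only):
//
//   A: buf = _buf; _buf = NULL;               // A claims the full buffer
//   A: locking_enqueue_completed_buffer(buf); // _lock released internally
//   B: acquires _lock, sees _buf == NULL, allocates and installs a buffer
//   A: reacquires _lock, sees _buf != NULL, returns without reallocating
//
// Without the early return, A would overwrite _buf and the cards B had
// already recorded in the new buffer would be lost.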

bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}
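
// Policy sketch, as the condition above reads: a Java (mutator) thread
// tries mut_process_buffer() itself only when _max_completed_queue == 0
// or when the completed-buffer queue has reached _max_completed_queue +
// _completed_queue_padding; in every other case the buffer is enqueued
// for the refinement threads and the caller must allocate a fresh one
// (the false return).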

void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}

int PtrQueueSet::completed_buffers_list_length() {
  int n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
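
// Sizing example (a sketch, assuming oopSize == 8): set_buffer_size(256)
// stores _sz = 256 * 8 = 2048 bytes, so a freshly allocated buffer starts
// with _index == 2048 and has room for 256 entries.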

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}
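
// Note on the splice above: only the head/tail pointers and the counter
// move, so the merge is O(1) regardless of how many completed buffers the
// source set holds; sharing _cbl_mon makes the transfer atomic with
// respect to both sets' producers and consumers.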

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}
