src/share/vm/gc_implementation/g1/ptrQueue.cpp

Mon, 02 Aug 2010 12:51:43 -0700

author
johnc
date
Mon, 02 Aug 2010 12:51:43 -0700
changeset 2060
2d160770d2e5
parent 1907
c18cbe5936b8
child 2314
f95d63e2154a
permissions
-rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 # include "incls/_precompiled.incl"
ysr@777 26 # include "incls/_ptrQueue.cpp.incl"
ysr@777 27
// Construct a queue in the given activation state. A "perm"anent queue
// never gives up its buffer (see flush()); the buffer itself is
// allocated lazily on first enqueue (handle_zero_index()).
PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
  _qset(qset_), _buf(NULL), _index(0), _active(active),
  _perm(perm), _lock(NULL)
{}
ysr@777 32
// Give up the current buffer: recycle it if empty, otherwise hand it to
// the queue set as a completed buffer. Permanent queues keep theirs.
void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do. (_index == _sz means nothing was enqueued:
      // the buffer fills from byte offset _sz downward to 0.)
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      // Unused slots are the ones at byte offsets below _index.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    // Detach the buffer so this queue no longer references it.
    _buf = NULL;
    _index = 0;
  }
}
ysr@777 49
ysr@777 50
// Convert a byte offset within a buffer into an element (slot) index.
static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}
ysr@777 55
// Inverse of byte_index_to_index.
// NOTE(review): appears unused in the visible portion of this file —
// confirm before removing.
static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}
ysr@777 59
// Append ptr to the queue, assuming the queue is already known to be
// active. The buffer fills from high byte offsets down towards 0, so
// _index == 0 means "full (or no buffer yet)".
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  // A loop (not an if): handle_zero_index() may install a recycled or
  // fresh buffer, and we retry until there is room.
  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
ysr@777 73
// Enqueue buf on the qset's completed-buffer list while honoring the
// lock ranking: _lock must be dropped around the enqueue, which takes
// a monitor of the same rank.
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}
ysr@777 87
ysr@777 88
// All limits/monitors start zeroed; they are expected to be configured
// after construction (see set_buffer_size and the _cbl_mon/_fl_lock
// fields). Not visible in this file — confirm against initialize().
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  // By default a set owns its own buffer free list; sets that share a
  // free list reassign _fl_owner elsewhere (not visible in this file).
  _fl_owner = this;
}
ysr@777 102
ysr@777 103 void** PtrQueueSet::allocate_buffer() {
ysr@777 104 assert(_sz > 0, "Didn't set a buffer size.");
iveresov@1051 105 MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
iveresov@1051 106 if (_fl_owner->_buf_free_list != NULL) {
iveresov@1546 107 void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
iveresov@1546 108 _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
iveresov@1051 109 _fl_owner->_buf_free_list_sz--;
ysr@777 110 return res;
ysr@777 111 } else {
iveresov@1546 112 // Allocate space for the BufferNode in front of the buffer.
iveresov@1546 113 char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
iveresov@1546 114 return BufferNode::make_buffer_from_block(b);
ysr@777 115 }
ysr@777 116 }
ysr@777 117
ysr@777 118 void PtrQueueSet::deallocate_buffer(void** buf) {
ysr@777 119 assert(_sz > 0, "Didn't set a buffer size.");
iveresov@1051 120 MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
iveresov@1546 121 BufferNode *node = BufferNode::make_node_from_buffer(buf);
iveresov@1546 122 node->set_next(_fl_owner->_buf_free_list);
iveresov@1546 123 _fl_owner->_buf_free_list = node;
iveresov@1051 124 _fl_owner->_buf_free_list_sz++;
ysr@777 125 }
ysr@777 126
ysr@777 127 void PtrQueueSet::reduce_free_list() {
iveresov@1546 128 assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
ysr@777 129 // For now we'll adopt the strategy of deleting half.
ysr@777 130 MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
ysr@777 131 size_t n = _buf_free_list_sz / 2;
ysr@777 132 while (n > 0) {
ysr@777 133 assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
iveresov@1546 134 void* b = BufferNode::make_block_from_node(_buf_free_list);
iveresov@1546 135 _buf_free_list = _buf_free_list->next();
iveresov@1546 136 FREE_C_HEAP_ARRAY(char, b);
johnc@1519 137 _buf_free_list_sz --;
ysr@777 138 n--;
ysr@777 139 }
ysr@777 140 }
ysr@777 141
// Called when the queue's buffer is full (_index == 0) or absent.
// Disposes of the full buffer (enqueue or in-place processing) and
// installs a usable buffer, resetting _index to _sz.
void PtrQueue::handle_zero_index() {
  assert(0 == _index, "Precondition.");
  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and
      // may be being manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index) and multiple threads can call into this
      // routine for the same buffer. This will cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer)
      // the thread that acquires _lock will skip this code,
      // preventing the subsequent multiple enqueue, and
      // install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

      // While the current thread was enqueueing the buffer another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially losing some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
ysr@777 193
iveresov@1546 194 bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
iveresov@1546 195 if (Thread::current()->is_Java_thread()) {
iveresov@1546 196 // We don't lock. It is fine to be epsilon-precise here.
iveresov@1546 197 if (_max_completed_queue == 0 || _max_completed_queue > 0 &&
iveresov@1546 198 _n_completed_buffers >= _max_completed_queue + _completed_queue_padding) {
iveresov@1546 199 bool b = mut_process_buffer(buf);
iveresov@1546 200 if (b) {
iveresov@1546 201 // True here means that the buffer hasn't been deallocated and the caller may reuse it.
iveresov@1546 202 return true;
iveresov@1546 203 }
iveresov@1546 204 }
iveresov@1546 205 }
iveresov@1546 206 // The buffer will be enqueued. The caller will have to get a new one.
iveresov@1546 207 enqueue_complete_buffer(buf);
iveresov@1546 208 return false;
iveresov@1546 209 }
ysr@777 210
// Append a completed buffer to the completed-buffer list (CBL) under
// _cbl_mon, and notify a waiting processing thread once the backlog
// reaches _process_completed_threshold.
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  // Wrap the raw buffer in its BufferNode header; 'index' records how
  // much of the buffer is unused (entries live at byte offsets >= index).
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  // A negative threshold disables processing notifications.
  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}
ysr@777 233
ysr@777 234 int PtrQueueSet::completed_buffers_list_length() {
ysr@777 235 int n = 0;
iveresov@1546 236 BufferNode* cbn = _completed_buffers_head;
ysr@777 237 while (cbn != NULL) {
ysr@777 238 n++;
iveresov@1546 239 cbn = cbn->next();
ysr@777 240 }
ysr@777 241 return n;
ysr@777 242 }
ysr@777 243
// Check the CBL length invariant, taking _cbl_mon first.
void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}
ysr@777 248
// Caller must hold _cbl_mon: verifies the cached count matches the
// actual list length.
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}
ysr@777 253
// 'sz' is a count of entries; _sz is kept in bytes (hence the oopSize
// scaling used throughout this file). May be called only once.
void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
ysr@777 258
iveresov@1546 259 // Merge lists of buffers. Notify the processing threads.
iveresov@1546 260 // The source queue is emptied as a result. The queues
iveresov@1051 261 // must share the monitor.
iveresov@1051 262 void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
iveresov@1051 263 assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
iveresov@1051 264 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
iveresov@1051 265 if (_completed_buffers_tail == NULL) {
iveresov@1051 266 assert(_completed_buffers_head == NULL, "Well-formedness");
iveresov@1051 267 _completed_buffers_head = src->_completed_buffers_head;
iveresov@1051 268 _completed_buffers_tail = src->_completed_buffers_tail;
iveresov@1051 269 } else {
iveresov@1051 270 assert(_completed_buffers_head != NULL, "Well formedness");
iveresov@1051 271 if (src->_completed_buffers_head != NULL) {
iveresov@1546 272 _completed_buffers_tail->set_next(src->_completed_buffers_head);
iveresov@1051 273 _completed_buffers_tail = src->_completed_buffers_tail;
iveresov@1051 274 }
iveresov@1051 275 }
iveresov@1051 276 _n_completed_buffers += src->_n_completed_buffers;
iveresov@1051 277
iveresov@1051 278 src->_n_completed_buffers = 0;
iveresov@1051 279 src->_completed_buffers_head = NULL;
iveresov@1051 280 src->_completed_buffers_tail = NULL;
iveresov@1051 281
iveresov@1051 282 assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
iveresov@1051 283 _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
iveresov@1051 284 "Sanity");
iveresov@1546 285 }
iveresov@1051 286
// Wake a processing thread if the completed-buffer backlog has reached
// the processing threshold. NOTE(review): _max_completed_queue == 0
// also forces notification — presumably meaning "no queueing allowed,
// always process"; confirm against callers/initialization.
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}

mercurial