/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
  _qset(qset), _buf(NULL), _index(0), _active(active),
  _perm(perm), _lock(NULL)
{}

void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
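      // Entries are filled from _sz downward (see enqueue_known_active),
      // so the live entries occupy [_index, _sz) and the slots below
      // _index have never been written; clear them so the consumer can
      // recognize and skip them.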
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}


static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}

static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}

void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}


PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  if (_fl_owner->_buf_free_list != NULL) {
    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
    _fl_owner->_buf_free_list_sz--;
    return res;
  } else {
    // Allocate space for the BufferNode in front of the buffer.
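    // The node header and the _sz-byte payload share one heap block:
    // make_buffer_from_block() returns a pointer just past the aligned
    // header, and deallocate_buffer() recovers the node from that
    // pointer via make_node_from_buffer().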
    char* b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC);
    return BufferNode::make_buffer_from_block(b);
  }
}

void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode* node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  while (n > 0) {
    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
    void* b = BufferNode::make_block_from_node(_buf_free_list);
    _buf_free_list = _buf_free_list->next();
    FREE_C_HEAP_ARRAY(char, b, mtGC);
    _buf_free_list_sz--;
    n--;
  }
}

void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQueue may be the shared dirty card queue and
      // may be being manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index) and multiple threads can call into this
      // routine for the same buffer. This will cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer)
      // the thread that acquires _lock will skip this code,
      // preventing the subsequent multiple enqueue, and
      // install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

      // While the current thread was enqueueing the buffer another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.
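      // Concretely, the interleaving guarded against is (threads T1 and
      // T2 both filling the shared queue):
      //   T1: claims the full buffer, clears _buf, drops _lock while
      //       enqueueing
      //   T2: acquires _lock, sees _buf == NULL, allocates and installs
      //       a fresh buffer
      //   T1: reacquires _lock, sees T2's buffer, and must return below
      //       instead of overwriting it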
      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer.
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}

bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}

void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}

int PtrQueueSet::completed_buffers_list_length() {
  int n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
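// Worked example (illustrative numbers): on a 64-bit VM with
// oopSize == 8, set_buffer_size(256) stores _sz == 2048 bytes. A fresh
// buffer starts with _index == 2048; each enqueue_known_active() call
// first steps _index down by 8 and then stores at _buf[_index / 8], so
// slot 255 is filled first and _index == 0 means the buffer is full.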
// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet* src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}