/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP

#include "memory/allocation.hpp"
#include "utilities/sizes.hpp"

// There are various techniques that require threads to be able to log
// addresses.  For example, a generational write barrier might log
// the addresses of modified old-generation objects.  This type supports
// this operation.

// The definition of placement operator new(size_t, void*) is in the <new>
// header.
iveresov@1546: #include iveresov@1546: ysr@777: class PtrQueueSet; apetrusenko@984: class PtrQueue VALUE_OBJ_CLASS_SPEC { ysr@777: ysr@777: protected: ysr@777: // The ptr queue set to which this queue belongs. ysr@777: PtrQueueSet* _qset; ysr@777: ysr@777: // Whether updates should be logged. ysr@777: bool _active; ysr@777: ysr@777: // The buffer. ysr@777: void** _buf; ysr@777: // The index at which an object was last enqueued. Starts at "_sz" ysr@777: // (indicating an empty buffer) and goes towards zero. ysr@777: size_t _index; ysr@777: ysr@777: // The size of the buffer. ysr@777: size_t _sz; ysr@777: ysr@777: // If true, the queue is permanent, and doesn't need to deallocate ysr@777: // its buffer in the destructor (since that obtains a lock which may not ysr@777: // be legally locked by then. ysr@777: bool _perm; ysr@777: ysr@777: // If there is a lock associated with this buffer, this is that lock. ysr@777: Mutex* _lock; ysr@777: ysr@777: PtrQueueSet* qset() { return _qset; } ysr@777: ysr@777: public: ysr@777: // Initialize this queue to contain a null buffer, and be part of the ysr@777: // given PtrQueueSet. tonyp@1752: PtrQueue(PtrQueueSet*, bool perm = false, bool active = false); ysr@777: // Release any contained resources. iveresov@876: void flush(); iveresov@876: // Calls flush() when destroyed. iveresov@876: ~PtrQueue() { flush(); } ysr@777: ysr@777: // Associate a lock with a ptr queue. ysr@777: void set_lock(Mutex* lock) { _lock = lock; } ysr@777: ysr@777: void reset() { if (_buf != NULL) _index = _sz; } ysr@777: ysr@777: // Enqueues the given "obj". ysr@777: void enqueue(void* ptr) { ysr@777: if (!_active) return; ysr@777: else enqueue_known_active(ptr); ysr@777: } ysr@777: iveresov@1546: void handle_zero_index(); ysr@777: void locking_enqueue_completed_buffer(void** buf); ysr@777: ysr@777: void enqueue_known_active(void* ptr); ysr@777: ysr@777: size_t size() { ysr@777: assert(_sz >= _index, "Invariant."); ysr@777: return _buf == NULL ? 
0 : _sz - _index; ysr@777: } ysr@777: tonyp@2197: bool is_empty() { tonyp@2197: return _buf == NULL || _sz == _index; tonyp@2197: } tonyp@2197: ysr@777: // Set the "active" property of the queue to "b". An enqueue to an ysr@777: // inactive thread is a no-op. Setting a queue to inactive resets its ysr@777: // log to the empty state. ysr@777: void set_active(bool b) { ysr@777: _active = b; ysr@777: if (!b && _buf != NULL) { ysr@777: _index = _sz; ysr@777: } else if (b && _buf != NULL) { ysr@777: assert(_index == _sz, "invariant: queues are empty when activated."); ysr@777: } ysr@777: } ysr@777: tonyp@1752: bool is_active() { return _active; } tonyp@1752: ysr@777: static int byte_index_to_index(int ind) { ysr@777: assert((ind % oopSize) == 0, "Invariant."); ysr@777: return ind / oopSize; ysr@777: } ysr@777: ysr@777: static int index_to_byte_index(int byte_ind) { ysr@777: return byte_ind * oopSize; ysr@777: } ysr@777: ysr@777: // To support compiler. ysr@777: static ByteSize byte_offset_of_index() { ysr@777: return byte_offset_of(PtrQueue, _index); ysr@777: } ysr@777: static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); } ysr@777: ysr@777: static ByteSize byte_offset_of_buf() { ysr@777: return byte_offset_of(PtrQueue, _buf); ysr@777: } ysr@777: static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); } ysr@777: ysr@777: static ByteSize byte_offset_of_active() { ysr@777: return byte_offset_of(PtrQueue, _active); ysr@777: } ysr@777: static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); } ysr@777: ysr@777: }; ysr@777: iveresov@1546: class BufferNode { iveresov@1546: size_t _index; iveresov@1546: BufferNode* _next; iveresov@1546: public: iveresov@1546: BufferNode() : _index(0), _next(NULL) { } iveresov@1546: BufferNode* next() const { return _next; } iveresov@1546: void set_next(BufferNode* n) { _next = n; } iveresov@1546: size_t index() const { return _index; } iveresov@1546: void set_index(size_t i) { _index = 
i; } iveresov@1546: iveresov@1546: // Align the size of the structure to the size of the pointer iveresov@1546: static size_t aligned_size() { iveresov@1546: static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*)); iveresov@1546: return alignment; iveresov@1546: } iveresov@1546: iveresov@1546: // BufferNode is allocated before the buffer. iveresov@1546: // The chunk of memory that holds both of them is a block. iveresov@1546: iveresov@1546: // Produce a new BufferNode given a buffer. iveresov@1546: static BufferNode* new_from_buffer(void** buf) { iveresov@1546: return new (make_block_from_buffer(buf)) BufferNode; iveresov@1546: } iveresov@1546: iveresov@1546: // The following are the required conversion routines: iveresov@1546: static BufferNode* make_node_from_buffer(void** buf) { iveresov@1546: return (BufferNode*)make_block_from_buffer(buf); iveresov@1546: } iveresov@1546: static void** make_buffer_from_node(BufferNode *node) { iveresov@1546: return make_buffer_from_block(node); iveresov@1546: } iveresov@1546: static void* make_block_from_node(BufferNode *node) { iveresov@1546: return (void*)node; iveresov@1546: } iveresov@1546: static void** make_buffer_from_block(void* p) { iveresov@1546: return (void**)((char*)p + aligned_size()); iveresov@1546: } iveresov@1546: static void* make_block_from_buffer(void** p) { iveresov@1546: return (void*)((char*)p - aligned_size()); iveresov@1546: } iveresov@1546: }; iveresov@1546: ysr@777: // A PtrQueueSet represents resources common to a set of pointer queues. ysr@777: // In particular, the individual queues allocate buffers from this shared ysr@777: // set, and return completed buffers to the set. ysr@777: // All these variables are are protected by the TLOQ_CBL_mon. XXX ??? apetrusenko@984: class PtrQueueSet VALUE_OBJ_CLASS_SPEC { ysr@777: protected: ysr@777: Monitor* _cbl_mon; // Protects the fields below. 
iveresov@1546: BufferNode* _completed_buffers_head; iveresov@1546: BufferNode* _completed_buffers_tail; iveresov@1546: int _n_completed_buffers; iveresov@1546: int _process_completed_threshold; ysr@777: volatile bool _process_completed; ysr@777: ysr@777: // This (and the interpretation of the first element as a "next" ysr@777: // pointer) are protected by the TLOQ_FL_lock. ysr@777: Mutex* _fl_lock; iveresov@1546: BufferNode* _buf_free_list; ysr@777: size_t _buf_free_list_sz; iveresov@1051: // Queue set can share a freelist. The _fl_owner variable iveresov@1051: // specifies the owner. It is set to "this" by default. iveresov@1051: PtrQueueSet* _fl_owner; ysr@777: ysr@777: // The size of all buffers in the set. ysr@777: size_t _sz; ysr@777: ysr@777: bool _all_active; ysr@777: ysr@777: // If true, notify_all on _cbl_mon when the threshold is reached. ysr@777: bool _notify_when_complete; ysr@777: ysr@777: // Maximum number of elements allowed on completed queue: after that, ysr@777: // enqueuer does the work itself. Zero indicates no maximum. ysr@777: int _max_completed_queue; iveresov@1546: int _completed_queue_padding; ysr@777: ysr@777: int completed_buffers_list_length(); ysr@777: void assert_completed_buffer_list_len_correct_locked(); ysr@777: void assert_completed_buffer_list_len_correct(); ysr@777: ysr@777: protected: ysr@777: // A mutator thread does the the work of processing a buffer. ysr@777: // Returns "true" iff the work is complete (and the buffer may be ysr@777: // deallocated). ysr@777: virtual bool mut_process_buffer(void** buf) { ysr@777: ShouldNotReachHere(); ysr@777: return false; ysr@777: } ysr@777: ysr@777: public: ysr@777: // Create an empty ptr queue set. ysr@777: PtrQueueSet(bool notify_when_complete = false); ysr@777: ysr@777: // Because of init-order concerns, we can't pass these as constructor ysr@777: // arguments. 
ysr@777: void initialize(Monitor* cbl_mon, Mutex* fl_lock, iveresov@1546: int process_completed_threshold, iveresov@1546: int max_completed_queue, iveresov@1051: PtrQueueSet *fl_owner = NULL) { ysr@777: _max_completed_queue = max_completed_queue; iveresov@1546: _process_completed_threshold = process_completed_threshold; iveresov@1546: _completed_queue_padding = 0; ysr@777: assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?"); iveresov@1051: _cbl_mon = cbl_mon; iveresov@1051: _fl_lock = fl_lock; iveresov@1051: _fl_owner = (fl_owner != NULL) ? fl_owner : this; ysr@777: } ysr@777: ysr@777: // Return an empty oop array of size _sz (required to be non-zero). ysr@777: void** allocate_buffer(); ysr@777: ysr@777: // Return an empty buffer to the free list. The "buf" argument is ysr@777: // required to be a pointer to the head of an array of length "_sz". ysr@777: void deallocate_buffer(void** buf); ysr@777: ysr@777: // Declares that "buf" is a complete buffer. iveresov@1546: void enqueue_complete_buffer(void** buf, size_t index = 0); iveresov@1546: iveresov@1546: // To be invoked by the mutator. iveresov@1546: bool process_or_enqueue_complete_buffer(void** buf); ysr@777: ysr@777: bool completed_buffers_exist_dirty() { ysr@777: return _n_completed_buffers > 0; ysr@777: } ysr@777: ysr@777: bool process_completed_buffers() { return _process_completed; } iveresov@1546: void set_process_completed(bool x) { _process_completed = x; } ysr@777: tonyp@1752: bool is_active() { return _all_active; } ysr@777: ysr@777: // Set the buffer size. Should be called before any "enqueue" operation ysr@777: // can be called. And should only be called once. ysr@777: void set_buffer_size(size_t sz); ysr@777: ysr@777: // Get the buffer size. ysr@777: size_t buffer_size() { return _sz; } ysr@777: iveresov@1546: // Get/Set the number of completed buffers that triggers log processing. 
iveresov@1546: void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; } iveresov@1546: int process_completed_threshold() const { return _process_completed_threshold; } ysr@777: ysr@777: // Must only be called at a safe point. Indicates that the buffer free ysr@777: // list size may be reduced, if that is deemed desirable. ysr@777: void reduce_free_list(); ysr@777: iveresov@1546: int completed_buffers_num() { return _n_completed_buffers; } iveresov@1051: iveresov@1051: void merge_bufferlists(PtrQueueSet* src); iveresov@1546: iveresov@1546: void set_max_completed_queue(int m) { _max_completed_queue = m; } iveresov@1546: int max_completed_queue() { return _max_completed_queue; } iveresov@1546: iveresov@1546: void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; } iveresov@1546: int completed_queue_padding() { return _completed_queue_padding; } iveresov@1546: iveresov@1546: // Notify the consumer if the number of buffers crossed the threshold iveresov@1546: void notify_if_necessary(); ysr@777: }; stefank@2314: stefank@2314: #endif // SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP