src/share/vm/gc_implementation/g1/ptrQueue.hpp

Mon, 02 Aug 2010 12:51:43 -0700

author
johnc
date
Mon, 02 Aug 2010 12:51:43 -0700
changeset 2060
2d160770d2e5
parent 1907
c18cbe5936b8
child 2197
6e0aac35bfa9
permissions
-rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 // There are various techniques that require threads to be able to log
ysr@777 26 // addresses. For example, a generational write barrier might log
ysr@777 27 // the addresses of modified old-generation objects. This type supports
ysr@777 28 // this operation.
ysr@777 29
iveresov@1546 30 // The definition of placement operator new(size_t, void*) in the <new>.
iveresov@1546 31 #include <new>
iveresov@1546 32
ysr@777 33 class PtrQueueSet;
// A PtrQueue is a thread-local log of pointer addresses (e.g. for a
// generational write barrier logging modified old-generation objects).
// Entries are written from "_sz" downwards toward zero; when the buffer
// fills up it is handed to the owning PtrQueueSet.
class PtrQueue VALUE_OBJ_CLASS_SPEC {

protected:
  // The ptr queue set to which this queue belongs.
  PtrQueueSet* _qset;

  // Whether updates should be logged; enqueue() is a no-op when false.
  bool _active;

  // The current buffer; NULL until one is allocated from the qset.
  void** _buf;

  // The index at which an object was last enqueued. Starts at "_sz"
  // (indicating an empty buffer) and goes towards zero.
  // NOTE(review): the byte_index_to_index() conversions below suggest
  // _index and _sz are measured in bytes rather than elements — confirm
  // against ptrQueue.cpp.
  size_t _index;

  // The size of the buffer.
  size_t _sz;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  bool _perm;

  // If there is a lock associated with this buffer, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }

public:
  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);

  // Release any contained resources.
  void flush();
  // Calls flush() when destroyed.
  ~PtrQueue() { flush(); }

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  // Discard any logged entries by resetting the fill index to "empty".
  void reset() { if (_buf != NULL) _index = _sz; }

  // Enqueues the given "obj".  A no-op if the queue is inactive.
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }

  // Slow path taken when the buffer index reaches zero (buffer full);
  // implementation in ptrQueue.cpp.
  void handle_zero_index();
  void locking_enqueue_completed_buffer(void** buf);

  // Enqueue without re-checking _active; caller guarantees the queue
  // is active.
  void enqueue_known_active(void* ptr);

  // Amount of the buffer currently in use (_sz - _index);
  // zero if no buffer has been allocated yet.
  size_t size() {
    assert(_sz >= _index, "Invariant.");
    return _buf == NULL ? 0 : _sz - _index;
  }

  // Set the "active" property of the queue to "b". An enqueue to an
  // inactive thread is a no-op. Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      _index = _sz;   // deactivating discards any logged entries
    } else if (b && _buf != NULL) {
      assert(_index == _sz, "invariant: queues are empty when activated.");
    }
  }

  bool is_active() { return _active; }

  // Conversions between a byte offset into a buffer and an element index.
  static int byte_index_to_index(int ind) {
    assert((ind % oopSize) == 0, "Invariant.");
    return ind / oopSize;
  }

  static int index_to_byte_index(int byte_ind) {
    return byte_ind * oopSize;
  }

  // To support the compiler: byte offsets and widths of the fields that
  // compiled barrier code accesses directly.
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(PtrQueue, _index);
  }
  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(PtrQueue, _buf);
  }
  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }

  static ByteSize byte_offset_of_active() {
    return byte_offset_of(PtrQueue, _active);
  }
  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};
ysr@777 132
iveresov@1546 133 class BufferNode {
iveresov@1546 134 size_t _index;
iveresov@1546 135 BufferNode* _next;
iveresov@1546 136 public:
iveresov@1546 137 BufferNode() : _index(0), _next(NULL) { }
iveresov@1546 138 BufferNode* next() const { return _next; }
iveresov@1546 139 void set_next(BufferNode* n) { _next = n; }
iveresov@1546 140 size_t index() const { return _index; }
iveresov@1546 141 void set_index(size_t i) { _index = i; }
iveresov@1546 142
iveresov@1546 143 // Align the size of the structure to the size of the pointer
iveresov@1546 144 static size_t aligned_size() {
iveresov@1546 145 static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
iveresov@1546 146 return alignment;
iveresov@1546 147 }
iveresov@1546 148
iveresov@1546 149 // BufferNode is allocated before the buffer.
iveresov@1546 150 // The chunk of memory that holds both of them is a block.
iveresov@1546 151
iveresov@1546 152 // Produce a new BufferNode given a buffer.
iveresov@1546 153 static BufferNode* new_from_buffer(void** buf) {
iveresov@1546 154 return new (make_block_from_buffer(buf)) BufferNode;
iveresov@1546 155 }
iveresov@1546 156
iveresov@1546 157 // The following are the required conversion routines:
iveresov@1546 158 static BufferNode* make_node_from_buffer(void** buf) {
iveresov@1546 159 return (BufferNode*)make_block_from_buffer(buf);
iveresov@1546 160 }
iveresov@1546 161 static void** make_buffer_from_node(BufferNode *node) {
iveresov@1546 162 return make_buffer_from_block(node);
iveresov@1546 163 }
iveresov@1546 164 static void* make_block_from_node(BufferNode *node) {
iveresov@1546 165 return (void*)node;
iveresov@1546 166 }
iveresov@1546 167 static void** make_buffer_from_block(void* p) {
iveresov@1546 168 return (void**)((char*)p + aligned_size());
iveresov@1546 169 }
iveresov@1546 170 static void* make_block_from_buffer(void** p) {
iveresov@1546 171 return (void*)((char*)p - aligned_size());
iveresov@1546 172 }
iveresov@1546 173 };
iveresov@1546 174
// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  // Linked list of buffers that have been filled and handed back.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  int _n_completed_buffers;
  // Completed-buffer count at which log processing should be triggered.
  int _process_completed_threshold;
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by the TLOQ_FL_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue sets can share a freelist. The _fl_owner variable
  // specifies the owner. It is set to "this" by default.
  PtrQueueSet* _fl_owner;

  // The size of all buffers in the set.
  size_t _sz;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself. Zero indicates no maximum.
  int _max_completed_queue;
  // Extra allowance applied on top of _max_completed_queue.
  // NOTE(review): exact use is in ptrQueue.cpp (see notify_if_necessary) —
  // confirm there.
  int _completed_queue_padding;

  // Debugging support: walk the completed-buffer list and check that
  // _n_completed_buffers agrees with its actual length.
  int completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(void** buf) {
    ShouldNotReachHere();
    return false;
  }

public:
  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL) {
    _max_completed_queue = max_completed_queue;
    _process_completed_threshold = process_completed_threshold;
    _completed_queue_padding = 0;
    assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
    _cbl_mon = cbl_mon;
    _fl_lock = fl_lock;
    _fl_owner = (fl_owner != NULL) ? fl_owner : this;
  }

  // Return an empty oop array of size _sz (required to be non-zero).
  void** allocate_buffer();

  // Return an empty buffer to the free list. The "buf" argument is
  // required to be a pointer to the head of an array of length "_sz".
  void deallocate_buffer(void** buf);

  // Declares that "buf" is a complete buffer.
  void enqueue_complete_buffer(void** buf, size_t index = 0);

  // To be invoked by the mutator: either processes the buffer directly or
  // enqueues it as a completed buffer (see ptrQueue.cpp).
  bool process_or_enqueue_complete_buffer(void** buf);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size. Should be called before any "enqueue" operation
  // can be called. And should only be called once.
  void set_buffer_size(size_t sz);

  // Get the buffer size.
  size_t buffer_size() { return _sz; }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  // Must only be called at a safe point. Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  int completed_buffers_num() { return _n_completed_buffers; }

  // Merge the buffer lists of "src" into this set
  // (presumably the completed-buffer lists — confirm in ptrQueue.cpp).
  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
  int completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};

mercurial