Mon, 02 Aug 2010 12:51:43 -0700
6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
1 /*
2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // There are various techniques that require threads to be able to log
26 // addresses. For example, a generational write barrier might log
27 // the addresses of modified old-generation objects. This type supports
28 // this operation.
// Placement operator new(size_t, void*) is defined in the <new> header.
31 #include <new>
33 class PtrQueueSet;
34 class PtrQueue VALUE_OBJ_CLASS_SPEC {
36 protected:
37 // The ptr queue set to which this queue belongs.
38 PtrQueueSet* _qset;
40 // Whether updates should be logged.
41 bool _active;
43 // The buffer.
44 void** _buf;
45 // The index at which an object was last enqueued. Starts at "_sz"
46 // (indicating an empty buffer) and goes towards zero.
47 size_t _index;
49 // The size of the buffer.
50 size_t _sz;
52 // If true, the queue is permanent, and doesn't need to deallocate
53 // its buffer in the destructor (since that obtains a lock which may not
54 // be legally locked by then.
55 bool _perm;
57 // If there is a lock associated with this buffer, this is that lock.
58 Mutex* _lock;
60 PtrQueueSet* qset() { return _qset; }
62 public:
63 // Initialize this queue to contain a null buffer, and be part of the
64 // given PtrQueueSet.
65 PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
66 // Release any contained resources.
67 void flush();
68 // Calls flush() when destroyed.
69 ~PtrQueue() { flush(); }
71 // Associate a lock with a ptr queue.
72 void set_lock(Mutex* lock) { _lock = lock; }
74 void reset() { if (_buf != NULL) _index = _sz; }
76 // Enqueues the given "obj".
77 void enqueue(void* ptr) {
78 if (!_active) return;
79 else enqueue_known_active(ptr);
80 }
82 void handle_zero_index();
83 void locking_enqueue_completed_buffer(void** buf);
85 void enqueue_known_active(void* ptr);
87 size_t size() {
88 assert(_sz >= _index, "Invariant.");
89 return _buf == NULL ? 0 : _sz - _index;
90 }
92 // Set the "active" property of the queue to "b". An enqueue to an
93 // inactive thread is a no-op. Setting a queue to inactive resets its
94 // log to the empty state.
95 void set_active(bool b) {
96 _active = b;
97 if (!b && _buf != NULL) {
98 _index = _sz;
99 } else if (b && _buf != NULL) {
100 assert(_index == _sz, "invariant: queues are empty when activated.");
101 }
102 }
104 bool is_active() { return _active; }
106 static int byte_index_to_index(int ind) {
107 assert((ind % oopSize) == 0, "Invariant.");
108 return ind / oopSize;
109 }
111 static int index_to_byte_index(int byte_ind) {
112 return byte_ind * oopSize;
113 }
115 // To support compiler.
116 static ByteSize byte_offset_of_index() {
117 return byte_offset_of(PtrQueue, _index);
118 }
119 static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
121 static ByteSize byte_offset_of_buf() {
122 return byte_offset_of(PtrQueue, _buf);
123 }
124 static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
126 static ByteSize byte_offset_of_active() {
127 return byte_offset_of(PtrQueue, _active);
128 }
129 static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
131 };
133 class BufferNode {
134 size_t _index;
135 BufferNode* _next;
136 public:
137 BufferNode() : _index(0), _next(NULL) { }
138 BufferNode* next() const { return _next; }
139 void set_next(BufferNode* n) { _next = n; }
140 size_t index() const { return _index; }
141 void set_index(size_t i) { _index = i; }
143 // Align the size of the structure to the size of the pointer
144 static size_t aligned_size() {
145 static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
146 return alignment;
147 }
149 // BufferNode is allocated before the buffer.
150 // The chunk of memory that holds both of them is a block.
152 // Produce a new BufferNode given a buffer.
153 static BufferNode* new_from_buffer(void** buf) {
154 return new (make_block_from_buffer(buf)) BufferNode;
155 }
157 // The following are the required conversion routines:
158 static BufferNode* make_node_from_buffer(void** buf) {
159 return (BufferNode*)make_block_from_buffer(buf);
160 }
161 static void** make_buffer_from_node(BufferNode *node) {
162 return make_buffer_from_block(node);
163 }
164 static void* make_block_from_node(BufferNode *node) {
165 return (void*)node;
166 }
167 static void** make_buffer_from_block(void* p) {
168 return (void**)((char*)p + aligned_size());
169 }
170 static void* make_block_from_buffer(void** p) {
171 return (void*)((char*)p - aligned_size());
172 }
173 };
175 // A PtrQueueSet represents resources common to a set of pointer queues.
176 // In particular, the individual queues allocate buffers from this shared
177 // set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
179 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
180 protected:
181 Monitor* _cbl_mon; // Protects the fields below.
182 BufferNode* _completed_buffers_head;
183 BufferNode* _completed_buffers_tail;
184 int _n_completed_buffers;
185 int _process_completed_threshold;
186 volatile bool _process_completed;
188 // This (and the interpretation of the first element as a "next"
189 // pointer) are protected by the TLOQ_FL_lock.
190 Mutex* _fl_lock;
191 BufferNode* _buf_free_list;
192 size_t _buf_free_list_sz;
193 // Queue set can share a freelist. The _fl_owner variable
194 // specifies the owner. It is set to "this" by default.
195 PtrQueueSet* _fl_owner;
197 // The size of all buffers in the set.
198 size_t _sz;
200 bool _all_active;
202 // If true, notify_all on _cbl_mon when the threshold is reached.
203 bool _notify_when_complete;
205 // Maximum number of elements allowed on completed queue: after that,
206 // enqueuer does the work itself. Zero indicates no maximum.
207 int _max_completed_queue;
208 int _completed_queue_padding;
210 int completed_buffers_list_length();
211 void assert_completed_buffer_list_len_correct_locked();
212 void assert_completed_buffer_list_len_correct();
214 protected:
215 // A mutator thread does the the work of processing a buffer.
216 // Returns "true" iff the work is complete (and the buffer may be
217 // deallocated).
218 virtual bool mut_process_buffer(void** buf) {
219 ShouldNotReachHere();
220 return false;
221 }
223 public:
224 // Create an empty ptr queue set.
225 PtrQueueSet(bool notify_when_complete = false);
227 // Because of init-order concerns, we can't pass these as constructor
228 // arguments.
229 void initialize(Monitor* cbl_mon, Mutex* fl_lock,
230 int process_completed_threshold,
231 int max_completed_queue,
232 PtrQueueSet *fl_owner = NULL) {
233 _max_completed_queue = max_completed_queue;
234 _process_completed_threshold = process_completed_threshold;
235 _completed_queue_padding = 0;
236 assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
237 _cbl_mon = cbl_mon;
238 _fl_lock = fl_lock;
239 _fl_owner = (fl_owner != NULL) ? fl_owner : this;
240 }
242 // Return an empty oop array of size _sz (required to be non-zero).
243 void** allocate_buffer();
245 // Return an empty buffer to the free list. The "buf" argument is
246 // required to be a pointer to the head of an array of length "_sz".
247 void deallocate_buffer(void** buf);
249 // Declares that "buf" is a complete buffer.
250 void enqueue_complete_buffer(void** buf, size_t index = 0);
252 // To be invoked by the mutator.
253 bool process_or_enqueue_complete_buffer(void** buf);
255 bool completed_buffers_exist_dirty() {
256 return _n_completed_buffers > 0;
257 }
259 bool process_completed_buffers() { return _process_completed; }
260 void set_process_completed(bool x) { _process_completed = x; }
262 bool is_active() { return _all_active; }
264 // Set the buffer size. Should be called before any "enqueue" operation
265 // can be called. And should only be called once.
266 void set_buffer_size(size_t sz);
268 // Get the buffer size.
269 size_t buffer_size() { return _sz; }
271 // Get/Set the number of completed buffers that triggers log processing.
272 void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
273 int process_completed_threshold() const { return _process_completed_threshold; }
275 // Must only be called at a safe point. Indicates that the buffer free
276 // list size may be reduced, if that is deemed desirable.
277 void reduce_free_list();
279 int completed_buffers_num() { return _n_completed_buffers; }
281 void merge_bufferlists(PtrQueueSet* src);
283 void set_max_completed_queue(int m) { _max_completed_queue = m; }
284 int max_completed_queue() { return _max_completed_queue; }
286 void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
287 int completed_queue_padding() { return _completed_queue_padding; }
289 // Notify the consumer if the number of buffers crossed the threshold
290 void notify_if_necessary();
291 };