Tue, 21 Aug 2012 14:10:39 -0700
7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
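The gist of the efficiency change, as a minimal sketch (not the actual patch; the names calc_gc_efficiency_sketch, region_capacity_bytes and predicted_region_elapsed_time_ms are assumed for illustration): a region's GC efficiency is its reclaimable bytes divided by the predicted time to collect it, and the prediction is now fed the region's actual live bytes together with cost parameters recorded during mixed GCs.

#include <cstddef>

// Hypothetical sketch of the intended calculation; not the code in this changeset.
double calc_gc_efficiency_sketch(size_t region_capacity_bytes,
                                 size_t live_bytes,
                                 double predicted_region_elapsed_time_ms) {
  // Bytes we expect to get back by evacuating the region's live data.
  size_t reclaimable_bytes = region_capacity_bytes - live_bytes;
  // Higher is better: more bytes reclaimed per millisecond of predicted pause time.
  return (double) reclaimable_bytes / predicted_region_elapsed_time_ms;
}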
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP

#include "memory/allocation.hpp"
#include "utilities/sizes.hpp"

// There are various techniques that require threads to be able to log
// addresses.  For example, a generational write barrier might log
// the addresses of modified old-generation objects.  This type supports
// this operation.
// The definition of placement operator new(size_t, void*) is in <new>.
#include <new>
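
// Illustrative sketch, not part of this header: a post-write barrier could
// use a per-thread PtrQueue to log the address of a modified field.  The
// accessor name below (ptr_queue_for(thread)) is a placeholder for whatever
// per-thread queue a concrete barrier actually uses:
//
//   void post_write_barrier_sketch(Thread* thread, void* field_addr) {
//     PtrQueue& q = ptr_queue_for(thread);
//     q.enqueue(field_addr);   // no-op while the queue is inactive
//   }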

class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {

protected:
  // The ptr queue set to which this queue belongs.
  PtrQueueSet* _qset;

  // Whether updates should be logged.
  bool _active;

  // The buffer.
  void** _buf;
  // The index at which an object was last enqueued.  Starts at "_sz"
  // (indicating an empty buffer) and goes towards zero.
  size_t _index;

  // The size of the buffer.
  size_t _sz;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  bool _perm;

  // If there is a lock associated with this buffer, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }

public:
  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
  // Release any contained resources.
  virtual void flush();
  // Calls flush() when destroyed.
  ~PtrQueue() { flush(); }

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  void reset() { if (_buf != NULL) _index = _sz; }

  // Enqueues the given "ptr".
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }

  // This method is called when we're doing the zero index handling
  // and gives a chance to the queues to do any pre-enqueueing
  // processing they might want to do on the buffer.  It should return
  // true if the buffer should be enqueued, or false if enough
  // entries were cleared from it so that it can be re-used.  It should
  // not return false if the buffer is still full (otherwise we can
  // get into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
  void handle_zero_index();
  void locking_enqueue_completed_buffer(void** buf);

  void enqueue_known_active(void* ptr);

  size_t size() {
    assert(_sz >= _index, "Invariant.");
    return _buf == NULL ? 0 : _sz - _index;
  }

  bool is_empty() {
    return _buf == NULL || _sz == _index;
  }

  // Set the "active" property of the queue to "b".  An enqueue to an
  // inactive queue is a no-op.  Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      _index = _sz;
    } else if (b && _buf != NULL) {
      assert(_index == _sz, "invariant: queues are empty when activated.");
    }
  }

  bool is_active() { return _active; }

  static int byte_index_to_index(int ind) {
    assert((ind % oopSize) == 0, "Invariant.");
    return ind / oopSize;
  }

  static int index_to_byte_index(int byte_ind) {
    return byte_ind * oopSize;
  }

  // To support compiler.
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(PtrQueue, _index);
  }
  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(PtrQueue, _buf);
  }
  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }

  static ByteSize byte_offset_of_active() {
    return byte_offset_of(PtrQueue, _active);
  }
  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};
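
// Illustrative sketch, not part of this header: the out-of-line
// enqueue_known_active() (defined in the corresponding .cpp file) is
// expected to fill the buffer from "_sz" down towards zero, roughly:
//
//   void PtrQueue::enqueue_known_active(void* ptr) {
//     while (_index == 0) {
//       handle_zero_index();          // get a fresh buffer, or process this one
//     }
//     _index -= oopSize;              // _index and _sz are byte counts
//     _buf[byte_index_to_index((int)_index)] = ptr;
//   }
//
// which is why size() above reports (_sz - _index) bytes of used space.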

class BufferNode {
  size_t _index;
  BufferNode* _next;
public:
  BufferNode() : _index(0), _next(NULL) { }
  BufferNode* next() const { return _next; }
  void set_next(BufferNode* n) { _next = n; }
  size_t index() const { return _index; }
  void set_index(size_t i) { _index = i; }

  // Align the size of the structure to the size of the pointer.
  static size_t aligned_size() {
    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
    return alignment;
  }

  // BufferNode is allocated before the buffer.
  // The chunk of memory that holds both of them is a block.

  // Produce a new BufferNode given a buffer.
  static BufferNode* new_from_buffer(void** buf) {
    return new (make_block_from_buffer(buf)) BufferNode;
  }

  // The following are the required conversion routines:
  static BufferNode* make_node_from_buffer(void** buf) {
    return (BufferNode*)make_block_from_buffer(buf);
  }
  static void** make_buffer_from_node(BufferNode *node) {
    return make_buffer_from_block(node);
  }
  static void* make_block_from_node(BufferNode *node) {
    return (void*)node;
  }
  static void** make_buffer_from_block(void* p) {
    return (void**)((char*)p + aligned_size());
  }
  static void* make_block_from_buffer(void** p) {
    return (void*)((char*)p - aligned_size());
  }
};
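
// Illustrative layout, not part of this header: a "block" is one chunk of
// memory holding the BufferNode header followed by the buffer itself, so
// the conversion routines above are pointer arithmetic over that layout:
//
//   block: [ BufferNode (aligned_size() bytes) | buffer (qset->buffer_size() bytes) ]
//          ^-- make_block_from_buffer(buf)       ^-- buf
//
//   void** buf = qset->allocate_buffer();
//   BufferNode* node = BufferNode::new_from_buffer(buf);  // placement new on the block
//   assert(BufferNode::make_buffer_from_node(node) == buf, "round trip");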

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  int _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by the TLOQ_FL_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue sets can share a freelist.  The _fl_owner variable
  // specifies the owner.  It is set to "this" by default.
  PtrQueueSet* _fl_owner;

  // The size of all buffers in the set.
  size_t _sz;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself.  Zero indicates no maximum.
  int _max_completed_queue;
  int _completed_queue_padding;

  int completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(void** buf) {
    ShouldNotReachHere();
    return false;
  }

public:
  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL) {
    _max_completed_queue = max_completed_queue;
    _process_completed_threshold = process_completed_threshold;
    _completed_queue_padding = 0;
    assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
    _cbl_mon = cbl_mon;
    _fl_lock = fl_lock;
    _fl_owner = (fl_owner != NULL) ? fl_owner : this;
  }
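
  // Illustrative usage, not part of this header: a concrete queue set is
  // typically a singleton that is wired up once during heap initialization
  // (monitor, lock, threshold and size names below are placeholders):
  //
  //   SomeQueueSet _qset;
  //   _qset.initialize(SomeQ_CBL_mon, SomeQ_FL_lock,
  //                    SomeProcessThreshold /* completed buffers before processing */,
  //                    0 /* max_completed_queue: zero means no maximum, see above */);
  //   _qset.set_buffer_size(SomeBufferSizeInBytes);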

  // Return an empty oop array of size _sz (required to be non-zero).
  void** allocate_buffer();

  // Return an empty buffer to the free list.  The "buf" argument is
  // required to be a pointer to the head of an array of length "_sz".
  void deallocate_buffer(void** buf);

  // Declares that "buf" is a complete buffer.
  void enqueue_complete_buffer(void** buf, size_t index = 0);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(void** buf);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size.  Must be called before any "enqueue" operation,
  // and should only be called once.
  void set_buffer_size(size_t sz);

  // Get the buffer size.
  size_t buffer_size() { return _sz; }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }

  // Must only be called at a safe point.  Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  int completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
  int completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};
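
// Illustrative sketch, not part of this header: process_or_enqueue_complete_buffer()
// (defined in the corresponding .cpp file) implements the back-pressure policy the
// comments above describe.  Roughly, in the spirit of:
//
//   bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
//     if (the completed-buffer list has grown past
//         _max_completed_queue + _completed_queue_padding) {
//       // Make the enqueuer pay: process the buffer in place.
//       return mut_process_buffer(buf);  // true => caller may reuse/deallocate "buf"
//     }
//     enqueue_complete_buffer(buf);
//     return false;                      // buffer was handed off, do not reuse it
//   }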

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP