src/share/vm/gc_implementation/g1/ptrQueue.hpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2469
7e37af9d69ef
child 3416
2ace1c4ee8da
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

ysr@777 1 /*
tonyp@2469 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP
stefank@2314 27
stefank@2314 28 #include "memory/allocation.hpp"
stefank@2314 29 #include "utilities/sizes.hpp"
stefank@2314 30
ysr@777 31 // There are various techniques that require threads to be able to log
ysr@777 32 // addresses. For example, a generational write barrier might log
ysr@777 33 // the addresses of modified old-generation objects. This type supports
ysr@777 34 // this operation.
ysr@777 35
iveresov@1546 36 // The definition of placement operator new(size_t, void*) in the <new>.
iveresov@1546 37 #include <new>
iveresov@1546 38
ysr@777 39 class PtrQueueSet;
apetrusenko@984 40 class PtrQueue VALUE_OBJ_CLASS_SPEC {
ysr@777 41
ysr@777 42 protected:
ysr@777 43 // The ptr queue set to which this queue belongs.
ysr@777 44 PtrQueueSet* _qset;
ysr@777 45
ysr@777 46 // Whether updates should be logged.
ysr@777 47 bool _active;
ysr@777 48
ysr@777 49 // The buffer.
ysr@777 50 void** _buf;
ysr@777 51 // The index at which an object was last enqueued. Starts at "_sz"
ysr@777 52 // (indicating an empty buffer) and goes towards zero.
ysr@777 53 size_t _index;
ysr@777 54
ysr@777 55 // The size of the buffer.
ysr@777 56 size_t _sz;
ysr@777 57
ysr@777 58 // If true, the queue is permanent, and doesn't need to deallocate
ysr@777 59 // its buffer in the destructor (since that obtains a lock which may not
ysr@777 60 // be legally locked by then.
ysr@777 61 bool _perm;
ysr@777 62
ysr@777 63 // If there is a lock associated with this buffer, this is that lock.
ysr@777 64 Mutex* _lock;
ysr@777 65
ysr@777 66 PtrQueueSet* qset() { return _qset; }
ysr@777 67
ysr@777 68 public:
ysr@777 69 // Initialize this queue to contain a null buffer, and be part of the
ysr@777 70 // given PtrQueueSet.
tonyp@2469 71 PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
ysr@777 72 // Release any contained resources.
iveresov@876 73 void flush();
iveresov@876 74 // Calls flush() when destroyed.
iveresov@876 75 ~PtrQueue() { flush(); }
ysr@777 76
ysr@777 77 // Associate a lock with a ptr queue.
ysr@777 78 void set_lock(Mutex* lock) { _lock = lock; }
ysr@777 79
ysr@777 80 void reset() { if (_buf != NULL) _index = _sz; }
ysr@777 81
ysr@777 82 // Enqueues the given "obj".
ysr@777 83 void enqueue(void* ptr) {
ysr@777 84 if (!_active) return;
ysr@777 85 else enqueue_known_active(ptr);
ysr@777 86 }
ysr@777 87
tonyp@2469 88 // This method is called when we're doing the zero index handling
tonyp@2469 89 // and gives a chance to the queues to do any pre-enqueueing
tonyp@2469 90 // processing they might want to do on the buffer. It should return
tonyp@2469 91 // true if the buffer should be enqueued, or false if enough
tonyp@2469 92 // entries were cleared from it so that it can be re-used. It should
tonyp@2469 93 // not return false if the buffer is still full (otherwise we can
tonyp@2469 94 // get into an infinite loop).
tonyp@2469 95 virtual bool should_enqueue_buffer() { return true; }
iveresov@1546 96 void handle_zero_index();
ysr@777 97 void locking_enqueue_completed_buffer(void** buf);
ysr@777 98
ysr@777 99 void enqueue_known_active(void* ptr);
ysr@777 100
ysr@777 101 size_t size() {
ysr@777 102 assert(_sz >= _index, "Invariant.");
ysr@777 103 return _buf == NULL ? 0 : _sz - _index;
ysr@777 104 }
ysr@777 105
tonyp@2197 106 bool is_empty() {
tonyp@2197 107 return _buf == NULL || _sz == _index;
tonyp@2197 108 }
tonyp@2197 109
ysr@777 110 // Set the "active" property of the queue to "b". An enqueue to an
ysr@777 111 // inactive thread is a no-op. Setting a queue to inactive resets its
ysr@777 112 // log to the empty state.
ysr@777 113 void set_active(bool b) {
ysr@777 114 _active = b;
ysr@777 115 if (!b && _buf != NULL) {
ysr@777 116 _index = _sz;
ysr@777 117 } else if (b && _buf != NULL) {
ysr@777 118 assert(_index == _sz, "invariant: queues are empty when activated.");
ysr@777 119 }
ysr@777 120 }
ysr@777 121
tonyp@1752 122 bool is_active() { return _active; }
tonyp@1752 123
ysr@777 124 static int byte_index_to_index(int ind) {
ysr@777 125 assert((ind % oopSize) == 0, "Invariant.");
ysr@777 126 return ind / oopSize;
ysr@777 127 }
ysr@777 128
ysr@777 129 static int index_to_byte_index(int byte_ind) {
ysr@777 130 return byte_ind * oopSize;
ysr@777 131 }
ysr@777 132
ysr@777 133 // To support compiler.
ysr@777 134 static ByteSize byte_offset_of_index() {
ysr@777 135 return byte_offset_of(PtrQueue, _index);
ysr@777 136 }
ysr@777 137 static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
ysr@777 138
ysr@777 139 static ByteSize byte_offset_of_buf() {
ysr@777 140 return byte_offset_of(PtrQueue, _buf);
ysr@777 141 }
ysr@777 142 static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
ysr@777 143
ysr@777 144 static ByteSize byte_offset_of_active() {
ysr@777 145 return byte_offset_of(PtrQueue, _active);
ysr@777 146 }
ysr@777 147 static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
ysr@777 148
ysr@777 149 };
ysr@777 150
iveresov@1546 151 class BufferNode {
iveresov@1546 152 size_t _index;
iveresov@1546 153 BufferNode* _next;
iveresov@1546 154 public:
iveresov@1546 155 BufferNode() : _index(0), _next(NULL) { }
iveresov@1546 156 BufferNode* next() const { return _next; }
iveresov@1546 157 void set_next(BufferNode* n) { _next = n; }
iveresov@1546 158 size_t index() const { return _index; }
iveresov@1546 159 void set_index(size_t i) { _index = i; }
iveresov@1546 160
iveresov@1546 161 // Align the size of the structure to the size of the pointer
iveresov@1546 162 static size_t aligned_size() {
iveresov@1546 163 static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
iveresov@1546 164 return alignment;
iveresov@1546 165 }
iveresov@1546 166
iveresov@1546 167 // BufferNode is allocated before the buffer.
iveresov@1546 168 // The chunk of memory that holds both of them is a block.
iveresov@1546 169
iveresov@1546 170 // Produce a new BufferNode given a buffer.
iveresov@1546 171 static BufferNode* new_from_buffer(void** buf) {
iveresov@1546 172 return new (make_block_from_buffer(buf)) BufferNode;
iveresov@1546 173 }
iveresov@1546 174
iveresov@1546 175 // The following are the required conversion routines:
iveresov@1546 176 static BufferNode* make_node_from_buffer(void** buf) {
iveresov@1546 177 return (BufferNode*)make_block_from_buffer(buf);
iveresov@1546 178 }
iveresov@1546 179 static void** make_buffer_from_node(BufferNode *node) {
iveresov@1546 180 return make_buffer_from_block(node);
iveresov@1546 181 }
iveresov@1546 182 static void* make_block_from_node(BufferNode *node) {
iveresov@1546 183 return (void*)node;
iveresov@1546 184 }
iveresov@1546 185 static void** make_buffer_from_block(void* p) {
iveresov@1546 186 return (void**)((char*)p + aligned_size());
iveresov@1546 187 }
iveresov@1546 188 static void* make_block_from_buffer(void** p) {
iveresov@1546 189 return (void*)((char*)p - aligned_size());
iveresov@1546 190 }
iveresov@1546 191 };
iveresov@1546 192
ysr@777 193 // A PtrQueueSet represents resources common to a set of pointer queues.
ysr@777 194 // In particular, the individual queues allocate buffers from this shared
ysr@777 195 // set, and return completed buffers to the set.
ysr@777 196 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
apetrusenko@984 197 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
ysr@777 198 protected:
ysr@777 199 Monitor* _cbl_mon; // Protects the fields below.
iveresov@1546 200 BufferNode* _completed_buffers_head;
iveresov@1546 201 BufferNode* _completed_buffers_tail;
iveresov@1546 202 int _n_completed_buffers;
iveresov@1546 203 int _process_completed_threshold;
ysr@777 204 volatile bool _process_completed;
ysr@777 205
ysr@777 206 // This (and the interpretation of the first element as a "next"
ysr@777 207 // pointer) are protected by the TLOQ_FL_lock.
ysr@777 208 Mutex* _fl_lock;
iveresov@1546 209 BufferNode* _buf_free_list;
ysr@777 210 size_t _buf_free_list_sz;
iveresov@1051 211 // Queue set can share a freelist. The _fl_owner variable
iveresov@1051 212 // specifies the owner. It is set to "this" by default.
iveresov@1051 213 PtrQueueSet* _fl_owner;
ysr@777 214
ysr@777 215 // The size of all buffers in the set.
ysr@777 216 size_t _sz;
ysr@777 217
ysr@777 218 bool _all_active;
ysr@777 219
ysr@777 220 // If true, notify_all on _cbl_mon when the threshold is reached.
ysr@777 221 bool _notify_when_complete;
ysr@777 222
ysr@777 223 // Maximum number of elements allowed on completed queue: after that,
ysr@777 224 // enqueuer does the work itself. Zero indicates no maximum.
ysr@777 225 int _max_completed_queue;
iveresov@1546 226 int _completed_queue_padding;
ysr@777 227
ysr@777 228 int completed_buffers_list_length();
ysr@777 229 void assert_completed_buffer_list_len_correct_locked();
ysr@777 230 void assert_completed_buffer_list_len_correct();
ysr@777 231
ysr@777 232 protected:
ysr@777 233 // A mutator thread does the the work of processing a buffer.
ysr@777 234 // Returns "true" iff the work is complete (and the buffer may be
ysr@777 235 // deallocated).
ysr@777 236 virtual bool mut_process_buffer(void** buf) {
ysr@777 237 ShouldNotReachHere();
ysr@777 238 return false;
ysr@777 239 }
ysr@777 240
ysr@777 241 public:
ysr@777 242 // Create an empty ptr queue set.
ysr@777 243 PtrQueueSet(bool notify_when_complete = false);
ysr@777 244
ysr@777 245 // Because of init-order concerns, we can't pass these as constructor
ysr@777 246 // arguments.
ysr@777 247 void initialize(Monitor* cbl_mon, Mutex* fl_lock,
iveresov@1546 248 int process_completed_threshold,
iveresov@1546 249 int max_completed_queue,
iveresov@1051 250 PtrQueueSet *fl_owner = NULL) {
ysr@777 251 _max_completed_queue = max_completed_queue;
iveresov@1546 252 _process_completed_threshold = process_completed_threshold;
iveresov@1546 253 _completed_queue_padding = 0;
ysr@777 254 assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
iveresov@1051 255 _cbl_mon = cbl_mon;
iveresov@1051 256 _fl_lock = fl_lock;
iveresov@1051 257 _fl_owner = (fl_owner != NULL) ? fl_owner : this;
ysr@777 258 }
ysr@777 259
ysr@777 260 // Return an empty oop array of size _sz (required to be non-zero).
ysr@777 261 void** allocate_buffer();
ysr@777 262
ysr@777 263 // Return an empty buffer to the free list. The "buf" argument is
ysr@777 264 // required to be a pointer to the head of an array of length "_sz".
ysr@777 265 void deallocate_buffer(void** buf);
ysr@777 266
ysr@777 267 // Declares that "buf" is a complete buffer.
iveresov@1546 268 void enqueue_complete_buffer(void** buf, size_t index = 0);
iveresov@1546 269
iveresov@1546 270 // To be invoked by the mutator.
iveresov@1546 271 bool process_or_enqueue_complete_buffer(void** buf);
ysr@777 272
ysr@777 273 bool completed_buffers_exist_dirty() {
ysr@777 274 return _n_completed_buffers > 0;
ysr@777 275 }
ysr@777 276
ysr@777 277 bool process_completed_buffers() { return _process_completed; }
iveresov@1546 278 void set_process_completed(bool x) { _process_completed = x; }
ysr@777 279
tonyp@1752 280 bool is_active() { return _all_active; }
ysr@777 281
ysr@777 282 // Set the buffer size. Should be called before any "enqueue" operation
ysr@777 283 // can be called. And should only be called once.
ysr@777 284 void set_buffer_size(size_t sz);
ysr@777 285
ysr@777 286 // Get the buffer size.
ysr@777 287 size_t buffer_size() { return _sz; }
ysr@777 288
iveresov@1546 289 // Get/Set the number of completed buffers that triggers log processing.
iveresov@1546 290 void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
iveresov@1546 291 int process_completed_threshold() const { return _process_completed_threshold; }
ysr@777 292
ysr@777 293 // Must only be called at a safe point. Indicates that the buffer free
ysr@777 294 // list size may be reduced, if that is deemed desirable.
ysr@777 295 void reduce_free_list();
ysr@777 296
iveresov@1546 297 int completed_buffers_num() { return _n_completed_buffers; }
iveresov@1051 298
iveresov@1051 299 void merge_bufferlists(PtrQueueSet* src);
iveresov@1546 300
iveresov@1546 301 void set_max_completed_queue(int m) { _max_completed_queue = m; }
iveresov@1546 302 int max_completed_queue() { return _max_completed_queue; }
iveresov@1546 303
iveresov@1546 304 void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
iveresov@1546 305 int completed_queue_padding() { return _completed_queue_padding; }
iveresov@1546 306
iveresov@1546 307 // Notify the consumer if the number of buffers crossed the threshold
iveresov@1546 308 void notify_if_necessary();
ysr@777 309 };
stefank@2314 310
stefank@2314 311 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_PTRQUEUE_HPP

mercurial