src/share/vm/gc_implementation/g1/ptrQueue.cpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2469:7e37af9d69ef
child:       3156:f08d439fab8c
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and by intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
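In outline, the fix makes every read of the Reference.referent field behave
like the sketch below. This is an editor's conceptual sketch only: the helper
names (load_referent_field, satb_marking_is_active,
satb_queue_for_current_thread) are hypothetical, and the actual changeset
implements the barrier via a compiler intrinsic for Reference.get() plus
explicit hooks in the JNI, reflection, and Unsafe access paths.

  // Conceptual sketch -- illustrative names, not HotSpot APIs.
  oop Reference_get(oop reference) {
    oop referent = load_referent_field(reference);   // hypothetical load
    if (referent != NULL && satb_marking_is_active()) {
      // Log the referent in the thread's SATB buffer so that concurrent
      // marking keeps it live even if it was only weakly reachable when
      // marking began.
      satb_queue_for_current_thread()->enqueue(referent);
    }
    return referent;
  }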

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
  _qset(qset), _buf(NULL), _index(0), _active(active),
  _perm(perm), _lock(NULL)
{}

void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // The buffer is empty (it fills from _sz down to 0); no work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // Entries below _index are unused: NULL them out, then enqueue.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}


static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}

static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}

void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
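// Editor's illustration (comment added for exposition, not in the original
// source): with _sz == 4 * oopSize, an empty queue has _index == 4 * oopSize.
// Three calls to enqueue_known_active() store entries at byte indices
// 3 * oopSize, 2 * oopSize and oopSize, leaving _index == oopSize; the live
// entries therefore always occupy the byte range [_index, _sz).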
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer(), as the
  // two locks have the same rank and we would otherwise get the "possible
  // deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}


PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  if (_fl_owner->_buf_free_list != NULL) {
    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
    _fl_owner->_buf_free_list_sz--;
    return res;
  } else {
    // Allocate space for the BufferNode in front of the buffer.
    char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
    return BufferNode::make_buffer_from_block(b);
  }
}
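// Editor's note (added for exposition): per the comment above, each heap
// block is laid out as [BufferNode header | buffer storage], so, presumably,
// make_buffer_from_block() returns the address just past the aligned header
// and make_node_from_buffer() recovers the header by stepping back over it.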
void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode *node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  while (n > 0) {
    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
    void* b = BufferNode::make_block_from_node(_buf_free_list);
    _buf_free_list = _buf_free_list->next();
    FREE_C_HEAP_ARRAY(char, b);
    _buf_free_list_sz--;
    n--;
  }
}

void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and
      // may be manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index), and multiple threads can call into this
      // routine for the same buffer. This would cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer)
      // the thread that acquires _lock will skip this code,
      // preventing a subsequent multiple enqueue, and will
      // install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
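// Editor's note (illustrative scenario, added for exposition): suppose two
// worker threads, T1 and T2, both see _index == 0 on the shared dirty card
// queue. T1, holding _lock, claims the full buffer by NULLing _buf and drops
// the lock to enqueue it. If T2 then acquires _lock, it finds _buf == NULL,
// skips the claim code, and installs a fresh buffer; when T1 reacquires the
// lock it sees the non-NULL _buf and returns without overwriting it.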
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}
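// Editor's illustration (added for exposition): with _max_completed_queue
// == 8 and _completed_queue_padding == 2, a Java thread only attempts to
// process the buffer itself once _n_completed_buffers reaches 10; with
// _max_completed_queue == 0 (no bound on the completed-buffer list) it
// always tries to process the buffer itself first.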
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}

int PtrQueueSet::completed_buffers_list_length() {
  int n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}
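// Editor's illustration (added for exposition): sz is a count of entries,
// while _sz is kept in bytes. On a 64-bit VM, where oopSize == 8,
// set_buffer_size(256) yields _sz == 2048 bytes, i.e. 256 pointer slots
// per buffer.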
// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}
