src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       2314:f95d63e2154a
child        3156:f08d439fab8c
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects, which causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
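The mechanism described in the summary can be sketched as follows. This is an illustrative, self-contained C++ sketch only: RefProcessor, ScanClosure, Oop and their members are hypothetical stand-ins, not the actual HotSpot classes. It mirrors the structure of the change, namely that the closure used to scan objects during an evacuation pause carries a pointer to the STW reference processor, so reference objects are 'discovered' as a side effect of scanning and are processed when the pause ends.

// Illustrative stand-ins only; not HotSpot types.
#include <vector>

struct Oop {
  bool is_reference_type;          // true for java.lang.ref.Reference-like objects
};

struct RefProcessor {
  std::vector<Oop*> _discovered;   // references found while scanning
  void discover(Oop* obj) { _discovered.push_back(obj); }
  void process_discovered_references() {
    // Keep/copy referents as appropriate, then drop the list.
    _discovered.clear();
  }
};

// Scan closure with the (STW) reference processor embedded in it.
struct ScanClosure {
  RefProcessor* _rp;
  explicit ScanClosure(RefProcessor* rp) : _rp(rp) {}
  void do_object(Oop* obj) {
    if (obj->is_reference_type) {
      _rp->discover(obj);          // discovery happens while scanning
    }
    // ...evacuate/copy the object as usual...
  }
};

// At the end of the evacuation pause the collector would call
// process_discovered_references() on the same processor instance.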

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                   bool consume,
                                   size_t worker_i) {
  bool res = true;
  if (_buf != NULL) {
    res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                  consume,
                                  (int) worker_i);
    if (res && consume) _index = _sz;
  }
  return res;
}

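// Note: 'index' and 'sz' below are byte offsets into the buffer rather than
// element counts; the loop advances by oopSize and byte_index_to_index()
// converts each byte offset into the corresponding void* slot. NULL slots
// (entries already consumed, or never filled in) are skipped.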
bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                             void** buf,
                                             size_t index, size_t sz,
                                             bool consume,
                                             int worker_i) {
  if (cl == NULL) return true;
  for (size_t i = index; i < sz; i += oopSize) {
    int ind = byte_index_to_index((int)i);
    jbyte* card_ptr = (jbyte*)buf[ind];
    if (card_ptr != NULL) {
      // Set the entry to null, so we don't do it again (via the test
      // above) if we reconsider this buffer.
      if (consume) buf[ind] = NULL;
      if (!cl->do_card_ptr(card_ptr, worker_i)) return false;
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _closure(NULL),
  _shared_dirty_card_queue(this, true /*perm*/),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

// Determines how many mutator threads can process the buffers in parallel.
size_t DirtyCardQueueSet::num_par_ids() {
  return os::processor_count();
}

void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                   int process_completed_threshold,
                                   int max_completed_queue,
                                   Mutex* lock, PtrQueueSet* fl_owner) {
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
                          max_completed_queue, fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
}

void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->dirty_card_queue().handle_zero_index();
}

void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
  _closure = closure;
}

void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
                                                    size_t worker_i) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    bool b = t->dirty_card_queue().apply_closure(_closure, consume);
    guarantee(b, "Should not be interrupted.");
  }
  bool b = shared_dirty_card_queue()->apply_closure(_closure,
                                                    consume,
                                                    worker_i);
  guarantee(b, "Should not be interrupted.");
}

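// Process the given buffer on behalf of a mutator thread: claim a par_id
// from _free_ids (unless this thread already holds one), apply the
// registered closure to the whole buffer, and release the id again if it
// was claimed here. Returns true if the entire buffer was processed.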
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {

  // Used to determine if we had already claimed a par_id
  // before entering this method.
  bool already_claimed = false;

  // We grab the current JavaThread.
  JavaThread* thread = JavaThread::current();

  // We get the number of any par_id that this thread
  // might have already claimed.
  int worker_i = thread->get_claimed_par_id();

  // If worker_i is not -1 then the thread has already claimed
  // a par_id. We make note of it using the already_claimed value.
  if (worker_i != -1) {
    already_claimed = true;
  } else {

    // Otherwise we need to claim a par id
    worker_i = _free_ids->claim_par_id();

    // And store the par_id value in the thread
    thread->set_claimed_par_id(worker_i);
  }

  bool b = false;
  if (worker_i != -1) {
    b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                _sz, true, worker_i);
    if (b) Atomic::inc(&_processed_buffers_mut);

    // If we had not claimed an id before entering the method
    // then we must release the id.
    if (!already_claimed) {

      // we release the id
      _free_ids->release_par_id(worker_i);

      // and set the claimed_id in the thread to -1
      thread->set_claimed_par_id(-1);
    }
  }
  return b;
}

BufferNode*
DirtyCardQueueSet::get_completed_buffer(int stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if ((int)_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

  if (_completed_buffers_head != NULL) {
    nd = _completed_buffers_head;
    _completed_buffers_head = nd->next();
    if (_completed_buffers_head == NULL)
      _completed_buffers_tail = NULL;
    _n_completed_buffers--;
    assert(_n_completed_buffers >= 0, "Invariant");
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
  return nd;
}

bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
                                         int worker_i,
                                         BufferNode* nd) {
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    size_t index = nd->index();
    bool b =
      DirtyCardQueue::apply_closure_to_buffer(cl, buf,
                                              index, _sz,
                                              true, worker_i);
    if (b) {
      deallocate_buffer(buf);
      return true;  // In normal case, go on to next buffer.
    } else {
      enqueue_complete_buffer(buf, index);
      return false;
    }
  } else {
    return false;
  }
}

bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                          int worker_i,
                                                          int stop_at,
                                                          bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  bool res = apply_closure_to_completed_buffer_helper(cl, worker_i, nd);
  if (res) Atomic::inc(&_processed_buffers_rs_thread);
  return res;
}

bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
                                                          int stop_at,
                                                          bool during_pause) {
  return apply_closure_to_completed_buffer(_closure, worker_i,
                                           stop_at, during_pause);
}

void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
  BufferNode* nd = _completed_buffers_head;
  while (nd != NULL) {
    bool b =
      DirtyCardQueue::apply_closure_to_buffer(_closure,
                                              BufferNode::make_buffer_from_node(nd),
                                              0, _sz, false);
    guarantee(b, "Should not stop early.");
    nd = nd->next();
  }
}

// Deallocates any completed log buffers
void DirtyCardQueueSet::clear() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    debug_only(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }

}

void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  clear();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->dirty_card_queue().reset();
  }
  shared_dirty_card_queue()->reset();
}

void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to
  // the global list of logs.  Temporarily turn off the limit on the number
  // of outstanding buffers.
  int save_max_completed_queue = _max_completed_queue;
  _max_completed_queue = max_jint;
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    DirtyCardQueue& dcq = t->dirty_card_queue();
    if (dcq.size() != 0) {
      void **buf = t->dirty_card_queue().get_buf();
      // We must NULL out the unused entries, then enqueue.
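      // (Slots below the current index have not been filled in yet; completed
      // buffers may later be scanned from slot 0, e.g. by
      // apply_closure_to_all_completed_buffers(), which skips NULL entries,
      // so clearing the unused slots keeps stale values from being treated
      // as card pointers.)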
      for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) {
        buf[PtrQueue::byte_index_to_index((int)i)] = NULL;
      }
      enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
      dcq.reinitialize();
    }
  }
  if (_shared_dirty_card_queue.size() != 0) {
    enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
                            _shared_dirty_card_queue.get_index());
    _shared_dirty_card_queue.reinitialize();
  }
  // Restore the completed buffer queue limit.
  _max_completed_queue = save_max_completed_queue;
}
