src/share/vm/gc_implementation/g1/satbQueue.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175 4dfb2df418f2
parent       2469 7e37af9d69ef
child        3416 2ace1c4ee8da
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"

// This method removes entries from an SATB buffer that will not be
// useful to the concurrent marking threads. An entry is removed if it
// satisfies one of the following conditions:
//
// * it points to an object outside the G1 heap (G1's concurrent
//     marking only visits objects inside the G1 heap),
// * it points to an object that has been allocated since marking
//     started (according to SATB those objects do not need to be
//     visited during marking), or
// * it points to an object that has already been marked (no need to
//     process it again).
//
// The rest of the entries will be retained and are compacted towards
// the top of the buffer. If this filtering clears a large enough
// chunk of the buffer we can re-use it (instead of enqueueing it) and
// just allow the mutator to carry on executing.

bool ObjPtrQueue::should_enqueue_buffer() {
  assert(_lock == NULL || _lock->owned_by_self(),
         "we should have taken the lock before calling this");

  // A value of 0 means "don't filter SATB buffers".
  if (G1SATBBufferEnqueueingThresholdPercent == 0) {
    return true;
  }

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // This method should only be called if there is a non-NULL buffer
  // that is full.
  assert(_index == 0, "pre-condition");
  assert(_buf != NULL, "pre-condition");

  void** buf = _buf;
  size_t sz = _sz;

  // Used for sanity checking at the end of the loop.
  debug_only(size_t entries = 0; size_t retained = 0;)

  size_t i = sz;
  size_t new_index = sz;

  // Given that we are expecting _index == 0, we could have changed
  // the loop condition to (i > 0). But we are using _index for
  // generality.
  while (i > _index) {
    assert(i > 0, "we should have at least one more entry to process");
    i -= oopSize;
    debug_only(entries += 1;)
    oop* p = (oop*) &buf[byte_index_to_index((int) i)];
    oop obj = *p;
    // NULL the entry so that unused parts of the buffer contain NULLs
    // at the end. If we are going to retain it we will copy it to its
    // final place. If we have retained all entries we have visited so
    // far, we'll just end up copying it to the same place.
    *p = NULL;

    bool retain = g1h->is_obj_ill(obj);
    if (retain) {
      assert(new_index > 0, "we should not have already filled up the buffer");
      new_index -= oopSize;
      assert(new_index >= i,
             "new_index should never be below i, as we always compact 'up'");
      oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
      assert(new_p >= p, "the destination location should never be below "
             "the source as we always compact 'up'");
      assert(*new_p == NULL,
             "we should have already cleared the destination location");
      *new_p = obj;
      debug_only(retained += 1;)
    }
  }
  size_t entries_calc = (sz - _index) / oopSize;
  assert(entries == entries_calc, "the number of entries we counted "
         "should match the number of entries we calculated");
  size_t retained_calc = (sz - new_index) / oopSize;
  assert(retained == retained_calc, "the number of retained entries we counted "
         "should match the number of retained entries we calculated");
  size_t perc = retained_calc * 100 / entries_calc;
  bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
  _index = new_index;

  return should_enqueue;
}
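
// Apply the given closure to the entries in the active part of the
// buffer, [_index, _sz), and then mark the buffer as logically empty
// by setting _index to _sz.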
void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
  if (_buf != NULL) {
    apply_closure_to_buffer(cl, _buf, _index, _sz);
    _index = _sz;
  }
}
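
// Apply the closure to every non-NULL entry in the given buffer,
// walking from byte offset 'index' up to 'sz'.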
void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
                                          void** buf, size_t index, size_t sz) {
  if (cl == NULL) return;
  for (size_t i = index; i < sz; i += oopSize) {
    oop obj = (oop)buf[byte_index_to_index((int)i)];
    // There can be NULL entries because of destructors.
    if (obj != NULL) {
      cl->do_object(obj);
    }
  }
}

#ifdef ASSERT
void ObjPtrQueue::verify_oops_in_buffer() {
  if (_buf == NULL) return;
  for (size_t i = _index; i < _sz; i += oopSize) {
    oop obj = (oop)_buf[byte_index_to_index((int)i)];
    assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
           "Not an oop");
  }
}
#endif

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(),
  _closure(NULL), _par_closures(NULL),
  _shared_satb_queue(this, true /*perm*/)
{}
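
// Set up the underlying PtrQueueSet, give the shared SATB queue its
// lock and, if running with parallel GC threads, allocate the array
// of per-worker closures.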
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                  int process_completed_threshold,
                                  Mutex* lock) {
  PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
  _shared_satb_queue.set_lock(lock);
  if (ParallelGCThreads > 0) {
    _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
  }
}
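
// Called when a thread's SATB queue buffer fills up (its index reaches
// zero). In debug builds the buffer's entries are first verified; the
// queue's generic handle_zero_index() then does the actual processing.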
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
  t->satb_mark_queue().handle_zero_index();
}

#ifdef ASSERT
void SATBMarkQueueSet::dump_active_values(JavaThread* first,
                                          bool expected_active) {
  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
  gclog_or_tty->print_cr(" SATB queue set: active is %s",
                         (is_active()) ? "TRUE" : "FALSE");
  gclog_or_tty->print_cr(" expected_active is %s",
                         (expected_active) ? "TRUE" : "FALSE");
  for (JavaThread* t = first; t; t = t->next()) {
    bool active = t->satb_mark_queue().is_active();
    gclog_or_tty->print_cr("  thread %s, active is %s",
                           t->name(), (active) ? "TRUE" : "FALSE");
  }
}
#endif // ASSERT
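
// Set the active flag of the SATB queue set and of every Java thread's
// SATB queue to 'b'. Must be called at a safepoint. In debug builds the
// current active values are first checked against 'expected_active'.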
void SATBMarkQueueSet::set_active_all_threads(bool b,
                                              bool expected_active) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  JavaThread* first = Threads::first();

#ifdef ASSERT
  if (_all_active != expected_active) {
    dump_active_values(first, expected_active);

    // I leave this here as a guarantee, instead of an assert, so
    // that it will still be compiled in if we choose to uncomment
    // the #ifdef ASSERT in a product build. The whole block is
    // within an #ifdef ASSERT so the guarantee will not be compiled
    // in a product build anyway.
    guarantee(false,
              "SATB queue set has an unexpected active value");
  }
#endif // ASSERT
  _all_active = b;

  for (JavaThread* t = first; t; t = t->next()) {
#ifdef ASSERT
    bool active = t->satb_mark_queue().is_active();
    if (active != expected_active) {
      dump_active_values(first, expected_active);

      // I leave this here as a guarantee, instead of an assert, so
      // that it will still be compiled in if we choose to uncomment
      // the #ifdef ASSERT in a product build. The whole block is
      // within an #ifdef ASSERT so the guarantee will not be compiled
      // in a product build anyway.
      guarantee(false,
                "thread has an unexpected active value in its SATB queue");
    }
#endif // ASSERT
    t->satb_mark_queue().set_active(b);
  }
}

void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
  _closure = closure;
}

void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
  assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
  _par_closures[i] = par_closure;
}
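
// Serial iteration: apply the registered closure to the SATB queue of
// every Java thread and to the shared queue.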
void SATBMarkQueueSet::iterate_closure_all_threads() {
  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().apply_closure(_closure);
  }
  shared_satb_queue()->apply_closure(_closure);
}
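
// Parallel iteration: each worker claims Java threads using the
// strong-roots claim parity and applies its per-worker closure to the
// SATB queues of the threads it successfully claims.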
void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
  SharedHeap* sh = SharedHeap::heap();
  int parity = sh->strong_roots_parity();

  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    if (t->claim_oops_do(true, parity)) {
      t->satb_mark_queue().apply_closure(_par_closures[worker]);
    }
  }

  // We also need to claim the VMThread so that its parity is updated;
  // otherwise the next call to Thread::possibly_parallel_oops_do inside
  // a StrongRootsScope might skip the VMThread because it has a stale
  // parity that matches the parity set by the StrongRootsScope.
  //
  // Whichever worker succeeds in claiming the VMThread gets to do
  // the shared queue.

  VMThread* vmt = VMThread::vm_thread();
  if (vmt->claim_oops_do(true, parity)) {
    shared_satb_queue()->apply_closure(_par_closures[worker]);
  }
}
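
// Pop one completed buffer off the completed-buffer list (under the
// lock), apply the serial or per-worker closure to it, deallocate it
// and return true. Return false if there were no completed buffers.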
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                              int worker) {
  BufferNode* nd = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    if (_completed_buffers_head != NULL) {
      nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
      _n_completed_buffers--;
      if (_n_completed_buffers == 0) _process_completed = false;
    }
  }
  ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
  if (nd != NULL) {
    void **buf = BufferNode::make_buffer_from_node(nd);
    ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
    deallocate_buffer(buf);
    return true;
  } else {
    return false;
  }
}
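
// Discard all SATB data accumulated so far: deallocate every completed
// buffer and reset the per-thread and shared queues. Must be called at
// a safepoint.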
void SATBMarkQueueSet::abandon_partial_marking() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  // So we can safely manipulate these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().reset();
  }
  shared_satb_queue()->reset();
}
