src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp

author      tschatzl
date        Mon, 24 Mar 2014 15:30:14 +0100
changeset   6402:191174b49bec
parent      5865:aa6f2ea19d8f
child       6493:3205e78d8193
permissions -rw-r--r--

8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered sets data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization, and decreases freeing time.
Reviewed-by: mgerdin, brutisso

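As a rough illustration of the "chunked list of nmethods" idea described in the summary, a minimal sketch might look like the following. The type and member names (NmethodChunk, ChunkedNMethodList) are hypothetical and not taken from the changeset; the point is only that adding appends into fixed-size chunks and freeing releases whole chunks at once, which is what makes the structure easier to process in parallel and faster to tear down than a single GrowableArray.

#include <cstddef>  // for NULL

// Forward declaration; only pointers to nmethods are stored here.
class nmethod;

// One fixed-size chunk of nmethod pointers (sketch; names are hypothetical).
struct NmethodChunk {
  static const int CapacityInEntries = 64;
  NmethodChunk* _next;
  int           _top;
  nmethod*      _entries[CapacityInEntries];

  NmethodChunk() : _next(NULL), _top(0) { }
  bool is_full() const { return _top == CapacityInEntries; }
  void push(nmethod* nm) { _entries[_top++] = nm; }
};

// A singly linked list of chunks. Adding is append-only; clearing frees
// whole chunks, so teardown cost is proportional to the number of chunks
// rather than the number of entries.
class ChunkedNMethodList {
  NmethodChunk* _head;
 public:
  ChunkedNMethodList() : _head(NULL) { }
  ~ChunkedNMethodList() { clear(); }

  void add(nmethod* nm) {
    if (_head == NULL || _head->is_full()) {
      NmethodChunk* c = new NmethodChunk();
      c->_next = _head;
      _head = c;
    }
    _head->push(nm);
  }

  void clear() {
    while (_head != NULL) {
      NmethodChunk* next = _head->_next;
      delete _head;
      _head = next;
    }
  }
};
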
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
                                                 int max_covered_regions) :
    CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
{
  _kind = G1SATBCT;
}

void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  // Nulls should already have been filtered.
  assert(pre_val->is_oop(true), "Error");

  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->satb_mark_queue().enqueue(pre_val);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  }
}

template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}

bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // The card has already been processed.
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  if (val == g1_young_gen) {
    // The card covers a young gen region; we do not need to keep track of
    // pointers into the young generation.
    return false;
  }

  // The deferred bit can be set either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  memset(first, g1_young_gen, last - first);
}

#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
  verify_region(mr, g1_young_gen, true);
}
#endif

G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                               int max_covered_regions) :
  G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
  _dcqs(JavaThread::dirty_card_queue_set())
{
  _kind = G1SATBCTLogging;
}

void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                     oop new_val) {
  volatile jbyte* byte = byte_for(field);
  if (*byte == g1_young_gen) {
    // No card marking is needed for stores into young regions.
    return;
  }
  // Order the preceding reference store with the card load below, so that a
  // card concurrently cleaned by a refinement thread is not mistakenly seen
  // as already dirty.
  OrderAccess::storeload();
  if (*byte != dirty_card) {
    *byte = dirty_card;
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      jt->dirty_card_queue().enqueue(byte);
    } else {
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      _dcqs.shared_dirty_card_queue()->enqueue(byte);
    }
  }
}

void
G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                       oop new_val) {
  uintptr_t field_uint = (uintptr_t)field;
  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
  uintptr_t comb = field_uint ^ new_val_uint;
  comb = comb >> HeapRegion::LogOfHRGrainBytes;
  if (comb == 0) return;     // Field and new value are in the same region.
  if (new_val == NULL) return;
  // Otherwise, log it.
  G1SATBCardTableLoggingModRefBS* g1_bs =
    (G1SATBCardTableLoggingModRefBS*)Universe::heap()->barrier_set();
  g1_bs->write_ref_field_work(field, new_val);
}

void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // Skip all consecutive young cards.
    for (; byte <= last_byte && *byte == g1_young_gen; byte++);

    if (byte <= last_byte) {
      OrderAccess::storeload();
      // Enqueue if necessary.
      if (thr->is_Java_thread()) {
        JavaThread* jt = (JavaThread*)thr;
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            jt->dirty_card_queue().enqueue(byte);
          }
        }
      } else {
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            _dcqs.shared_dirty_card_queue()->enqueue(byte);
          }
        }
      }
    }
  }
}
