src/share/vm/memory/cardTableModRefBS.cpp

author:      acorn
date:        Fri, 25 Jan 2013 15:06:18 -0500
changeset:   4497:16fb9f942703
parent:      4153:b9a9ed0f8eeb
child:       4542:db9981fd3124
permissions: -rw-r--r--

6479360: PrintClassHistogram improvements
Summary: jcmd <pid> GC.class_stats (UnlockDiagnosticVMOptions)
Reviewed-by: coleenp, hseigel, sla, acorn
Contributed-by: ioi.lam@oracle.com

/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
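//
// In outline (a summary, not new behavior): the covered heap is divided
// into fixed-size cards, and each card is shadowed by one byte of
// _byte_map.  A write barrier for a ref field at address p conceptually
// marks
//
//   byte_map_base[uintptr_t(p) >> card_shift] = dirty_card;
//
// (see byte_for() and the byte_map_base computation in the constructor
// below), and remembered-set scanning later walks the byte map looking
// for non-clean entries.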

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
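// Illustrative arithmetic only (assuming the usual 512-byte cards): a
// 64 MB covered space needs 64 MB / 512 = 131072 cards, so
// cards_required() returns 131073 including the guard card.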

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
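// Rounding the byte map size up to MAX2(_page_size, granularity) keeps the
// reservation made in the constructor valid for the OS allocation
// granularity and lets resize_covered_region() commit and uncommit the
// byte map in whole pages.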

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
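  // Illustrative values only: with 512-byte cards (card_shift == 9) and
  // low_bound == 0x80000000, byte_map_base is _byte_map - 0x400000, so
  // byte_for(low_bound) == &_byte_map[0] and each following 512-byte card
  // maps to the next byte of the map.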

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }

  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only(*byte_for(_covered[ind].last());)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state, except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds; if it does not, the
      only consequence is a duplicate entry in the update buffer (due to
      the "cache miss"), so it is not worth spinning.
 */
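//
// Summarizing the transitions relied on by the code below:
//   clean           -> claimed                claim_card()
//   other non-dirty -> non-dirty | claimed    claim_card()
//   clean           -> deferred               mark_card_deferred()
//   claimed         -> claimed | deferred     mark_card_deferred()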

bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
                                                                  OopsInGenClosure* cl,
                                                                  CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism:
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#ifndef SERIALGC
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
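//
// Illustrative use (a sketch, not part of this file): the closure's
// do_MemRegion() is handed each maximal run of non-clean cards, clipped
// to "mr", so a caller could, for example, total the non-clean words with
//
//   class CountNonCleanClosure: public MemRegionClosure {
//     size_t _words;
//    public:
//     CountNonCleanClosure() : _words(0) {}
//     void do_MemRegion(MemRegion m) { _words += m.word_size(); }
//     size_t words() const { return _words; }
//   };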
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
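// Illustrative values only: with 512-byte cards and a 4K page size this is
// 512 * 4096 = 2M; aligning generation boundaries to a multiple of this
// value keeps the corresponding card table boundaries page-aligned, so the
// byte map can be committed and uncommitted per generation.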

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool   failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]",
                      start, end);
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};
