src/share/vm/memory/cardTableModRefBS.cpp

author      ysr
date        Tue, 10 May 2011 00:33:21 -0700
changeset   2889:fc2b798ab316
parent      2849:063382f9b575
child       3294:bca17e38de00
permissions -rw-r--r--

6883834: ParNew: assert(!_g->to()->is_in_reserved(obj),"Scanning field twice?") with LargeObjects tests
Summary: Fixed process_chunk_boundaries(), used for parallel card scanning when using ParNew/CMS, so as to prevent double-scanning, or worse, non-scanning of imprecisely marked objects exceeding parallel chunk size. Made some sizing parameters for parallel card scanning diagnostic, disabled ParallelGCRetainPLAB, and elaborated and clarified some comments.
Reviewed-by: stefank, johnc

     1 /*
     2  * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "memory/allocation.inline.hpp"
    27 #include "memory/cardTableModRefBS.hpp"
    28 #include "memory/cardTableRS.hpp"
    29 #include "memory/sharedHeap.hpp"
    30 #include "memory/space.hpp"
    31 #include "memory/space.inline.hpp"
    32 #include "memory/universe.hpp"
    33 #include "runtime/java.hpp"
    34 #include "runtime/mutexLocker.hpp"
    35 #include "runtime/virtualspace.hpp"
    36 #ifdef COMPILER1
    37 #include "c1/c1_LIR.hpp"
    38 #include "c1/c1_LIRGenerator.hpp"
    39 #endif
    41 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
    42 // enumerate ref fields that have been modified (since the last
    43 // enumeration).
    45 size_t CardTableModRefBS::cards_required(size_t covered_words)
    46 {
    47   // Add one for a guard card, used to detect errors.
    48   const size_t words = align_size_up(covered_words, card_size_in_words);
    49   return words / card_size_in_words + 1;
    50 }
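       // A minimal worked sketch, assuming the common 512-byte card_size and
       // 8-byte HeapWords (so card_size_in_words == 64); the figures are
       // illustrative only:
       //
       //   cards_required(131072)                 // 1 MiB of covered heap words
       //     == align_size_up(131072, 64) / 64 + 1
       //     == 2048 + 1                          // "+ 1" is the guard card
       //     == 2049 card-table bytes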
    52 size_t CardTableModRefBS::compute_byte_map_size()
    53 {
    54   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
    55                                         "uninitialized, check declaration order");
    56   assert(_page_size != 0, "uninitialized, check declaration order");
    57   const size_t granularity = os::vm_allocation_granularity();
    58   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
    59 }
    61 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
    62                                      int max_covered_regions):
    63   ModRefBarrierSet(max_covered_regions),
    64   _whole_heap(whole_heap),
    65   _guard_index(cards_required(whole_heap.word_size()) - 1),
    66   _last_valid_index(_guard_index - 1),
    67   _page_size(os::vm_page_size()),
    68   _byte_map_size(compute_byte_map_size())
    69 {
    70   _kind = BarrierSet::CardTableModRef;
    72   HeapWord* low_bound  = _whole_heap.start();
    73   HeapWord* high_bound = _whole_heap.end();
    74   assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
    75   assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
    77   assert(card_size <= 512, "card_size must be at most 512"); // why?
    79   _covered   = new MemRegion[max_covered_regions];
    80   _committed = new MemRegion[max_covered_regions];
    81   if (_covered == NULL || _committed == NULL)
    82     vm_exit_during_initialization("couldn't alloc card table covered region set.");
    83   int i;
    84   for (i = 0; i < max_covered_regions; i++) {
    85     _covered[i].set_word_size(0);
    86     _committed[i].set_word_size(0);
    87   }
    88   _cur_covered_regions = 0;
    90   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    91     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
    92   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
    93   os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
    94                        _page_size, heap_rs.base(), heap_rs.size());
    95   if (!heap_rs.is_reserved()) {
    96     vm_exit_during_initialization("Could not reserve enough space for the "
    97                                   "card marking array");
    98   }
   100   // The assembler store_check code will do an unsigned shift of the oop,
   101   // then add it to byte_map_base, i.e.
   102   //
   103   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
   104   _byte_map = (jbyte*) heap_rs.base();
   105   byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
   106   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
   107   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
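         // Since low_bound is card-aligned (asserted above), this biasing gives,
         // for any covered address p (a sketch of the arithmetic, not new code):
         //
         //   byte_for(p) == byte_map_base + (uintptr_t(p) >> card_shift)
         //               == _byte_map + ((uintptr_t(p) - uintptr_t(low_bound)) >> card_shift)
         //
         // i.e. the entry at card index (p - low_bound) / card_size, which is the
         // form the assembler store_check code computes.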
   109   jbyte* guard_card = &_byte_map[_guard_index];
   110   uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
   111   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
   112   if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
   113     // Do better than this for Merlin
   114     vm_exit_out_of_memory(_page_size, "card table last card");
   115   }
   116   *guard_card = last_card;
   118    _lowest_non_clean =
   119     NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
   120   _lowest_non_clean_chunk_size =
   121     NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
   122   _lowest_non_clean_base_chunk_index =
   123     NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
   124   _last_LNC_resizing_collection =
   125     NEW_C_HEAP_ARRAY(int, max_covered_regions);
   126   if (_lowest_non_clean == NULL
   127       || _lowest_non_clean_chunk_size == NULL
   128       || _lowest_non_clean_base_chunk_index == NULL
   129       || _last_LNC_resizing_collection == NULL)
   130     vm_exit_during_initialization("couldn't allocate an LNC array.");
   131   for (i = 0; i < max_covered_regions; i++) {
   132     _lowest_non_clean[i] = NULL;
   133     _lowest_non_clean_chunk_size[i] = 0;
   134     _last_LNC_resizing_collection[i] = -1;
   135   }
   137   if (TraceCardTableModRefBS) {
   138     gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
   139     gclog_or_tty->print_cr("  "
   140                   "  &_byte_map[0]: " INTPTR_FORMAT
   141                   "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
   142                   &_byte_map[0],
   143                   &_byte_map[_last_valid_index]);
   144     gclog_or_tty->print_cr("  "
   145                   "  byte_map_base: " INTPTR_FORMAT,
   146                   byte_map_base);
   147   }
   148 }
   150 int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
   151   int i;
   152   for (i = 0; i < _cur_covered_regions; i++) {
   153     if (_covered[i].start() == base) return i;
   154     if (_covered[i].start() > base) break;
   155   }
   156   // If we didn't find it, create a new one.
   157   assert(_cur_covered_regions < _max_covered_regions,
   158          "too many covered regions");
   159   // Move the ones above up, to maintain sorted order.
   160   for (int j = _cur_covered_regions; j > i; j--) {
   161     _covered[j] = _covered[j-1];
   162     _committed[j] = _committed[j-1];
   163   }
   164   int res = i;
   165   _cur_covered_regions++;
   166   _covered[res].set_start(base);
   167   _covered[res].set_word_size(0);
   168   jbyte* ct_start = byte_for(base);
   169   uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
   170   _committed[res].set_start((HeapWord*)ct_start_aligned);
   171   _committed[res].set_word_size(0);
   172   return res;
   173 }
   175 int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
   176   for (int i = 0; i < _cur_covered_regions; i++) {
   177     if (_covered[i].contains(addr)) {
   178       return i;
   179     }
   180   }
   181   assert(0, "address outside of heap?");
   182   return -1;
   183 }
   185 HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
   186   HeapWord* max_end = NULL;
   187   for (int j = 0; j < ind; j++) {
   188     HeapWord* this_end = _committed[j].end();
   189     if (this_end > max_end) max_end = this_end;
   190   }
   191   return max_end;
   192 }
   194 MemRegion CardTableModRefBS::committed_unique_to_self(int self,
   195                                                       MemRegion mr) const {
   196   MemRegion result = mr;
   197   for (int r = 0; r < _cur_covered_regions; r += 1) {
   198     if (r != self) {
   199       result = result.minus(_committed[r]);
   200     }
   201   }
   202   // Never include the guard page.
   203   result = result.minus(_guard_region);
   204   return result;
   205 }
   207 void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   208   // We don't change the start of a region, only the end.
   209   assert(_whole_heap.contains(new_region),
   210            "attempt to cover area not in reserved area");
   211   debug_only(verify_guard();)
   212   // collided is true if the expansion would push into another committed region
   213   debug_only(bool collided = false;)
   214   int const ind = find_covering_region_by_base(new_region.start());
   215   MemRegion const old_region = _covered[ind];
   216   assert(old_region.start() == new_region.start(), "just checking");
   217   if (new_region.word_size() != old_region.word_size()) {
   218     // Commit new or uncommit old pages, if necessary.
   219     MemRegion cur_committed = _committed[ind];
   220     // Extend the end of this _committed region
   221     // to cover the end of any lower _committed regions.
   222     // This forms overlapping regions, but never interior regions.
   223     HeapWord* const max_prev_end = largest_prev_committed_end(ind);
   224     if (max_prev_end > cur_committed.end()) {
   225       cur_committed.set_end(max_prev_end);
   226     }
   227     // Align the end up to a page size (starts are already aligned).
   228     jbyte* const new_end = byte_after(new_region.last());
   229     HeapWord* new_end_aligned =
   230       (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
   231     assert(new_end_aligned >= (HeapWord*) new_end,
   232            "align up, but less");
   233     // Check the other regions (excludes "ind") to ensure that
   234     // the new_end_aligned does not intrude onto the committed
   235     // space of another region.
   236     int ri = 0;
   237     for (ri = 0; ri < _cur_covered_regions; ri++) {
   238       if (ri != ind) {
   239         if (_committed[ri].contains(new_end_aligned)) {
   240           // The prior check included in the assert
   241           // (new_end_aligned >= _committed[ri].start())
   242           // is redundant with the "contains" test.
   243           // Any region containing the new end
   244           // should start at or beyond the region found (ind)
   245           // for the new end (committed regions are not expected to
   246           // be proper subsets of other committed regions).
   247           assert(_committed[ri].start() >= _committed[ind].start(),
   248                  "New end of committed region is inconsistent");
   249           new_end_aligned = _committed[ri].start();
   250           // new_end_aligned can be equal to the start of its
   251           // committed region (i.e., of "ind") if a second
   252     // region following "ind" also starts at the same location
   253           // as "ind".
   254           assert(new_end_aligned >= _committed[ind].start(),
   255             "New end of committed region is before start");
   256           debug_only(collided = true;)
   257           // Should only collide with 1 region
   258           break;
   259         }
   260       }
   261     }
   262 #ifdef ASSERT
   263     for (++ri; ri < _cur_covered_regions; ri++) {
   264       assert(!_committed[ri].contains(new_end_aligned),
   265         "New end of committed region is in a second committed region");
   266     }
   267 #endif
   268     // The guard page is always committed and should not be committed over.
   269     // "guarded" is used for assertion checking below and recalls the fact
   270     // that the would-be end of the new committed region would have
   271     // penetrated the guard page.
   272     HeapWord* new_end_for_commit = new_end_aligned;
   274     DEBUG_ONLY(bool guarded = false;)
   275     if (new_end_for_commit > _guard_region.start()) {
   276       new_end_for_commit = _guard_region.start();
   277       DEBUG_ONLY(guarded = true;)
   278     }
   280     if (new_end_for_commit > cur_committed.end()) {
   281       // Must commit new pages.
   282       MemRegion const new_committed =
   283         MemRegion(cur_committed.end(), new_end_for_commit);
   285       assert(!new_committed.is_empty(), "Region should not be empty here");
   286       if (!os::commit_memory((char*)new_committed.start(),
   287                              new_committed.byte_size(), _page_size)) {
   288         // Do better than this for Merlin
   289         vm_exit_out_of_memory(new_committed.byte_size(),
   290                 "card table expansion");
   291       }
   292     // Use new_end_aligned (as opposed to new_end_for_commit) because
   293     // the cur_committed region may include the guard region.
   294     } else if (new_end_aligned < cur_committed.end()) {
   295       // Must uncommit pages.
   296       MemRegion const uncommit_region =
   297         committed_unique_to_self(ind, MemRegion(new_end_aligned,
   298                                                 cur_committed.end()));
   299       if (!uncommit_region.is_empty()) {
   300         // It is not safe to uncommit cards if the boundary between
   301         // the generations is moving.  A shrink can uncommit cards
   302         // owned by generation A but being used by generation B.
   303         if (!UseAdaptiveGCBoundary) {
   304           if (!os::uncommit_memory((char*)uncommit_region.start(),
   305                                    uncommit_region.byte_size())) {
   306             assert(false, "Card table contraction failed");
   307             // The call failed so don't change the end of the
   308             // committed region.  This is better than taking the
   309             // VM down.
   310             new_end_aligned = _committed[ind].end();
   311           }
   312         } else {
   313           new_end_aligned = _committed[ind].end();
   314         }
   315       }
   316     }
   317     // In any case, we can reset the end of the current committed entry.
   318     _committed[ind].set_end(new_end_aligned);
   320 #ifdef ASSERT
   321     // Check that the last card in the new region is committed according
   322     // to the tables.
   323     bool covered = false;
   324     for (int cr = 0; cr < _cur_covered_regions; cr++) {
   325       if (_committed[cr].contains(new_end - 1)) {
   326         covered = true;
   327         break;
   328       }
   329     }
   330     assert(covered, "Card for end of new region not committed");
   331 #endif
   333     // The default of 0 is not necessarily clean cards.
   334     jbyte* entry;
   335     if (old_region.last() < _whole_heap.start()) {
   336       entry = byte_for(_whole_heap.start());
   337     } else {
   338       entry = byte_after(old_region.last());
   339     }
   340     assert(index_for(new_region.last()) <  _guard_index,
   341       "The guard card will be overwritten");
   342     // The commented-out line below would clean only the newly expanded
   343     // region, not the aligned-up expanded region.
   344     // jbyte* const end = byte_after(new_region.last());
   345     jbyte* const end = (jbyte*) new_end_for_commit;
   346     assert((end >= byte_after(new_region.last())) || collided || guarded,
   347       "Expect to be beyond new region unless impacting another region");
   348     // do nothing if we resized downward.
   349 #ifdef ASSERT
   350     for (int ri = 0; ri < _cur_covered_regions; ri++) {
   351       if (ri != ind) {
   352         // The end of the new committed region should not
   353         // be in any existing region unless it matches
   354         // the start of the next region.
   355         assert(!_committed[ri].contains(end) ||
   356                (_committed[ri].start() == (HeapWord*) end),
   357                "Overlapping committed regions");
   358       }
   359     }
   360 #endif
   361     if (entry < end) {
   362       memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
   363     }
   364   }
   365   // In any case, the covered size changes.
   366   _covered[ind].set_word_size(new_region.word_size());
   367   if (TraceCardTableModRefBS) {
   368     gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
   369     gclog_or_tty->print_cr("  "
   370                   "  _covered[%d].start(): " INTPTR_FORMAT
   371                   "  _covered[%d].last(): " INTPTR_FORMAT,
   372                   ind, _covered[ind].start(),
   373                   ind, _covered[ind].last());
   374     gclog_or_tty->print_cr("  "
   375                   "  _committed[%d].start(): " INTPTR_FORMAT
   376                   "  _committed[%d].last(): " INTPTR_FORMAT,
   377                   ind, _committed[ind].start(),
   378                   ind, _committed[ind].last());
   379     gclog_or_tty->print_cr("  "
   380                   "  byte_for(start): " INTPTR_FORMAT
   381                   "  byte_for(last): " INTPTR_FORMAT,
   382                   byte_for(_covered[ind].start()),
   383                   byte_for(_covered[ind].last()));
   384     gclog_or_tty->print_cr("  "
   385                   "  addr_for(start): " INTPTR_FORMAT
   386                   "  addr_for(last): " INTPTR_FORMAT,
   387                   addr_for((jbyte*) _committed[ind].start()),
   388                   addr_for((jbyte*) _committed[ind].last()));
   389   }
   390   // Touch the last card of the covered region to show that it
   391   // is committed (or SEGV).
   392   debug_only(*byte_for(_covered[ind].last());)
   393   debug_only(verify_guard();)
   394 }
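       // In short, resize_covered_region() above moves only the end of a covered
       // region: it commits card-table pages up to the page-aligned new end
       // (clipped at the guard page and at any neighboring committed region),
       // uncommits pages unique to this region when shrinking (unless
       // UseAdaptiveGCBoundary), and cleans the card bytes beyond the old
       // covered end.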
   396 // Note that these versions are precise!  The scanning code has to handle the
   397 // fact that the write barrier may be either precise or imprecise.
   399 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
   400   inline_write_ref_field(field, newVal);
   401 }
   403 /*
   404    Claimed and deferred bits are used together in G1 during the evacuation
   405    pause. These bits can have the following state transitions:
   406    1. The claimed bit can be set over any other card state, except that
   407       the "dirty -> dirty and claimed" transition is checked for in
   408       G1 code and is not used.
   409    2. The deferred bit can be set only if the previous state of the card
   410       was either clean or claimed. mark_card_deferred() is wait-free.
   411       We do not care whether the operation succeeds, because if it does
   412       not, it will merely result in a duplicate entry in the update buffer
   413       due to the "cache miss". So it is not worth spinning.
   414  */
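       // A compact summary of the transitions described above (a sketch; the
       // names denote the corresponding *_card_val() values):
       //
       //   clean   -> claimed              (claim_card)
       //   other   -> other | claimed      (claim_card; "dirty -> dirty|claimed"
       //                                    is checked for in G1 but not used)
       //   clean   -> deferred             (mark_card_deferred)
       //   claimed -> claimed | deferred   (mark_card_deferred)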
   417 bool CardTableModRefBS::claim_card(size_t card_index) {
   418   jbyte val = _byte_map[card_index];
   419   assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
   420   while (val == clean_card_val() ||
   421          (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
   422     jbyte new_val = val;
   423     if (val == clean_card_val()) {
   424       new_val = (jbyte)claimed_card_val();
   425     } else {
   426       new_val = val | (jbyte)claimed_card_val();
   427     }
   428     jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
   429     if (res == val) {
   430       return true;
   431     }
   432     val = res;
   433   }
   434   return false;
   435 }
   437 bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
   438   jbyte val = _byte_map[card_index];
   439   // It's already processed
   440   if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
   441     return false;
   442   }
   443   // Cached bit can be installed either on a clean card or on a claimed card.
   444   jbyte new_val = val;
   445   if (val == clean_card_val()) {
   446     new_val = (jbyte)deferred_card_val();
   447   } else {
   448     if (val & claimed_card_val()) {
   449       new_val = val | (jbyte)deferred_card_val();
   450     }
   451   }
   452   if (new_val != val) {
   453     Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
   454   }
   455   return true;
   456 }
   458 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
   459                                                                  MemRegion mr,
   460                                                                  OopsInGenClosure* cl,
   461                                                                  CardTableRS* ct) {
   462   if (!mr.is_empty()) {
   463     int n_threads = SharedHeap::heap()->n_par_threads();
   464     if (n_threads > 0) {
   465 #ifndef SERIALGC
   466       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
   467 #else  // SERIALGC
   468       fatal("Parallel gc not supported here.");
   469 #endif // SERIALGC
   470     } else {
   471       // We do not call the non_clean_card_iterate_serial() version below because
   472       // we want to clear the cards (which non_clean_card_iterate_serial() does not
   473       // do for us): clear_cl here does the work of finding contiguous dirty ranges
   474       // of cards to process and clear.
   476       DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
   477                                                        cl->gen_boundary());
   478       ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
   480       clear_cl.do_MemRegion(mr);
   481     }
   482   }
   483 }
   485 // The iterator itself is not MT-aware, but
   486 // MT-aware callers and closures can use this to
   487 // accomplish dirty card iteration in parallel. The
   488 // iterator itself does not clear the dirty cards, or
   489 // change their values in any manner.
   490 void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
   491                                                       MemRegionClosure* cl) {
   492   for (int i = 0; i < _cur_covered_regions; i++) {
   493     MemRegion mri = mr.intersection(_covered[i]);
   494     if (mri.word_size() > 0) {
   495       jbyte* cur_entry = byte_for(mri.last());
   496       jbyte* limit = byte_for(mri.start());
   497       while (cur_entry >= limit) {
   498         jbyte* next_entry = cur_entry - 1;
   499         if (*cur_entry != clean_card) {
   500           size_t non_clean_cards = 1;
   501           // Should the next card be included in this range of dirty cards?
   502           while (next_entry >= limit && *next_entry != clean_card) {
   503             non_clean_cards++;
   504             cur_entry = next_entry;
   505             next_entry--;
   506           }
   507           // The memory region may not be on a card boundary.  So that
   508           // objects beyond the end of the region are not processed, make
   509           // cur_cards precise with regard to the end of the memory region.
   510           MemRegion cur_cards(addr_for(cur_entry),
   511                               non_clean_cards * card_size_in_words);
   512           MemRegion dirty_region = cur_cards.intersection(mri);
   513           cl->do_MemRegion(dirty_region);
   514         }
   515         cur_entry = next_entry;
   516       }
   517     }
   518   }
   519 }
   521 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
   522   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   523   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   524   jbyte* cur  = byte_for(mr.start());
   525   jbyte* last = byte_after(mr.last());
   526   while (cur < last) {
   527     *cur = dirty_card;
   528     cur++;
   529   }
   530 }
   532 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
   533   assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
   534   assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
   535   for (int i = 0; i < _cur_covered_regions; i++) {
   536     MemRegion mri = mr.intersection(_covered[i]);
   537     if (!mri.is_empty()) dirty_MemRegion(mri);
   538   }
   539 }
   541 void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
   542   // Be conservative: only clean cards entirely contained within the
   543   // region.
   544   jbyte* cur;
   545   if (mr.start() == _whole_heap.start()) {
   546     cur = byte_for(mr.start());
   547   } else {
   548     assert(mr.start() > _whole_heap.start(), "mr is not covered.");
   549     cur = byte_after(mr.start() - 1);
   550   }
   551   jbyte* last = byte_after(mr.last());
   552   memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
   553 }
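       // A sketch of the conservative start handling above, assuming 512-byte
       // cards and 8-byte HeapWords: if mr.start() lies, say, 8 bytes into a
       // card, then mr.start() - 1 is still inside that card, so
       // byte_after(mr.start() - 1) is the entry of the *following* card and the
       // partially covered first card keeps its value; if mr.start() is exactly
       // on a card boundary, mr.start() - 1 falls in the previous card and the
       // card beginning at mr.start() is cleaned.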
   555 void CardTableModRefBS::clear(MemRegion mr) {
   556   for (int i = 0; i < _cur_covered_regions; i++) {
   557     MemRegion mri = mr.intersection(_covered[i]);
   558     if (!mri.is_empty()) clear_MemRegion(mri);
   559   }
   560 }
   562 void CardTableModRefBS::dirty(MemRegion mr) {
   563   jbyte* first = byte_for(mr.start());
   564   jbyte* last  = byte_after(mr.last());
   565   memset(first, dirty_card, last-first);
   566 }
   568 // Unlike several other card table methods, dirty_card_iterate()
   569 // iterates over dirty card ranges in increasing address order.
   570 void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
   571                                            MemRegionClosure* cl) {
   572   for (int i = 0; i < _cur_covered_regions; i++) {
   573     MemRegion mri = mr.intersection(_covered[i]);
   574     if (!mri.is_empty()) {
   575       jbyte *cur_entry, *next_entry, *limit;
   576       for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
   577            cur_entry <= limit;
   578            cur_entry  = next_entry) {
   579         next_entry = cur_entry + 1;
   580         if (*cur_entry == dirty_card) {
   581           size_t dirty_cards;
   582           // Accumulate maximal dirty card range, starting at cur_entry
   583           for (dirty_cards = 1;
   584                next_entry <= limit && *next_entry == dirty_card;
   585                dirty_cards++, next_entry++);
   586           MemRegion cur_cards(addr_for(cur_entry),
   587                               dirty_cards*card_size_in_words);
   588           cl->do_MemRegion(cur_cards);
   589         }
   590       }
   591     }
   592   }
   593 }
   595 MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
   596                                                           bool reset,
   597                                                           int reset_val) {
   598   for (int i = 0; i < _cur_covered_regions; i++) {
   599     MemRegion mri = mr.intersection(_covered[i]);
   600     if (!mri.is_empty()) {
   601       jbyte* cur_entry, *next_entry, *limit;
   602       for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
   603            cur_entry <= limit;
   604            cur_entry  = next_entry) {
   605         next_entry = cur_entry + 1;
   606         if (*cur_entry == dirty_card) {
   607           size_t dirty_cards;
   608           // Accumulate maximal dirty card range, starting at cur_entry
   609           for (dirty_cards = 1;
   610                next_entry <= limit && *next_entry == dirty_card;
   611                dirty_cards++, next_entry++);
   612           MemRegion cur_cards(addr_for(cur_entry),
   613                               dirty_cards*card_size_in_words);
   614           if (reset) {
   615             for (size_t i = 0; i < dirty_cards; i++) {
   616               cur_entry[i] = reset_val;
   617             }
   618           }
   619           return cur_cards;
   620         }
   621       }
   622     }
   623   }
   624   return MemRegion(mr.end(), mr.end());
   625 }
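       // Note: the function above returns the first maximal run of dirty cards
       // found within mr (optionally resetting those entries to reset_val), or
       // the empty region [mr.end(), mr.end()) if mr contains no dirty card.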
   627 // Set all the dirty cards in the given region to "precleaned" state.
   628 void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
   629   for (int i = 0; i < _cur_covered_regions; i++) {
   630     MemRegion mri = mr.intersection(_covered[i]);
   631     if (!mri.is_empty()) {
   632       jbyte *cur_entry, *limit;
   633       for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
   634            cur_entry <= limit;
   635            cur_entry++) {
   636         if (*cur_entry == dirty_card) {
   637           *cur_entry = precleaned_card;
   638         }
   639       }
   640     }
   641   }
   642 }
   644 uintx CardTableModRefBS::ct_max_alignment_constraint() {
   645   return card_size * os::vm_page_size();
   646 }
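       // For example (values are platform-dependent): with 512-byte cards and a
       // 4 KiB vm_page_size(), this constraint is 512 * 4096 = 2 MiB.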
   648 void CardTableModRefBS::verify_guard() {
   649   // For product build verification
   650   guarantee(_byte_map[_guard_index] == last_card,
   651             "card table guard has been modified");
   652 }
   654 void CardTableModRefBS::verify() {
   655   verify_guard();
   656 }
   658 #ifndef PRODUCT
   659 void CardTableModRefBS::verify_region(MemRegion mr,
   660                                       jbyte val, bool val_equals) {
   661   jbyte* start    = byte_for(mr.start());
   662   jbyte* end      = byte_for(mr.last());
   663   bool   failures = false;
   664   for (jbyte* curr = start; curr <= end; ++curr) {
   665     jbyte curr_val = *curr;
   666     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
   667     if (failed) {
   668       if (!failures) {
   669         tty->cr();
   670         tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
   671         tty->print_cr("==   %sexpecting value: %d",
   672                       (val_equals) ? "" : "not ", val);
   673         failures = true;
   674       }
   675       tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
   676                     "val: %d", curr, addr_for(curr),
   677                     (HeapWord*) (((size_t) addr_for(curr)) + card_size),
   678                     (int) curr_val);
   679     }
   680   }
   681   guarantee(!failures, "there should not have been any failures");
   682 }
   684 void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
   685   verify_region(mr, dirty_card, false /* val_equals */);
   686 }
   688 void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
   689   verify_region(mr, dirty_card, true /* val_equals */);
   690 }
   691 #endif
   693 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
   694   return
   695     CardTableModRefBS::card_will_be_scanned(cv) ||
   696     _rs->is_prev_nonclean_card_val(cv);
   697 };
   699 bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
   700   return
   701     cv != clean_card &&
   702     (CardTableModRefBS::card_may_have_been_dirty(cv) ||
   703      CardTableRS::youngergen_may_have_been_dirty(cv));
   704 };
