src/share/vm/memory/cardTableModRefBS.cpp

author:      tschatzl
date:        Thu, 21 Aug 2014 11:47:10 +0200
changeset:   7051:1f1d373cd044
parent:      6992:2c6ef90f030a
child:       7535:7ae4e26cb1e0
child:       9327:f96fcd9e1e1b
permissions: -rw-r--r--

8038423: G1: Decommit memory within heap
Summary: Allow G1 to decommit memory of arbitrary regions within the heap and their associated auxiliary data structures: card table, BOT, hot card cache, and mark bitmaps.
Reviewed-by: mgerdin, brutisso, jwilhelm

/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
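//
// A minimal sketch of the card-marking idea this barrier set implements
// (illustrative only; the real barrier is emitted via inline_write_ref_field()
// and by the compilers' store-check code):
//
//   void post_write_barrier(void* field) {
//     jbyte* card = byte_map_base + (uintptr_t(field) >> card_shift);
//     *card = dirty_card;  // dirty the card covering "field"
//   }
//
// A later scan of the byte map then finds all cards whose covered heap
// ranges may contain modified ref fields.
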
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
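
// (Back-of-the-envelope example, assuming the usual 512-byte cards: the
// byte map needs one byte per card, so a 128 MB heap requires about
// 128 MB / 512 = 256 KB, plus the guard card, rounded up to the page or
// allocation granularity by compute_byte_map_size() above.)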

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  _kind = BarrierSet::CardTableModRef;

  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

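  // (Worked example with illustrative addresses, assuming 512-byte cards,
  // i.e. card_shift == 9: if low_bound == 0x100000000 and
  // _byte_map == 0x7f0000000000, then
  //   byte_map_base = 0x7f0000000000 - (0x100000000 >> 9)
  //                 = 0x7f0000000000 - 0x800000
  //                 = 0x7effff800000,
  // so byte_for(0x100000000) == byte_map_base + (0x100000000 >> 9)
  // lands back at &_byte_map[0], as the assert above checks.)
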
  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

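// (Illustrative example for committed_unique_to_self() below, with made-up
// addresses: if mr = [0x1000, 0x3000), another region's committed range is
// [0x2000, 0x4000), and the guard region lies elsewhere, the result is
// [0x1000, 0x2000): the part of mr that no other covered region has
// committed.)
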
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
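// (Illustrative note: for a store to a field at address f inside object obj,
// a precise barrier dirties byte_for(f), the card covering the field itself,
// whereas an imprecise barrier may dirty only byte_for(obj), the card
// covering the start of the object.)
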
void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism:
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads =  SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
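// (Illustrative example: if the cards covering mr hold the values
// [dirty, dirty, clean, dirty], the closure is invoked twice, once with
// the two-card region covering the first run and once with the one-card
// region covering the last.)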
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

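// (For example, with the usual 512-byte cards and a typical 4 KB page size,
// the constraint below evaluates to 512 * 4096 bytes = 2 MB.)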
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}
