src/share/vm/memory/cardTableModRefBS.cpp

author:      stefank
date:        Mon, 07 Jul 2014 10:12:40 +0200
changeset:   6992:2c6ef90f030a
parent:      6680:78bbf4d43a14
child:       7051:1f1d373cd044
permissions: -rw-r--r--

8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com

/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)
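//
// Illustrative note (not part of the original comment): each card covers
// card_size (512) bytes of the heap and is represented by one byte in the
// card table; a reference store dirties the byte for the card containing
// the updated field, and a later scan of the dirty cards finds the
// modified oops.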

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
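
// Illustrative example: with 512-byte cards, covering a 64 MB heap
// (8 M words on a 64-bit VM) needs 64 MB / 512 B = 131072 cards, plus the
// guard card, so cards_required() returns 131073.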

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
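  //
  // Illustrative example: with card_shift == 9 and a heap starting at
  // 0x80000000, byte_map_base == _byte_map - (0x80000000 >> 9)
  // == _byte_map - 0x400000, so byte_for(p) can be computed as
  // byte_map_base + (uintptr_t(p) >> card_shift) without first
  // subtracting the heap base.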
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;
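
  // The "lowest non-clean" (LNC) arrays below are only allocated and
  // NULL-initialized here; the parallel card scanning code (see
  // parCardTableModRefBS.cpp) maintains them to coordinate processing of
  // cards at chunk boundaries within each covered region.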
  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
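
// Illustrative note: committed_unique_to_self() is what keeps a shrink of
// one covered region from uncommitting card table pages that another region
// still relies on; any page shared with another region's committed range
// (or with the guard page) is removed from the candidate MemRegion first.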

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
            "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
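// ("Precise" means the card for the updated field itself is dirtied; an
// imprecise barrier may instead dirty only the card containing the start of
// the object that holds the field.)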

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    //  This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads =  SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
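
// Illustrative note: one OS page of card table bytes maps
// card_size * vm_page_size() bytes of heap, so aligning heap boundaries to
// this value keeps each covered region's card table bytes on distinct pages,
// which allows them to be committed and uncommitted independently.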

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool   failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};
