src/share/vm/memory/blockOffsetTable.cpp

author       ysr
date         Wed, 13 Jan 2010 15:26:39 -0800
changeset    1601:7b0e9cba0307
parent       777:37f87013dfd8
child        1844:cff162798819
permissions  -rw-r--r--

6896647: card marks can be deferred too long
Summary: Deferred card marks are now flushed during the gc prologue. Parallel[Scavenge,OldGC] and SerialGC no longer defer card marks generated by COMPILER2 as a result of ReduceInitialCardMarks. For these cases, introduced a diagnostic option to defer the card marks, only for the purposes of testing and diagnostics. CMS and G1 continue to defer card marks. Potential performance concern related to single-threaded flushing of deferred card marks in the gc prologue will be addressed in the future.
Reviewed-by: never, johnc

/*
 * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_blockOffsetTable.cpp.incl"

//////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                  "  rs.base(): " INTPTR_FORMAT
                  "  rs.size(): " INTPTR_FORMAT
                  "  rs end(): " INTPTR_FORMAT,
                  rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr("  "
                  "  _vs.low_boundary(): " INTPTR_FORMAT
                  "  _vs.high_boundary(): " INTPTR_FORMAT,
                  _vs.low_boundary(),
                  _vs.high_boundary());
  }
}
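
// A rough sizing example for the shared array (an illustrative assumption:
// 512-byte cards and 8-byte HeapWords, so N_words == 64): compute_size()
// needs one u_char entry per card of the reserved region, so a 1 GB
// reservation is 2^27 heap words, 2^27 / 2^6 = 2^21 cards, and the side
// table reserves roughly 2 MB, rounded up for alignment.  Only the prefix
// covering init_word_size is committed here; resize() below commits or
// uncommits pages as the covered space grows or shrinks.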

void BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid shrinkage");
  }
}

bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}
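
// A small worked example of the test above (assuming N_words == 64, so
// LogN_words == 6): for p at _reserved.start() + 192 words,
// delta & right_n_bits(6) == 192 & 0x3f == 0, so p is a card boundary;
// for p at _reserved.start() + 200 words, 200 & 0x3f == 8, so it is not.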

void BlockOffsetSharedArray::serialize(SerializeOopClosure* soc,
                                       HeapWord* start, HeapWord* end) {
  assert(_offset_array[0] == 0, "objects can't cross covered areas");
  assert(start <= end, "bad address range");
  size_t start_index = index_for(start);
  size_t end_index = index_for(end-1)+1;
  soc->do_region(&_offset_array[start_index],
                 (end_index - start_index) * sizeof(_offset_array[0]));
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////

BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array),
  _init_to_zero(init_to_zero)
{
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}

// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {

  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back skip
  //        (e.g., with this encoding, entry = 0x81 =>
  //          back skip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start == _array->address_for_index(start_card), "Precondition");
  assert(end == _array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
}

// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
    "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
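
// A worked example of the filling loop above, using the 2**(3*i) back-skip
// convention shown in the diagram before set_remainder_to_point_to_start()
// (the concrete Base is defined in the header; the shape is the same).
// Suppose the offset card is at index 11 and we fill start_card = 12 through
// end_card = 111:
//   i = 0: reach = 11 + (8   - 1) =  18  -> cards 12..18  get N_words + 0
//   i = 1: reach = 11 + (64  - 1) =  74  -> cards 19..74  get N_words + 1
//   i = 2: reach = 11 + (512 - 1) = 522  -> reach >= end_card, so
//                                           cards 75..111 get N_words + 2
// Decoding reverses this: an entry of N_words + i sends a reader back
// entry_to_cards_back(N_words + i) cards toward the offset card.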

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    if (c - start_card > power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
    } else {
      guarantee(landing_card == start_card - 1, "Tautology");
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
  }
}

void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}

// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
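
// A worked example of the boundary adjustment in do_block_internal()
// (assuming N_words == 64): let blk_start lie 10 words into card k, with the
// block ending in card k+3.  Since blk_start != boundary, boundary moves to
// the start of card k+1 and start_index becomes k+1.  The "offset card" k+1
// then stores pointer_delta(boundary, blk_start) = 64 - 10 = 54, i.e. "the
// block covering this card begins 54 words before the card's first word",
// and cards k+2..k+3 are filled in by set_remainder_to_point_to_start().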

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to NULL to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = NULL;
  HeapWord* last_start = NULL;
  oop last_o = NULL;

  while (next_index <= last_index) {
    // Use an address one past the start of the address range
    // covered by the entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           o->is_oop_or_null(), "Bad object was found");
    next_index++;
    last_p = p;
    last_start = start;
    last_o = o;
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace
//////////////////////////////////////////////////////////////////////

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
// the somewhat more lightweight split_block() or
// (when init_to_zero()) mark_block() wherever possible.
// right-open interval: [blk_start, blk_end)
void
BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
                                            HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.  We verify the BOT for the first part (prefix) and
// update the BOT for the second part (suffix).
//      blk is the start of the block
//      blk_size is the size of the original block
//      left_blk_size is the size of the first part of the split
void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
                                                 size_t blk_size,
                                                 size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  assert(blk_size > 0, "Should be positive");
  assert(left_blk_size > 0, "Should be positive");
  assert(left_blk_size < blk_size, "Not a split");

  // Start addresses of prefix block and suffix block.
  HeapWord* pref_addr = blk;
  HeapWord* suff_addr = blk + left_blk_size;
  HeapWord* end_addr  = blk + blk_size;

  // Indices for starts of prefix block and suffix block.
  size_t pref_index = _array->index_for(pref_addr);
  if (_array->address_for_index(pref_index) != pref_addr) {
    // pref_addr does not begin pref_index
    pref_index++;
  }

  size_t suff_index = _array->index_for(suff_addr);
  if (_array->address_for_index(suff_index) != suff_addr) {
    // suff_addr does not begin suff_index
    suff_index++;
  }

  // Definition: A block B, denoted [B_start, B_end) __starts__
  //     a card C, denoted [C_start, C_end), where C_start and C_end
  //     are the heap addresses that card C covers, iff
  //     B_start <= C_start < B_end.
  //
  //     We say that a card C "is started by" a block B, iff
  //     B "starts" C.
  //
  //     Note that the cardinality of the set of cards {C}
  //     started by a block B can be 0, 1, or more.
  //
  // Below, pref_index and suff_index are, respectively, the
  // first (least) card indices that the prefix and suffix of
  // the split start; end_index is one more than the index of
  // the last (greatest) card that blk starts.
  size_t end_index  = _array->index_for(end_addr - 1) + 1;

  // Calculate the # cards that the prefix and suffix affect.
  size_t num_pref_cards = suff_index - pref_index;

  size_t num_suff_cards = end_index  - suff_index;
  // Change the cards that need changing
  if (num_suff_cards > 0) {
    HeapWord* boundary = _array->address_for_index(suff_index);
    // Set the offset card for suffix block
    _array->set_offset_array(suff_index, boundary, suff_addr);
    // Change any further cards that need changing in the suffix
    if (num_pref_cards > 0) {
      if (num_pref_cards >= num_suff_cards) {
        // Unilaterally fix all of the suffix cards: closed card
        // index interval in args below.
        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1);
      } else {
        // Unilaterally fix the first (num_pref_cards - 1) following
        // the "offset card" in the suffix block.
        set_remainder_to_point_to_start_incl(suff_index + 1,
          suff_index + num_pref_cards - 1);
        // Fix the appropriate cards in the remainder of the
        // suffix block -- these are the last num_pref_cards
        // cards in each power block of the "new" range plumbed
        // from suff_addr.
        bool more = true;
        uint i = 1;
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            more = false;
          }
          if (back_by > num_pref_cards) {
            // Fill in the remainder of this "power block", if it
            // is non-null.
            if (left_index <= right_index) {
              _array->set_offset_array(left_index, right_index,
                                       N_words + i - 1);
            } else {
              more = false; // we are done
            }
            i++;
            break;
          }
          i++;
        }
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            if (left_index > right_index) {
              break;
            }
            more  = false;
          }
          assert(left_index <= right_index, "Error");
          _array->set_offset_array(left_index, right_index, N_words + i - 1);
          i++;
        }
      }
    } // else no more cards to fix in suffix
  } // else nothing needs to be done
  // Verify that we did the right thing
  verify_single_block(pref_addr, left_blk_size);
  verify_single_block(suff_addr, blk_size - left_blk_size);
}
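
// A worked example for split_block() (assuming N_words == 64): a 1024-word
// block starting exactly on card 40 spans cards 40..55, so end_index == 56.
// Splitting off a 200-word prefix puts suff_addr 8 words into card 43, so
// suff_index is bumped to 44; num_pref_cards == 4 and num_suff_cards == 12.
// Card 44 becomes the suffix's offset card with entry 64 - 8 = 56 (words back
// to suff_addr), and cards 45..55 are re-plumbed to skip back toward it.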

// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
                                           HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}

HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
  }
  assert(q <= addr, "wrong order for current and arg");
  assert(addr <= n, "wrong order for arg and next");
  return q;
}
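
// A walkthrough of the table lookup above (assuming N_words == 64 and the
// 2**(3*i) back-skip convention from the earlier diagram): for an addr in
// card 75 whose entry is N_words + 2, the loop skips back 2**(3*2) = 64
// cards to card 11; if card 11 holds, say, 5 (< N_words), the last block
// boundary recorded by the table is 5 words before card 11's first word.
// From there the second loop walks forward block by block using
// _sp->block_size() until it steps past addr, and returns the last block
// start q with q <= addr.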

HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index);
    if (offset < N_words) {
      q -= offset;
    } else {
      size_t n_cards_back = entry_to_cards_back(offset);
      q -= (n_cards_back * N_words);
      index -= n_cards_back;
    }
  } while (offset >= N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}

#ifndef PRODUCT
// Verification & debugging - ensure that the offset table reflects the fact
// that the block [blk_start, blk_end) or [blk, blk + size) is a
// single block of storage. NOTE: can't const this because of
// call to non-const do_block_internal() below.
void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk_start, HeapWord* blk_end) {
  if (VerifyBlockOffsetArray) {
    do_block_internal(blk_start, blk_end, Action_check);
  }
}

void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk, size_t size) {
  verify_single_block(blk, blk + size);
}

// Verify that the given block is before _unallocated_block
void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
  HeapWord* blk_start, HeapWord* blk_end) const {
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(blk_start < blk_end, "Block inconsistency?");
    assert(blk_end <= _unallocated_block, "_unallocated_block problem");
  }
}

void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
  HeapWord* blk, size_t size) const {
  verify_not_unallocated(blk, blk + size);
}
#endif // PRODUCT

size_t BlockOffsetArrayNonContigSpace::last_active_index() const {
  if (_unallocated_block == _bottom) {
    return 0;
  } else {
    return _array->index_for(_unallocated_block - 1);
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  // Otherwise, find the block start using the table.
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  index = MIN2(index, _next_offset_index-1);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset > N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  while (offset == N_words) {
    assert(q >= _sp->bottom(), "Went below bottom!");
    q -= N_words;
    index--;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
  }
  assert(q <= addr, "wrong order for current and arg");
  assert(addr <= n, "wrong order for arg and next");
  return q;
}

//
//              _next_offset_threshold
//              |   _next_offset_index
//              v   v
//      +-------+-------+-------+-------+-------+
//      | i-1   |   i   | i+1   | i+2   | i+3   |
//      +-------+-------+-------+-------+-------+
//       ( ^    ]
//         block-start
//

void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
                                        HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  assert(blk_end > _next_offset_threshold,
         "should be past threshold");
  assert(blk_start <= _next_offset_threshold,
         "blk_start should be at or before threshold");
  assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
         "offset should be <= BlockOffsetSharedArray::N");
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  assert(_next_offset_threshold ==
         _array->_reserved.start() + _next_offset_index*N_words,
         "index must agree with threshold");

  debug_only(size_t orig_next_offset_index = _next_offset_index;)

  // Mark the card that holds the offset into the block.  Note
  // that _next_offset_index and _next_offset_threshold are not
  // updated until the end of this method.
  _array->set_offset_array(_next_offset_index,
                           _next_offset_threshold,
                           blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index   = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (_next_offset_index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(_next_offset_index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  // _next_offset_index and _next_offset_threshold updated here.
  _next_offset_index = end_index + 1;
  // Calculate _next_offset_threshold this way because end_index
  // may be the last valid index in the covered region.
  _next_offset_threshold = _array->address_for_index(end_index) +
    N_words;
  assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary    = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_next_offset_index) == 0 &&
          blk_start == boundary) ||
          (_array->offset_array(orig_next_offset_index) > 0 &&
         _array->offset_array(orig_next_offset_index) <= N_words),
         "offset array should have been set");
  for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <= (u_char) (N_words+N_powers-1),
           "offset array should have been set");
  }
#endif
}
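
// A worked example of the threshold update above (assuming N_words == 64):
// let _next_offset_threshold be the start of card 20 and allocate a 150-word
// block starting 30 words before that threshold.  Card 20 (the current
// _next_offset_index) gets entry pointer_delta(threshold, blk_start) = 30;
// blk_end is 120 words past the old threshold, so end_index == 21 and card 21
// is filled by set_remainder_to_point_to_start().  Finally _next_offset_index
// becomes 22 and _next_offset_threshold becomes the start of card 22, ready
// for the next allocation that crosses it.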

HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}

void BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  _array->set_offset_array(bottom_index, 0);
}

void BlockOffsetArrayContigSpace::serialize(SerializeOopClosure* soc) {
  if (soc->reading()) {
    // Null these values so that the serializer won't object to updating them.
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }
  soc->do_ptr(&_next_offset_threshold);
  soc->do_size_t(&_next_offset_index);
}

size_t BlockOffsetArrayContigSpace::last_active_index() const {
  // _next_offset_index is a size_t, so guard the subtraction rather than
  // testing "result >= 0", which is always true for an unsigned type.
  return _next_offset_index == 0 ? 0 : _next_offset_index - 1;
}
