src/share/vm/memory/blockOffsetTable.cpp

author       ysr
date         Mon, 16 Aug 2010 15:58:42 -0700
changeset    2071:be3f9c242c9d
parent       1907:c18cbe5936b8
child        2314:f95d63e2154a
permissions  -rw-r--r--

6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate, transient state of blocks that are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state; instead they wait for the block to become initialized so that it is safe to query its size. Audited and ensured the order of initialization of object fields (klass, free bit and size) so that it respects the block state transition protocol. Also included some new assertion-checking code, enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
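
The core idea above is that a block can be observed in a transient state: already allocated, but with its size not yet safely readable. Below is a minimal, self-contained C++ sketch of that idea; it is illustrative only and is not HotSpot code (the names Block, publish_size and wait_for_size are invented here). The allocating thread publishes the size last, and a reader that sees no size yet waits instead of guessing.

    #include <atomic>
    #include <cstddef>

    struct Block {
      // Zero means "allocated but not yet initialized" (the transient state).
      std::atomic<size_t> word_size{0};
    };

    // Writer side: initialize all other fields first, publish the size last.
    void publish_size(Block* blk, size_t sz) {
      blk->word_size.store(sz, std::memory_order_release);
    }

    // Reader side (e.g. a BOT walker): never parse a transient block; wait
    // until the allocating thread has made the size visible.
    size_t wait_for_size(const Block* blk) {
      size_t sz;
      while ((sz = blk->word_size.load(std::memory_order_acquire)) == 0) {
        // spin: initialization is still in progress
      }
      return sz;
    }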

     1 /*
     2  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_blockOffsetTable.cpp.incl"
    28 //////////////////////////////////////////////////////////////////////
    29 // BlockOffsetSharedArray
    30 //////////////////////////////////////////////////////////////////////
    32 BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
    33                                                size_t init_word_size):
    34   _reserved(reserved), _end(NULL)
    35 {
    36   size_t size = compute_size(reserved.word_size());
    37   ReservedSpace rs(size);
    38   if (!rs.is_reserved()) {
    39     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
    40   }
    41   if (!_vs.initialize(rs, 0)) {
    42     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
    43   }
    44   _offset_array = (u_char*)_vs.low_boundary();
    45   resize(init_word_size);
    46   if (TraceBlockOffsetTable) {
    47     gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    48     gclog_or_tty->print_cr("  "
    49                   "  rs.base(): " INTPTR_FORMAT
    50                   "  rs.size(): " INTPTR_FORMAT
    51                   "  rs end(): " INTPTR_FORMAT,
    52                   rs.base(), rs.size(), rs.base() + rs.size());
    53     gclog_or_tty->print_cr("  "
    54                   "  _vs.low_boundary(): " INTPTR_FORMAT
    55                   "  _vs.high_boundary(): " INTPTR_FORMAT,
    56                   _vs.low_boundary(),
    57                   _vs.high_boundary());
    58   }
    59 }
    61 void BlockOffsetSharedArray::resize(size_t new_word_size) {
    62   assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
    63   size_t new_size = compute_size(new_word_size);
    64   size_t old_size = _vs.committed_size();
    65   size_t delta;
    66   char* high = _vs.high();
    67   _end = _reserved.start() + new_word_size;
    68   if (new_size > old_size) {
    69     delta = ReservedSpace::page_align_size_up(new_size - old_size);
    70     assert(delta > 0, "just checking");
    71     if (!_vs.expand_by(delta)) {
    72       // Do better than this for Merlin
    73       vm_exit_out_of_memory(delta, "offset table expansion");
    74     }
    75     assert(_vs.high() == high + delta, "invalid expansion");
    76   } else {
    77     delta = ReservedSpace::page_align_size_down(old_size - new_size);
    78     if (delta == 0) return;
    79     _vs.shrink_by(delta);
     80     assert(_vs.high() == high - delta, "invalid shrinkage");
    81   }
    82 }
    84 bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
    85   assert(p >= _reserved.start(), "just checking");
    86   size_t delta = pointer_delta(p, _reserved.start());
    87   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
    88 }
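
// For example (illustrative, assuming N_words == 64, i.e. LogN_words == 6):
// an address 128 words past _reserved.start() is a card boundary, since
// 128 & (N_words - 1) == 0, while an address 130 words past it is not.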
    91 void BlockOffsetSharedArray::serialize(SerializeOopClosure* soc,
    92                                        HeapWord* start, HeapWord* end) {
    93   assert(_offset_array[0] == 0, "objects can't cross covered areas");
    94   assert(start <= end, "bad address range");
    95   size_t start_index = index_for(start);
    96   size_t end_index = index_for(end-1)+1;
    97   soc->do_region(&_offset_array[start_index],
    98                  (end_index - start_index) * sizeof(_offset_array[0]));
    99 }
   101 //////////////////////////////////////////////////////////////////////
   102 // BlockOffsetArray
   103 //////////////////////////////////////////////////////////////////////
   105 BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
   106                                    MemRegion mr, bool init_to_zero_) :
   107   BlockOffsetTable(mr.start(), mr.end()),
   108   _array(array)
   109 {
   110   assert(_bottom <= _end, "arguments out of order");
   111   set_init_to_zero(init_to_zero_);
   112   if (!init_to_zero_) {
   113     // initialize cards to point back to mr.start()
   114     set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
   115     _array->set_offset_array(0, 0);  // set first card to 0
   116   }
   117 }
   120 // The arguments follow the normal convention of denoting
   121 // a right-open interval: [start, end)
   122 void
   123 BlockOffsetArray::
   124 set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {
   126   check_reducing_assertion(reducing);
   127   if (start >= end) {
   128     // The start address is equal to the end address (or to
    129     // the right of the end address) so there are no cards
    130     // that need to be updated.
   131     return;
   132   }
   134   // Write the backskip value for each region.
   135   //
   136   //    offset
   137   //    card             2nd                       3rd
   138   //     | +- 1st        |                         |
   139   //     v v             v                         v
   140   //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
   141   //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
   142   //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
   143   //    11              19                        75
   144   //      12
   145   //
   146   //    offset card is the card that points to the start of an object
   147   //      x - offset value of offset card
   148   //    1st - start of first logarithmic region
   149   //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
   150   //    2nd - start of second logarithmic region
   151   //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
   152   //    3rd - start of third logarithmic region
   153   //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
   154   //
   155   //    integer below the block offset entry is an example of
   156   //    the index of the entry
   157   //
   158   //    Given an address,
   159   //      Find the index for the address
   160   //      Find the block offset table entry
    161   //      Convert the entry to a backskip value
    162   //        (e.g., with today's geometry, offset = 0x81 =>
    163   //          backskip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
   164   //      Move back N (e.g., 8) entries and repeat with the
   165   //        value of the new entry
   166   //
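  //    Worked example (illustrative, using the 2**(3*i) scheme and the
  //    card indices pictured above): a walker that lands on card 139
  //    reads entry N_words + 2, i.e. a backskip of 2**(3*2) = 64 cards,
  //    taking it to card 139 - 64 = 75; that card also holds N_words + 2,
  //    so another 64-card hop lands on card 75 - 64 = 11, the offset
  //    card, whose entry x < N_words is the direct word offset back to
  //    the start of the covering block.
  //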
   167   size_t start_card = _array->index_for(start);
   168   size_t end_card = _array->index_for(end-1);
   169   assert(start ==_array->address_for_index(start_card), "Precondition");
   170   assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
   171   set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
   172 }
   175 // Unlike the normal convention in this code, the argument here denotes
   176 // a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
   177 // above.
   178 void
   179 BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {
   181   check_reducing_assertion(reducing);
   182   if (start_card > end_card) {
   183     return;
   184   }
   185   assert(start_card > _array->index_for(_bottom), "Cannot be first card");
   186   assert(_array->offset_array(start_card-1) <= N_words,
   187     "Offset card has an unexpected value");
   188   size_t start_card_for_region = start_card;
   189   u_char offset = max_jubyte;
   190   for (int i = 0; i < N_powers; i++) {
    191     // -1 so that the card with the actual offset is counted.  Another -1
   192     // so that the reach ends in this region and not at the start
   193     // of the next.
   194     size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
   195     offset = N_words + i;
   196     if (reach >= end_card) {
   197       _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
   198       start_card_for_region = reach + 1;
   199       break;
   200     }
   201     _array->set_offset_array(start_card_for_region, reach, offset, reducing);
   202     start_card_for_region = reach + 1;
   203   }
   204   assert(start_card_for_region > end_card, "Sanity check");
   205   DEBUG_ONLY(check_all_cards(start_card, end_card);)
   206 }
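
// A self-contained sketch (kept in comment form so as not to redefine names
// that already exist in blockOffsetTable.hpp) of the entry encoding that the
// loop above relies on.  The constants are assumptions taken from the
// 2**(3*i) example in the earlier comment, not necessarily the shipped
// values:
//
//   const size_t N_words = 64;     // heap words covered by one card (assumed)
//   const int    LogBase = 3;      // one power step reaches 2**3 = 8x further
//
//   // Power i of the logarithmic scheme reaches back Base**i cards.
//   size_t power_to_cards_back(int i) {
//     return (size_t)1 << (LogBase * i);
//   }
//
//   // An entry >= N_words means "skip back this many cards and re-read".
//   size_t entry_to_cards_back(unsigned char entry) {
//     return power_to_cards_back(entry - N_words);
//   }
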
   208 // The card-interval [start_card, end_card] is a closed interval; this
   209 // is an expensive check -- use with care and only under protection of
   210 // suitable flag.
   211 void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
   213   if (end_card < start_card) {
   214     return;
   215   }
   216   guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
   217   u_char last_entry = N_words;
   218   for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
   219     u_char entry = _array->offset_array(c);
   220     guarantee(entry >= last_entry, "Monotonicity");
   221     if (c - start_card > power_to_cards_back(1)) {
   222       guarantee(entry > N_words, "Should be in logarithmic region");
   223     }
   224     size_t backskip = entry_to_cards_back(entry);
   225     size_t landing_card = c - backskip;
   226     guarantee(landing_card >= (start_card - 1), "Inv");
   227     if (landing_card >= start_card) {
   228       guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
   229     } else {
   230       guarantee(landing_card == (start_card - 1), "Tautology");
   231       // Note that N_words is the maximum offset value
   232       guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
   233     }
   234     last_entry = entry;  // remember for monotonicity test
   235   }
   236 }
   239 void
   240 BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
   241   assert(blk_start != NULL && blk_end > blk_start,
   242          "phantom block");
   243   single_block(blk_start, blk_end);
   244 }
   246 // Action_mark - update the BOT for the block [blk_start, blk_end).
   247 //               Current typical use is for splitting a block.
    248 // Action_single - update the BOT for an allocation.
   249 // Action_verify - BOT verification.
   250 void
   251 BlockOffsetArray::do_block_internal(HeapWord* blk_start,
   252                                     HeapWord* blk_end,
   253                                     Action action, bool reducing) {
   254   assert(Universe::heap()->is_in_reserved(blk_start),
   255          "reference must be into the heap");
   256   assert(Universe::heap()->is_in_reserved(blk_end-1),
   257          "limit must be within the heap");
   258   // This is optimized to make the test fast, assuming we only rarely
   259   // cross boundaries.
   260   uintptr_t end_ui = (uintptr_t)(blk_end - 1);
   261   uintptr_t start_ui = (uintptr_t)blk_start;
   262   // Calculate the last card boundary preceding end of blk
   263   intptr_t boundary_before_end = (intptr_t)end_ui;
   264   clear_bits(boundary_before_end, right_n_bits(LogN));
   265   if (start_ui <= (uintptr_t)boundary_before_end) {
   266     // blk starts at or crosses a boundary
   267     // Calculate index of card on which blk begins
   268     size_t    start_index = _array->index_for(blk_start);
   269     // Index of card on which blk ends
   270     size_t    end_index   = _array->index_for(blk_end - 1);
   271     // Start address of card on which blk begins
   272     HeapWord* boundary    = _array->address_for_index(start_index);
   273     assert(boundary <= blk_start, "blk should start at or after boundary");
   274     if (blk_start != boundary) {
   275       // blk starts strictly after boundary
   276       // adjust card boundary and start_index forward to next card
   277       boundary += N_words;
   278       start_index++;
   279     }
   280     assert(start_index <= end_index, "monotonicity of index_for()");
   281     assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
   282     switch (action) {
   283       case Action_mark: {
   284         if (init_to_zero()) {
   285           _array->set_offset_array(start_index, boundary, blk_start, reducing);
   286           break;
   287         } // Else fall through to the next case
   288       }
   289       case Action_single: {
   290         _array->set_offset_array(start_index, boundary, blk_start, reducing);
   291         // We have finished marking the "offset card". We need to now
   292         // mark the subsequent cards that this blk spans.
   293         if (start_index < end_index) {
   294           HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
   295           HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
   296           set_remainder_to_point_to_start(rem_st, rem_end, reducing);
   297         }
   298         break;
   299       }
   300       case Action_check: {
   301         _array->check_offset_array(start_index, boundary, blk_start);
   302         // We have finished checking the "offset card". We need to now
   303         // check the subsequent cards that this blk spans.
   304         check_all_cards(start_index + 1, end_index);
   305         break;
   306       }
   307       default:
   308         ShouldNotReachHere();
   309     }
   310   }
   311 }
   313 // The range [blk_start, blk_end) represents a single contiguous block
   314 // of storage; modify the block offset table to represent this
   315 // information; Right-open interval: [blk_start, blk_end)
   316 // NOTE: this method does _not_ adjust _unallocated_block.
   317 void
   318 BlockOffsetArray::single_block(HeapWord* blk_start,
   319                                HeapWord* blk_end) {
   320   do_block_internal(blk_start, blk_end, Action_single);
   321 }
   323 void BlockOffsetArray::verify() const {
   324   // For each entry in the block offset table, verify that
   325   // the entry correctly finds the start of an object at the
   326   // first address covered by the block or to the left of that
   327   // first address.
   329   size_t next_index = 1;
   330   size_t last_index = last_active_index();
   332   // Use for debugging.  Initialize to NULL to distinguish the
   333   // first iteration through the while loop.
   334   HeapWord* last_p = NULL;
   335   HeapWord* last_start = NULL;
   336   oop last_o = NULL;
   338   while (next_index <= last_index) {
    339     // Use an address just past the start of the card covered
    340     // by the entry.
   341     HeapWord* p = _array->address_for_index(next_index) + 1;
   342     if (p >= _end) {
   343       // That's all of the allocated block table.
   344       return;
   345     }
   346     // block_start() asserts that start <= p.
   347     HeapWord* start = block_start(p);
   348     // First check if the start is an allocated block and only
   349     // then if it is a valid object.
   350     oop o = oop(start);
   351     assert(!Universe::is_fully_initialized() ||
   352            _sp->is_free_block(start) ||
   353            o->is_oop_or_null(), "Bad object was found");
   354     next_index++;
   355     last_p = p;
   356     last_start = start;
   357     last_o = o;
   358   }
   359 }
   361 //////////////////////////////////////////////////////////////////////
   362 // BlockOffsetArrayNonContigSpace
   363 //////////////////////////////////////////////////////////////////////
   365 // The block [blk_start, blk_end) has been allocated;
   366 // adjust the block offset table to represent this information;
   367 // NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
   368 // the somewhat more lightweight split_block() or
   369 // (when init_to_zero()) mark_block() wherever possible.
   370 // right-open interval: [blk_start, blk_end)
   371 void
   372 BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
   373                                             HeapWord* blk_end) {
   374   assert(blk_start != NULL && blk_end > blk_start,
   375          "phantom block");
   376   single_block(blk_start, blk_end);
   377   allocated(blk_start, blk_end);
   378 }
   380 // Adjust BOT to show that a previously whole block has been split
   381 // into two.  We verify the BOT for the first part (prefix) and
   382 // update the  BOT for the second part (suffix).
   383 //      blk is the start of the block
   384 //      blk_size is the size of the original block
   385 //      left_blk_size is the size of the first part of the split
   386 void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
   387                                                  size_t blk_size,
   388                                                  size_t left_blk_size) {
   389   // Verify that the BOT shows [blk, blk + blk_size) to be one block.
   390   verify_single_block(blk, blk_size);
   391   // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
   392   // is one single block.
   393   assert(blk_size > 0, "Should be positive");
   394   assert(left_blk_size > 0, "Should be positive");
   395   assert(left_blk_size < blk_size, "Not a split");
   397   // Start addresses of prefix block and suffix block.
   398   HeapWord* pref_addr = blk;
   399   HeapWord* suff_addr = blk + left_blk_size;
   400   HeapWord* end_addr  = blk + blk_size;
   402   // Indices for starts of prefix block and suffix block.
   403   size_t pref_index = _array->index_for(pref_addr);
   404   if (_array->address_for_index(pref_index) != pref_addr) {
   405     // pref_addr does not begin pref_index
   406     pref_index++;
   407   }
   409   size_t suff_index = _array->index_for(suff_addr);
   410   if (_array->address_for_index(suff_index) != suff_addr) {
   411     // suff_addr does not begin suff_index
   412     suff_index++;
   413   }
   415   // Definition: A block B, denoted [B_start, B_end) __starts__
   416   //     a card C, denoted [C_start, C_end), where C_start and C_end
   417   //     are the heap addresses that card C covers, iff
   418   //     B_start <= C_start < B_end.
   419   //
   420   //     We say that a card C "is started by" a block B, iff
   421   //     B "starts" C.
   422   //
   423   //     Note that the cardinality of the set of cards {C}
   424   //     started by a block B can be 0, 1, or more.
   425   //
   426   // Below, pref_index and suff_index are, respectively, the
   427   // first (least) card indices that the prefix and suffix of
   428   // the split start; end_index is one more than the index of
   429   // the last (greatest) card that blk starts.
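  //   For example (illustrative): with cards of N_words words, a suffix
  //   that begins exactly on a card boundary starts that card, so
  //   suff_index stays where index_for() put it; a suffix that begins
  //   partway into a card does not start that card, which is why
  //   suff_index is bumped to the following card above.  A block shorter
  //   than a card that covers no card boundary starts no cards at all.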
   430   size_t end_index  = _array->index_for(end_addr - 1) + 1;
   432   // Calculate the # cards that the prefix and suffix affect.
   433   size_t num_pref_cards = suff_index - pref_index;
   435   size_t num_suff_cards = end_index  - suff_index;
   436   // Change the cards that need changing
   437   if (num_suff_cards > 0) {
   438     HeapWord* boundary = _array->address_for_index(suff_index);
   439     // Set the offset card for suffix block
   440     _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
   441     // Change any further cards that need changing in the suffix
   442     if (num_pref_cards > 0) {
   443       if (num_pref_cards >= num_suff_cards) {
   444         // Unilaterally fix all of the suffix cards: closed card
   445         // index interval in args below.
   446         set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
   447       } else {
   448         // Unilaterally fix the first (num_pref_cards - 1) following
   449         // the "offset card" in the suffix block.
   450         set_remainder_to_point_to_start_incl(suff_index + 1,
   451           suff_index + num_pref_cards - 1, true /* reducing */);
   452         // Fix the appropriate cards in the remainder of the
   453         // suffix block -- these are the last num_pref_cards
   454         // cards in each power block of the "new" range plumbed
   455         // from suff_addr.
   456         bool more = true;
   457         uint i = 1;
   458         while (more && (i < N_powers)) {
   459           size_t back_by = power_to_cards_back(i);
   460           size_t right_index = suff_index + back_by - 1;
   461           size_t left_index  = right_index - num_pref_cards + 1;
   462           if (right_index >= end_index - 1) { // last iteration
   463             right_index = end_index - 1;
   464             more = false;
   465           }
   466           if (back_by > num_pref_cards) {
   467             // Fill in the remainder of this "power block", if it
    468             // is non-empty.
   469             if (left_index <= right_index) {
   470               _array->set_offset_array(left_index, right_index,
   471                                      N_words + i - 1, true /* reducing */);
   472             } else {
   473               more = false; // we are done
   474             }
   475             i++;
   476             break;
   477           }
   478           i++;
   479         }
   480         while (more && (i < N_powers)) {
   481           size_t back_by = power_to_cards_back(i);
   482           size_t right_index = suff_index + back_by - 1;
   483           size_t left_index  = right_index - num_pref_cards + 1;
   484           if (right_index >= end_index - 1) { // last iteration
   485             right_index = end_index - 1;
   486             if (left_index > right_index) {
   487               break;
   488             }
   489             more  = false;
   490           }
   491           assert(left_index <= right_index, "Error");
   492           _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
   493           i++;
   494         }
   495       }
   496     } // else no more cards to fix in suffix
   497   } // else nothing needs to be done
   498   // Verify that we did the right thing
   499   verify_single_block(pref_addr, left_blk_size);
   500   verify_single_block(suff_addr, blk_size - left_blk_size);
   501 }
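
// Illustrative use (the variable names below are hypothetical): splitting
// a 100-word free chunk into a 60-word prefix and a 40-word suffix keeps
// the BOT consistent for both resulting blocks:
//
//   HeapWord* chunk = ...;               // start of an existing 100-word block
//   bot->split_block(chunk, 100, 60);    // prefix  [chunk, chunk + 60)
//                                        // suffix  [chunk + 60, chunk + 100)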
   504 // Mark the BOT such that if [blk_start, blk_end) straddles a card
   505 // boundary, the card following the first such boundary is marked
   506 // with the appropriate offset.
   507 // NOTE: this method does _not_ adjust _unallocated_block or
   508 // any cards subsequent to the first one.
   509 void
   510 BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
   511                                            HeapWord* blk_end, bool reducing) {
   512   do_block_internal(blk_start, blk_end, Action_mark, reducing);
   513 }
   515 HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
   516   const void* addr) const {
   517   assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
   518   assert(_bottom <= addr && addr < _end,
   519          "addr must be covered by this Array");
   520   // Must read this exactly once because it can be modified by parallel
   521   // allocation.
   522   HeapWord* ub = _unallocated_block;
   523   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
   524     assert(ub < _end, "tautology (see above)");
   525     return ub;
   526   }
   528   // Otherwise, find the block start using the table.
   529   size_t index = _array->index_for(addr);
   530   HeapWord* q = _array->address_for_index(index);
   532   uint offset = _array->offset_array(index);    // Extend u_char to uint.
   533   while (offset >= N_words) {
   534     // The excess of the offset from N_words indicates a power of Base
   535     // to go back by.
   536     size_t n_cards_back = entry_to_cards_back(offset);
   537     q -= (N_words * n_cards_back);
   538     assert(q >= _sp->bottom(), "Went below bottom!");
   539     index -= n_cards_back;
   540     offset = _array->offset_array(index);
   541   }
   542   assert(offset < N_words, "offset too large");
   543   index--;
   544   q -= offset;
   545   HeapWord* n = q;
   547   while (n <= addr) {
   548     debug_only(HeapWord* last = q);   // for debugging
   549     q = n;
   550     n += _sp->block_size(n);
   551     assert(n > q, err_msg("Looping at: " INTPTR_FORMAT, n));
   552   }
   553   assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
   554   assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
   555   return q;
   556 }
   558 HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
   559   const void* addr) const {
   560   assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
   562   assert(_bottom <= addr && addr < _end,
   563          "addr must be covered by this Array");
   564   // Must read this exactly once because it can be modified by parallel
   565   // allocation.
   566   HeapWord* ub = _unallocated_block;
   567   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
   568     assert(ub < _end, "tautology (see above)");
   569     return ub;
   570   }
   572   // Otherwise, find the block start using the table, but taking
   573   // care (cf block_start_unsafe() above) not to parse any objects/blocks
    574   // on the cards themselves.
   575   size_t index = _array->index_for(addr);
   576   assert(_array->address_for_index(index) == addr,
   577          "arg should be start of card");
   579   HeapWord* q = (HeapWord*)addr;
   580   uint offset;
   581   do {
   582     offset = _array->offset_array(index);
   583     if (offset < N_words) {
   584       q -= offset;
   585     } else {
   586       size_t n_cards_back = entry_to_cards_back(offset);
   587       q -= (n_cards_back * N_words);
   588       index -= n_cards_back;
   589     }
   590   } while (offset >= N_words);
   591   assert(q <= addr, "block start should be to left of arg");
   592   return q;
   593 }
   595 #ifndef PRODUCT
   596 // Verification & debugging - ensure that the offset table reflects the fact
   597 // that the block [blk_start, blk_end) or [blk, blk + size) is a
   598 // single block of storage. NOTE: can't const this because of
   599 // call to non-const do_block_internal() below.
   600 void BlockOffsetArrayNonContigSpace::verify_single_block(
   601   HeapWord* blk_start, HeapWord* blk_end) {
   602   if (VerifyBlockOffsetArray) {
   603     do_block_internal(blk_start, blk_end, Action_check);
   604   }
   605 }
   607 void BlockOffsetArrayNonContigSpace::verify_single_block(
   608   HeapWord* blk, size_t size) {
   609   verify_single_block(blk, blk + size);
   610 }
   612 // Verify that the given block is before _unallocated_block
   613 void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
   614   HeapWord* blk_start, HeapWord* blk_end) const {
   615   if (BlockOffsetArrayUseUnallocatedBlock) {
   616     assert(blk_start < blk_end, "Block inconsistency?");
   617     assert(blk_end <= _unallocated_block, "_unallocated_block problem");
   618   }
   619 }
   621 void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
   622   HeapWord* blk, size_t size) const {
   623   verify_not_unallocated(blk, blk + size);
   624 }
   625 #endif // PRODUCT
   627 size_t BlockOffsetArrayNonContigSpace::last_active_index() const {
   628   if (_unallocated_block == _bottom) {
   629     return 0;
   630   } else {
   631     return _array->index_for(_unallocated_block - 1);
   632   }
   633 }
   635 //////////////////////////////////////////////////////////////////////
   636 // BlockOffsetArrayContigSpace
   637 //////////////////////////////////////////////////////////////////////
   639 HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const {
   640   assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
   642   // Otherwise, find the block start using the table.
   643   assert(_bottom <= addr && addr < _end,
   644          "addr must be covered by this Array");
   645   size_t index = _array->index_for(addr);
   646   // We must make sure that the offset table entry we use is valid.  If
   647   // "addr" is past the end, start at the last known one and go forward.
   648   index = MIN2(index, _next_offset_index-1);
   649   HeapWord* q = _array->address_for_index(index);
   651   uint offset = _array->offset_array(index);    // Extend u_char to uint.
   652   while (offset > N_words) {
   653     // The excess of the offset from N_words indicates a power of Base
   654     // to go back by.
   655     size_t n_cards_back = entry_to_cards_back(offset);
   656     q -= (N_words * n_cards_back);
   657     assert(q >= _sp->bottom(), "Went below bottom!");
   658     index -= n_cards_back;
   659     offset = _array->offset_array(index);
   660   }
   661   while (offset == N_words) {
   662     assert(q >= _sp->bottom(), "Went below bottom!");
   663     q -= N_words;
   664     index--;
   665     offset = _array->offset_array(index);
   666   }
   667   assert(offset < N_words, "offset too large");
   668   q -= offset;
   669   HeapWord* n = q;
   671   while (n <= addr) {
   672     debug_only(HeapWord* last = q);   // for debugging
   673     q = n;
   674     n += _sp->block_size(n);
   675   }
   676   assert(q <= addr, "wrong order for current and arg");
   677   assert(addr <= n, "wrong order for arg and next");
   678   return q;
   679 }
   681 //
   682 //              _next_offset_threshold
   683 //              |   _next_offset_index
   684 //              v   v
   685 //      +-------+-------+-------+-------+-------+
   686 //      | i-1   |   i   | i+1   | i+2   | i+3   |
   687 //      +-------+-------+-------+-------+-------+
   688 //       ( ^    ]
   689 //         block-start
   690 //
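//      For example (illustrative): a newly allocated block that starts just
//      before _next_offset_threshold and ends two cards beyond it gets its
//      offset recorded in card _next_offset_index; the further cards it
//      spans are filled in via set_remainder_to_point_to_start(), and both
//      _next_offset_index and _next_offset_threshold are then advanced to
//      the first card (and card boundary) at or after blk_end.
//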
   692 void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
   693                                         HeapWord* blk_end) {
   694   assert(blk_start != NULL && blk_end > blk_start,
   695          "phantom block");
   696   assert(blk_end > _next_offset_threshold,
   697          "should be past threshold");
   698   assert(blk_start <= _next_offset_threshold,
   699          "blk_start should be at or before threshold");
   700   assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
   701          "offset should be <= BlockOffsetSharedArray::N");
   702   assert(Universe::heap()->is_in_reserved(blk_start),
   703          "reference must be into the heap");
   704   assert(Universe::heap()->is_in_reserved(blk_end-1),
   705          "limit must be within the heap");
   706   assert(_next_offset_threshold ==
   707          _array->_reserved.start() + _next_offset_index*N_words,
   708          "index must agree with threshold");
   710   debug_only(size_t orig_next_offset_index = _next_offset_index;)
   712   // Mark the card that holds the offset into the block.  Note
   713   // that _next_offset_index and _next_offset_threshold are not
   714   // updated until the end of this method.
   715   _array->set_offset_array(_next_offset_index,
   716                            _next_offset_threshold,
   717                            blk_start);
   719   // We need to now mark the subsequent cards that this blk spans.
   721   // Index of card on which blk ends.
   722   size_t end_index   = _array->index_for(blk_end - 1);
   724   // Are there more cards left to be updated?
   725   if (_next_offset_index + 1 <= end_index) {
   726     HeapWord* rem_st  = _array->address_for_index(_next_offset_index + 1);
   727     // Calculate rem_end this way because end_index
   728     // may be the last valid index in the covered region.
   729     HeapWord* rem_end = _array->address_for_index(end_index) +  N_words;
   730     set_remainder_to_point_to_start(rem_st, rem_end);
   731   }
   733   // _next_offset_index and _next_offset_threshold updated here.
   734   _next_offset_index = end_index + 1;
   735   // Calculate _next_offset_threshold this way because end_index
   736   // may be the last valid index in the covered region.
   737   _next_offset_threshold = _array->address_for_index(end_index) + N_words;
   738   assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");
   740 #ifdef ASSERT
   741   // The offset can be 0 if the block starts on a boundary.  That
   742   // is checked by an assertion above.
   743   size_t start_index = _array->index_for(blk_start);
   744   HeapWord* boundary    = _array->address_for_index(start_index);
   745   assert((_array->offset_array(orig_next_offset_index) == 0 &&
   746           blk_start == boundary) ||
   747           (_array->offset_array(orig_next_offset_index) > 0 &&
   748          _array->offset_array(orig_next_offset_index) <= N_words),
   749          "offset array should have been set");
   750   for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
   751     assert(_array->offset_array(j) > 0 &&
   752            _array->offset_array(j) <= (u_char) (N_words+N_powers-1),
   753            "offset array should have been set");
   754   }
   755 #endif
   756 }
   758 HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
   759   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
   760          "just checking");
   761   _next_offset_index = _array->index_for(_bottom);
   762   _next_offset_index++;
   763   _next_offset_threshold =
   764     _array->address_for_index(_next_offset_index);
   765   return _next_offset_threshold;
   766 }
   768 void BlockOffsetArrayContigSpace::zero_bottom_entry() {
   769   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
   770          "just checking");
   771   size_t bottom_index = _array->index_for(_bottom);
   772   _array->set_offset_array(bottom_index, 0);
   773 }
   776 void BlockOffsetArrayContigSpace::serialize(SerializeOopClosure* soc) {
   777   if (soc->reading()) {
   778     // Null these values so that the serializer won't object to updating them.
   779     _next_offset_threshold = NULL;
   780     _next_offset_index = 0;
   781   }
   782   soc->do_ptr(&_next_offset_threshold);
   783   soc->do_size_t(&_next_offset_index);
   784 }
   786 size_t BlockOffsetArrayContigSpace::last_active_index() const {
    787   // Guard against unsigned underflow: _next_offset_index is a size_t.
    788   return (_next_offset_index == 0) ? 0 : _next_offset_index - 1;
   789 }
