src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

author:      johnc
date:        Mon, 02 Aug 2010 12:51:43 -0700
changeset:   2060 2d160770d2e5
parent:      1907 c18cbe5936b8
child:       2241 72a161e62cc4
permissions: -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

/*
 * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1BlockOffsetTable.cpp.incl"

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
                                                   size_t init_word_size) :
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                  "  rs.base(): " INTPTR_FORMAT
                  "  rs.size(): " INTPTR_FORMAT
                  "  rs end(): " INTPTR_FORMAT,
                  rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr("  "
                  "  _vs.low_boundary(): " INTPTR_FORMAT
                  "  _vs.high_boundary(): " INTPTR_FORMAT,
                  _vs.low_boundary(),
                  _vs.high_boundary());
  }
}

void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
    // Initialization of the contents is left to the
    // G1BlockOffsetArray that uses it.
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid shrinkage");
  }
}
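
// An address is a card boundary exactly when its word offset from the start
// of the reserved region is a multiple of N_words (2^LogN_words), i.e., when
// the low LogN_words bits of the delta below are zero.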
bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
//////////////////////////////////////////////////////////////////////

G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _csp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}

void G1BlockOffsetArray::set_space(Space* sp) {
  _sp = sp;
  _csp = sp->toContiguousSpace();
}

// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {

  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address), so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back skip distance
  //        (e.g., with today's encoding, entry = 0x81 =>
  //          back skip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
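  // Illustrative walk using the example indices above: a query landing on
  // card 75 reads entry 2, skips back 2**(3*2) = 64 cards to card 11, finds
  // an entry <= N_words there (the offset card), and uses that value directly
  // as the word offset back to the block start within that card.
  //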
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start == _array->address_for_index(start_card), "Precondition");
  assert(end == _array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
}

// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
    "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// right-open interval: [blk_start, blk_end)
void
G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  mark_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.
void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
                                     size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  mark_block(blk + left_blk_size, blk + blk_size);
}
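
// (The BOT entries for the left-hand part, [blk, blk + left_blk_size), need
// no separate update: the left part's start address is unchanged, and
// mark_block() only rewrites cards at or after the split point.)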

// Action_mark   - update the BOT for the block [blk_start, blk_end).
//                 Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check  - verify the BOT for the block [blk_start, blk_end);
//                 performs no updates.
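//
// For example (Action_single), a block whose start lies inside card i and
// which spans cards i..i+2 is recorded by setting card i+1 to the word
// offset back to blk_start and letting set_remainder_to_point_to_start()
// fill card i+2; a block that stays within a single card leaves the table
// untouched.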
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
    } else {
      guarantee(landing_card == start_card - 1, "Tautology");
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}

void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) {
  HeapWord* blk1_start = Universe::heap()->block_start(blk1);
  HeapWord* blk2_start = Universe::heap()->block_start(blk2);
  assert(blk1 == blk1_start && blk2 == blk2_start,
         "Must be block starts.");
  assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous.");
  size_t blk1_start_index = _array->index_for(blk1);
  size_t blk2_start_index = _array->index_for(blk2);
  assert(blk1_start_index <= blk2_start_index, "sanity");
  HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index);
  if (blk2 == blk2_card_start) {
    // blk2 starts a card.  Does blk1 start on the previous card, or further
    // back?
    assert(blk1_start_index < blk2_start_index, "must be lower card.");
    if (blk1_start_index + 1 == blk2_start_index) {
      // previous card; new value for blk2 card is size of blk1.
      _array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1));
    } else {
      // Earlier card; go back a card.
      _array->set_offset_array(blk2_start_index, N_words);
    }
  } else {
    // blk2 does not start a card.  Does it cross a card?  If not, nothing
    // to do.
    size_t blk2_end_index =
      _array->index_for(blk2 + _sp->block_size(blk2) - 1);
    assert(blk2_end_index >= blk2_start_index, "sanity");
    if (blk2_end_index > blk2_start_index) {
      // Yes, it crosses a card.  The value for the next card must change.
      if (blk1_start_index + 1 == blk2_start_index) {
        // previous card; new value for second blk2 card is size of blk1.
        _array->set_offset_array(blk2_start_index + 1,
                                 (u_char) _sp->block_size(blk1));
      } else {
        // Earlier card; go back a card.
        _array->set_offset_array(blk2_start_index + 1, N_words);
      }
    }
  }
}

HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  return forward_to_block_containing_addr(q, addr);
}

// This duplicates a little code from the above: unavoidable.
HeapWord*
G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  HeapWord* n = q + _sp->block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}

HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
                                                          HeapWord* n,
                                                          const void* addr) {
  // We're not in the normal case.  We need to handle an important subcase
  // here: LAB allocation.  An allocation previously recorded in the
  // offset table was actually a lab allocation, and was divided into
  // several objects subsequently.  Fix this situation as we answer the
  // query, by updating entries as we cross them.
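
  // (In other words, the table may still describe one large LAB block here;
  // as the walk below crosses each card boundary it calls alloc_block_work2()
  // to record the object actually straddling that boundary, so that later
  // queries in this range become fast again.)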

  // If the first object's end n is exactly at the card boundary, start
  // refining with the corresponding card (the value of the entry will
  // basically be set to 0). If the object crosses the boundary, start
  // from the next card.
  size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
  HeapWord* next_boundary = _array->address_for_index(next_index);
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass_or_null() == NULL) return q;
        n += obj->size();
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  } else {
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass_or_null() == NULL) return q;
        n += _sp->block_size(q);
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  }
  return forward_to_block_containing_addr_const(q, n, addr);
}

HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index--);
    q -= offset;
  } while (offset == N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}

// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
  HeapWord* new_end = _bottom + new_word_size;
  if (_end < new_end && !init_to_zero()) {
    // verify that the old and new boundaries are also card boundaries
    assert(_array->is_card_boundary(_end),
           "_end not a card boundary");
    assert(_array->is_card_boundary(new_end),
           "new _end would not be a card boundary");
    // set all the newly added cards
    _array->set_offset_array(_end, new_end, N_words);
  }
  _end = new_end;  // update _end
}

void G1BlockOffsetArray::set_region(MemRegion mr) {
  _bottom = mr.start();
  _end = mr.end();
}

//
//              threshold_
//              |   _index_
//              v   v
//      +-------+-------+-------+-------+-------+
//      | i-1   |   i   | i+1   | i+2   | i+3   |
//      +-------+-------+-------+-------+-------+
//       ( ^    ]
//         block-start
//
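// On entry, as sketched above: blk_start lies at or before *threshold_ (in
// card i-1), and the new block [blk_start, blk_end) crosses *threshold_.
// Entry i gets the offset from *threshold_ back to blk_start; any further
// cards spanned by the block are filled with back-skip entries, and
// *threshold_/*index_ are advanced past the block's last card.
//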
void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
                                           HeapWord* blk_start, HeapWord* blk_end) {
  // For efficiency, do copy-in/copy-out.
  HeapWord* threshold = *threshold_;
  size_t    index = *index_;

  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  assert(blk_end > threshold, "should be past threshold");
  assert(blk_start <= threshold, "blk_start should be at or before threshold");
  assert(pointer_delta(threshold, blk_start) <= N_words,
         "offset should be <= BlockOffsetSharedArray::N");
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  assert(threshold == _array->_reserved.start() + index*N_words,
         "index must agree with threshold");

  DEBUG_ONLY(size_t orig_index = index;)

  // Mark the card that holds the offset into the block.  Note
  // that _next_offset_index and _next_offset_threshold are not
  // updated until the end of this method.
  _array->set_offset_array(index, threshold, blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index   = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  index = end_index + 1;
  // Calculate threshold_ this way because end_index
  // may be the last valid index in the covered region.
  threshold = _array->address_for_index(end_index) + N_words;
  assert(threshold >= blk_end, "Incorrect offset threshold");

  // index_ and threshold_ updated here.
  *threshold_ = threshold;
  *index_ = index;

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_index) == 0 &&
          blk_start == boundary) ||
         (_array->offset_array(orig_index) > 0 &&
          _array->offset_array(orig_index) <= N_words),
         "offset array should have been set");
  for (size_t j = orig_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <=
             (u_char) (N_words+BlockOffsetArray::N_powers-1),
           "offset array should have been set");
  }
#endif
}

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord*
G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  return forward_to_block_containing_addr(q, addr);
}

HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  HeapWord* n = q + _sp->block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}

G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
                              MemRegion mr) :
  G1BlockOffsetArray(array, mr, true)
{
  _next_offset_threshold = NULL;
  _next_offset_index = 0;
}

HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}
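
// (The initial threshold is thus the start of the card following the one
// that contains _bottom; the entry for the bottom card itself is cleared
// separately by zero_bottom_entry() below.)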

void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  assert(_array->address_for_index(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array(bottom_index, 0);
}
