src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

author       tonyp
date         Mon, 07 Nov 2011 22:11:12 -0500
changeset    3268:8aae2050e83e
parent       2453:2250ee17e258
child        3900:d2a62e0f25eb
permissions  -rw-r--r--

7092309: G1: introduce old region set
Summary: Keep track of all the old regions in the heap with a heap region set.
Reviewed-by: brutisso, johnc

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    27 #include "memory/space.hpp"
    28 #include "oops/oop.inline.hpp"
    29 #include "runtime/java.hpp"
    31 //////////////////////////////////////////////////////////////////////
    32 // G1BlockOffsetSharedArray
    33 //////////////////////////////////////////////////////////////////////
    35 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
    36                                                    size_t init_word_size) :
    37   _reserved(reserved), _end(NULL)
    38 {
    39   size_t size = compute_size(reserved.word_size());
    40   ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
    41   if (!rs.is_reserved()) {
    42     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
    43   }
    44   if (!_vs.initialize(rs, 0)) {
    45     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
    46   }
    47   _offset_array = (u_char*)_vs.low_boundary();
    48   resize(init_word_size);
    49   if (TraceBlockOffsetTable) {
    50     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
    51     gclog_or_tty->print_cr("  "
    52                   "  rs.base(): " INTPTR_FORMAT
    53                   "  rs.size(): " INTPTR_FORMAT
    54                   "  rs end(): " INTPTR_FORMAT,
    55                   rs.base(), rs.size(), rs.base() + rs.size());
    56     gclog_or_tty->print_cr("  "
    57                   "  _vs.low_boundary(): " INTPTR_FORMAT
    58                   "  _vs.high_boundary(): " INTPTR_FORMAT,
    59                   _vs.low_boundary(),
    60                   _vs.high_boundary());
    61   }
    62 }
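        // Illustrative sizing sketch (assuming the usual 2^9 = 512-byte BOT
        // card, i.e. LogN = 9): compute_size() asks for one u_char entry per
        // card in the covered region, so, for example, a 1 GB reserved heap
        // (2^30 bytes / 2^9 bytes per card = 2^21 cards) needs roughly a
        // 2 MB offset array, rounded up for alignment by
        // allocation_align_size_up() above.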
    64 void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
    65   assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
    66   size_t new_size = compute_size(new_word_size);
    67   size_t old_size = _vs.committed_size();
    68   size_t delta;
    69   char* high = _vs.high();
    70   _end = _reserved.start() + new_word_size;
    71   if (new_size > old_size) {
    72     delta = ReservedSpace::page_align_size_up(new_size - old_size);
    73     assert(delta > 0, "just checking");
    74     if (!_vs.expand_by(delta)) {
    75       // Do better than this for Merlin
    76       vm_exit_out_of_memory(delta, "offset table expansion");
    77     }
    78     assert(_vs.high() == high + delta, "invalid expansion");
    79     // Initialization of the contents is left to the
    80     // G1BlockOffsetArray that uses it.
    81   } else {
    82     delta = ReservedSpace::page_align_size_down(old_size - new_size);
    83     if (delta == 0) return;
    84     _vs.shrink_by(delta);
     85     assert(_vs.high() == high - delta, "invalid shrink");
    86   }
    87 }
    89 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
    90   assert(p >= _reserved.start(), "just checking");
    91   size_t delta = pointer_delta(p, _reserved.start());
    92   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
    93 }
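        // Worked example of the test above, assuming a 64-bit build where
        // LogN_words = LogN - LogHeapWordSize = 9 - 3 = 6 and N_words = 64:
        // p is a card boundary iff its word offset from _reserved.start()
        // is a multiple of 64 words (512 bytes), i.e. the low LogN_words
        // bits of the delta are all zero.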
    96 //////////////////////////////////////////////////////////////////////
    97 // G1BlockOffsetArray
    98 //////////////////////////////////////////////////////////////////////
   100 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
   101                                        MemRegion mr, bool init_to_zero) :
   102   G1BlockOffsetTable(mr.start(), mr.end()),
   103   _unallocated_block(_bottom),
   104   _array(array), _csp(NULL),
   105   _init_to_zero(init_to_zero) {
   106   assert(_bottom <= _end, "arguments out of order");
   107   if (!_init_to_zero) {
   108     // initialize cards to point back to mr.start()
   109     set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
   110     _array->set_offset_array(0, 0);  // set first card to 0
   111   }
   112 }
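        // Note on the two initialization modes: when init_to_zero is false,
        // the constructor above eagerly points every card after the first
        // back at mr.start(); when it is true (as G1BlockOffsetArrayContigSpace
        // below requests), entries are instead filled in lazily as blocks are
        // allocated, via the threshold mechanism around alloc_block_work2().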
   114 void G1BlockOffsetArray::set_space(Space* sp) {
   115   _sp = sp;
   116   _csp = sp->toContiguousSpace();
   117 }
   119 // The arguments follow the normal convention of denoting
   120 // a right-open interval: [start, end)
   121 void
    122 G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
   124   if (start >= end) {
   125     // The start address is equal to the end address (or to
    126     // the right of the end address) so there are no cards
    127     // that need to be updated.
   128     return;
   129   }
   131   // Write the backskip value for each region.
   132   //
   133   //    offset
   134   //    card             2nd                       3rd
   135   //     | +- 1st        |                         |
   136   //     v v             v                         v
   137   //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
   138   //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
   139   //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
   140   //    11              19                        75
   141   //      12
   142   //
   143   //    offset card is the card that points to the start of an object
   144   //      x - offset value of offset card
   145   //    1st - start of first logarithmic region
   146   //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
   147   //    2nd - start of second logarithmic region
   148   //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
   149   //    3rd - start of third logarithmic region
   150   //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
   151   //
   152   //    integer below the block offset entry is an example of
   153   //    the index of the entry
   154   //
   155   //    Given an address,
   156   //      Find the index for the address
   157   //      Find the block offset table entry
    158   //      Convert the entry to a backskip value
    159   //        (e.g., with today's encoding, offset = 0x81 =>
    160   //          backskip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
   161   //      Move back N (e.g., 8) entries and repeat with the
   162   //        value of the new entry
   163   //
   164   size_t start_card = _array->index_for(start);
   165   size_t end_card = _array->index_for(end-1);
    166   assert(start == _array->address_for_index(start_card), "Precondition");
    167   assert(end == _array->address_for_index(end_card)+N_words, "Precondition");
   168   set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
   169 }
   171 // Unlike the normal convention in this code, the argument here denotes
   172 // a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
   173 // above.
   174 void
   175 G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
   176   if (start_card > end_card) {
   177     return;
   178   }
   179   assert(start_card > _array->index_for(_bottom), "Cannot be first card");
   180   assert(_array->offset_array(start_card-1) <= N_words,
   181          "Offset card has an unexpected value");
   182   size_t start_card_for_region = start_card;
   183   u_char offset = max_jubyte;
   184   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
    185     // -1 so that the card with the actual offset is counted.  Another -1
   186     // so that the reach ends in this region and not at the start
   187     // of the next.
   188     size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
   189     offset = N_words + i;
   190     if (reach >= end_card) {
   191       _array->set_offset_array(start_card_for_region, end_card, offset);
   192       start_card_for_region = reach + 1;
   193       break;
   194     }
   195     _array->set_offset_array(start_card_for_region, reach, offset);
   196     start_card_for_region = reach + 1;
   197   }
   198   assert(start_card_for_region > end_card, "Sanity check");
   199   DEBUG_ONLY(check_all_cards(start_card, end_card);)
   200 }
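        // Concretely, with start_card = 12 as in the diagram in
        // set_remainder_to_point_to_start() above, and using the 2**(3*i)
        // example encoding described there: iteration i = 0 fills cards
        // 12..18 with N_words + 0, i = 1 fills cards 19..74 with N_words + 1,
        // i = 2 fills cards 75..522 with N_words + 2, and so on until the
        // reach passes end_card (the region widths come from
        // power_to_cards_back(i+1)).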
   202 // The block [blk_start, blk_end) has been allocated;
   203 // adjust the block offset table to represent this information;
   204 // right-open interval: [blk_start, blk_end)
   205 void
   206 G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
   207   mark_block(blk_start, blk_end);
   208   allocated(blk_start, blk_end);
   209 }
   211 // Adjust BOT to show that a previously whole block has been split
   212 // into two.
   213 void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
   214                                      size_t left_blk_size) {
   215   // Verify that the BOT shows [blk, blk + blk_size) to be one block.
   216   verify_single_block(blk, blk_size);
   217   // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
   218   // is one single block.
   219   mark_block(blk + left_blk_size, blk + blk_size);
   220 }
   223 // Action_mark - update the BOT for the block [blk_start, blk_end).
   224 //               Current typical use is for splitting a block.
   225 // Action_single - update the BOT for an allocation.
   226 // Action_verify - BOT verification.
   227 void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
   228                                            HeapWord* blk_end,
   229                                            Action action) {
   230   assert(Universe::heap()->is_in_reserved(blk_start),
   231          "reference must be into the heap");
   232   assert(Universe::heap()->is_in_reserved(blk_end-1),
   233          "limit must be within the heap");
   234   // This is optimized to make the test fast, assuming we only rarely
   235   // cross boundaries.
   236   uintptr_t end_ui = (uintptr_t)(blk_end - 1);
   237   uintptr_t start_ui = (uintptr_t)blk_start;
   238   // Calculate the last card boundary preceding end of blk
   239   intptr_t boundary_before_end = (intptr_t)end_ui;
   240   clear_bits(boundary_before_end, right_n_bits(LogN));
   241   if (start_ui <= (uintptr_t)boundary_before_end) {
   242     // blk starts at or crosses a boundary
   243     // Calculate index of card on which blk begins
   244     size_t    start_index = _array->index_for(blk_start);
   245     // Index of card on which blk ends
   246     size_t    end_index   = _array->index_for(blk_end - 1);
   247     // Start address of card on which blk begins
   248     HeapWord* boundary    = _array->address_for_index(start_index);
   249     assert(boundary <= blk_start, "blk should start at or after boundary");
   250     if (blk_start != boundary) {
   251       // blk starts strictly after boundary
   252       // adjust card boundary and start_index forward to next card
   253       boundary += N_words;
   254       start_index++;
   255     }
   256     assert(start_index <= end_index, "monotonicity of index_for()");
   257     assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
   258     switch (action) {
   259       case Action_mark: {
   260         if (init_to_zero()) {
   261           _array->set_offset_array(start_index, boundary, blk_start);
   262           break;
   263         } // Else fall through to the next case
   264       }
   265       case Action_single: {
   266         _array->set_offset_array(start_index, boundary, blk_start);
   267         // We have finished marking the "offset card". We need to now
   268         // mark the subsequent cards that this blk spans.
   269         if (start_index < end_index) {
   270           HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
   271           HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
   272           set_remainder_to_point_to_start(rem_st, rem_end);
   273         }
   274         break;
   275       }
   276       case Action_check: {
   277         _array->check_offset_array(start_index, boundary, blk_start);
   278         // We have finished checking the "offset card". We need to now
   279         // check the subsequent cards that this blk spans.
   280         check_all_cards(start_index + 1, end_index);
   281         break;
   282       }
   283       default:
   284         ShouldNotReachHere();
   285     }
   286   }
   287 }
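        // A short example of the fast-path test in do_block_internal(),
        // assuming 512-byte cards (LogN = 9): boundary_before_end is
        // blk_end - 1 with its low 9 address bits cleared, i.e. the start of
        // the card containing the block's last word.  If blk_start is at or
        // below that address, the block starts on or crosses a card boundary
        // and the table is updated; otherwise the whole block lies strictly
        // inside a single card and no entry needs to change.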
   289 // The card-interval [start_card, end_card] is a closed interval; this
    290 // is an expensive check -- use with care and only under the protection of
    291 // a suitable flag.
   292 void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
   294   if (end_card < start_card) {
   295     return;
   296   }
   297   guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
   298   for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
   299     u_char entry = _array->offset_array(c);
   300     if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
   301       guarantee(entry > N_words, "Should be in logarithmic region");
   302     }
   303     size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
   304     size_t landing_card = c - backskip;
   305     guarantee(landing_card >= (start_card - 1), "Inv");
   306     if (landing_card >= start_card) {
   307       guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
   308     } else {
   309       guarantee(landing_card == start_card - 1, "Tautology");
   310       guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
   311     }
   312   }
   313 }
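        // What check_all_cards() establishes: every entry in
        // (start_card, end_card] encodes a backskip that lands either on a
        // card with a smaller-or-equal entry or on the offset card just
        // before start_card.  This is the invariant that lets the backward
        // walk in block_at_or_preceding() make progress and terminate at a
        // card whose entry is a real word offset (<= N_words).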
   315 // The range [blk_start, blk_end) represents a single contiguous block
   316 // of storage; modify the block offset table to represent this
   317 // information; Right-open interval: [blk_start, blk_end)
   318 // NOTE: this method does _not_ adjust _unallocated_block.
   319 void
   320 G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
   321   do_block_internal(blk_start, blk_end, Action_single);
   322 }
   324 // Mark the BOT such that if [blk_start, blk_end) straddles a card
   325 // boundary, the card following the first such boundary is marked
   326 // with the appropriate offset.
   327 // NOTE: this method does _not_ adjust _unallocated_block or
   328 // any cards subsequent to the first one.
   329 void
   330 G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
   331   do_block_internal(blk_start, blk_end, Action_mark);
   332 }
   334 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
   335   assert(_bottom <= addr && addr < _end,
   336          "addr must be covered by this Array");
   337   // Must read this exactly once because it can be modified by parallel
   338   // allocation.
   339   HeapWord* ub = _unallocated_block;
   340   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
   341     assert(ub < _end, "tautology (see above)");
   342     return ub;
   343   }
   344   // Otherwise, find the block start using the table.
   345   HeapWord* q = block_at_or_preceding(addr, false, 0);
   346   return forward_to_block_containing_addr(q, addr);
   347 }
   349 // This duplicates a little code from the above: unavoidable.
   350 HeapWord*
   351 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
   352   assert(_bottom <= addr && addr < _end,
   353          "addr must be covered by this Array");
   354   // Must read this exactly once because it can be modified by parallel
   355   // allocation.
   356   HeapWord* ub = _unallocated_block;
   357   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
   358     assert(ub < _end, "tautology (see above)");
   359     return ub;
   360   }
   361   // Otherwise, find the block start using the table.
   362   HeapWord* q = block_at_or_preceding(addr, false, 0);
   363   HeapWord* n = q + _sp->block_size(q);
   364   return forward_to_block_containing_addr_const(q, n, addr);
   365 }
   368 HeapWord*
   369 G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
   370                                                           HeapWord* n,
   371                                                           const void* addr) {
   372   // We're not in the normal case.  We need to handle an important subcase
   373   // here: LAB allocation.  An allocation previously recorded in the
   374   // offset table was actually a lab allocation, and was divided into
   375   // several objects subsequently.  Fix this situation as we answer the
   376   // query, by updating entries as we cross them.
    378   // If the first object's end n is at a card boundary, start refining
    379   // with the corresponding card (the value of the entry will basically be
    380   // set to 0). If the object crosses the boundary, start from the next card.
   381   size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
   382   HeapWord* next_boundary = _array->address_for_index(next_index);
   383   if (csp() != NULL) {
   384     if (addr >= csp()->top()) return csp()->top();
   385     while (next_boundary < addr) {
   386       while (n <= next_boundary) {
   387         q = n;
   388         oop obj = oop(q);
   389         if (obj->klass_or_null() == NULL) return q;
   390         n += obj->size();
   391       }
   392       assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
   393       // [q, n) is the block that crosses the boundary.
   394       alloc_block_work2(&next_boundary, &next_index, q, n);
   395     }
   396   } else {
   397     while (next_boundary < addr) {
   398       while (n <= next_boundary) {
   399         q = n;
   400         oop obj = oop(q);
   401         if (obj->klass_or_null() == NULL) return q;
   402         n += _sp->block_size(q);
   403       }
   404       assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
   405       // [q, n) is the block that crosses the boundary.
   406       alloc_block_work2(&next_boundary, &next_index, q, n);
   407     }
   408   }
   409   return forward_to_block_containing_addr_const(q, n, addr);
   410 }
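        // The two loops above differ only in how block sizes are obtained:
        // when a ContiguousSpace is attached (csp() != NULL), addresses at or
        // above top() are answered immediately and object sizes are read
        // directly via obj->size(); otherwise the walk falls back to the
        // generic Space::block_size() of the covered space.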
   412 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
   413   assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
   415   assert(_bottom <= addr && addr < _end,
   416          "addr must be covered by this Array");
   417   // Must read this exactly once because it can be modified by parallel
   418   // allocation.
   419   HeapWord* ub = _unallocated_block;
   420   if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
   421     assert(ub < _end, "tautology (see above)");
   422     return ub;
   423   }
   425   // Otherwise, find the block start using the table, but taking
   426   // care (cf block_start_unsafe() above) not to parse any objects/blocks
    427   // on the cards themselves.
   428   size_t index = _array->index_for(addr);
   429   assert(_array->address_for_index(index) == addr,
   430          "arg should be start of card");
   432   HeapWord* q = (HeapWord*)addr;
   433   uint offset;
   434   do {
   435     offset = _array->offset_array(index--);
   436     q -= offset;
   437   } while (offset == N_words);
   438   assert(q <= addr, "block start should be to left of arg");
   439   return q;
   440 }
   442 // Note that the committed size of the covered space may have changed,
   443 // so the table size might also wish to change.
   444 void G1BlockOffsetArray::resize(size_t new_word_size) {
   445   HeapWord* new_end = _bottom + new_word_size;
   446   if (_end < new_end && !init_to_zero()) {
   447     // verify that the old and new boundaries are also card boundaries
   448     assert(_array->is_card_boundary(_end),
   449            "_end not a card boundary");
   450     assert(_array->is_card_boundary(new_end),
   451            "new _end would not be a card boundary");
   452     // set all the newly added cards
   453     _array->set_offset_array(_end, new_end, N_words);
   454   }
   455   _end = new_end;  // update _end
   456 }
   458 void G1BlockOffsetArray::set_region(MemRegion mr) {
   459   _bottom = mr.start();
   460   _end = mr.end();
   461 }
   463 //
   464 //              threshold_
   465 //              |   _index_
   466 //              v   v
   467 //      +-------+-------+-------+-------+-------+
   468 //      | i-1   |   i   | i+1   | i+2   | i+3   |
   469 //      +-------+-------+-------+-------+-------+
   470 //       ( ^    ]
   471 //         block-start
   472 //
   473 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
   474                                            HeapWord* blk_start, HeapWord* blk_end) {
   475   // For efficiency, do copy-in/copy-out.
   476   HeapWord* threshold = *threshold_;
   477   size_t    index = *index_;
   479   assert(blk_start != NULL && blk_end > blk_start,
   480          "phantom block");
   481   assert(blk_end > threshold, "should be past threshold");
   482   assert(blk_start <= threshold, "blk_start should be at or before threshold");
   483   assert(pointer_delta(threshold, blk_start) <= N_words,
   484          "offset should be <= BlockOffsetSharedArray::N");
   485   assert(Universe::heap()->is_in_reserved(blk_start),
   486          "reference must be into the heap");
   487   assert(Universe::heap()->is_in_reserved(blk_end-1),
   488          "limit must be within the heap");
   489   assert(threshold == _array->_reserved.start() + index*N_words,
   490          "index must agree with threshold");
   492   DEBUG_ONLY(size_t orig_index = index;)
   494   // Mark the card that holds the offset into the block.  Note
   495   // that _next_offset_index and _next_offset_threshold are not
   496   // updated until the end of this method.
   497   _array->set_offset_array(index, threshold, blk_start);
   499   // We need to now mark the subsequent cards that this blk spans.
   501   // Index of card on which blk ends.
   502   size_t end_index   = _array->index_for(blk_end - 1);
   504   // Are there more cards left to be updated?
   505   if (index + 1 <= end_index) {
   506     HeapWord* rem_st  = _array->address_for_index(index + 1);
   507     // Calculate rem_end this way because end_index
   508     // may be the last valid index in the covered region.
   509     HeapWord* rem_end = _array->address_for_index(end_index) +  N_words;
   510     set_remainder_to_point_to_start(rem_st, rem_end);
   511   }
   513   index = end_index + 1;
   514   // Calculate threshold_ this way because end_index
   515   // may be the last valid index in the covered region.
   516   threshold = _array->address_for_index(end_index) + N_words;
   517   assert(threshold >= blk_end, "Incorrect offset threshold");
   519   // index_ and threshold_ updated here.
   520   *threshold_ = threshold;
   521   *index_ = index;
   523 #ifdef ASSERT
   524   // The offset can be 0 if the block starts on a boundary.  That
   525   // is checked by an assertion above.
   526   size_t start_index = _array->index_for(blk_start);
   527   HeapWord* boundary    = _array->address_for_index(start_index);
   528   assert((_array->offset_array(orig_index) == 0 &&
   529           blk_start == boundary) ||
   530           (_array->offset_array(orig_index) > 0 &&
   531          _array->offset_array(orig_index) <= N_words),
   532          "offset array should have been set");
   533   for (size_t j = orig_index + 1; j <= end_index; j++) {
   534     assert(_array->offset_array(j) > 0 &&
   535            _array->offset_array(j) <=
   536              (u_char) (N_words+BlockOffsetArray::N_powers-1),
   537            "offset array should have been set");
   538   }
   539 #endif
   540 }
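        // Post-conditions of alloc_block_work2(), in terms of the picture
        // above: on return, *threshold_ is the first card boundary at or
        // after blk_end (address_for_index(end_index) + N_words) and *index_
        // is the index of the card starting there, so the table only needs
        // to be touched again once an allocation crosses that boundary.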
   542 bool
   543 G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
   544                                       size_t word_size) const {
   545   size_t first_card = _array->index_for(obj_start);
   546   size_t last_card = _array->index_for(obj_start + word_size - 1);
   547   if (!_array->is_card_boundary(obj_start)) {
   548     // If the object is not on a card boundary the BOT entry of the
   549     // first card should point to another object so we should not
   550     // check that one.
   551     first_card += 1;
   552   }
   553   for (size_t card = first_card; card <= last_card; card += 1) {
   554     HeapWord* card_addr = _array->address_for_index(card);
   555     HeapWord* block_start = block_start_const(card_addr);
   556     if (block_start != obj_start) {
   557       gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
   558                              "card index: "SIZE_FORMAT" "
   559                              "card addr: "PTR_FORMAT" BOT entry: %u "
   560                              "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
   561                              "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
   562                              block_start, card, card_addr,
   563                              _array->offset_array(card),
   564                              obj_start, word_size, first_card, last_card);
   565       return false;
   566     }
   567   }
   568   return true;
   569 }
   571 #ifndef PRODUCT
   572 void
   573 G1BlockOffsetArray::print_on(outputStream* out) {
   574   size_t from_index = _array->index_for(_bottom);
   575   size_t to_index = _array->index_for(_end);
   576   out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
   577                 "cards ["SIZE_FORMAT","SIZE_FORMAT")",
   578                 _bottom, _end, from_index, to_index);
   579   for (size_t i = from_index; i < to_index; ++i) {
   580     out->print_cr("  entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
   581                   i, _array->address_for_index(i),
   582                   (uint) _array->offset_array(i));
   583   }
   584 }
   585 #endif // !PRODUCT
   587 //////////////////////////////////////////////////////////////////////
   588 // G1BlockOffsetArrayContigSpace
   589 //////////////////////////////////////////////////////////////////////
   591 HeapWord*
   592 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
   593   assert(_bottom <= addr && addr < _end,
   594          "addr must be covered by this Array");
   595   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
   596   return forward_to_block_containing_addr(q, addr);
   597 }
   599 HeapWord*
   600 G1BlockOffsetArrayContigSpace::
   601 block_start_unsafe_const(const void* addr) const {
   602   assert(_bottom <= addr && addr < _end,
   603          "addr must be covered by this Array");
   604   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
   605   HeapWord* n = q + _sp->block_size(q);
   606   return forward_to_block_containing_addr_const(q, n, addr);
   607 }
   609 G1BlockOffsetArrayContigSpace::
   610 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
   611                               MemRegion mr) :
   612   G1BlockOffsetArray(array, mr, true)
   613 {
   614   _next_offset_threshold = NULL;
   615   _next_offset_index = 0;
   616 }
   618 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   619   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
   620          "just checking");
   621   _next_offset_index = _array->index_for(_bottom);
   622   _next_offset_index++;
   623   _next_offset_threshold =
   624     _array->address_for_index(_next_offset_index);
   625   return _next_offset_threshold;
   626 }
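        // After initialize_threshold(), _next_offset_threshold is the start
        // of the card following the one that contains _bottom, i.e. the
        // first card boundary a newly allocated block can cross; until an
        // allocation reaches it, no BOT entries need to be written.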
   628 void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
   629   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
   630          "just checking");
   631   size_t bottom_index = _array->index_for(_bottom);
   632   assert(_array->address_for_index(bottom_index) == _bottom,
   633          "Precondition of call");
   634   _array->set_offset_array(bottom_index, 0);
   635 }
   637 void
   638 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
   639   assert(new_top <= _end, "_end should have already been updated");
   641   // The first BOT entry should have offset 0.
   642   zero_bottom_entry();
   643   initialize_threshold();
   644   alloc_block(_bottom, new_top);
   645  }
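        // set_for_starts_humongous() re-seeds the table for a region holding
        // the start of a humongous object: the bottom entry is zeroed, the
        // threshold is re-initialized, and [_bottom, new_top) is recorded as
        // one single block, so BOT queries for any address in that range
        // resolve to _bottom, the start of the humongous object.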
   647 #ifndef PRODUCT
   648 void
   649 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
   650   G1BlockOffsetArray::print_on(out);
   651   out->print_cr("  next offset threshold: "PTR_FORMAT, _next_offset_threshold);
   652   out->print_cr("  next offset index:     "SIZE_FORMAT, _next_offset_index);
   653 }
   654 #endif // !PRODUCT
