src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp

author:      johnc
date:        Mon, 03 Aug 2009 12:59:30 -0700
changeset:   1324:15c5903cf9e1
parent:      1280:df6caf649ff7
child:       1907:c18cbe5936b8
permissions: -rw-r--r--

6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
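
The summary describes a simple work-distribution scheme: a shared cursor over
the card cache from which each worker atomically claims a fixed-size chunk.
Below is a minimal standalone sketch of that scheme; the names
(HotCardCacheCleaner, claim_and_process_chunk, process_card) are hypothetical,
and the real cleanup code lives in G1's concurrent refinement machinery, not
in this file, so this illustrates the technique rather than the HotSpot
implementation.

#include <atomic>
#include <cstddef>
#include <algorithm>

typedef void* CardPtr;   // stand-in for HotSpot's card pointers

class HotCardCacheCleaner {
  CardPtr*            _cache;       // the hot card cache entries
  size_t              _n_cards;     // number of entries in the cache
  size_t              _chunk_size;  // fixed once, at initialization
  std::atomic<size_t> _cursor;      // index of the next unclaimed chunk

  void process_card(CardPtr card) {
    (void)card;  // whatever "cleaning" a hot card entails; elided here
  }

public:
  // The chunk size is derived from the cache size and the worker count,
  // mirroring the summary: computed once, at (VM) initialization.
  HotCardCacheCleaner(CardPtr* cache, size_t n_cards, size_t n_workers)
    : _cache(cache), _n_cards(n_cards),
      _chunk_size((n_cards + n_workers - 1) / n_workers),
      _cursor(0) {}

  // Each GC worker calls this in a loop until it returns false.
  bool claim_and_process_chunk() {
    size_t start = _cursor.fetch_add(_chunk_size);
    if (start >= _n_cards) return false;  // every chunk already claimed
    size_t end = std::min(start + _chunk_size, _n_cards);
    for (size_t i = start; i < end; i++) {
      process_card(_cache[i]);
      _cache[i] = NULL;                   // entry is now clean
    }
    return true;
  }
};

A worker's task body is then just: while (cleaner.claim_and_process_chunk()) {}.
The atomic fetch_add is what makes the claiming race-free: no two workers can
ever receive the same chunk.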

/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Return the start of the block containing addr, or NULL if addr is
// outside the region covered by this table.
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return NULL;
  }
}

inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe_const(addr);
  } else {
    return NULL;
  }
}
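
// Illustrative use (hypothetical caller, not part of this file): a card
// scanner that needs the first object overlapping a dirty card could do
//
//   HeapWord* card_bottom = ...;  // bottom of the dirty card
//   HeapWord* first_obj = bot->block_start(card_bottom);
//
// and then iterate objects from first_obj until past the card's end.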

// Map an address within the covered region to its index in the offset table.
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         "p not in range.");
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  assert(result < _vs.committed_size(), "bad index from address");
  return result;
}

// Map an offset table index back to the heap address of the bottom of
// the corresponding card.
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  assert(index < _vs.committed_size(), "bad index");
  HeapWord* result = _reserved.start() + (index << LogN_words);
  assert(result >= _reserved.start() && result < _reserved.end(),
         "bad address from index");
  return result;
}
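
// A worked example of the two mappings above (illustrative only; assumes
// the usual 512-byte card, i.e. LogN == 9, on a 64-bit VM where
// LogN_words == LogN - LogHeapWordSize == 9 - 3 == 6):
//
//   an address p that is 4660 bytes (0x1234) above _reserved.start() gives
//     index_for(p)         == 4660 >> 9 == 9
//     address_for_index(9) == start + (9 << 6) words
//                          == start + 576 words == start + 4608 bytes,
//   i.e. the bottom of card 9, the card containing p.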

inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset over N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  return q;
}
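
// How the loop above converges (the concrete entry values are assumptions
// for illustration; the authoritative encoding is
// BlockOffsetArray::entry_to_cards_back()): entries below N_words are plain
// word offsets back to the block start, while entries at or above N_words
// encode "skip back some power of Base cards".  For instance, with
// N_words == 64 and Base == 16, successive back-skip entries might mean
// "go back 1, 16, 256, ... cards", so a block spanning k cards is found in
// O(log_Base(k)) table probes rather than k probes.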

inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    // Contiguous-space case: there are no objects above top(), so the
    // "block" containing anything at or above top() starts at top().
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      // A NULL klass means the object is still being initialized by a
      // concurrent allocator; q is the best answer we can give.
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}

inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}
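
// Schematically (a hypothetical composition; the actual callers live in
// g1BlockOffsetTable.cpp), a full block-start query combines the two
// halves above:
//
//   HeapWord* q = block_at_or_preceding(addr, ...);   // BOT probe; may land
//                                                     // before the block
//   return forward_to_block_containing_addr(q, addr); // walk forward by
//                                                     // object sizes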

//////////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray inlines
//////////////////////////////////////////////////////////////////////////
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // Adjust _unallocated_block upward or downward as appropriate.
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(_unallocated_block <= _end,
           "Inconsistent value for _unallocated_block");
    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
      // CMS-specific note: a block abutting _unallocated_block to
      // its left is being freed, a new block is being added, or
      // we are resetting following a compaction.
      _unallocated_block = blk_start;
    }
  }
}

inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
  freed(blk, blk + size);
}
