src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp

author:      johnc
date:        Thu, 22 Sep 2011 10:57:37 -0700
changeset:   3175 4dfb2df418f2
parent:      2314 f95d63e2154a
child:       3900 d2a62e0f25eb
permissions: -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
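
As a rough, hypothetical sketch of the closure-embedding pattern the summary describes (the class and member names below are illustrative stand-ins, not the actual G1 closures or ReferenceProcessor API), a scan closure can carry a pointer to the pause's reference processor and register every java.lang.ref.Reference it visits, so the referents are handled when the pause ends:

#include <vector>

struct HeapObj { bool is_reference; };            // stand-in for an oop

class StwRefProcessor {                            // stand-in for the STW reference processor
 public:
  // Record a discovered Reference; at the end of the pause the discovered
  // list would be processed, preserving/copying referents as appropriate.
  void discover(HeapObj* ref) { _discovered.push_back(ref); }
  const std::vector<HeapObj*>& discovered() const { return _discovered; }
 private:
  std::vector<HeapObj*> _discovered;
};

class ScanClosure {                                // stand-in for an evacuation scan closure
 public:
  explicit ScanClosure(StwRefProcessor* rp) : _rp(rp) {}
  void do_object(HeapObj* obj) {
    // ... evacuate/copy obj here ...
    if (obj->is_reference) {                       // hypothetical check for java.lang.ref.Reference
      _rp->discover(obj);                          // defer referent handling to end of pause
    }
  }
 private:
  StwRefProcessor* _rp;                            // the embedded reference processor
};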

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP

#include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.hpp"

//
// This class can be used to locate the beginning of an object in the
// covered region.
//
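// Each block covers 2^block_shift (512) bytes of the covered region and is
// described by a single jbyte entry: clean_block (-1) if no object starts
// in the block, otherwise the word offset within the block of the last
// object that starts there. allocate_block() records these offsets and
// object_start() uses them to locate the object containing an arbitrary
// address.
//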

class ObjectStartArray : public CHeapObj {
 friend class VerifyObjectStartArrayClosure;

 private:
  PSVirtualSpace  _virtual_space;
  MemRegion       _reserved_region;
  MemRegion       _covered_region;
  MemRegion       _blocks_region;
  jbyte*          _raw_base;
  jbyte*          _offset_base;

 public:

  enum BlockValueConstants {
    clean_block                  = -1
  };

  enum BlockSizeConstants {
    block_shift                  = 9,
    block_size                   = 1 << block_shift,
    block_size_in_words          = block_size / sizeof(HeapWord)
  };

 protected:

  // Mapping from address to object start array entry
  jbyte* block_for_addr(void* p) const {
    assert(_covered_region.contains(p),
           "out of bounds access to object start array");
    jbyte* result = &_offset_base[uintptr_t(p) >> block_shift];
    assert(_blocks_region.contains(result),
           "out of bounds result in block_for_addr");
    return result;
  }

  // Mapping from object start array entry to address of first word
  HeapWord* addr_for_block(jbyte* p) {
    assert(_blocks_region.contains(p),
           "out of bounds access to object start array");
    size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << block_shift);
    assert(_covered_region.contains(result),
           "out of bounds result from object start array");
    return result;
  }

  // Mapping that includes the derived offset.
  // If the block is clean, returns the end of the covered region.
  // If the block is below the first block of the array, returns the start
  // of the covered region.
  HeapWord* offset_addr_for_block (jbyte* p) const {
    // We have to do this before the assert
    if (p < _raw_base) {
      return _covered_region.start();
    }

    assert(_blocks_region.contains(p),
           "out of bounds access to object start array");

    if (*p == clean_block) {
      return _covered_region.end();
    }

    size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << block_shift);
    result += *p;

    assert(_covered_region.contains(result),
           "out of bounds result from object start array");

    return result;
  }

 public:

  // This method is in lieu of a constructor, so that this class can be
  // embedded inline in other classes.
  void initialize(MemRegion reserved_region);

  void set_covered_region(MemRegion mr);

  void reset();

  MemRegion covered_region() { return _covered_region; }

  void allocate_block(HeapWord* p) {
    assert(_covered_region.contains(p), "Must be in covered region");
    jbyte* block = block_for_addr(p);
    HeapWord* block_base = addr_for_block(block);
    size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
    assert(offset < 128, "Sanity");
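    // A block covers block_size_in_words HeapWords: 64 on a 64-bit VM
    // (512 / 8) and 128 on a 32-bit VM (512 / 4), so a valid offset is
    // always below 128 and fits in the jbyte stored below.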
    // When doing MT offsets, we can't assert this.
    //assert(offset > *block, "Found backwards allocation");
    *block = (jbyte)offset;

    // tty->print_cr("[%p]", p);
  }

  // Optimized for finding the first object that crosses into
  // a given block. The blocks contain the offset of the last
  // object in that block. Scroll backwards by one, and the first
  // object hit should be at the beginning of the block.
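  // Example (illustrative): for any address 'addr' inside the covered
  // region, object_start(addr) returns the first word of the object
  // containing addr, so oop(object_start(addr)) is that object's header.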
  HeapWord* object_start(HeapWord* addr) const {
    assert(_covered_region.contains(addr), "Must be in covered region");
    jbyte* block = block_for_addr(addr);
    HeapWord* scroll_forward = offset_addr_for_block(block--);
    while (scroll_forward > addr) {
      scroll_forward = offset_addr_for_block(block--);
    }

    HeapWord* next = scroll_forward;
    while (next <= addr) {
      scroll_forward = next;
      next += oop(next)->size();
    }
    assert(scroll_forward <= addr, "wrong order for current and arg");
    assert(addr <= next, "wrong order for arg and next");
    return scroll_forward;
  }

  bool is_block_allocated(HeapWord* addr) {
    assert(_covered_region.contains(addr), "Must be in covered region");
    jbyte* block = block_for_addr(addr);
    if (*block == clean_block)
      return false;

    return true;
  }

  // Return true if an object starts in the given range of heap addresses.
  // If an object starts at an address corresponding to "start_addr",
  // the method will return true.
  bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP
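
For orientation, a minimal usage sketch of the class above (illustrative only: the variables 'reserved', 'committed', 'obj_addr' and 'interior_addr' are assumed to describe an old-generation space and addresses within it; only methods declared in the header are used):

ObjectStartArray _start_array;
_start_array.initialize(reserved);            // size the backing store for the reserved region
_start_array.set_covered_region(committed);   // only the committed part is tracked

// Record each object placed into the covered region ...
_start_array.allocate_block(obj_addr);

// ... so that, given any address inside the region, the start of the object
// containing it can be recovered later:
HeapWord* start = _start_array.object_start(interior_addr);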
