src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

changeset: 7256:0fcaab91d485
parent:    7051:1f1d373cd044
child:     7257:e7d0505c8a30
     1.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Mon Sep 29 09:59:23 2014 +0200
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Oct 10 15:45:45 2014 +0200
     1.3 @@ -109,7 +109,12 @@
     1.4  
     1.5  class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
     1.6   public:
     1.7 -  virtual void on_commit(uint start_idx, size_t num_regions);
     1.8 +  virtual void on_commit(uint start_idx, size_t num_regions) {
     1.9 +    // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
    1.10 +    // retrieve it here since this would cause firing of several asserts. The code
    1.11 +    // executed after commit of a region already needs to do some re-initialization of
    1.12 +    // the HeapRegion, so we combine that.
    1.13 +  }
    1.14  };
    1.15  
    1.16  // This implementation of "G1BlockOffsetTable" divides the covered region
    1.17 @@ -153,8 +158,6 @@
    1.18    // For performance these have to devolve to array accesses in product builds.
    1.19    inline u_char offset_array(size_t index) const;
    1.20  
    1.21 -  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
    1.22 -
    1.23    void set_offset_array_raw(size_t index, u_char offset) {
    1.24      _offset_array[index] = offset;
    1.25    }
    1.26 @@ -165,8 +168,6 @@
    1.27  
    1.28    inline void set_offset_array(size_t left, size_t right, u_char offset);
    1.29  
    1.30 -  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
    1.31 -
    1.32    bool is_card_boundary(HeapWord* p) const;
    1.33  
    1.34  public:
    1.35 @@ -193,8 +194,6 @@
    1.36    // G1BlockOffsetTable(s) to initialize cards.
    1.37    G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
    1.38  
    1.39 -  void set_bottom(HeapWord* new_bottom);
    1.40 -
    1.41    // Return the appropriate index into "_offset_array" for "p".
    1.42    inline size_t index_for(const void* p) const;
    1.43    inline size_t index_for_raw(const void* p) const;
    1.44 @@ -220,14 +219,6 @@
    1.45      LogN    = G1BlockOffsetSharedArray::LogN
    1.46    };
    1.47  
    1.48 -  // The following enums are used by do_block_helper
    1.49 -  enum Action {
    1.50 -    Action_single,      // BOT records a single block (see single_block())
    1.51 -    Action_mark,        // BOT marks the start of a block (see mark_block())
    1.52 -    Action_check        // Check that BOT records block correctly
    1.53 -                        // (see verify_single_block()).
    1.54 -  };
    1.55 -
    1.56    // This is the array, which can be shared by several BlockOffsetArray's
    1.57    // servicing different
    1.58    G1BlockOffsetSharedArray* _array;
    1.59 @@ -235,10 +226,6 @@
    1.60    // The space that owns this subregion.
    1.61    G1OffsetTableContigSpace* _gsp;
    1.62  
    1.63 -  // If true, array entries are initialized to 0; otherwise, they are
    1.64 -  // initialized to point backwards to the beginning of the covered region.
    1.65 -  bool _init_to_zero;
    1.66 -
    1.67    // The portion [_unallocated_block, _sp.end()) of the space that
    1.68    // is a single block known not to contain any objects.
    1.69    // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
    1.70 @@ -253,9 +240,6 @@
    1.71    // that is closed: [start_index, end_index]
    1.72    void set_remainder_to_point_to_start_incl(size_t start, size_t end);
    1.73  
    1.74 -  // A helper function for BOT adjustment/verification work
    1.75 -  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
    1.76 -
    1.77  protected:
    1.78  
    1.79    G1OffsetTableContigSpace* gsp() const { return _gsp; }
    1.80 @@ -303,11 +287,9 @@
    1.81  
    1.82  public:
     1.83    // The space may not have its bottom and top set yet, which is why the
    1.84 -  // region is passed as a parameter.  If "init_to_zero" is true, the
    1.85 -  // elements of the array are initialized to zero.  Otherwise, they are
    1.86 -  // initialized to point backwards to the beginning.
    1.87 -  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
    1.88 -                     bool init_to_zero);
    1.89 +  // region is passed as a parameter. The elements of the array are
    1.90 +  // initialized to zero.
    1.91 +  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
    1.92  
    1.93    // Note: this ought to be part of the constructor, but that would require
    1.94    // "this" to be passed as a parameter to a member constructor for
    1.95 @@ -315,114 +297,19 @@
    1.96    // This would be legal C++, but MS VC++ doesn't allow it.
    1.97    void set_space(G1OffsetTableContigSpace* sp);
    1.98  
    1.99 -  // Resets the covered region to the given "mr".
   1.100 -  void set_region(MemRegion mr);
   1.101 -
   1.102    // Resets the covered region to one with the same _bottom as before but
   1.103    // the "new_word_size".
   1.104    void resize(size_t new_word_size);
   1.105  
   1.106 -  // These must be guaranteed to work properly (i.e., do nothing)
   1.107 -  // when "blk_start" ("blk" for second version) is "NULL".
   1.108 -  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
   1.109 -  virtual void alloc_block(HeapWord* blk, size_t size) {
   1.110 -    alloc_block(blk, blk + size);
   1.111 -  }
   1.112 -
   1.113 -  // The following methods are useful and optimized for a
   1.114 -  // general, non-contiguous space.
   1.115 -
   1.116 -  // Given a block [blk_start, blk_start + full_blk_size), and
   1.117 -  // a left_blk_size < full_blk_size, adjust the BOT to show two
   1.118 -  // blocks [blk_start, blk_start + left_blk_size) and
   1.119 -  // [blk_start + left_blk_size, blk_start + full_blk_size).
   1.120 -  // It is assumed (and verified in the non-product VM) that the
   1.121 -  // BOT was correct for the original block.
   1.122 -  void split_block(HeapWord* blk_start, size_t full_blk_size,
   1.123 -                           size_t left_blk_size);
   1.124 -
   1.125 -  // Adjust the BOT to show that it has a single block in the
   1.126 -  // range [blk_start, blk_start + size). All necessary BOT
   1.127 -  // cards are adjusted, but _unallocated_block isn't.
   1.128 -  void single_block(HeapWord* blk_start, HeapWord* blk_end);
   1.129 -  void single_block(HeapWord* blk, size_t size) {
   1.130 -    single_block(blk, blk + size);
   1.131 -  }
   1.132 -
   1.133 -  // Adjust BOT to show that it has a block in the range
   1.134 -  // [blk_start, blk_start + size). Only the first card
   1.135 -  // of BOT is touched. It is assumed (and verified in the
   1.136 -  // non-product VM) that the remaining cards of the block
   1.137 -  // are correct.
   1.138 -  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
   1.139 -  void mark_block(HeapWord* blk, size_t size) {
   1.140 -    mark_block(blk, blk + size);
   1.141 -  }
   1.142 -
   1.143 -  // Adjust _unallocated_block to indicate that a particular
   1.144 -  // block has been newly allocated or freed. It is assumed (and
   1.145 -  // verified in the non-product VM) that the BOT is correct for
   1.146 -  // the given block.
   1.147 -  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
   1.148 -    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
   1.149 -    verify_single_block(blk_start, blk_end);
   1.150 -    if (BlockOffsetArrayUseUnallocatedBlock) {
   1.151 -      _unallocated_block = MAX2(_unallocated_block, blk_end);
   1.152 -    }
   1.153 -  }
   1.154 -
   1.155 -  inline void allocated(HeapWord* blk, size_t size) {
   1.156 -    allocated(blk, blk + size);
   1.157 -  }
   1.158 -
   1.159 -  inline void freed(HeapWord* blk_start, HeapWord* blk_end);
   1.160 -
   1.161 -  inline void freed(HeapWord* blk, size_t size);
   1.162 -
   1.163    virtual HeapWord* block_start_unsafe(const void* addr);
   1.164    virtual HeapWord* block_start_unsafe_const(const void* addr) const;
   1.165  
   1.166 -  // Requires "addr" to be the start of a card and returns the
   1.167 -  // start of the block that contains the given address.
   1.168 -  HeapWord* block_start_careful(const void* addr) const;
   1.169 -
   1.170 -  // If true, initialize array slots with no allocated blocks to zero.
   1.171 -  // Otherwise, make them point back to the front.
   1.172 -  bool init_to_zero() { return _init_to_zero; }
   1.173 -
   1.174 -  // Verification & debugging - ensure that the offset table reflects the fact
   1.175 -  // that the block [blk_start, blk_end) or [blk, blk + size) is a
   1.176 -  // single block of storage. NOTE: can;t const this because of
   1.177 -  // call to non-const do_block_internal() below.
   1.178 -  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
   1.179 -    if (VerifyBlockOffsetArray) {
   1.180 -      do_block_internal(blk_start, blk_end, Action_check);
   1.181 -    }
   1.182 -  }
   1.183 -
   1.184 -  inline void verify_single_block(HeapWord* blk, size_t size) {
   1.185 -    verify_single_block(blk, blk + size);
   1.186 -  }
   1.187 -
   1.188    // Used by region verification. Checks that the contents of the
   1.189    // BOT reflect that there's a single object that spans the address
   1.190    // range [obj_start, obj_start + word_size); returns true if this is
   1.191    // the case, returns false if it's not.
   1.192    bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
   1.193  
   1.194 -  // Verify that the given block is before _unallocated_block
   1.195 -  inline void verify_not_unallocated(HeapWord* blk_start,
   1.196 -                                     HeapWord* blk_end) const {
   1.197 -    if (BlockOffsetArrayUseUnallocatedBlock) {
   1.198 -      assert(blk_start < blk_end, "Block inconsistency?");
   1.199 -      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
   1.200 -    }
   1.201 -  }
   1.202 -
   1.203 -  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
   1.204 -    verify_not_unallocated(blk, blk + size);
   1.205 -  }
   1.206 -
   1.207    void check_all_cards(size_t left_card, size_t right_card) const;
   1.208  
   1.209    virtual void print_on(outputStream* out) PRODUCT_RETURN;
   1.210 @@ -445,14 +332,12 @@
   1.211                        blk_start, blk_end);
   1.212    }
   1.213  
   1.214 -  // Variant of zero_bottom_entry that does not check for availability of the
   1.215 +  // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
   1.216    // memory first.
   1.217    void zero_bottom_entry_raw();
   1.218    // Variant of initialize_threshold that does not check for availability of the
   1.219    // memory first.
   1.220    HeapWord* initialize_threshold_raw();
   1.221 -  // Zero out the entry for _bottom (offset will be zero).
   1.222 -  void zero_bottom_entry();
   1.223   public:
   1.224    G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
   1.225  
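For readers skimming the hunks above: the change at the top of the file turns `G1BlockOffsetSharedArrayMappingChangedListener::on_commit` into a deliberate no-op defined inline in the header, leaving BOT re-initialization to the region setup that already runs after commit (so the out-of-line definition is no longer needed). The standalone sketch below shows the same shape in isolation; it is not HotSpot code, and all type names in it are hypothetical stand-ins for illustration only.

    // Standalone sketch of the no-op commit-listener pattern used in the hunk
    // above. The types here are hypothetical stand-ins, not HotSpot classes.
    #include <cstddef>
    #include <cstdio>

    // Stand-in for G1MappingChangedListener: notified when regions are committed.
    class MappingChangedListener {
     public:
      virtual ~MappingChangedListener() {}
      virtual void on_commit(unsigned int start_idx, size_t num_regions) = 0;
    };

    // Stand-in for the BOT listener: the override is intentionally empty because
    // the table entries are rebuilt by the region re-initialization that already
    // runs after commit (mirroring the comment added in the changeset).
    class BOTMappingChangedListener : public MappingChangedListener {
     public:
      virtual void on_commit(unsigned int /*start_idx*/, size_t /*num_regions*/) {
        // Nothing to do here by design.
      }
    };

    int main() {
      BOTMappingChangedListener listener;
      listener.on_commit(0, 3);  // accepted and ignored; no table work happens
      std::printf("listener invoked; no BOT work performed\n");
      return 0;
    }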
