src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7256:0fcaab91d485
child:       7535:7ae4e26cb1e0
child:       9697:cfe3264deba4
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: they never touch most of that memory, so without the eager initialization the operating system would never actually commit those pages. The fix is to skip the explicit initialization whenever the initialization value of a data structure matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso

ysr@777 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
stefank@2314 27
tschatzl@7051 28 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
stefank@2314 29 #include "memory/memRegion.hpp"
stefank@2314 30 #include "runtime/virtualspace.hpp"
stefank@2314 31 #include "utilities/globalDefinitions.hpp"
stefank@2314 32
ysr@777 33 // The CollectedHeap type requires subtypes to implement a method
ysr@777 34 // "block_start". For some subtypes, notably generational
ysr@777 35 // systems using card-table-based write barriers, the efficiency of this
ysr@777 36 // operation may be important. The "BlockOffsetArray" class and its
ysr@777 37 // subtypes may be useful in providing efficient implementations.
ysr@777 38 //
ysr@777 39 // While generally mirroring the structure of the BOT for GenCollectedHeap,
ysr@777 40 // the following types are tailored more towards G1's uses; these should,
ysr@777 41 // however, be merged back into a common BOT to avoid code duplication
ysr@777 42 // and reduce maintenance overhead.
ysr@777 43 //
ysr@777 44 // G1BlockOffsetTable (abstract)
ysr@777 45 // -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
ysr@777 46 // -- G1BlockOffsetArrayContigSpace
ysr@777 47 //
ysr@777 48 // A main impediment to the consolidation of this code might be the
ysr@777 49 // effect of making some of the block_start*() calls non-const as
ysr@777 50 // below. Whether that might adversely affect performance optimizations
ysr@777 51 // that compilers might normally perform in the case of non-G1
ysr@777 52 // collectors needs to be carefully investigated prior to any such
ysr@777 53 // consolidation.
ysr@777 54
ysr@777 55 // Forward declarations
ysr@777 56 class G1BlockOffsetSharedArray;
mgerdin@6987 57 class G1OffsetTableContigSpace;
ysr@777 58
ysr@777 59 class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
ysr@777 60 friend class VMStructs;
ysr@777 61 protected:
ysr@777 62 // These members describe the region covered by the table.
ysr@777 63
ysr@777 64 // The space this table is covering.
ysr@777 65 HeapWord* _bottom; // == reserved.start
ysr@777 66 HeapWord* _end; // End of currently allocated region.
ysr@777 67
ysr@777 68 public:
ysr@777 69 // Initialize the table to cover the given space.
ysr@777 70 // The contents of the initial table are undefined.
ysr@777 71 G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
ysr@777 72 _bottom(bottom), _end(end)
ysr@777 73 {
ysr@777 74 assert(_bottom <= _end, "arguments out of order");
ysr@777 75 }
ysr@777 76
ysr@777 77 // Note that the committed size of the covered space may have changed,
ysr@777 78 // so the size of the table might need to change as well.
ysr@777 79 virtual void resize(size_t new_word_size) = 0;
ysr@777 80
ysr@777 81 virtual void set_bottom(HeapWord* new_bottom) {
johnc@4300 82 assert(new_bottom <= _end,
johnc@4300 83 err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
drchase@6680 84 p2i(new_bottom), p2i(_end)));
ysr@777 85 _bottom = new_bottom;
ysr@777 86 resize(pointer_delta(_end, _bottom));
ysr@777 87 }
ysr@777 88
ysr@777 89 // Requires "addr" to be contained by a block, and returns the address of
ysr@777 90 // the start of that block. (May have side effects, namely updating of
ysr@777 91 // shared array entries that "point" too far backwards. This can occur,
ysr@777 92 // for example, when LAB allocation is used in a space covered by the
ysr@777 93 // table.)
ysr@777 94 virtual HeapWord* block_start_unsafe(const void* addr) = 0;
ysr@777 95 // Same as above, but does not have any of the possible side effects
ysr@777 96 // discussed above.
ysr@777 97 virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;
ysr@777 98
ysr@777 99 // Returns the address of the start of the block containing "addr", or
ysr@777 100 // else "null" if it is covered by no block. (May have side effects,
ysr@777 101 // namely updating of shared array entries that "point" too far
ysr@777 102 // backwards. This can occur, for example, when lab allocation is used
ysr@777 103 // in a space covered by the table.)
ysr@777 104 inline HeapWord* block_start(const void* addr);
ysr@777 105 // Same as above, but does not have any of the possible side effects
ysr@777 106 // discussed above.
ysr@777 107 inline HeapWord* block_start_const(const void* addr) const;
ysr@777 108 };
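// A minimal usage sketch (the names "bot" and "addr" are illustrative,
// not part of this header): given a concrete table covering a space and
// an arbitrary pointer into that space,
//
//   G1BlockOffsetTable* bot = ...;  // some concrete subtype instance
//   const void* addr = ...;         // any address inside a block
//   HeapWord* start = bot->block_start(addr);
//
// "start" is then the first word of the block containing "addr"; the
// _const variants answer the same query without updating any
// lazily-maintained entries.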
ysr@777 109
tschatzl@7051 110 class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
tschatzl@7051 111 public:
tschatzl@7257 112 virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
brutisso@7256 113 // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
brutisso@7256 114 // retrieve it here since this would cause firing of several asserts. The code
brutisso@7256 115 // executed after commit of a region already needs to do some re-initialization of
brutisso@7256 116 // the HeapRegion, so we combine that.
brutisso@7256 117 }
tschatzl@7051 118 };
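// A sketch of how the "zero_filled" argument can be used by a listener
// that does initialize newly committed memory (this particular listener
// intentionally does nothing; "clear_range" below is a hypothetical
// helper, not part of this code):
//
//   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
//     if (!zero_filled) {
//       clear_range(start_idx, num_regions); // touch pages only if the OS
//     }                                      // did not hand them out zeroed
//   }
//
// Freshly committed pages already read as zero, so skipping the explicit
// clear avoids touching (and thus actually committing) them; this is the
// footprint fix described in 8059758 above.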
tschatzl@7051 119
ysr@777 120 // This implementation of "G1BlockOffsetTable" divides the covered region
ysr@777 121 // into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
ysr@777 122 // for each such subregion indicates how far back one must go to find the
ysr@777 123 // start of the chunk that includes the first word of the subregion.
ysr@777 124 //
ysr@777 125 // Each BlockOffsetArray is owned by a Space. However, the actual array
ysr@777 126 // may be shared by several BlockOffsetArrays; this is useful
ysr@777 127 // when a single resizable area (such as a generation) is divided up into
ysr@777 128 // several spaces in which contiguous allocation takes place,
ysr@777 129 // as is done, for example, in G1 or in the train generation.
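//
// For example (assuming N_words == 64): if an object starts at word 40 of
// subregion i and extends into subregion i+1, the entry for subregion i+1
// records that one must go back 64 - 40 == 24 words from the start of
// i+1 to find the object start. Entries for subregions deeper inside a
// large block hold encoded back-skip values, so a lookup does not have to
// walk back one entry at a time.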
ysr@777 130
ysr@777 131 // Here is the shared array type.
ysr@777 132
zgu@3900 133 class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
ysr@777 134 friend class G1BlockOffsetArray;
ysr@777 135 friend class G1BlockOffsetArrayContigSpace;
ysr@777 136 friend class VMStructs;
ysr@777 137
ysr@777 138 private:
tschatzl@7051 139 G1BlockOffsetSharedArrayMappingChangedListener _listener;
ysr@777 140 // The reserved region covered by the shared array.
ysr@777 141 MemRegion _reserved;
ysr@777 142
ysr@777 143 // End of the current committed region.
ysr@777 144 HeapWord* _end;
ysr@777 145
ysr@777 146 // Array for keeping offsets for retrieving object start fast given an
ysr@777 147 // address.
ysr@777 148 u_char* _offset_array; // byte array keeping backwards offsets
ysr@777 149
johnc@4300 150 void check_offset(size_t offset, const char* msg) const {
johnc@4300 151 assert(offset <= N_words,
johnc@4300 152 err_msg("%s - "
drchase@6680 153 "offset: " SIZE_FORMAT ", N_words: " UINT32_FORMAT,
johnc@4300 154 msg, offset, N_words));
johnc@4300 155 }
johnc@4300 156
ysr@777 157 // Bounds checking accessors:
ysr@777 158 // For performance these have to devolve to array accesses in product builds.
tschatzl@7051 159 inline u_char offset_array(size_t index) const;
ysr@777 160
tschatzl@7051 161 void set_offset_array_raw(size_t index, u_char offset) {
ysr@777 162 _offset_array[index] = offset;
ysr@777 163 }
ysr@777 164
tschatzl@7051 165 inline void set_offset_array(size_t index, u_char offset);
ysr@777 166
tschatzl@7051 167 inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
ysr@777 168
tschatzl@7051 169 inline void set_offset_array(size_t left, size_t right, u_char offset);
tschatzl@7051 170
ysr@777 171 bool is_card_boundary(HeapWord* p) const;
ysr@777 172
tschatzl@7051 173 public:
tschatzl@7051 174
ysr@777 175 // Return the number of slots needed for an offset array
ysr@777 176 // that covers mem_region_words words.
tschatzl@7051 177 static size_t compute_size(size_t mem_region_words) {
tschatzl@7051 178 size_t number_of_slots = (mem_region_words / N_words);
tschatzl@7051 179 return ReservedSpace::allocation_align_size_up(number_of_slots);
ysr@777 180 }
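// Worked example (a sketch, assuming 64-bit heap words and N_words == 64):
// covering 1 GB of heap gives mem_region_words == 2^30 / 8 == 2^27 words,
// so number_of_slots == 2^27 / 64 == 2^21, i.e. 2M entries; at one byte
// per entry that is 2 MB of offset array before page-size alignment.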
ysr@777 181
ysr@777 182 enum SomePublicConstants {
ysr@777 183 LogN = 9,
ysr@777 184 LogN_words = LogN - LogHeapWordSize,
ysr@777 185 N_bytes = 1 << LogN,
ysr@777 186 N_words = 1 << LogN_words
ysr@777 187 };
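// On a typical 64-bit VM (LogHeapWordSize == 3) these evaluate to
// N_bytes == 512 and N_words == 64: each byte of the offset array
// describes one 512-byte (64-heap-word) card of the covered region.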
ysr@777 188
ysr@777 189 // Initialize the table to cover from "base" to (at least)
ysr@777 190 // "base + init_word_size". In the future, the table may be expanded
ysr@777 191 // (see "resize" below) up to the size of "_reserved" (which must be at
ysr@777 192 // least "init_word_size".) The contents of the initial table are
ysr@777 193 // undefined; it is the responsibility of the constituent
ysr@777 194 // G1BlockOffsetTable(s) to initialize cards.
tschatzl@7051 195 G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
ysr@777 196
ysr@777 197 // Return the appropriate index into "_offset_array" for "p".
ysr@777 198 inline size_t index_for(const void* p) const;
tschatzl@7051 199 inline size_t index_for_raw(const void* p) const;
ysr@777 200
ysr@777 201 // Return the address indicating the start of the region corresponding to
ysr@777 202 // "index" in "_offset_array".
ysr@777 203 inline HeapWord* address_for_index(size_t index) const;
tschatzl@7051 204 // Variant of address_for_index that does not check the index for validity.
tschatzl@7051 205 inline HeapWord* address_for_index_raw(size_t index) const {
tschatzl@7051 206 return _reserved.start() + (index << LogN_words);
tschatzl@7051 207 }
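// For example (a sketch): with LogN_words == 6, an address 100 heap words
// past _reserved.start() maps to index 100 >> 6 == 1, and
// address_for_index_raw(1) returns _reserved.start() + 64 words, the base
// of the card containing that address.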
ysr@777 208 };
ysr@777 209
ysr@777 210 // And here is the G1BlockOffsetTable subtype that uses the array.
ysr@777 211
ysr@777 212 class G1BlockOffsetArray: public G1BlockOffsetTable {
ysr@777 213 friend class G1BlockOffsetSharedArray;
ysr@777 214 friend class G1BlockOffsetArrayContigSpace;
ysr@777 215 friend class VMStructs;
ysr@777 216 private:
ysr@777 217 enum SomePrivateConstants {
ysr@777 218 N_words = G1BlockOffsetSharedArray::N_words,
ysr@777 219 LogN = G1BlockOffsetSharedArray::LogN
ysr@777 220 };
ysr@777 221
ysr@777 222 // This is the array, which can be shared by several BlockOffsetArrays
ysr@777 223 // servicing different spaces.
ysr@777 224 G1BlockOffsetSharedArray* _array;
ysr@777 225
ysr@777 226 // The space that owns this subregion.
mgerdin@6987 227 G1OffsetTableContigSpace* _gsp;
ysr@777 228
ysr@777 229 // The portion [_unallocated_block, _gsp->end()) of the space that
ysr@777 230 // is a single block known not to contain any objects.
ysr@777 231 // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
ysr@777 232 HeapWord* _unallocated_block;
ysr@777 233
ysr@777 234 // Sets the entries
ysr@777 235 // corresponding to the cards starting at "start" and ending at "end"
ysr@777 236 // to point back to the card before "start": the interval [start, end)
ysr@777 237 // is right-open.
ysr@777 238 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
ysr@777 239 // Same as above, except that the args here are a card _index_ interval
ysr@777 240 // that is closed: [start_index, end_index]
ysr@777 241 void set_remainder_to_point_to_start_incl(size_t start, size_t end);
ysr@777 242
ysr@777 243 protected:
ysr@777 244
mgerdin@6987 245 G1OffsetTableContigSpace* gsp() const { return _gsp; }
mgerdin@6987 246
mgerdin@6987 247 inline size_t block_size(const HeapWord* p) const;
ysr@777 248
ysr@777 249 // Returns the address of a block whose start is at most "addr".
ysr@777 250 // If "has_max_index" is true, assumes that "max_index" is the last valid one
ysr@777 251 // in the array.
ysr@777 252 inline HeapWord* block_at_or_preceding(const void* addr,
ysr@777 253 bool has_max_index,
ysr@777 254 size_t max_index) const;
ysr@777 255
ysr@777 256 // "q" is a block boundary that is <= "addr"; "n" is the address of the
ysr@777 257 // next block (or the end of the space.) Return the address of the
ysr@777 258 // beginning of the block that contains "addr". Does so without side
ysr@777 259 // effects (see, e.g., spec of block_start.)
ysr@777 260 inline HeapWord*
ysr@777 261 forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
ysr@777 262 const void* addr) const;
ysr@777 263
ysr@777 264 // "q" is a block boundary that is <= "addr"; return the address of the
ysr@777 265 // beginning of the block that contains "addr". May have side effects
ysr@777 266 // on "this", by updating imprecise entries.
ysr@777 267 inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
ysr@777 268 const void* addr);
ysr@777 269
ysr@777 270 // "q" is a block boundary that is <= "addr"; "n" is the address of the
ysr@777 271 // next block (or the end of the space.) Return the address of the
ysr@777 272 // beginning of the block that contains "addr". May have side effects
ysr@777 273 // on "this", by updating imprecise entries.
ysr@777 274 HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
ysr@777 275 HeapWord* n,
ysr@777 276 const void* addr);
ysr@777 277
ysr@777 278 // Requires that "*threshold_" be the first array entry boundary at or
ysr@777 279 // above "blk_start", and that "*index_" be the corresponding array
ysr@777 280 // index. If the block starts at or crosses "*threshold_", records
ysr@777 281 // "blk_start" as the appropriate block start for the array index
ysr@777 282 // starting at "*threshold_", and for any other indices crossed by the
ysr@777 283 // block. Updates "*threshold_" and "*index_" to correspond to the first
ysr@777 284 // index after the block end.
ysr@777 285 void alloc_block_work2(HeapWord** threshold_, size_t* index_,
ysr@777 286 HeapWord* blk_start, HeapWord* blk_end);
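// For example (a sketch): if "*threshold_" is the boundary of card k and
// the block [blk_start, blk_end) starts below that boundary but ends at
// or above it, the entries for card k and any further cards the block
// crosses are set to refer back to "blk_start"; on return "*threshold_"
// and "*index_" denote the first card boundary at or above "blk_end".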
ysr@777 287
ysr@777 288 public:
ysr@777 289 // The space may not have its bottom and top set yet, which is why the
brutisso@7256 290 // region is passed as a parameter. The elements of the array are
brutisso@7256 291 // initialized to zero.
brutisso@7256 292 G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
ysr@777 293
ysr@777 294 // Note: this ought to be part of the constructor, but that would require
ysr@777 295 // "this" to be passed as a parameter to a member constructor for
ysr@777 296 // the containing concrete subtype of Space.
ysr@777 297 // This would be legal C++, but MS VC++ doesn't allow it.
mgerdin@6987 298 void set_space(G1OffsetTableContigSpace* sp);
ysr@777 299
ysr@777 300 // Resets the covered region to one with the same _bottom as before but
ysr@777 301 // the "new_word_size".
ysr@777 302 void resize(size_t new_word_size);
ysr@777 303
ysr@777 304 virtual HeapWord* block_start_unsafe(const void* addr);
ysr@777 305 virtual HeapWord* block_start_unsafe_const(const void* addr) const;
ysr@777 306
tonyp@2453 307 // Used by region verification. Checks that the contents of the
tonyp@2453 308 // BOT reflect that there's a single object that spans the address
tonyp@2453 309 // range [obj_start, obj_start + word_size); returns true if this is
tonyp@2453 310 // the case and false if it is not.
tonyp@2453 311 bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
tonyp@2453 312
ysr@777 313 void check_all_cards(size_t left_card, size_t right_card) const;
tonyp@2241 314
tonyp@2453 315 virtual void print_on(outputStream* out) PRODUCT_RETURN;
ysr@777 316 };
ysr@777 317
ysr@777 318 // A subtype of BlockOffsetArray that takes advantage of the fact
ysr@777 319 // that its underlying space is a ContiguousSpace, so that its "active"
ysr@777 320 // region can be more efficiently tracked (than for a non-contiguous space).
ysr@777 321 class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
ysr@777 322 friend class VMStructs;
ysr@777 323
ysr@777 324 // allocation boundary at which offset array must be updated
ysr@777 325 HeapWord* _next_offset_threshold;
ysr@777 326 size_t _next_offset_index; // index corresponding to that boundary
ysr@777 327
ysr@777 328 // Work function to be called when allocation start crosses the next
ysr@777 329 // threshold in the contig space.
ysr@777 330 void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 331 alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
ysr@777 332 blk_start, blk_end);
ysr@777 333 }
ysr@777 334
brutisso@7256 335 // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
tschatzl@7051 336 // memory first.
tschatzl@7051 337 void zero_bottom_entry_raw();
tschatzl@7051 338 // Variant of initialize_threshold that does not check for availability of the
tschatzl@7051 339 // memory first.
tschatzl@7051 340 HeapWord* initialize_threshold_raw();
ysr@777 341 public:
ysr@777 342 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
ysr@777 343
ysr@777 344 // Initialize the threshold to reflect the first boundary after the
ysr@777 345 // bottom of the covered region.
ysr@777 346 HeapWord* initialize_threshold();
ysr@777 347
tschatzl@7050 348 void reset_bot() {
tschatzl@7051 349 zero_bottom_entry_raw();
tschatzl@7051 350 initialize_threshold_raw();
tschatzl@7050 351 }
ysr@777 352
ysr@777 353 // Return the next threshold, the point at which the table should be
ysr@777 354 // updated.
ysr@777 355 HeapWord* threshold() const { return _next_offset_threshold; }
ysr@777 356
ysr@777 357 // These must be guaranteed to work properly (i.e., do nothing)
ysr@777 358 // when "blk_start" ("blk" for second version) is "NULL". In this
ysr@777 359 // implementation, that's true because NULL is represented as 0, and thus
ysr@777 360 // never exceeds the "_next_offset_threshold".
ysr@777 361 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 362 if (blk_end > _next_offset_threshold)
ysr@777 363 alloc_block_work1(blk_start, blk_end);
ysr@777 364 }
ysr@777 365 void alloc_block(HeapWord* blk, size_t size) {
ysr@777 366 alloc_block(blk, blk+size);
ysr@777 367 }
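// A minimal usage sketch (names are illustrative): the owning region's
// allocation path records each newly allocated block, and the table only
// does work when a block crosses the current threshold:
//
//   HeapWord* obj = ...;       // start of the newly allocated block
//   size_t word_size = ...;    // its size in heap words
//   bot->alloc_block(obj, word_size);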
ysr@777 368
ysr@777 369 HeapWord* block_start_unsafe(const void* addr);
ysr@777 370 HeapWord* block_start_unsafe_const(const void* addr) const;
tonyp@2241 371
tonyp@2453 372 void set_for_starts_humongous(HeapWord* new_top);
tonyp@2453 373
tonyp@2453 374 virtual void print_on(outputStream* out) PRODUCT_RETURN;
ysr@777 375 };
stefank@2314 376
stefank@2314 377 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
