Tue, 19 Aug 2014 10:50:27 +0200
8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
// G1BlockOffsetTable (abstract)
// -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
// -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below. Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.
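//
// As an illustrative sketch of the "block_start" use case above (the names
// "bot", "card_addr" and "obj_start" are hypothetical, not part of this
// file): a card-table-based barrier hands us the address of a dirty card,
// and the BOT yields the start of the object overlapping that card, which
// is where a safe scan can begin:
//
//   HeapWord* card_addr = ...;  // first word of a dirty card
//   HeapWord* obj_start = bot->block_start(card_addr);
//   assert(obj_start <= card_addr, "block start is at or before the card");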
// Forward declarations
class G1BlockOffsetSharedArray;
class G1OffsetTableContigSpace;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
    _bottom(bottom), _end(end)
  {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end,
           err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
                   p2i(new_bottom), p2i(_end)));
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block. (May have side effects, namely updating of
  // shared array entries that "point" too far backwards. This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block. (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};
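// An illustrative sketch of the const/non-const split above (hypothetical
// usage, not part of this file): paths that must not write to the table,
// such as verification, would use the const variant, while ordinary lookups
// may use the updating variant to repair imprecise entries as they go:
//
//   HeapWord* s1 = table->block_start(addr);        // may update entries
//   HeapWord* s2 = table->block_start_const(addr);  // read-only lookup
//   // for a covered "addr", both return the same block start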
// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place,
// such as, for example, in G1 or in the train generation.
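//
// A small worked example of this scheme (illustrative; the values assume
// the constants defined below: LogN = 9, i.e. 512-byte subregions, which
// is N_words = 64 words on a 64-bit VM):
//
//   subregions: |   c0   |   c1   |   c2   |
//   blocks:     [A: 80 words][B: 120 words ...
//
//   entry[c0] = 0    A starts at the first word of c0
//   entry[c1] = 64   the first word of c1 belongs to A, 64 words back
//   entry[c2] = 48   the first word of c2 belongs to B, 48 words back
//
// Offsets larger than N_words do not fit a direct word count; such entries
// hold an encoded back-skip instead (see set_remainder_to_point_to_start()
// in G1BlockOffsetArray below).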
// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets, so that the start of the object covering
  // a given address can be retrieved quickly.
  VirtualSpace _vs;
  u_char* _offset_array;  // byte array keeping backwards offsets

  void check_index(size_t index, const char* msg) const {
    assert(index < _vs.committed_size(),
           err_msg("%s - "
                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
                   msg, index, _vs.committed_size()));
  }

  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= N_words,
           err_msg("%s - "
                   "offset: " SIZE_FORMAT ", N_words: " UINT32_FORMAT,
                   msg, offset, N_words));
  }

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    check_index(index, "index out of range");
    return _offset_array[index];
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);

  void set_offset_array(size_t index, u_char offset) {
    check_index(index, "index out of range");
    check_offset(offset, "offset too large");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    check_index(index, "index out of range");
    assert(high >= low, "addresses out of order");
    check_offset(pointer_delta(high, low), "offset too large");
    _offset_array[index] = (u_char) pointer_delta(high, low);
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    check_index(right, "right index out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    check_index(index, "index out of range");
    assert(high >= low, "addresses out of order");
    check_offset(pointer_delta(high, low), "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::page_align_size_up(number_of_slots);
  }
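  // For example (illustrative arithmetic using the constants defined
  // below): with N_words = 64 on a 64-bit VM, covering a 1M region of
  // 131072 words needs 131072 / 64 + 1 = 2049 byte-sized slots, which
  // page_align_size_up then rounds up to a page multiple (4096 with 4K
  // pages).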
public:
  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
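  // The two mappings above are inverses of one another in the following
  // sense (an illustrative sketch; "sa" is a hypothetical shared array and
  // "p" any address in its covered region):
  //
  //   size_t i = sa->index_for(p);
  //   HeapWord* base = sa->address_for_index(i);
  //   // base <= p < base + N_words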
};

// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN = G1BlockOffsetSharedArray::LogN
  };

  // The following enum is used by do_block_internal below.
  enum Action {
    Action_single,  // BOT records a single block (see single_block())
    Action_mark,    // BOT marks the start of a block (see mark_block())
    Action_check    // Check that BOT records block correctly
                    // (see verify_single_block()).
  };

  // This is the array, which can be shared by several BlockOffsetArrays
  // servicing different parts of the covered region.
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  G1OffsetTableContigSpace* _gsp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // The portion [_unallocated_block, _gsp->end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the interval
  // [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

protected:

  G1OffsetTableContigSpace* gsp() const { return _gsp; }

  inline size_t block_size(const HeapWord* p) const;

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes "max_index" is the last valid one
  // in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". Does so without side
  // effects (see, e.g., spec of block_start.)
  inline HeapWord*
  forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                         const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index. If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block. Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work2(HeapWord** threshold_, size_t* index_,
                         HeapWord* blk_start, HeapWord* blk_end);
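  // An illustrative walk-through of the contract above (hypothetical
  // values): with 64-word cards and the threshold at card boundary T,
  // allocating a block [T - 16, T + 80) crosses T, so the entries for the
  // boundaries T and T + 64 are set to record the block start at T - 16,
  // and *threshold_ / *index_ advance to the first boundary at or above
  // T + 80, namely T + 128.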
public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
                     bool init_to_zero);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(G1OffsetTableContigSpace* sp);

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr);

  // Resets the covered region to one with the same _bottom as before but
  // the "new_word_size".
  void resize(size_t new_word_size);

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  virtual void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // general, non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
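  // For example (illustrative): if the BOT currently records a single
  // 128-word block at "blk_start", then split_block(blk_start, 128, 48)
  // adjusts the entries to describe the two blocks
  // [blk_start, blk_start + 48) and [blk_start + 48, blk_start + 128).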
  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  inline void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }

  inline void freed(HeapWord* blk_start, HeapWord* blk_end);

  inline void freed(HeapWord* blk, size_t size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Verification & debugging - ensure that the offset table reflects the fact
  // that the block [blk_start, blk_end) or [blk, blk + size) is a
  // single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (VerifyBlockOffsetArray) {
      do_block_internal(blk_start, blk_end, Action_check);
    }
  }

  inline void verify_single_block(HeapWord* blk, size_t size) {
    verify_single_block(blk, blk + size);
  }

  // Used by region verification. Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, false if it is not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  // Verify that the given block is before _unallocated_block
  inline void verify_not_unallocated(HeapWord* blk_start,
                                     HeapWord* blk_end) const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      assert(blk_start < blk_end, "Block inconsistency?");
      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
    }
  }

  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
    verify_not_unallocated(blk, blk + size);
  }

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // Allocation boundary at which the offset array must be updated.
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;  // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }

  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();
public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  void reset_bot() {
    zero_bottom_entry();
    initialize_threshold();
  }

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL". In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work1(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
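  // An illustrative sketch of the fast path above ("cs_bot" and "t0" are
  // hypothetical): with the threshold at card boundary T, allocations that
  // end at or below T never touch the table; only the allocation whose end
  // first crosses T pays for the update, after which the threshold has
  // moved past that block:
  //
  //   cs_bot->alloc_block(t0, t0 + 8);     // t0 + 8 <= T: no table update
  //   cs_bot->alloc_block(t0 + 8, T + 4);  // crosses T: updates entries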
  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;

  void set_for_starts_humongous(HeapWord* new_top);

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP