Tue, 25 Sep 2012 07:05:55 -0700
7200615: NPG: optimized VM build is broken
Reviewed-by: kvn
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
// G1BlockOffsetTable (abstract)
// -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
// -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below. Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.

// Forward declarations
class ContiguousSpace;
class G1BlockOffsetSharedArray;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
    _bottom(bottom), _end(end)
  {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block. (May have side effects, namely updating of
  // shared array entries that "point" too far backwards. This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block. (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};
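
// An illustrative sketch of how a concrete subtype is meant to be queried
// (assumption: "bot" and "interior" are not part of this header; the block
// below is never compiled):
#if 0
  HeapWord* start   = bot->block_start(interior);        // may fix up entries
  HeapWord* start_c = bot->block_start_const(interior);  // read-only query
  assert(start == start_c, "both forms must agree on the block start");
#endif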

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place, such as,
// for example, in G1 or in the train generation.
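
// A minimal worked example of the card arithmetic (illustration only, not
// part of this header; never compiled), assuming the 2^LogN-byte subregions
// defined by SomePublicConstants in G1BlockOffsetSharedArray below:
#if 0
  // Card index covering an address "p", and that card's first word:
  size_t    index = pointer_delta(p, _reserved.start()) >> LogN_words;
  HeapWord* base  = _reserved.start() + (index << LogN_words);
  // The u_char entry for "index" records how many words before "base" the
  // enclosing block starts; block_start backs up by that many words
  // (possibly consulting several entries for very large blocks).
#endif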

// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }

  void set_offset_array(size_t index, u_char offset) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(offset <= N_words, "offset too large");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    _offset_array[index] = (u_char) pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::page_align_size_up(number_of_slots);
  }
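
  // For a concrete feel for the sizing (arithmetic only, not part of this
  // header): covering 1M heap words with 64-word cards needs 16384 + 1
  // slots, which page_align_size_up() then rounds up to a whole number of
  // OS pages.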

public:
  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
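
  // Concretely (arithmetic from the constants above): LogN = 9 gives
  // 512-byte cards; on a 64-bit VM, where LogHeapWordSize == 3, that is
  // LogN_words == 6 and N_words == 64 words per card, so a u_char entry
  // comfortably encodes the 0..N_words offsets checked by the accessors
  // above.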

  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
};
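
// An illustrative invariant of the two mappings above (sketch only, never
// compiled; "sa" and "p" are assumed, not part of this header):
#if 0
  size_t    i = sa->index_for(p);          // card index covering "p"
  HeapWord* b = sa->address_for_index(i);  // first word of that card
  assert(b <= (HeapWord*)p &&
         (char*)p < (char*)b + G1BlockOffsetSharedArray::N_bytes,
         "an address indexes the card that contains it");
#endif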

// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN    = G1BlockOffsetSharedArray::LogN
  };

  // The following enums are used by do_block_helper
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // This is the array, which can be shared by several BlockOffsetArray's
  // servicing different spaces.
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If "_sp" is a contiguous space, the field below is the view of "_sp"
  // as a contiguous space, else NULL.
  ContiguousSpace* _csp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

protected:

  ContiguousSpace* csp() const { return _csp; }

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes that "max_index" is the last valid
  // index in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;
308 // "q" is a block boundary that is <= "addr"; "n" is the address of the
309 // next block (or the end of the space.) Return the address of the
310 // beginning of the block that contains "addr". Does so without side
311 // effects (see, e.g., spec of block_start.)
312 inline HeapWord*
313 forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
314 const void* addr) const;
316 // "q" is a block boundary that is <= "addr"; return the address of the
317 // beginning of the block that contains "addr". May have side effects
318 // on "this", by updating imprecise entries.
319 inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
320 const void* addr);
322 // "q" is a block boundary that is <= "addr"; "n" is the address of the
323 // next block (or the end of the space.) Return the address of the
324 // beginning of the block that contains "addr". May have side effects
325 // on "this", by updating imprecise entries.
326 HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
327 HeapWord* n,
328 const void* addr);
330 // Requires that "*threshold_" be the first array entry boundary at or
331 // above "blk_start", and that "*index_" be the corresponding array
332 // index. If the block starts at or crosses "*threshold_", records
333 // "blk_start" as the appropriate block start for the array index
334 // starting at "*threshold_", and for any other indices crossed by the
335 // block. Updates "*threshold_" and "*index_" to correspond to the first
336 // index after the block end.
337 void alloc_block_work2(HeapWord** threshold_, size_t* index_,
338 HeapWord* blk_start, HeapWord* blk_end);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
                     bool init_to_zero);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp);

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr);

  // Resets the covered region to one with the same _bottom as before but
  // the "new_word_size".
  void resize(size_t new_word_size);

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  virtual void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // general, non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
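
  // A sketch of the adjustment split_block performs (illustration only,
  // never compiled; "bot" and the sizes are assumed):
#if 0
  // Before: the BOT records one block [blk_start, blk_start + full_blk_size).
  bot.split_block(blk_start, full_blk_size, left_blk_size);
  // After: it records [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size), as if the
  // block had been allocated as two.
#endif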

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }
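
  // The contrast between the two primitives above, as a sketch (never
  // compiled; "bot" and the bounds are assumed):
#if 0
  bot.single_block(blk_start, blk_end); // rewrites every card in the range
  bot.mark_block(blk_start, blk_end);   // touches only the first card; the
                                        // remaining cards must already be
                                        // correct
#endif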

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  inline void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }

  inline void freed(HeapWord* blk_start, HeapWord* blk_end);

  inline void freed(HeapWord* blk, size_t size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Verification & debugging - ensure that the offset table reflects the fact
  // that the block [blk_start, blk_end) or [blk, blk + size) is a
  // single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (VerifyBlockOffsetArray) {
      do_block_internal(blk_start, blk_end, Action_check);
    }
  }

  inline void verify_single_block(HeapWord* blk, size_t size) {
    verify_single_block(blk, blk + size);
  }

  // Used by region verification. Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, returns false if it's not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  // Verify that the given block is before _unallocated_block
  inline void verify_not_unallocated(HeapWord* blk_start,
                                     HeapWord* blk_end) const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      assert(blk_start < blk_end, "Block inconsistency?");
      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
    }
  }

  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
    verify_not_unallocated(blk, blk + size);
  }

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;   // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }

public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL". In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold)
      alloc_block_work1(blk_start, blk_end);
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
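
  // A minimal sketch of the fast path above (illustration only, never
  // compiled; "space", "word_size" and "bot" are assumed): a contiguous
  // allocator bumps top and only enters the BOT update path when the new
  // block crosses the current threshold.
#if 0
  HeapWord* blk_start = space->top();
  HeapWord* blk_end   = blk_start + word_size;
  space->set_top(blk_end);
  bot.alloc_block(blk_start, blk_end);  // no-op unless blk_end > threshold()
#endif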

  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;

  void set_for_starts_humongous(HeapWord* new_top);

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP