/*
 * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;
class SerializeOopClosure;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table may need to be resized as well.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};
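
// Usage sketch (illustrative, not part of the interface): for any concrete
// subtype "bot" and an address inside the covered region,
//
//   HeapWord* start = bot->block_start(addr);
//   // start <= addr, and start is the first word of the block containing
//   // addr; NULL if no block covers addr.
//
// block_start_unsafe() is the same query without the covering-block check,
// so it must only be called when such a block is known to exist.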

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
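
  // For example (illustrative numbers, assuming 64-bit HeapWords,
  // i.e. LogHeapWordSize == 3):
  //   LogN = 9      => each card covers N_bytes = 512 bytes,
  //   LogN_words = 6 => N_words = 64 words per card.
  // On a 32-bit VM (LogHeapWordSize == 2), N_words would be 128.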

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  void set_offset_array(size_t index, u_char offset) {
    assert(index < _vs.committed_size(), "index out of range");
    _offset_array[index] = offset;
  }
  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }
  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;
    memset(&_offset_array[index_for(left)], offset, num_cards);
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;
    memset(&_offset_array[left], offset, num_cards);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
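
  // E.g. (sketch): covering 2^20 words with N_words == 64 needs
  //   2^20 / 64 + 1 == 16385 slots,
  // which allocation_align_size_up() then rounds up to the platform's
  // allocation granularity.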

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
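
  // Round-trip sketch (illustrative): for any "p" in the covered region,
  //   HeapWord* card = address_for_index(index_for(p));
  //   // card <= p < card + N_words; "card" is a card (2^LogN-byte)
  //   // boundary of the covered region.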

  // Shared space support
  void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };
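
  // Decoding sketch (illustrative; it mirrors what a lookup such as
  // block_start_unsafe() does, but is not itself the implementation):
  //   size_t index = _array->index_for(addr);
  //   u_char e = _array->offset_array(index);
  //   while (e >= N_words) {                  // logarithmic back-skip
  //     index -= entry_to_cards_back(e);      // Base^(e - N_words) cards
  //     e = _array->offset_array(index);
  //   }
  //   // The block start is now "e" words before the card boundary at
  //   // _array->address_for_index(index); walk forward block by block
  //   // from there to reach the block containing "addr".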

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i); // size_t, so the shift cannot overflow int
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table may need to be resized as well.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
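
  // Typical use (a sketch; "bot", "obj" and "size" are hypothetical names):
  //   HeapWord* obj = ...;                // carve "size" words out of the space
  //   bot->alloc_block(obj, obj + size);  // keep the BOT in sync
  //   // From here on, block_start(p) == obj for any p in [obj, obj + size),
  //   // until a later call (e.g. single_block()) resets that range.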

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
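
  // E.g. (sketch): if the BOT records one 200-word block at "blk",
  //   split_block(blk, 200, 48);
  // leaves it recording [blk, blk + 48) and [blk + 48, blk + 200),
  // e.g. when a free-list space carves a chunk in two.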

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size) {
    freed(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t _next_offset_index;       // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
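
  // Fast-path sketch (illustrative): with strictly contiguous allocation,
  //   bot->alloc_block(obj, obj + size);
  // is usually just the compare above, since [blk_start, blk_end) only
  // occasionally crosses the next card boundary. alloc_block_work() runs
  // only when the threshold is crossed, and then advances
  // _next_offset_threshold past the new block.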

  HeapWord* block_start_unsafe(const void* addr) const;

  void serialize(SerializeOopClosure* soc);

  // Debugging support
  virtual size_t last_active_index() const;
};