Tue, 21 Aug 2012 14:10:39 -0700
7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
1 /*
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
27 #include "memory/space.hpp"
28 #include "oops/oop.inline.hpp"
29 #include "runtime/java.hpp"
30 #include "services/memTracker.hpp"
32 //////////////////////////////////////////////////////////////////////
33 // G1BlockOffsetSharedArray
34 //////////////////////////////////////////////////////////////////////
// Reserve (and lazily commit, via resize()) the backing store for the
// shared block offset table covering the reserved heap range. Each
// u_char entry describes one card's worth of heap.
G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
                                                   size_t init_word_size) :
  _reserved(reserved), _end(NULL)
{
  // Size the table for the whole reserved heap, but only commit enough
  // below for the initial heap size.
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  // Commit nothing up front (second argument 0); resize() below commits
  // what init_word_size requires.
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }

  // Tag the reservation as GC memory for native memory tracking.
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr(" "
                           " rs.base(): " INTPTR_FORMAT
                           " rs.size(): " INTPTR_FORMAT
                           " rs end(): " INTPTR_FORMAT,
                           rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr(" "
                           " _vs.low_boundary(): " INTPTR_FORMAT
                           " _vs.high_boundary(): " INTPTR_FORMAT,
                           _vs.low_boundary(),
                           _vs.high_boundary());
  }
}
// Commit or uncommit backing store so the table covers new_word_size
// words of heap. Grows and shrinks happen in whole pages; newly
// committed memory is NOT initialized here.
void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    // Growing: commit the page-aligned difference.
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
    // Initialization of the contents is left to the
    // G1BlockOffsetArray that uses it.
  } else {
    // Shrinking: only whole pages can be uncommitted; rounding down may
    // yield delta == 0, in which case the commitment stays as-is.
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid expansion");
  }
}
93 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
94 assert(p >= _reserved.start(), "just checking");
95 size_t delta = pointer_delta(p, _reserved.start());
96 return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
97 }
100 //////////////////////////////////////////////////////////////////////
101 // G1BlockOffsetArray
102 //////////////////////////////////////////////////////////////////////
// Construct a block offset array covering mr, backed by the shared
// array. If init_to_zero is false, every card is eagerly initialized
// to point back to mr.start(); otherwise initialization is deferred
// to the client of the table.
G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _csp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
118 void G1BlockOffsetArray::set_space(Space* sp) {
119 _sp = sp;
120 _csp = sp->toContiguousSpace();
121 }
// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
G1BlockOffsetArray:: set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {

  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1|     ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back slide
  //        (e.g., with today's, offset = 0x81 =>
  //          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  // Both endpoints are expected to be exact card boundaries.
  assert(start ==_array->address_for_index(start_card), "Precondition");
  assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
}
// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  // Fill each logarithmic region in turn with its backskip code
  // (N_words + i means "skip back 2**(3*i) cards").
  for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      // Last (possibly partial) region: clamp to end_card and stop.
      _array->set_offset_array(start_card_for_region, end_card, offset);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// right-open interval: [blk_start, blk_end)
void
G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  // First update the table entries, then record the allocation itself;
  // see allocated() for the latter.
  mark_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}
// Adjust BOT to show that a previously whole block has been split
// into two.
void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
                                     size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.  The left part [blk, blk + left_blk_size)
  // keeps its existing entries.
  mark_block(blk + left_blk_size, blk + blk_size);
}
// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary; otherwise the block lies
    // entirely within one card and the table needs no update.
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
          // (deliberate fall-through: without init_to_zero, a mark must
          // also update the cards the block spans, like Action_single).
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) +  N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    // Decode the backskip and verify that following it lands on a card
    // whose entry is no larger (i.e. skips never overshoot the block start).
    size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
    } else {
      guarantee(landing_card == start_card - 1, "Tautology");
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
  }
}
// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}
// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}
// Find the start of the block containing addr; may update ("refine")
// BOT entries along the way, hence non-const.
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    // addr is at or past the unallocated part; the "block" is the
    // unallocated suffix itself.
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  return forward_to_block_containing_addr(q, addr);
}
// This duplicates a little code from the above: unavoidable.
// Const variant: walks forward from the table answer without
// refining BOT entries.
HeapWord*
G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }
  // Otherwise, find the block start using the table.
  HeapWord* q = block_at_or_preceding(addr, false, 0);
  HeapWord* n = q + _sp->block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}
// Walk forward block by block from [q, n) until the block containing
// addr is found, refining BOT entries for each card boundary crossed.
HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
                                                          HeapWord* n,
                                                          const void* addr) {
  // We're not in the normal case.  We need to handle an important subcase
  // here: LAB allocation.  An allocation previously recorded in the
  // offset table was actually a lab allocation, and was divided into
  // several objects subsequently.  Fix this situation as we answer the
  // query, by updating entries as we cross them.

  // If the first object's end n is at a card boundary, start refining
  // with the corresponding card (the value of the entry will be basically
  // set to 0). If the object crosses the boundary -- start from the next card.
  size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
  HeapWord* next_boundary = _array->address_for_index(next_index);
  if (csp() != NULL) {
    // Contiguous-space fast path: objects parse with oop sizes and
    // nothing is parseable above top().
    if (addr >= csp()->top()) return csp()->top();
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        // klass_or_null() == NULL: object header not yet published by a
        // concurrent allocator; bail out with the best answer so far.
        if (obj->klass_or_null() == NULL) return q;
        n += obj->size();
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  } else {
    // Generic path: block sizes come from the space.
    while (next_boundary < addr) {
      while (n <= next_boundary) {
        q = n;
        oop obj = oop(q);
        if (obj->klass_or_null() == NULL) return q;
        n += _sp->block_size(q);
      }
      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
      // [q, n) is the block that crosses the boundary.
      alloc_block_work2(&next_boundary, &next_index, q, n);
    }
  }
  return forward_to_block_containing_addr_const(q, n, addr);
}
// Find a block start at or before addr using only table entries --
// never parses object headers, so it is safe even when objects on the
// intervening cards are not parseable. addr must be a card boundary.
HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  // Walk backwards: an entry of N_words means "keep going"; anything
  // smaller is a word offset back to a block start within that card.
  do {
    offset = _array->offset_array(index--);
    q -= offset;
  } while (offset == N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
  HeapWord* new_end = _bottom + new_word_size;
  if (_end < new_end && !init_to_zero()) {
    // Growing, and the table is kept eagerly initialized: the newly
    // covered cards must point back within one card (entry N_words).
    // verify that the old and new boundaries are also card boundaries
    assert(_array->is_card_boundary(_end),
           "_end not a card boundary");
    assert(_array->is_card_boundary(new_end),
           "new _end would not be a card boundary");
    // set all the newly added cards
    _array->set_offset_array(_end, new_end, N_words);
  }
  _end = new_end;  // update _end
}
462 void G1BlockOffsetArray::set_region(MemRegion mr) {
463 _bottom = mr.start();
464 _end = mr.end();
465 }
467 //
468 // threshold_
469 // | _index_
470 // v v
471 // +-------+-------+-------+-------+-------+
472 // | i-1 | i | i+1 | i+2 | i+3 |
473 // +-------+-------+-------+-------+-------+
474 // ( ^ ]
475 // block-start
476 //
// Record block [blk_start, blk_end) in the BOT and advance the caller's
// threshold/index pair past the block. See the diagram above.
void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
                                           HeapWord* blk_start, HeapWord* blk_end) {
  // For efficiency, do copy-in/copy-out.
  HeapWord* threshold = *threshold_;
  size_t    index = *index_;

  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  assert(blk_end > threshold, "should be past threshold");
  assert(blk_start <= threshold, "blk_start should be at or before threshold");
  assert(pointer_delta(threshold, blk_start) <= N_words,
         "offset should be <= BlockOffsetSharedArray::N");
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  assert(threshold == _array->_reserved.start() + index*N_words,
         "index must agree with threshold");

  DEBUG_ONLY(size_t orig_index = index;)

  // Mark the card that holds the offset into the block.  Note
  // that *index_ and *threshold_ are not updated until the end
  // of this method.
  _array->set_offset_array(index, threshold, blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index   = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) +  N_words;
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  index = end_index + 1;
  // Calculate threshold_ this way because end_index
  // may be the last valid index in the covered region.
  threshold = _array->address_for_index(end_index) + N_words;
  assert(threshold >= blk_end, "Incorrect offset threshold");

  // index_ and threshold_ updated here.
  *threshold_ = threshold;
  *index_ = index;

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary    = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_index) == 0 &&
          blk_start == boundary) ||
          (_array->offset_array(orig_index) > 0 &&
          _array->offset_array(orig_index) <= N_words),
         "offset array should have been set");
  for (size_t j = orig_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <=
             (u_char) (N_words+BlockOffsetArray::N_powers-1),
           "offset array should have been set");
  }
#endif
}
// Verify that every BOT entry for the cards spanned by the object at
// [obj_start, obj_start + word_size) resolves back to obj_start.
// Returns false (after printing diagnostics) on the first mismatch.
bool
G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
                                      size_t word_size) const {
  size_t first_card = _array->index_for(obj_start);
  size_t last_card = _array->index_for(obj_start + word_size - 1);
  if (!_array->is_card_boundary(obj_start)) {
    // If the object is not on a card boundary the BOT entry of the
    // first card should point to another object so we should not
    // check that one.
    first_card += 1;
  }
  for (size_t card = first_card; card <= last_card; card += 1) {
    HeapWord* card_addr = _array->address_for_index(card);
    HeapWord* block_start = block_start_const(card_addr);
    if (block_start != obj_start) {
      gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
                             "card index: "SIZE_FORMAT" "
                             "card addr: "PTR_FORMAT" BOT entry: %u "
                             "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
                             "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
                             block_start, card, card_addr,
                             _array->offset_array(card),
                             obj_start, word_size, first_card, last_card);
      return false;
    }
  }
  return true;
}
575 #ifndef PRODUCT
576 void
577 G1BlockOffsetArray::print_on(outputStream* out) {
578 size_t from_index = _array->index_for(_bottom);
579 size_t to_index = _array->index_for(_end);
580 out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
581 "cards ["SIZE_FORMAT","SIZE_FORMAT")",
582 _bottom, _end, from_index, to_index);
583 for (size_t i = from_index; i < to_index; ++i) {
584 out->print_cr(" entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
585 i, _array->address_for_index(i),
586 (uint) _array->offset_array(i));
587 }
588 }
589 #endif // !PRODUCT
591 //////////////////////////////////////////////////////////////////////
592 // G1BlockOffsetArrayContigSpace
593 //////////////////////////////////////////////////////////////////////
// Contiguous-space variant: start the search from the card below the
// current allocation frontier (_next_offset_index - 1).
HeapWord*
G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  return forward_to_block_containing_addr(q, addr);
}
// Const variant of the above: walks forward without refining entries.
HeapWord*
G1BlockOffsetArrayContigSpace::
block_start_unsafe_const(const void* addr) const {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
  HeapWord* n = q + _sp->block_size(q);
  return forward_to_block_containing_addr_const(q, n, addr);
}
// Contiguous-space BOT: entries above the allocation frontier are left
// uninitialized (init_to_zero == true); the threshold/index pair is set
// up later via initialize_threshold().
G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
                              MemRegion mr) :
  G1BlockOffsetArray(array, mr, true)
{
  _next_offset_threshold = NULL;
  _next_offset_index = 0;
}
622 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
623 assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
624 "just checking");
625 _next_offset_index = _array->index_for(_bottom);
626 _next_offset_index++;
627 _next_offset_threshold =
628 _array->address_for_index(_next_offset_index);
629 return _next_offset_threshold;
630 }
632 void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
633 assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
634 "just checking");
635 size_t bottom_index = _array->index_for(_bottom);
636 assert(_array->address_for_index(bottom_index) == _bottom,
637 "Precondition of call");
638 _array->set_offset_array(bottom_index, 0);
639 }
// Reinitialize the BOT for a region that becomes the first region of a
// humongous object spanning [_bottom, new_top).
void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
  assert(new_top <= _end, "_end should have already been updated");

  // The first BOT entry should have offset 0.
  zero_bottom_entry();
  initialize_threshold();
  // Record the single block covering the whole allocated prefix.
  alloc_block(_bottom, new_top);
}
#ifndef PRODUCT
// Debug-only dump: base-class table dump plus the contiguous-space
// allocation threshold state.
void
G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
  G1BlockOffsetArray::print_on(out);
  out->print_cr("  next offset threshold: "PTR_FORMAT, _next_offset_threshold);
  out->print_cr("  next offset index:     "SIZE_FORMAT, _next_offset_index);
}
#endif // !PRODUCT