#include "runtime/java.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
  // retrieve it here since this would cause firing of several asserts. The code
  // executed after commit of a region already needs to do some re-initialization of
  // the HeapRegion, so we combine that.
}
|
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :

  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}

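// A worked example of the boundary test above (a sketch, assuming the
// usual constants from the header: 512-byte BOT cards and 8-byte
// HeapWords, so LogN_words == 6): right_n_bits(LogN_words) masks the low
// 6 bits of the word-distance from _reserved.start(), making p a card
// boundary exactly when that distance is a multiple of 2^6 == 64 words:
//
//   delta == 0   -> boundary (start of the reserved space)
//   delta == 64  -> boundary (start of the second card)
//   delta == 65  -> not a boundary (one word into the second card)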
void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
  set_offset_array(index_for(left), index_for(right - 1), offset);
}
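// Note the index_for(right - 1) above: the word range [left, right) is
// right-open, so the last card written is the one containing right - 1.
// This turns the half-open word interval into the closed card interval
// [index_for(left), index_for(right - 1)] expected by the index-based
// overload.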
|
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
//////////////////////////////////////////////////////////////////////

G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _gsp(NULL),
  _init_to_zero(init_to_zero) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
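// When init_to_zero is false the table starts out describing the whole
// covered region as one block based at mr.start(): card 0 gets offset 0,
// and every later card is initialized (via
// set_remainder_to_point_to_start()) to point back toward it.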

void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
  _gsp = sp;
}
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// right-open interval: [blk_start, blk_end)
void
G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  mark_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}
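// Illustrative caller pattern (a sketch with made-up names, not code from
// this file): a bump-pointer allocator that has just carved out word_size
// words keeps the BOT in sync like so:
//
//   HeapWord* obj = top();
//   set_top(obj + word_size);                // hypothetical bump allocation
//   bot->alloc_block(obj, obj + word_size);  // record the new block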
|
// Adjust BOT to show that a previously whole block has been split
// into two.
void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
                                     size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  mark_block(blk + left_blk_size, blk + blk_size);
}
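// Example: if [blk, blk + 100) was one block and left_blk_size == 60, only
// the right half [blk + 60, blk + 100) is re-marked. Cards covering the
// left half still point at blk, which remains correct because blk is still
// the start of the (now smaller) left block.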
|
// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end - 1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
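  // Worked example of the mask above (assuming the usual LogN == 9, i.e.
  // 512-byte cards): clear_bits() zeroes the low 9 bits of end_ui, rounding
  // the last byte of the block down to the card boundary at or below it.
  // If blk_start is at or below that boundary, the block starts on or
  // crosses a card boundary and the table may need updating; otherwise the
  // block sits entirely inside one card and there is nothing to do.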
|
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}
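// Note the deliberate fall-through from Action_mark to Action_single when
// init_to_zero() is false: in that mode every card must always point back
// to its block start, so a "mark" has to update the trailing cards exactly
// as a fresh allocation would. With init_to_zero(), updating the first
// crossed card is enough and the break short-circuits the rest.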
|

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

              _array->offset_array(landing_card), N_words));
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}
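// single_block() and mark_block() are thin wrappers around
// do_block_internal(); they differ only in how much of the table they
// promise to update. single_block() records the whole block, while
// mark_block() guarantees only the first card after a crossed boundary
// (see the Action_mark fall-through above for the init_to_zero() caveat).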
|
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.

    alloc_block_work2(&next_boundary, &next_index, q, n);
  }
  return forward_to_block_containing_addr_const(q, n, addr);
}

HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index--);
    q -= offset;
  } while (offset == N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}
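// How the walk-back loop above terminates (reasoning from the code, with
// the usual N_words == 64): an entry equal to N_words says "the block
// begins at or before the previous card", so q retreats exactly one card
// and the previous card's entry is consulted. Any smaller entry is a
// direct word offset from the card boundary back to the block start and
// ends the loop. E.g. entries 64, 64, 17 (in visit order) walk back two
// full cards and then 17 more words.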
|
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
  HeapWord* new_end = _bottom + new_word_size;
  if (_end < new_end && !init_to_zero()) {
    // verify that the old and new boundaries are also card boundaries
    assert(_array->is_card_boundary(_end),
           "_end not a card boundary");
    assert(_array->is_card_boundary(new_end),
           "new _end would not be a card boundary");
    // set all the newly added cards
    _array->set_offset_array(_end, new_end, N_words);
  }
  _end = new_end;  // update _end
}
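// Filling the added cards with N_words keeps lookups in the grown tail
// correct: each new card claims its block starts at least a full card
// earlier, so a walk-back from anywhere in the tail funnels into the
// pre-existing entries.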
|

void G1BlockOffsetArray::set_region(MemRegion mr) {
  _bottom = mr.start();
  _end = mr.end();
}

//
//      threshold_
//      |   _index_
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}

void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  assert(_array->address_for_index(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array(bottom_index, 0);
}

void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
  assert(new_top <= _end, "_end should have already been updated");

  // The first BOT entry should have offset 0.