Thu, 21 Aug 2014 11:47:10 +0200
8038423: G1: Decommit memory within heap
Summary: Allow G1 to decommit memory of arbitrary regions within the heap, together with their associated auxiliary data structures: the card table, BOT, hot card cache, and mark bitmaps.
Reviewed-by: mgerdin, brutisso, jwilhelm
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
28 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
31 // Utility routine to set an exclusive range of cards on the given
32 // card liveness bitmap
33 inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
34 BitMap::idx_t start_idx,
35 BitMap::idx_t end_idx,
36 bool is_par) {
38 // Set the exclusive bit range [start_idx, end_idx).
39 assert((end_idx - start_idx) > 0, "at least one card");
40 assert(end_idx <= card_bm->size(), "sanity");
42 // Silently clip the end index
43 end_idx = MIN2(end_idx, card_bm->size());
45 // For small ranges use a simple loop; otherwise use set_range or
46 // use par_at_put_range (if parallel). The range is made up of the
47 // cards that are spanned by an object/mem region so 8 cards will
48 // allow up to object sizes up to 4K to be handled using the loop.
49 if ((end_idx - start_idx) <= 8) {
50 for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
51 if (is_par) {
52 card_bm->par_set_bit(i);
53 } else {
54 card_bm->set_bit(i);
55 }
56 }
57 } else {
58 // Note BitMap::par_at_put_range() and BitMap::set_range() are exclusive.
59 if (is_par) {
60 card_bm->par_at_put_range(start_idx, end_idx, true);
61 } else {
62 card_bm->set_range(start_idx, end_idx);
63 }
64 }
65 }
67 // Returns the index in the liveness accounting card bitmap
68 // for the given address
69 inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
70 // Below, the term "card num" means the result of shifting an address
71 // by the card shift -- address 0 corresponds to card number 0. One
72 // must subtract the card num of the bottom of the heap to obtain a
73 // card table index.
74 intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
75 return card_num - heap_bottom_card_num();
76 }
// Counts the given memory region in the given task/worker
// counting data structures: adds its byte size to the per-region
// marked-bytes array and sets the bits for the cards it spans in
// the task's card bitmap.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  G1CollectedHeap* g1h = _g1h;
  CardTableModRefBS* ct_bs = g1h->g1_barrier_set();

  HeapWord* start = mr.start();
  HeapWord* end = mr.end();
  size_t region_size_bytes = mr.byte_size();
  uint index = hr->hrs_index();

  // The region must lie entirely within a single (non-continues-humongous)
  // heap region, and the counting structures must be supplied.
  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
  assert(marked_bytes_array != NULL, "pre-condition");
  assert(task_card_bm != NULL, "pre-condition");

  // Add to the task local marked bytes for this region.
  marked_bytes_array[index] += region_size_bytes;

  BitMap::idx_t start_idx = card_bitmap_index_for(start);
  BitMap::idx_t end_idx = card_bitmap_index_for(end);

  // Note: if we're looking at the last region in heap - end
  // could be actually just beyond the end of the heap; end_idx
  // will then correspond to a (non-existent) card that is also
  // just beyond the heap.
  if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
    // end of region is not card aligned - increment to cover
    // all the cards spanned by the region.
    end_idx += 1;
  }
  // The card bitmap is task/worker specific => no need to use
  // the 'par' BitMap routines.
  // Set bits in the exclusive bit range [start_idx, end_idx).
  set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
}
118 // Counts the given memory region in the task/worker counting
119 // data structures for the given worker id.
120 inline void ConcurrentMark::count_region(MemRegion mr,
121 HeapRegion* hr,
122 uint worker_id) {
123 size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
124 BitMap* task_card_bm = count_card_bitmap_for(worker_id);
125 count_region(mr, hr, marked_bytes_array, task_card_bm);
126 }
128 // Counts the given memory region, which may be a single object, in the
129 // task/worker counting data structures for the given worker id.
130 inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
131 HeapWord* addr = mr.start();
132 HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
133 count_region(mr, hr, worker_id);
134 }
136 // Counts the given object in the given task/worker counting data structures.
137 inline void ConcurrentMark::count_object(oop obj,
138 HeapRegion* hr,
139 size_t* marked_bytes_array,
140 BitMap* task_card_bm) {
141 MemRegion mr((HeapWord*)obj, obj->size());
142 count_region(mr, hr, marked_bytes_array, task_card_bm);
143 }
145 // Counts the given object in the task/worker counting data
146 // structures for the given worker id.
147 inline void ConcurrentMark::count_object(oop obj,
148 HeapRegion* hr,
149 uint worker_id) {
150 size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
151 BitMap* task_card_bm = count_card_bitmap_for(worker_id);
152 HeapWord* addr = (HeapWord*) obj;
153 count_object(obj, hr, marked_bytes_array, task_card_bm);
154 }
156 // Attempts to mark the given object and, if successful, counts
157 // the object in the given task/worker counting structures.
158 inline bool ConcurrentMark::par_mark_and_count(oop obj,
159 HeapRegion* hr,
160 size_t* marked_bytes_array,
161 BitMap* task_card_bm) {
162 HeapWord* addr = (HeapWord*)obj;
163 if (_nextMarkBitMap->parMark(addr)) {
164 // Update the task specific count data for the object.
165 count_object(obj, hr, marked_bytes_array, task_card_bm);
166 return true;
167 }
168 return false;
169 }
171 // Attempts to mark the given object and, if successful, counts
172 // the object in the task/worker counting structures for the
173 // given worker id.
174 inline bool ConcurrentMark::par_mark_and_count(oop obj,
175 size_t word_size,
176 HeapRegion* hr,
177 uint worker_id) {
178 HeapWord* addr = (HeapWord*)obj;
179 if (_nextMarkBitMap->parMark(addr)) {
180 MemRegion mr(addr, word_size);
181 count_region(mr, hr, worker_id);
182 return true;
183 }
184 return false;
185 }
187 // Attempts to mark the given object and, if successful, counts
188 // the object in the task/worker counting structures for the
189 // given worker id.
190 inline bool ConcurrentMark::par_mark_and_count(oop obj,
191 HeapRegion* hr,
192 uint worker_id) {
193 HeapWord* addr = (HeapWord*)obj;
194 if (_nextMarkBitMap->parMark(addr)) {
195 // Update the task specific count data for the object.
196 count_object(obj, hr, worker_id);
197 return true;
198 }
199 return false;
200 }
202 // As above - but we don't know the heap region containing the
203 // object and so have to supply it.
204 inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
205 HeapWord* addr = (HeapWord*)obj;
206 HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
207 return par_mark_and_count(obj, hr, worker_id);
208 }
210 // Similar to the above routine but we already know the size, in words, of
211 // the object that we wish to mark/count
212 inline bool ConcurrentMark::par_mark_and_count(oop obj,
213 size_t word_size,
214 uint worker_id) {
215 HeapWord* addr = (HeapWord*)obj;
216 if (_nextMarkBitMap->parMark(addr)) {
217 // Update the task specific count data for the object.
218 MemRegion mr(addr, word_size);
219 count_region(mr, worker_id);
220 return true;
221 }
222 return false;
223 }
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
  HeapWord* addr = (HeapWord*)obj;
  _nextMarkBitMap->mark(addr);
  // Update the task specific count data for the object.
  count_object(obj, hr, 0 /* worker_id */);
  return true;  // always succeeds, unlike the par_* variants
}
236 // As above - but we don't have the heap region containing the
237 // object, so we have to supply it.
238 inline bool ConcurrentMark::mark_and_count(oop obj) {
239 HeapWord* addr = (HeapWord*)obj;
240 HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
241 return mark_and_count(obj, hr);
242 }
// Applies the given closure to every set bit in the intersection of
// this bitmap's covered range and mr. Returns false (stopping early)
// as soon as the closure returns false for some bit; true otherwise.
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  // Clip the requested region to the range this bitmap covers.
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);

    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      // Advance past the current object (clipped to the end of the
      // range) before searching for the next marked bit.
      HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}
266 inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
267 MemRegion mr(startWord(), sizeInWords());
268 return iterate(cl, mr);
269 }
// Debug-build sanity check used by the mark/clear operations below:
// verifies that addr lies within the space underlying this bitmap and
// within an available part of the heap (per is_in_exact), so we never
// touch bitmap storage for memory that is not accessible.
#define check_mark(addr) \
  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
         "outside underlying space?"); \
  assert(G1CollectedHeap::heap()->is_in_exact(addr), \
         err_msg("Trying to access not available bitmap "PTR_FORMAT \
                 " corresponding to "PTR_FORMAT" (%u)", \
                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
279 inline void CMBitMap::mark(HeapWord* addr) {
280 check_mark(addr);
281 _bm.set_bit(heapWordToOffset(addr));
282 }
284 inline void CMBitMap::clear(HeapWord* addr) {
285 check_mark(addr);
286 _bm.clear_bit(heapWordToOffset(addr));
287 }
289 inline bool CMBitMap::parMark(HeapWord* addr) {
290 check_mark(addr);
291 return _bm.par_set_bit(heapWordToOffset(addr));
292 }
294 inline bool CMBitMap::parClear(HeapWord* addr) {
295 check_mark(addr);
296 return _bm.par_clear_bit(heapWordToOffset(addr));
297 }
299 #undef check_mark
// Pushes the given (already marked) object onto this task's local
// queue; if the local queue is full, spills some of its entries to
// the global mark stack first and retries.
inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  // The object must be inside the G1 heap, live, already marked on the
  // next bitmap, and not in a region on the master free list.
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] pushing " PTR_FORMAT, _worker_id, p2i((void*) obj));
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.

    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] task queue overflow, "
                             "moving entries to the global stack",
                             _worker_id);
    }
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(obj);
    assert(success, "invariant");
  }

  // Statistics-only bookkeeping of the local queue's high-water mark.
  statsOnly( int tmp_size = _task_queue->size();
             if (tmp_size > _local_max_size) {
               _local_max_size = tmp_size;
             }
             ++_local_pushes );
}
338 // This determines whether the method below will check both the local
339 // and global fingers when determining whether to push on the stack a
340 // gray object (value 1) or whether it will only check the global one
341 // (value 0). The tradeoffs are that the former will be a bit more
342 // accurate and possibly push less on the stack, but it might also be
343 // a little bit slower.
345 #define _CHECK_BOTH_FINGERS_ 1
// Handles a reference discovered during marking: if the object is in
// the G1 heap, not yet marked on the next bitmap, and not allocated
// since the current marking cycle started, it is marked and counted,
// and possibly pushed on this task's queue depending on where it lies
// relative to the local/global fingers.
inline void CMTask::deal_with_reference(oop obj) {
  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] we're dealing with reference = "PTR_FORMAT,
                           _worker_id, p2i((void*) obj));
  }

  ++_refs_reached;

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("[%u] "PTR_FORMAT" is not considered marked",
                                 _worker_id, p2i((void*) obj));
        }

        // we need to mark it first
        if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
          // No OrderAccess:store_load() is needed. It is implicit in the
          // CAS done in CMBitMap::parMark() call in the routine above.
          HeapWord* global_finger = _cm->finger();

#if _CHECK_BOTH_FINGERS_
          // we will check both the local and global fingers

          if (_finger != NULL && objAddr < _finger) {
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%u] below the local finger ("PTR_FORMAT"), "
                                     "pushing it", _worker_id, p2i(_finger));
            }
            push(obj);
          } else if (_curr_region != NULL && objAddr < _region_limit) {
            // do nothing
          } else if (objAddr < global_finger) {
            // Notice that the global finger might be moving forward
            // concurrently. This is not a problem. In the worst case, we
            // mark the object while it is above the global finger and, by
            // the time we read the global finger, it has moved forward
            // passed this object. In this case, the object will probably
            // be visited when a task is scanning the region and will also
            // be pushed on the stack. So, some duplicate work, but no
            // correctness problems.

            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%u] below the global finger "
                                     "("PTR_FORMAT"), pushing it",
                                     _worker_id, p2i(global_finger));
            }
            push(obj);
          } else {
            // do nothing
          }
#else // _CHECK_BOTH_FINGERS_
          // we will only check the global finger

          if (objAddr < global_finger) {
            // see long comment above

            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%u] below the global finger "
                                     "("PTR_FORMAT"), pushing it",
                                     _worker_id, p2i(global_finger));
            }
            push(obj);
          }
#endif // _CHECK_BOTH_FINGERS_
        }
      }
    }
  }
}
426 inline void ConcurrentMark::markPrev(oop p) {
427 assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
428 // Note we are overriding the read-only view of the prev map here, via
429 // the cast.
430 ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
431 }
// Grays a root object: marks and counts it for the given worker if it
// lies below its region's next TAMS and is not already marked. The
// containing region may be passed in (hr != NULL) to save a lookup.
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
                                     uint worker_id, HeapRegion* hr) {
  assert(obj != NULL, "pre-condition");
  HeapWord* addr = (HeapWord*) obj;
  if (hr == NULL) {
    hr = _g1h->heap_region_containing_raw(addr);
  } else {
    assert(hr->is_in(addr), "pre-condition");
  }
  assert(hr != NULL, "sanity");
  // Given that we're looking for a region that contains an object
  // header it's impossible to get back a HC region.
  assert(!hr->continuesHumongous(), "sanity");

  // We cannot assert that word_size == obj->size() given that obj
  // might not be in a consistent state (another thread might be in
  // the process of copying it). So the best thing we can do is to
  // assert that word_size is under an upper bound which is its
  // containing region's capacity.
  assert(word_size * HeapWordSize <= hr->capacity(),
         err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
                 word_size * HeapWordSize, hr->capacity(),
                 HR_FORMAT_PARAMS(hr)));

  // Only objects below next-top-at-mark-start need (and may) be marked.
  if (addr < hr->next_top_at_mark_start()) {
    if (!_nextMarkBitMap->isMarked(addr)) {
      par_mark_and_count(obj, word_size, hr, worker_id);
    }
  }
}
464 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP