Tue, 21 Aug 2012 14:10:39 -0700
7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
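
For context, a rough standalone sketch of the revised per-region GC-efficiency estimate described in the summary: reclaimable bytes divided by the predicted time to collect the region, with the time prediction driven by mixed-GC history and the region's actual live-byte count. This is not the actual policy code; the struct, function names, and the simple cost model below are illustrative assumptions only.

#include <cstddef>

// Illustrative region statistics; the real code reads these from HeapRegion.
struct RegionStats {
  size_t capacity_bytes;  // total bytes covered by the region
  size_t live_bytes;      // actual marked-live bytes in the region
};

// Stand-in time prediction: a fixed per-region overhead plus a per-live-byte
// copying cost, both assumed to be derived from observed mixed-GC history.
double predict_collection_time_ms(const RegionStats& r,
                                  double mixed_gc_cost_per_live_byte_ms,
                                  double per_region_overhead_ms) {
  return per_region_overhead_ms + r.live_bytes * mixed_gc_cost_per_live_byte_ms;
}

// GC efficiency: bytes reclaimed per millisecond of predicted collection time.
double gc_efficiency(const RegionStats& r,
                     double mixed_gc_cost_per_live_byte_ms,
                     double per_region_overhead_ms) {
  size_t reclaimable = r.capacity_bytes - r.live_bytes;
  double time_ms = predict_collection_time_ms(r, mixed_gc_cost_per_live_byte_ms,
                                              per_region_overhead_ms);
  return (double) reclaimable / time_ms;
}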
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"
// Inline functions for G1CollectedHeap

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size, gc_count_before_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                      true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      true /* bot_updates */);
  }
  return result;
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
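
A side note on dirty_young_block() above: pre-dirtying the covering cards keeps the post write barrier quiet because the barrier filters out stores whose card is already dirty. A minimal standalone sketch of that filtering step, with illustrative names and card values rather than the actual barrier code:

#include <cstdint>
#include <cstddef>

const uint8_t dirty_card = 0;   // illustrative card value

// Stand-in for enqueuing a card on the refinement queue.
void enqueue_for_refinement(uint8_t* card) { /* ... */ }

// Simplified post-barrier card filter: cards covering a freshly allocated
// young block were pre-dirtied by dirty_young_block(), so stores into that
// block take the early return and never enqueue anything.
void post_write_barrier(uint8_t* card_table_base, size_t card_index) {
  uint8_t* card = card_table_base + card_index;
  if (*card == dirty_card) {
    return;
  }
  *card = dirty_card;
  enqueue_for_refinement(card);
}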