src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7159:e5668dcf12e9
child:       7535:7ae4e26cb1e0
child:       7651:c132be0fb74d
permissions: -rw-r--r--
8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks: before that change the benchmarks never touched this memory, so the operating system never actually committed the pages, whereas explicitly zeroing them forces them to be committed. The fix is to skip the explicit initialization whenever the requested initial value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
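
A minimal sketch of the idea behind this fix, using a hypothetical helper rather than the actual HotSpot code: explicitly touch freshly committed backing memory only when the requested fill value differs from the zero contents the operating system already guarantees for newly committed pages.

    #include <string.h>

    // Hypothetical illustration only. Newly committed pages are zero-filled
    // by the OS, so an explicit zero-fill would needlessly commit (and dirty)
    // them and grow the process footprint.
    static void initialize_backing_storage(void* base, size_t size_in_bytes,
                                           unsigned char initial_value) {
      if (initial_value == 0) {
        return;  // rely on the OS zero-filling newly committed pages
      }
      memset(base, initial_value, size_in_bytes);
    }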

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     unsigned int* gc_count_before_ret,
                                                     int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
