src/share/vm/gc_implementation/g1/g1Allocator.cpp

author       tschatzl
date         Fri, 10 Oct 2014 15:51:58 +0200
changeset    7257:e7d0505c8a30
parent       7118:227a9e5e4b4a
child        7647:80ac3ee51955
permissions  -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: those benchmarks never touch that memory themselves, so without the eager initialization the operating system would never actually commit the pages. The fix is to skip the initialization whenever the initialization value of a data structure matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
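
To illustrate the idea behind the fix (a minimal sketch only; the actual patch lives in the auxiliary-data/virtual-space setup code rather than in this file, and the helper name below is hypothetical): newly committed pages are guaranteed to read as zero, so the explicit zeroing can be skipped when the requested fill value is zero.

#include <cstring>
#include <cstddef>

// Hypothetical helper illustrating the fix: initialize the backing memory of
// an auxiliary data structure, but skip the memset when the pages were just
// committed and the fill value is zero. Freshly committed memory already
// reads as zero, and touching it would force the OS to back every page with
// physical memory.
static void initialize_aux_memory(void* base, size_t size_in_bytes,
                                  unsigned char fill_value, bool just_committed) {
  if (just_committed && fill_value == 0) {
    return; // Nothing to do: the OS hands out zero-filled pages.
  }
  ::memset(base, fill_value, size_in_bytes);
}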

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"

void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->isHumongous()) {
    retained_region->record_top_and_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't,
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way, so there is no reason to check explicitly for
  // either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();

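  // If PLAB resizing is enabled, use the allocation statistics gathered
  // during this GC to adjust the desired PLAB sizes for the next GC.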
  if (ResizePLAB) {
    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  }
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  ParGCAllocBuffer(gclab_word_size), _retired(true) { }

HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
  HeapWord* obj = NULL;
  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
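  // Only refill the PLAB if the requested size is small relative to the
  // desired PLAB size (less than ParallelGCBufferWastePct percent of it):
  // retire the current buffer, counting its remainder as waste, and install
  // a fresh PLAB to satisfy the request. Larger requests are allocated
  // directly in the GC alloc region to avoid wasting PLAB space.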
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
    alloc_buf->retire(false /* end_of_gc */, false /* retain */);

    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
  } else {
    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
  }
  return obj;
}

G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
  G1ParGCAllocator(g1h),
  _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {

  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;

}

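// Retire all per-purpose allocation buffers at the end of the GC: record any
// unused space in each buffer as waste, then flush its allocation statistics
// into the corresponding PLAB statistics object.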
void G1DefaultParGCAllocator::retire_alloc_buffers() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    size_t waste = _alloc_buffers[ap]->words_remaining();
    add_to_alloc_buffer_waste(waste);
    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
                                               true /* end_of_gc */,
                                               false /* retain */);
  }
}
