src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257 e7d0505c8a30
parent:      7218 6948da6d7c13
child:       7645 f2e3f0e1f97d
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: those benchmarks never touch that memory, so without the explicit initialization the operating system would never actually commit the pages. The fix is to skip the initialization entirely whenever the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
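
As a rough illustration of the approach described in the summary (a minimal sketch only, not the actual JDK-8059758 patch; the function and parameter names below are invented for the example): skip the clearing step when the fill value already matches the zero-filled contents of freshly committed pages, so the pages are never touched and the operating system never has to back them with physical memory.

// Sketch only: names are hypothetical, not taken from the HotSpot sources.
#include <cstring>
#include <cstddef>

static void initialize_aux_data(unsigned char* base, size_t size_in_bytes,
                                unsigned char init_value,
                                bool pages_freshly_committed) {
  // Freshly committed virtual memory is already zero-filled by the operating
  // system, so writing zeros would only force the pages to be physically
  // backed. Skip the write in that case to avoid the footprint regression.
  if (pages_freshly_committed && init_value == 0) {
    return;
  }
  memset(base, init_value, size_in_bytes);
}

In the actual change the same guard applies where G1's auxiliary data structures are initialized; the sketch only shows the shape of the check.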

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP

#include "gc_implementation/g1/g1ParScanThreadState.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "oops/oop.inline.hpp"

template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
         "Reference should not be NULL here as such are never pushed to the task queue.");
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times. So redo this check.
  G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
  if (in_cset_state == G1CollectedHeap::InCSet) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();
    } else {
      forwardee = copy_to_survivor_space(obj);
    }
    oopDesc::encode_store_heap_oop(p, forwardee);
  } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
    _g1h->set_humongous_is_live(obj);
  } else {
    assert(in_cset_state == G1CollectedHeap::InNeither,
           err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
  }

  assert(obj != NULL, "Must be");
  update_rs(from, p, queue_num());
}

inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "obj_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

// Unwrap the StarTask and hand the reference to the appropriately typed
// deal_with_reference() overload.
inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

// Steal work from other workers' task queues; after processing each stolen
// reference, drain our own queue before attempting the next steal.
void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}

#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
