src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp

Fri, 10 Oct 2014 15:51:58 +0200

author
tschatzl
date
Fri, 10 Oct 2014 15:51:58 +0200
changeset 7257
e7d0505c8a30
parent 7218
6948da6d7c13
child 7645
f2e3f0e1f97d
permissions
-rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks. The reason is that these benchmarks never touch that memory, so without the explicit initialization the operating system would never actually commit those pages. The fix is to skip initialization entirely when the requested initialization value of a data structure matches the default value of freshly committed memory (i.e. zero).
Reviewed-by: jwilhelm, brutisso

     1 /*
     2  * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
    28 #include "gc_implementation/g1/g1ParScanThreadState.hpp"
    29 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    30 #include "oops/oop.inline.hpp"
    32 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
    33   assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
    34          "Reference should not be NULL here as such are never pushed to the task queue.");
    35   oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    37   // Although we never intentionally push references outside of the collection
    38   // set, due to (benign) races in the claim mechanism during RSet scanning more
    39   // than one thread might claim the same card. So the same card may be
    40   // processed multiple times. So redo this check.
    41   G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
    42   if (in_cset_state == G1CollectedHeap::InCSet) {
    43     oop forwardee;
    44     if (obj->is_forwarded()) {
    45       forwardee = obj->forwardee();
    46     } else {
    47       forwardee = copy_to_survivor_space(obj);
    48     }
    49     oopDesc::encode_store_heap_oop(p, forwardee);
    50   } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
    51     _g1h->set_humongous_is_live(obj);
    52   } else {
    53     assert(in_cset_state == G1CollectedHeap::InNeither,
    54            err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
    55   }
    57   assert(obj != NULL, "Must be");
    58   update_rs(from, p, queue_num());
    59 }
// Process one chunk of a partially-scanned object array. The queue entry p
// carries the partial-array tag; the untagged oop is the from-space copy of
// the array, whose forwardee (the to-space copy) temporarily stores the next
// chunk's start index in its length field. Scans up to ParGCArrayScanChunk
// elements and, if more remain, re-pushes a tagged entry so another worker
// can steal the remainder.
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length                 = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj                 = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array   = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index             = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start                  = next_index;
  int end                    = length;
  int remainder              = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    // More than two chunks left: take one chunk, advance the stored next
    // index, and re-queue the rest.
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters.  It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}
   110 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
   111   if (!has_partial_array_mask(ref_to_scan)) {
   112     // Note: we can use "raw" versions of "region_containing" because
   113     // "obj_to_scan" is definitely in the heap, and is not in a
   114     // humongous region.
   115     HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
   116     do_oop_evac(ref_to_scan, r);
   117   } else {
   118     do_oop_partial_array((oop*)ref_to_scan);
   119   }
   120 }
   122 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
   123   assert(verify_task(ref), "sanity");
   124   if (ref.is_narrow()) {
   125     deal_with_reference((narrowOop*)ref);
   126   } else {
   127     deal_with_reference((oop*)ref);
   128   }
   129 }
   131 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
   132   StarTask stolen_task;
   133   while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
   134     assert(verify_task(stolen_task), "sanity");
   135     dispatch_reference(stolen_task);
   137     // We've just processed a reference and we might have made
   138     // available new entries on the queues. So we have to make sure
   139     // we drain the queues as necessary.
   140     trim_queue();
   141   }
   142 }
   144 #endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */

mercurial