src/share/vm/gc_implementation/g1/heapRegion.inline.hpp

author       tschatzl
date         Fri, 10 Oct 2014 15:51:58 +0200
changeset    7257 e7d0505c8a30
parent       6996 f3aeae1f9fc5
child        7535 7ae4e26cb1e0
child        9327 f96fcd9e1e1b
permissions  -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks, because those benchmarks never touch that memory, so without the explicit initialization the operating system never actually commits the pages. The fix: if the initialization value for a data structure matches the default contents of freshly committed memory (zero), do nothing.
Reviewed-by: jwilhelm, brutisso
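
The core of the fix can be sketched as follows. This is a simplified, hypothetical helper (the function and parameter names are illustrative, not the actual G1 code), assuming that freshly committed anonymous memory reads back as zero:

    #include <string.h>
    #include <stddef.h>

    // Hypothetical sketch of the optimization: fill a just-committed
    // backing store with 'value'. Freshly committed anonymous memory is
    // already zero-filled by the operating system, so writing zeros would
    // only force the pages to be committed for real. Skipping the memset
    // in that case preserves lazy commitment and keeps the footprint down.
    static void initialize_backing_store(void* base, size_t size_in_bytes,
                                         unsigned char value) {
      if (value == 0) {
        return;  // untouched committed pages already contain zeros
      }
      memset(base, value, size_in_bytes);
    }

This only illustrates the guard described in the summary; the actual changeset applies it to the setup of G1's auxiliary data structures.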

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "runtime/atomic.inline.hpp"

// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result is one of two things:
      //  the old top value (obj): the exchange succeeded;
      //  otherwise: the current value of top installed by another thread,
      //  in which case we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement to keep "_offsets" up to date with the
// allocations, we serialize these with a lock. Therefore, this is best
// used only for larger LAB allocations.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

// Returns whether the block starting at p should be treated as a parseable
// object. With class unloading, a dead object's class may already have been
// unloaded, so only live objects qualify; otherwise everything below top()
// is an object.
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
      err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
              "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
              "addr: " PTR_FORMAT,
              p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Dead objects in old regions may have dead classes, so we cannot rely on
  // the oop's size; find the next live object using the prev mark bitmap
  // instead.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom,NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
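
As an aside on par_allocate_impl() above: the same CAS-based bump-pointer pattern can be shown in a small standalone form. The snippet below is not HotSpot code; it uses std::atomic in place of Atomic::cmpxchg_ptr and byte arithmetic in place of HeapWord units, purely to illustrate the retry loop.

    #include <atomic>
    #include <stddef.h>

    // Standalone sketch of the lock-free bump-pointer allocation used by
    // G1OffsetTableContigSpace::par_allocate_impl(). Illustrative only.
    struct BumpPointerSpace {
      std::atomic<char*> _top;
      char*              _end;

      // Returns a pointer to 'size' bytes, or NULL if the space is exhausted.
      char* par_allocate(size_t size) {
        char* obj = _top.load();
        do {
          if ((size_t)(_end - obj) < size) {
            return NULL;               // not enough room left
          }
          // On failure, compare_exchange_weak reloads 'obj' with the value
          // another thread installed, and the loop simply retries.
        } while (!_top.compare_exchange_weak(obj, obj + size));
        return obj;                    // CAS succeeded: [obj, obj + size) is ours
      }
    };

The real code additionally records each new block in the block offset table (_offsets.alloc_block) in the locking allocation paths, which is why par_allocate() takes _par_alloc_lock, while the young-region-only *_no_bot_updates variants skip that bookkeeping.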
