src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp

Thu, 22 Sep 2011 10:57:37 -0700

author
johnc
date
Thu, 22 Sep 2011 10:57:37 -0700
changeset 3175
4dfb2df418f2
parent 2314
f95d63e2154a
child 3181
c63b928b212b
permissions
-rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

     1 /*
     2  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
    27 #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
    28 #include "gc_implementation/shared/mutableSpace.hpp"
    29 #include "oops/oop.inline.hpp"
// Size (in object-aligned heap words) of the header of the int-array filler
// object used to pad the unused tail of a LAB.  Set lazily in initialize()
// because the header size depends on whether compressed oops are in use.
size_t PSPromotionLAB::filler_header_size;
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  // Fresh lab: nothing allocated yet, so top starts at bottom.
  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      // Debug builds only: poison the unused words so reads of
      // never-allocated lab space are easy to recognize.
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    // Retract end by the filler header size so flush() is always able to
    // place an int-array filler over the unused tail of the lab.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  // If we were initialized to a zero sized lab, there is
  // nothing to flush
  if (_state == zero_size)
    return;

  // PLAB's never allocate the last aligned_header_size
  // so they can always fill with an array.
  HeapWord* tlab_end = end() + filler_header_size;
  // Forge an int-array object header directly at top(); the array body
  // then covers the rest of the span up to tlab_end.
  typeArrayOop filler_oop = (typeArrayOop) top();
  filler_oop->set_mark(markOopDesc::prototype());
  filler_oop->set_klass(Universe::intArrayKlassObj());
  // Remaining words minus the (unaligned) header give the payload in
  // words; converted to jint elements below when setting the length.
  const size_t array_length =
    pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
  assert( (array_length * (HeapWordSize/sizeof(jint))) < (size_t)max_jint, "array too big in PSPromotionLAB");
  filler_oop->set_length((int)(array_length * (HeapWordSize/sizeof(jint))));

#ifdef ASSERT
  // Note that we actually DO NOT want to use the aligned header size!
  // Debug builds: stamp the filler's element words with a recognizable
  // pattern so accidental references into the dead span stand out.
  HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
  Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
#endif

  // Detach the lab; any further allocation or flush attempt will trip
  // the asserts above.
  set_bottom(NULL);
  set_end(NULL);
  set_top(NULL);

  _state = flushed;
}
   105 bool PSPromotionLAB::unallocate_object(oop obj) {
   106   assert(Universe::heap()->is_in(obj), "Object outside heap");
   108   if (contains(obj)) {
   109     HeapWord* object_end = (HeapWord*)obj + obj->size();
   110     assert(object_end <= top(), "Object crosses promotion LAB boundary");
   112     if (object_end == top()) {
   113       set_top((HeapWord*)obj);
   114       return true;
   115     }
   116   }
   118   return false;
   119 }
   121 // Fill all remaining lab space with an unreachable object.
   122 // The goal is to leave a contiguous parseable span of objects.
   123 void PSOldPromotionLAB::flush() {
   124   assert(_state != flushed, "Attempt to flush PLAB twice");
   125   assert(top() <= end(), "pointers out of order");
   127   if (_state == zero_size)
   128     return;
   130   HeapWord* obj = top();
   132   PSPromotionLAB::flush();
   134   assert(_start_array != NULL, "Sanity");
   136   _start_array->allocate_block(obj);
   137 }
   139 #ifdef ASSERT
   141 bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
   142   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   143   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   145   MutableSpace* to_space = heap->young_gen()->to_space();
   146   MemRegion used = to_space->used_region();
   147   if (used.contains(lab)) {
   148     return true;
   149   }
   151   return false;
   152 }
   154 bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
   155   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   156   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   157   assert(_start_array->covered_region().contains(lab), "Sanity");
   159   PSOldGen* old_gen = heap->old_gen();
   160   MemRegion used = old_gen->object_space()->used_region();
   162   if (used.contains(lab)) {
   163     return true;
   164   }
   166   return false;
   167 }
   169 #endif /* ASSERT */

mercurial