src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp

author:       aoqi
date:         Fri, 29 Apr 2016 00:06:10 +0800
changeset:    1:2d8a650513c2
parent:       0:f90c822e73f8
child:        25:873fd82b133d
permissions:  -rw-r--r--

Added MIPS 64-bit port.

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "oops/oop.inline.hpp"

size_t PSPromotionLAB::filler_header_size;
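
// filler_header_size holds the size, in HeapWords, of an int-array object
// header rounded up to object alignment. It is filled in by initialize()
// rather than statically, because the header size depends on whether
// compressed oops are in use.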

// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}
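
// Because initialize() retracts end() by filler_header_size, every non-empty
// LAB keeps enough space in reserve for the int-array header that flush()
// later writes at top(). As a rough illustration, assuming a 64-bit VM where
// the aligned int-array header comes to 2 HeapWords, a 1024-word LAB exposes
// 1022 words for allocation and holds 2 words back; the exact figure depends
// on the compressed-oops setting, which is why the value is computed here at
// run time.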

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  // If we were initialized to a zero sized lab, there is
  // nothing to flush.
  if (_state == zero_size)
    return;

  // PLABs never allocate the last aligned_header_size
  // so they can always fill with an array.
  HeapWord* tlab_end = end() + filler_header_size;
  typeArrayOop filler_oop = (typeArrayOop) top();
  filler_oop->set_mark(markOopDesc::prototype());
  filler_oop->set_klass(Universe::intArrayKlassObj());
  const size_t array_length =
    pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
  assert((array_length * (HeapWordSize/sizeof(jint))) < (size_t)max_jint, "array too big in PSPromotionLAB");
  filler_oop->set_length((int)(array_length * (HeapWordSize/sizeof(jint))));

#ifdef ASSERT
  // Note that we actually DO NOT want to use the aligned header size!
  HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
  Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
#endif

  set_bottom(NULL);
  set_end(NULL);
  set_top(NULL);

  _state = flushed;
}
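
// Worked example of the length computation above, assuming a 64-bit VM
// (HeapWordSize == 8, sizeof(jint) == 4, so HeapWordSize/sizeof(jint) == 2):
// if pointer_delta(tlab_end, top()) is 102 HeapWords and
// typeArrayOopDesc::header_size(T_INT) is 2 HeapWords, then array_length is
// 100 HeapWords of element data and the filler int[] gets a length of 200
// jint elements. The unaligned header size is used on purpose, so the element
// count accounts for every word between the header and tlab_end.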

bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
  assert(Universe::heap()->is_in(obj), "Object outside heap");

  if (contains(obj)) {
    HeapWord* object_end = obj + obj_size;
    assert(object_end == top(), "Not matching last allocation");

    set_top(obj);
    return true;
  }

  return false;
}
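
// Minimal usage sketch for unallocate_object() (hypothetical caller, not part
// of this file): a promotion path can retract its most recent LAB allocation
// when it loses the race to copy an object, e.g.
//
//   HeapWord* p = lab.allocate(word_sz);        // speculative allocation
//   if (p != NULL && lost_forwarding_race) {    // another thread copied it
//     bool retracted = lab.unallocate_object(p, word_sz);
//     assert(retracted, "only the last allocation can be undone");
//   }
//
// Only the most recent allocation can be undone, because the assert above
// requires obj + obj_size == top().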

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSOldPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  if (_state == zero_size)
    return;

  HeapWord* obj = top();

  PSPromotionLAB::flush();

  assert(_start_array != NULL, "Sanity");

  _start_array->allocate_block(obj);
}
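
// The extra step above exists because objects in the old generation must also
// be findable through the ObjectStartArray used during card scanning. The
// filler's start address is captured before calling PSPromotionLAB::flush()
// (which clears top()), and is then recorded with allocate_block() so the
// side table stays consistent with the newly written filler object.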

#ifdef ASSERT

bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}

bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(_start_array->covered_region().contains(lab), "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  /* 2014/2/12, Liao: With UseOldNUMA, a new lab may be allocated outside the
   * current used_region. For example, a new plab may need to be allocated in
   * lgrp2 while the top of the current used_region is still in lgrp1. The
   * original check would reject such a lab even though it is legitimate, so
   * instead we check whether the lab lies within one of the lgrp spaces. */
  if (UseOldNUMA) {
    MutableNUMASpace* s = (MutableNUMASpace*) old_gen->object_space();
    int len = s->lgrp_spaces()->length();
    for (int j = 0; j < len; j++) {
      MutableNUMASpace::LGRPSpace* ls = s->lgrp_spaces()->at(j);
      MutableSpace* sp = ls->space();
      used = sp->used_region();
      if (used.contains(lab)) {
        return true;
      }
    }
  } else {
    if (used.contains(lab)) {
      return true;
    }
  }

  return false;
}
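
// Note on the UseOldNUMA path above: MemRegion::contains() requires full
// containment, so a lab is accepted only when it lies entirely within the
// used_region of a single lgrp space. A lab straddling two lgrp spaces would
// still be rejected, which is consistent with the per-lgrp allocation
// described in the comment above.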

#endif /* ASSERT */
