src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp

Fri, 29 Apr 2016 00:06:10 +0800

author
aoqi
date
Fri, 29 Apr 2016 00:06:10 +0800
changeset 1
2d8a650513c2
parent 0
f90c822e73f8
child 25
873fd82b133d
permissions
-rw-r--r--

Added MIPS 64-bit port.

     1 /*
     2  * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 /*
    26  * This file has been modified by Loongson Technology in 2015. These
    27  * modifications are Copyright (c) 2015 Loongson Technology, and are made
    28  * available on the same license terms set forth above.
    29  */
    31 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
    32 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
    34 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
    35 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
    36 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
    37 #include "gc_implementation/shared/mutableNUMASpace.hpp"
    38 #include "oops/oop.psgc.inline.hpp"
    40 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
    41   assert(_manager_array != NULL, "access of NULL manager_array");
    42   assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
    43   return &_manager_array[index];
    44 }
    46 template <class T>
    47 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
    48   if (p != NULL) { // XXX: error if p != NULL here
    49     oop o = oopDesc::load_decode_heap_oop_not_null(p);
    50     if (o->is_forwarded()) {
    51       o = o->forwardee();
    52       // Card mark
    53       if (PSScavenge::is_obj_in_young(o)) {
    54         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
    55       }
    56       oopDesc::encode_store_heap_oop_not_null(p, o);
    57     } else {
    58       push_depth(p);
    59     }
    60   }
    61 }
    63 template <class T>
    64 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
    65   assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
    66   assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
    67          "Sanity");
    68   assert(Universe::heap()->is_in(p), "pointer outside heap");
    70   claim_or_forward_internal_depth(p);
    71 }
    73 //
    74 // This method is pretty bulky. It would be nice to split it up
    75 // into smaller submethods, but we need to be careful not to hurt
    76 // performance.
    77 //
    79 extern int node_ex;
    80 extern int   each_gc_copy_fre[16];
    81 extern float each_gc_copy_time[16];
    83 template<bool promote_immediately>
    84 oop PSPromotionManager::copy_to_survivor_space(oop o) {
    85   assert(PSScavenge::should_scavenge(&o), "Sanity");
    87   oop new_obj = NULL;
    89   // NOTE! We must be very careful with any methods that access the mark
    90   // in o. There may be multiple threads racing on it, and it may be forwarded
    91   // at any time. Do not use oop methods for accessing the mark!
    92   markOop test_mark = o->mark();
    94   // The same test as "o->is_forwarded()"
    95   if (!test_mark->is_marked()) {
    96     bool new_obj_is_tenured = false;
    97     size_t new_obj_size = o->size();
    99     if(UseStasticScavenge) {
   100       stastic_scavenge(o);
   101     }
   103     if (!promote_immediately) {
   104       // Find the objects age, MT safe.
   105       uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
   106         test_mark->displaced_mark_helper()->age() : test_mark->age();
   108       // Try allocating obj in to-space (unless too old)
   109       if (age < PSScavenge::tenuring_threshold()) {
   110         new_obj = (oop) _young_lab.allocate(new_obj_size);
   111         if (new_obj == NULL && !_young_gen_is_full) {
   112           // Do we allocate directly, or flush and refill?
   113           if (new_obj_size > (YoungPLABSize / 2)) {
   114             // Allocate this object directly
   115             new_obj = (oop)young_space()->cas_allocate(new_obj_size);
   116           } else {
   117             // Flush and fill
   118             _young_lab.flush();
   120             HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
   121             if (lab_base != NULL) {
   122               _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
   123               // Try the young lab allocation again.
   124               new_obj = (oop) _young_lab.allocate(new_obj_size);
   125             } else {
   126               _young_gen_is_full = true;
   127             }
   128           }
   129         }
   130       }
   131     }
   133     // Otherwise try allocating obj tenured
   134     if (new_obj == NULL) {
   135 #ifndef PRODUCT
   136       if (Universe::heap()->promotion_should_fail()) {
   137         return oop_promotion_failed(o, test_mark);
   138       }
   139 #endif  // #ifndef PRODUCT
   141       if(UseOldNUMA) { 
   142 /* 2014/7/7 Liao: Copy objects to the same node of current GC thread */
   143         if(UseNUMAGC) {
   144           new_obj = (oop) _old_lab_oldnuma[os::numa_get_group_id()].allocate(new_obj_size);
   145           new_obj_is_tenured = true;
   147           if (new_obj == NULL) {
   148             if (!_old_gen_is_full) {
   149               // Do we allocate directly, or flush and refill?
   150               if (new_obj_size > (OldPLABSize / 2)) {
   151                 // Allocate this object directly
   152                 new_obj = (oop)old_gen()->cas_allocate(new_obj_size, os::numa_get_group_id());
   153               } else {
   154                 // Flush and fill
   155                 _old_lab_oldnuma[os::numa_get_group_id()].flush();
   157                 HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, os::numa_get_group_id());
   158                 if(lab_base != NULL) {
   159                   _old_lab_oldnuma[os::numa_get_group_id()].initialize(MemRegion(lab_base, OldPLABSize));
   160                   // Try the old lab allocation again.
   161                   new_obj = (oop) _old_lab_oldnuma[os::numa_get_group_id()].allocate(new_obj_size);
   162                 }
   163               }
   164             }
   166             // This is the promotion failed test, and code handling.
   167             // The code belongs here for two reasons. It is slightly
   168             // different than the code below, and cannot share the
   169             // CAS testing code. Keeping the code here also minimizes
   170             // the impact on the common case fast path code.
   172             if (new_obj == NULL) {
   173               _old_gen_is_full = true;
   174               return oop_promotion_failed(o, test_mark);
   175             }
   176           }
   177         }
   178         else {
   179           ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   180           MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
   181           int i = s->lgrp_spaces()->length();
   182           int node;
   183           if(i > 1) {
   184             node = node_ex % (i - 1) + 1;
   185             node_ex++;
   186           }
   187           else
   188            node = 0;
   190           new_obj = (oop) _old_lab_oldnuma[node].allocate(new_obj_size);
   191           new_obj_is_tenured = true;
   193           if (new_obj == NULL) {
   194             if (!_old_gen_is_full) {
   195               // Do we allocate directly, or flush and refill?
   196               if (new_obj_size > (OldPLABSize / 2)) {
   197                 // Allocate this object directly
   198                 new_obj = (oop)old_gen()->cas_allocate(new_obj_size, node);
   199               } else {
   200                 // Flush and fill
   201                 _old_lab_oldnuma[node].flush();
   203                 HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, node);
   204                 if(lab_base != NULL) {
   205                   _old_lab_oldnuma[node].initialize(MemRegion(lab_base, OldPLABSize));
   206                   // Try the old lab allocation again.
   207                   new_obj = (oop) _old_lab_oldnuma[node].allocate(new_obj_size);
   208                 }
   209               }
   210             }
   212             // This is the promotion failed test, and code handling.
   213             // The code belongs here for two reasons. It is slightly
   214             // different than the code below, and cannot share the
   215             // CAS testing code. Keeping the code here also minimizes
   216             // the impact on the common case fast path code.
   218             if (new_obj == NULL) {
   219               _old_gen_is_full = true;
   220               return oop_promotion_failed(o, test_mark);
   221             }
   222           }
   223         }
   224       }
   225       else {
   226         new_obj = (oop) _old_lab.allocate(new_obj_size);
   227         new_obj_is_tenured = true;
   229         if (new_obj == NULL) {
   230           if (!_old_gen_is_full) {
   231             // Do we allocate directly, or flush and refill?
   232             if (new_obj_size > (OldPLABSize / 2)) {
   233               // Allocate this object directly
   234               new_obj = (oop)old_gen()->cas_allocate(new_obj_size, 0);
   235             } else {
   236               // Flush and fill
   237               _old_lab.flush();
   239               HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, 0);
   240               if(lab_base != NULL) {
   241 #ifdef ASSERT
   242                 // Delay the initialization of the promotion lab (plab).
   243                 // This exposes uninitialized plabs to card table processing.
   244                 if (GCWorkerDelayMillis > 0) {
   245                   os::sleep(Thread::current(), GCWorkerDelayMillis, false);
   246                 }
   247 #endif
   248                 _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
   249                 // Try the old lab allocation again.
   250                 new_obj = (oop) _old_lab.allocate(new_obj_size);
   251               }
   252             }
   253           }
   255           // This is the promotion failed test, and code handling.
   256           // The code belongs here for two reasons. It is slightly
   257           // different than the code below, and cannot share the
   258           // CAS testing code. Keeping the code here also minimizes
   259           // the impact on the common case fast path code.
   261           if (new_obj == NULL) {
   262             _old_gen_is_full = true;
   263             return oop_promotion_failed(o, test_mark);
   264           }
   265         }
   266       }
   267     }
   269     assert(new_obj != NULL, "allocation should have succeeded");
   271     TimeStamp before_copy, after_copy;
   273     if(UseStasticCopy) {
   274       before_copy.update();
   275     }
   277     // Copy obj
   278     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
   280     if(UseStasticCopy) {
   281       after_copy.update();
   282     }
   284     if(UseStasticCopy) {
   285       each_gc_copy_time[os::numa_get_cpu_id()] += after_copy.ticks() - before_copy.ticks();
   286       each_gc_copy_fre[os::numa_get_cpu_id()]++;
   287     }
   289     // Now we have to CAS in the header.
   290     if (o->cas_forward_to(new_obj, test_mark)) {
   291       // We won any races, we "own" this object.
   292       assert(new_obj == o->forwardee(), "Sanity");
   294       // Increment age if obj still in new generation. Now that
   295       // we're dealing with a markOop that cannot change, it is
   296       // okay to use the non mt safe oop methods.
   297       if (!new_obj_is_tenured) {
   298         new_obj->incr_age();
   299         assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
   300       }
   302       // Do the size comparison first with new_obj_size, which we
   303       // already have. Hopefully, only a few objects are larger than
   304       // _min_array_size_for_chunking, and most of them will be arrays.
   305       // So, the is->objArray() test would be very infrequent.
   306       if (new_obj_size > _min_array_size_for_chunking &&
   307           new_obj->is_objArray() &&
   308           PSChunkLargeArrays) {
   309         // we'll chunk it
   310         oop* const masked_o = mask_chunked_array_oop(o);
   311         push_depth(masked_o);
   312         TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
   313       } else {
   314         // we'll just push its contents
   315         new_obj->push_contents(this);
   316       }
   317     }  else {
   318       // We lost, someone else "owns" this object
   319       guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
   321       // Try to deallocate the space.  If it was directly allocated we cannot
   322       // deallocate it, so we have to test.  If the deallocation fails,
   323       // overwrite with a filler object.
   324       if (new_obj_is_tenured) {
   325         if(UseOldNUMA) {
   326           ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   327           MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
   328           int i;
   329           for(i = 0; i < s->lgrp_spaces()->length(); i++) {
   330             if (!_old_lab_oldnuma[i].unallocate_object((HeapWord*) new_obj, new_obj_size)) {
   331               CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
   332             }
   333           }
   334         }
   335         else {
   336           if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
   337             CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
   338           }
   339         }
   340       } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
   341         CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
   342       }
   344       // don't update this before the unallocation!
   345       new_obj = o->forwardee();
   346     }
   347   } else {
   348     assert(o->is_forwarded(), "Sanity");
   349     new_obj = o->forwardee();
   350   }
   352 #ifndef PRODUCT
   353   // This code must come after the CAS test, or it will print incorrect
   354   // information.
   355   if (TraceScavenge) {
   356     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
   357        PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
   358        new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
   359   }
   360 #endif
   362   return new_obj;
   363 }
   365 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   366   if (is_oop_masked(p)) {
   367     assert(PSChunkLargeArrays, "invariant");
   368     oop const old = unmask_chunked_array_oop(p);
   369     process_array_chunk(old);
   370   } else {
   371     if (p.is_narrow()) {
   372       assert(UseCompressedOops, "Error");
   373       PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
   374     } else {
   375       PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
   376     }
   377   }
   378 }
   380 #if TASKQUEUE_STATS
   381 void PSPromotionManager::record_steal(StarTask& p) {
   382   if (is_oop_masked(p)) {
   383     ++_masked_steals;
   384   }
   385 }
   386 #endif // TASKQUEUE_STATS
   388 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

mercurial