src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175  4dfb2df418f2
parent       2314  f95d63e2154a
child        3536  95f6641e38e0
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
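
The closure embedding mentioned in the summary follows the HotSpot convention of
this era: an OopClosure may carry a ReferenceProcessor, and the code that scans
java.lang.ref.Reference instances consults it to 'discover' them. A minimal
sketch of that shape, assuming the OopClosure(ReferenceProcessor*) constructor
from memory/iterator.hpp (the class name and do_oop bodies are illustrative,
not the actual G1 closures):

  class ExampleEvacuationClosure : public OopClosure {
   public:
    // Carry the pause's STW reference processor; the Reference-scanning
    // code reaches it through the closure.
    ExampleEvacuationClosure(ReferenceProcessor* rp) : OopClosure(rp) { }
    virtual void do_oop(oop* p)       { /* evacuate *p and update p */ }
    virtual void do_oop(narrowOop* p) { /* same for compressed oops */ }
  };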

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP

#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "memory/allocation.hpp"
#include "utilities/taskqueue.hpp"

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be on the C heap. This can lead to memory leaks, though, as they
// are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to
// drain/flush/deallocate!
//

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager : public CHeapObj {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PSPromotionManager**         _manager_array;
  static OopStarTaskQueueSet*         _stack_array_depth;
  static PSOldGen*                    _old_gen;
  static MutableSpace*                _young_space;

#if TASKQUEUE_STATS
  size_t                              _masked_pushes;
  size_t                              _masked_steals;
  size_t                              _arrays_chunked;
  size_t                              _array_chunks_processed;

  void print_taskqueue_stats(uint i) const;
  void print_local_stats(uint i) const;
  static void print_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                 _young_lab;
  PSOldPromotionLAB                   _old_lab;
  bool                                _young_gen_is_full;
  bool                                _old_gen_is_full;

  OopStarTaskQueue                    _claimed_stack_depth;
  OverflowTaskQueue<oop>              _claimed_stack_breadth;

  bool                                _totally_drain;
  uint                                _target_stack_size;

  uint                                _array_chunk_size;
  uint                                _min_array_size_for_chunking;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array, and the length field of the
  // from-space image indicates how many entries in the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK. The next three methods
  // do the masking, unmasking, and checking whether the oop is masked
  // or not. Notice that the signatures of the mask and unmask methods
  // look a bit strange, as they accept and return different types (oop
  // and oop*). This is because of the difference in types between what
  // the task queue holds (oop*) and oops to partially-scanned arrays
  // (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.

  // The mask is 0x2, rather than 0x1, so that PS_CHUNKED_ARRAY_OOP_MASK
  // (or any future masks) can't conflict with the COMPRESSED_OOP_MASK
  // already used for entries on the task queue.
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like a wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop* (uses StarTask's conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }
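
  // Worked example: with PS_CHUNKED_ARRAY_OOP_MASK == 0x2, an aligned
  // array oop such as 0x7f0012345670 is pushed as 0x7f0012345672;
  // is_oop_masked() tests bit 1, and unmask_chunked_array_oop() clears
  // it to recover the original oop. Bit 0 remains free for StarTask's
  // COMPRESSED_OOP_MASK tagging of narrowOop* entries.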

  template <class T> void  process_array_chunk_work(oop obj,
                                                    int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    claimed_stack_depth()->push(p);
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static void post_scavenge();

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
 public:
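  // Drain only once the claimed stack has grown past _target_stack_size;
  // the partial drain (totally_drain == false) bounds stack growth while
  // leaving entries that other threads can steal.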
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
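
A rough sketch of how the interface above is driven during a scavenge,
assembled only from the declarations in this header; the worker function
and root location below are hypothetical, not repository code:

  void example_scavenge_worker(int worker_id, oop* root_location) {
    // Per-GC-thread manager, set up by PSPromotionManager::pre_scavenge().
    PSPromotionManager* pm =
        PSPromotionManager::gc_thread_promotion_manager(worker_id);

    // Claim a root location; if it refers to a young object, the entry
    // is pushed onto this thread's depth-first queue for processing.
    pm->claim_or_forward_depth(root_location);

    // Fully drain this thread's claimed stack; cross-queue work
    // stealing goes through steal_depth().
    pm->drain_stacks(true);
    assert(pm->stacks_empty(), "post-condition");
  }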
