src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp

author:      jcoomes
date:        Thu, 01 Jul 2010 21:40:45 -0700
changeset:   1993:b2a00dd3117c
parent:      1907:c18cbe5936b8
child:       2020:a93a9eda13f7
permissions: -rw-r--r--

6957084: simplify TaskQueue overflow handling
Reviewed-by: ysr, jmasa

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap (CHeap). If you
// are going to use a promotion manager in more than one thread, the stacks
// MUST be on the C heap. This can lead to memory leaks, though, as they are
// not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
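
// Illustrative sketch only (not part of the original header): the intended
// per-scavenge lifecycle, assuming one promotion manager per GC thread plus
// one for the VM thread. The exact call sites are an assumption; the names
// below are the ones declared in this class.
//
//   PSPromotionManager::pre_scavenge();
//   // On GC thread i:
//   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(i);
//   // ... copy_to_survivor_space() / claim_or_forward work ...
//   pm->drain_stacks(true);   // caller must drain (see FIX ME above)
//   pm->flush_labs();         // caller must flush the promotion LABs
//   PSPromotionManager::post_scavenge();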
    37 // Move to some global location
    38 #define HAS_BEEN_MOVED 0x1501d01d
    39 // End move to some global location
    41 class MutableSpace;
    42 class PSOldGen;
    43 class ParCompactionManager;
    45 #define PS_PM_STATS         0
    47 class PSPromotionManager : public CHeapObj {
    48   friend class PSScavenge;
    49   friend class PSRefProcTaskExecutor;
    50  private:
    51   static PSPromotionManager**         _manager_array;
    52   static OopStarTaskQueueSet*         _stack_array_depth;
    53   static OopTaskQueueSet*             _stack_array_breadth;
    54   static PSOldGen*                    _old_gen;
    55   static MutableSpace*                _young_space;
    57 #if PS_PM_STATS
    58   uint                                _total_pushes;
    59   uint                                _masked_pushes;
    61   uint                                _overflow_pushes;
    62   uint                                _max_overflow_length;
    64   uint                                _arrays_chunked;
    65   uint                                _array_chunks_processed;
    67   uint                                _total_steals;
    68   uint                                _masked_steals;
    70   void print_stats(uint i);
    71   static void print_stats();
    72 #endif // PS_PM_STATS
    74   PSYoungPromotionLAB                 _young_lab;
    75   PSOldPromotionLAB                   _old_lab;
    76   bool                                _young_gen_is_full;
    77   bool                                _old_gen_is_full;
    78   PrefetchQueue                       _prefetch_queue;
    80   OopStarTaskQueue                    _claimed_stack_depth;
    81   OverflowTaskQueue<oop>              _claimed_stack_breadth;
    83   bool                                _depth_first;
    84   bool                                _totally_drain;
    85   uint                                _target_stack_size;
    87   uint                                _array_chunk_size;
    88   uint                                _min_array_size_for_chunking;
    90   // Accessors
    91   static PSOldGen* old_gen()         { return _old_gen; }
    92   static MutableSpace* young_space() { return _young_space; }
    94   inline static PSPromotionManager* manager_array(int index);
    95   template <class T> inline void claim_or_forward_internal_depth(T* p);
    96   template <class T> inline void claim_or_forward_internal_breadth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array and the length on the
  // from-space image indicates how many entries on the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK (defined below). The
  // next three methods do the masking, unmasking, and checking whether
  // the oop is masked or not. Notice that the signature of the mask
  // and unmask methods looks a bit strange, as they accept and return
  // different types (oop and oop*). This is because of the difference
  // in types between what the task queue holds (oop*) and oops to
  // partially-scanned arrays (oop). We do all the necessary casting in
  // the mask / unmask methods to avoid sprinkling the rest of the code
  // with more casts.

  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like a wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop* (uses conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }
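
  // Illustrative sketch only (not part of the original header): how a
  // partially-scanned array round-trips through the mask/unmask helpers
  // above. The push/pop call sites are an assumption; pop_local() comes
  // from the task queue code, the other names are declared in this class.
  //
  //   oop big_array = ...;                            // from-space image
  //   push_depth(mask_chunked_array_oop(big_array));  // tagged with 0x2
  //   ...
  //   StarTask task;
  //   if (claimed_stack_depth()->pop_local(task)) {
  //     if (is_oop_masked(task)) {
  //       // a partially-scanned array; scan its next chunk
  //       process_array_chunk(unmask_chunked_array_oop(task));
  //     } else {
  //       // a plain reference location (oop* or narrowOop*)
  //     }
  //   }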

  template <class T> void  process_array_chunk_work(oop obj,
                                                    int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    assert(depth_first(), "pre-condition");

#if PS_PM_STATS
    ++_total_pushes;
    int stack_length = claimed_stack_depth()->overflow_stack()->length();
#endif // PS_PM_STATS

    claimed_stack_depth()->push(p);

#if PS_PM_STATS
    if (claimed_stack_depth()->overflow_stack()->length() != stack_length) {
      ++_overflow_pushes;
      if ((uint)stack_length + 1 > _max_overflow_length) {
        _max_overflow_length = (uint)stack_length + 1;
      }
    }
#endif // PS_PM_STATS
  }

  void push_breadth(oop o) {
    assert(!depth_first(), "pre-condition");

#if PS_PM_STATS
    ++_total_pushes;
    int stack_length = claimed_stack_breadth()->overflow_stack()->length();
#endif // PS_PM_STATS

    claimed_stack_breadth()->push(o);

#if PS_PM_STATS
    if (claimed_stack_breadth()->overflow_stack()->length() != stack_length) {
      ++_overflow_pushes;
      if ((uint)stack_length + 1 > _max_overflow_length) {
        _max_overflow_length = (uint)stack_length + 1;
      }
    }
#endif // PS_PM_STATS
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
  static OopTaskQueueSet*     stack_array_breadth() { return _stack_array_breadth; }

 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static void post_scavenge();

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  static bool steal_breadth(int queue_num, int* seed, oop& t) {
    return stack_array_breadth()->steal(queue_num, seed, t);
  }
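
  // Illustrative sketch only (not part of the original header): the usual
  // work-stealing pattern for the depth-first queues. The termination
  // object comes from the parallel task framework and is an assumption
  // here; the steal/process/drain calls are the ones declared in this class.
  //
  //   int seed = 17;        // per-thread random seed for victim selection
  //   StarTask t;
  //   while (true) {
  //     if (PSPromotionManager::steal_depth(queue_num, &seed, t)) {
  //       pm->process_popped_location_depth(t);
  //       pm->drain_stacks_depth(false);   // drain what the steal produced
  //     } else if (terminator->offer_termination()) {
  //       break;                           // all queues are empty
  //     }
  //   }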

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }
  OverflowTaskQueue<oop>* claimed_stack_breadth() {
    return &_claimed_stack_breadth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o, bool depth_first);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    if (depth_first()) {
      drain_stacks_depth(totally_drain);
    } else {
      drain_stacks_breadth(totally_drain);
    }
  }
 public:
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);
  void drain_stacks_breadth(bool totally_drain);

  bool depth_first() const {
    return _depth_first;
  }
  bool stacks_empty() {
    return depth_first() ?
      claimed_stack_depth()->is_empty() :
      claimed_stack_breadth()->is_empty();
  }
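
  // Illustrative sketch only (not part of the original header): the intent
  // of drain_stacks_cond_depth() is to be called periodically while scanning
  // so the local stack stays near _target_stack_size instead of growing
  // without bound. The scanning helper below is hypothetical.
  //
  //   template <class T> void scan_field(PSPromotionManager* pm, T* p) {
  //     pm->claim_or_forward_depth(p);   // may push more work
  //     pm->drain_stacks_cond_depth();   // drain if over _target_stack_size
  //   }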

  inline void process_popped_location_depth(StarTask p);

  inline void flush_prefetch_queue();
  template <class T> inline void claim_or_forward_depth(T* p);
  template <class T> inline void claim_or_forward_breadth(T* p);

#if PS_PM_STATS
  void increment_steals(oop* p = NULL) {
    _total_steals += 1;
    if (p != NULL && is_oop_masked(p)) {
      _masked_steals += 1;
    }
  }
#endif // PS_PM_STATS
};
