src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp

changeset 0: f90c822e73f8
child 1: 2d8a650513c2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,208 @@
     1.4 +/*
     1.5 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
    1.29 +#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
    1.30 +
    1.31 +#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
    1.32 +#include "gc_implementation/shared/gcTrace.hpp"
    1.33 +#include "gc_implementation/shared/copyFailedInfo.hpp"
    1.34 +#include "memory/allocation.hpp"
    1.35 +#include "memory/padded.hpp"
    1.36 +#include "utilities/globalDefinitions.hpp"
    1.37 +#include "utilities/taskqueue.hpp"
    1.38 +
    1.39 +//
    1.40 +// psPromotionManager is used by a single thread to manage object survival
    1.41 +// during a scavenge. The promotion manager contains thread local data only.
    1.42 +//
     1.43 +// NOTE! Be careful when allocating the stacks on the C heap. If you are going
     1.44 +// to use a promotion manager in more than one thread, the stacks MUST be
     1.45 +// on the C heap. This can lead to memory leaks, though, as they are not
     1.46 +// automatically deallocated.
    1.47 +//
    1.48 +// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
    1.49 +//
    1.50 +
    1.51 +// Move to some global location
    1.52 +#define HAS_BEEN_MOVED 0x1501d01d
    1.53 +// End move to some global location
    1.54 +
    1.55 +class MutableSpace;
    1.56 +class PSOldGen;
    1.57 +class ParCompactionManager;
    1.58 +
    1.59 +class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
    1.60 +  friend class PSScavenge;
    1.61 +  friend class PSRefProcTaskExecutor;
    1.62 + private:
    1.63 +  static PaddedEnd<PSPromotionManager>* _manager_array;
    1.64 +  static OopStarTaskQueueSet*           _stack_array_depth;
    1.65 +  static PSOldGen*                      _old_gen;
    1.66 +  static MutableSpace*                  _young_space;
    1.67 +
    1.68 +#if TASKQUEUE_STATS
    1.69 +  size_t                              _masked_pushes;
    1.70 +  size_t                              _masked_steals;
    1.71 +  size_t                              _arrays_chunked;
    1.72 +  size_t                              _array_chunks_processed;
    1.73 +
    1.74 +  void print_taskqueue_stats(uint i) const;
    1.75 +  void print_local_stats(uint i) const;
    1.76 +  static void print_stats();
    1.77 +
    1.78 +  void reset_stats();
    1.79 +#endif // TASKQUEUE_STATS
    1.80 +
    1.81 +  PSYoungPromotionLAB                 _young_lab;
    1.82 +  PSOldPromotionLAB                   _old_lab;
    1.83 +  bool                                _young_gen_is_full;
    1.84 +  bool                                _old_gen_is_full;
    1.85 +
    1.86 +  OopStarTaskQueue                    _claimed_stack_depth;
    1.87 +  OverflowTaskQueue<oop, mtGC>        _claimed_stack_breadth;
    1.88 +
    1.89 +  bool                                _totally_drain;
    1.90 +  uint                                _target_stack_size;
    1.91 +
    1.92 +  uint                                _array_chunk_size;
    1.93 +  uint                                _min_array_size_for_chunking;
    1.94 +
    1.95 +  PromotionFailedInfo                 _promotion_failed_info;
    1.96 +
    1.97 +  // Accessors
    1.98 +  static PSOldGen* old_gen()         { return _old_gen; }
    1.99 +  static MutableSpace* young_space() { return _young_space; }
   1.100 +
   1.101 +  inline static PSPromotionManager* manager_array(int index);
   1.102 +  template <class T> inline void claim_or_forward_internal_depth(T* p);
   1.103 +
   1.104 +  // On the task queues we push reference locations as well as
   1.105 +  // partially-scanned arrays (in the latter case, we push an oop to
   1.106 +  // the from-space image of the array and the length on the
    1.107 +  // from-space image indicates how many entries of the array we still
    1.108 +  // need to scan; this is basically how ParNew does partial array
    1.109 +  // scanning too). To be able to distinguish between reference
    1.110 +  // locations and partially-scanned array oops we simply mask the latter
    1.111 +  // oops with PS_CHUNKED_ARRAY_OOP_MASK. The next three methods do the masking,
   1.112 +  // unmasking, and checking whether the oop is masked or not. Notice
   1.113 +  // that the signature of the mask and unmask methods looks a bit
   1.114 +  // strange, as they accept and return different types (oop and
   1.115 +  // oop*). This is because of the difference in types between what
   1.116 +  // the task queue holds (oop*) and oops to partially-scanned arrays
   1.117 +  // (oop). We do all the necessary casting in the mask / unmask
   1.118 +  // methods to avoid sprinkling the rest of the code with more casts.
   1.119 +
    1.120 +  // Masked oops are pushed on the task queue, so PS_CHUNKED_ARRAY_OOP_MASK (or
    1.121 +  // any future masks) must not conflict with the queue's own COMPRESSED_OOP_MASK
   1.122 +#define PS_CHUNKED_ARRAY_OOP_MASK  0x2
   1.123 +
   1.124 +  bool is_oop_masked(StarTask p) {
    1.125 +    // If something is marked chunked it's always treated as a wide oop*
   1.126 +    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
   1.127 +                                  PS_CHUNKED_ARRAY_OOP_MASK;
   1.128 +  }
   1.129 +
   1.130 +  oop* mask_chunked_array_oop(oop obj) {
   1.131 +    assert(!is_oop_masked((oop*) obj), "invariant");
   1.132 +    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
   1.133 +    assert(is_oop_masked(ret), "invariant");
   1.134 +    return ret;
   1.135 +  }
   1.136 +
   1.137 +  oop unmask_chunked_array_oop(StarTask p) {
   1.138 +    assert(is_oop_masked(p), "invariant");
   1.139 +    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    1.140 +    oop* chunk = (oop*)p;  // cast p to oop* (uses conversion operator)
   1.141 +    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
   1.142 +    assert(!is_oop_masked((oop*) ret), "invariant");
   1.143 +    return ret;
   1.144 +  }
   1.145 +
   1.146 +  template <class T> void  process_array_chunk_work(oop obj,
   1.147 +                                                    int start, int end);
   1.148 +  void process_array_chunk(oop old);
   1.149 +
   1.150 +  template <class T> void push_depth(T* p) {
   1.151 +    claimed_stack_depth()->push(p);
   1.152 +  }
   1.153 +
   1.154 + protected:
   1.155 +  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
   1.156 + public:
   1.157 +  // Static
   1.158 +  static void initialize();
   1.159 +
   1.160 +  static void pre_scavenge();
   1.161 +  static bool post_scavenge(YoungGCTracer& gc_tracer);
   1.162 +
   1.163 +  static PSPromotionManager* gc_thread_promotion_manager(int index);
   1.164 +  static PSPromotionManager* vm_thread_promotion_manager();
   1.165 +
   1.166 +  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
   1.167 +    return stack_array_depth()->steal(queue_num, seed, t);
   1.168 +  }
   1.169 +
   1.170 +  PSPromotionManager();
   1.171 +
   1.172 +  // Accessors
   1.173 +  OopStarTaskQueue* claimed_stack_depth() {
   1.174 +    return &_claimed_stack_depth;
   1.175 +  }
   1.176 +
   1.177 +  bool young_gen_is_full()             { return _young_gen_is_full; }
   1.178 +
   1.179 +  bool old_gen_is_full()               { return _old_gen_is_full; }
   1.180 +  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
   1.181 +
   1.182 +  // Promotion methods
   1.183 +  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
   1.184 +  oop oop_promotion_failed(oop obj, markOop obj_mark);
   1.185 +
   1.186 +  void reset();
   1.187 +
   1.188 +  void flush_labs();
   1.189 +  void drain_stacks(bool totally_drain) {
   1.190 +    drain_stacks_depth(totally_drain);
   1.191 +  }
   1.192 + public:
   1.193 +  void drain_stacks_cond_depth() {
   1.194 +    if (claimed_stack_depth()->size() > _target_stack_size) {
   1.195 +      drain_stacks_depth(false);
   1.196 +    }
   1.197 +  }
   1.198 +  void drain_stacks_depth(bool totally_drain);
   1.199 +
   1.200 +  bool stacks_empty() {
   1.201 +    return claimed_stack_depth()->is_empty();
   1.202 +  }
   1.203 +
   1.204 +  inline void process_popped_location_depth(StarTask p);
   1.205 +
   1.206 +  template <class T> inline void claim_or_forward_depth(T* p);
   1.207 +
   1.208 +  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
   1.209 +};
   1.210 +
   1.211 +#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
