src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175   4dfb2df418f2
parent       2314   f95d63e2154a
child        3536   95f6641e38e0
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP

#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "memory/allocation.hpp"
#include "utilities/taskqueue.hpp"

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are going
// to use a promotion manager in more than one thread, the stacks MUST be
// allocated on the C heap. This can lead to memory leaks, though, as C-heap
// stacks are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager : public CHeapObj {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PSPromotionManager**         _manager_array;
  static OopStarTaskQueueSet*         _stack_array_depth;
  static PSOldGen*                    _old_gen;
  static MutableSpace*                _young_space;

#if TASKQUEUE_STATS
  size_t                              _masked_pushes;
  size_t                              _masked_steals;
  size_t                              _arrays_chunked;
  size_t                              _array_chunks_processed;

  void print_taskqueue_stats(uint i) const;
  void print_local_stats(uint i) const;
  static void print_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                 _young_lab;
  PSOldPromotionLAB                   _old_lab;
  bool                                _young_gen_is_full;
  bool                                _old_gen_is_full;

  OopStarTaskQueue                    _claimed_stack_depth;
  OverflowTaskQueue<oop>              _claimed_stack_breadth;
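  // Of these two queues, the depth-first queue is the one drained by
  // drain_stacks() and checked by stacks_empty() below.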

  bool                                _totally_drain;
  uint                                _target_stack_size;

  uint                                _array_chunk_size;
  uint                                _min_array_size_for_chunking;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array and the length on the
  // from-space image indicates how many entries in the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK. The next three methods
  // do the masking, unmasking, and checking whether the oop is masked
  // or not. Notice that the signature of the mask and unmask methods
  // looks a bit strange, as they accept and return different types
  // (oop and oop*). This is because of the difference in types between
  // what the task queue holds (oop*) and oops to partially-scanned
  // arrays (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.

  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop (uses conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }
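
  // Illustrative round trip (a sketch, not part of the original source):
  // given a partially-scanned array oop 'obj',
  //   oop* masked = mask_chunked_array_oop(obj);    // tag with PS_CHUNKED_ARRAY_OOP_MASK
  //   assert(is_oop_masked(masked), "tagged");
  //   oop same = unmask_chunked_array_oop(masked);  // recovers obj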

  template <class T> void process_array_chunk_work(oop obj,
                                                   int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    claimed_stack_depth()->push(p);
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static void post_scavenge();

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

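  // Attempt to steal a task from another worker's depth-first queue via the
  // shared queue set; returns true and stores the stolen task in 't'.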
  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
 public:
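  // Drain the depth-first queue (non-totally) only once it has grown past
  // _target_stack_size, interleaving queue processing with pushing.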
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
};
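
// Illustrative per-worker lifecycle (a sketch, not part of the original
// source; 'worker_id' and 'p' are placeholders):
//
//   PSPromotionManager::pre_scavenge();
//   PSPromotionManager* pm =
//     PSPromotionManager::gc_thread_promotion_manager(worker_id);
//   pm->claim_or_forward_depth(p);  // for each root / reference location p
//   pm->drain_stacks(true);         // totally drain the claimed stack
//   pm->flush_labs();               // retire this manager's promotion LABs
//   PSPromotionManager::post_scavenge();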

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
