src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp

author:      jcoomes
date:        Fri, 28 Mar 2008 23:35:42 -0700
changeset:   514:82db0859acbe
parent:      435:a61af66fc99e
child:       548:ba764ed4b6f2
permissions: -rw-r--r--

6642862: Code cache allocation fails with large pages after 6588638
Reviewed-by: apetrusenko

/*
 * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be allocated on the C heap. This can lead to memory leaks, though,
// as they are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//

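// Illustrative sketch (an assumption about typical usage, not the actual
// call sites, which live in the scavenge code): each GC worker grabs its
// own manager and is expected to drain its stacks and flush its LABs, as
// the note above warns.
//
//   PSPromotionManager::pre_scavenge();
//   PSPromotionManager* pm =
//       PSPromotionManager::gc_thread_promotion_manager(worker_id); // worker_id: hypothetical per-thread index
//   // ... pm->copy_to_survivor_space(obj, pm->depth_first()) for each live object found ...
//   pm->drain_stacks(true);
//   pm->flush_labs();
//   PSPromotionManager::post_scavenge();
//
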
// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

#define PS_CHUNKED_ARRAY_OOP_MASK  1

#define PS_PM_STATS 0

class PSPromotionManager : public CHeapObj {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PSPromotionManager**  _manager_array;
  static OopStarTaskQueueSet*  _stack_array_depth;
  static OopTaskQueueSet*      _stack_array_breadth;
  static PSOldGen*             _old_gen;
  static MutableSpace*         _young_space;

#if PS_PM_STATS
  uint                         _total_pushes;
  uint                         _masked_pushes;

  uint                         _overflow_pushes;
  uint                         _max_overflow_length;

  uint                         _arrays_chunked;
  uint                         _array_chunks_processed;

  uint                         _total_steals;
  uint                         _masked_steals;

  void print_stats(uint i);
  static void print_stats();
#endif // PS_PM_STATS

  PSYoungPromotionLAB          _young_lab;
  PSOldPromotionLAB            _old_lab;
  bool                         _young_gen_is_full;
  bool                         _old_gen_is_full;
  PrefetchQueue                _prefetch_queue;

  OopStarTaskQueue             _claimed_stack_depth;
  GrowableArray<oop*>*         _overflow_stack_depth;
  OopTaskQueue                 _claimed_stack_breadth;
  GrowableArray<oop>*          _overflow_stack_breadth;

  bool                         _depth_first;
  bool                         _totally_drain;
  uint                         _target_stack_size;

  uint                         _array_chunk_size;
  uint                         _min_array_size_for_chunking;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);

  GrowableArray<oop*>* overflow_stack_depth()   { return _overflow_stack_depth; }
  GrowableArray<oop>*  overflow_stack_breadth() { return _overflow_stack_breadth; }

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array and the length on the
  // from-space image indicates how many entries in the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with 0x01. The next three methods do the masking,
  // unmasking, and checking whether the oop is masked or not. Notice
  // that the signature of the mask and unmask methods looks a bit
  // strange, as they accept and return different types (oop and
  // oop*). This is because of the difference in types between what
  // the task queue holds (oop*) and oops to partially-scanned arrays
  // (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.

  bool is_oop_masked(oop* p) {
    return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(oop* p) {
    assert(is_oop_masked(p), "invariant");
    oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }
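
  // Illustrative round trip (a sketch, not code from this file): a chunked
  // array oop survives masking and unmasking unchanged, which is what lets
  // the task queue mix tagged array oops with plain reference locations.
  //
  //   oop  big_array = ...;                               // hypothetical objArray being scanned in chunks
  //   oop* tagged    = mask_chunked_array_oop(big_array); // low bit set, safe to push on the queue
  //   assert(is_oop_masked(tagged), "tagged entries are recognizable");
  //   assert(unmask_chunked_array_oop(tagged) == big_array, "round trip preserves the oop");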

  void process_array_chunk(oop old);

  void push_depth(oop* p) {
    assert(depth_first(), "pre-condition");

#if PS_PM_STATS
    ++_total_pushes;
#endif // PS_PM_STATS

    if (!claimed_stack_depth()->push(p)) {
      overflow_stack_depth()->push(p);
#if PS_PM_STATS
      ++_overflow_pushes;
      uint stack_length = (uint) overflow_stack_depth()->length();
      if (stack_length > _max_overflow_length) {
        _max_overflow_length = stack_length;
      }
#endif // PS_PM_STATS
    }
  }

  void push_breadth(oop o) {
    assert(!depth_first(), "pre-condition");

#if PS_PM_STATS
    ++_total_pushes;
#endif // PS_PM_STATS

    if (!claimed_stack_breadth()->push(o)) {
      overflow_stack_breadth()->push(o);
#if PS_PM_STATS
      ++_overflow_pushes;
      uint stack_length = (uint) overflow_stack_breadth()->length();
      if (stack_length > _max_overflow_length) {
        _max_overflow_length = stack_length;
      }
#endif // PS_PM_STATS
    }
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
  static OopTaskQueueSet*     stack_array_breadth() { return _stack_array_breadth; }

 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static void post_scavenge();

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    assert(stack_array_depth() != NULL, "invariant");
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  static bool steal_breadth(int queue_num, int* seed, Task& t) {
    assert(stack_array_breadth() != NULL, "invariant");
    return stack_array_breadth()->steal(queue_num, seed, t);
  }
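
  // Sketch of how a worker might use the steal interface (an assumption
  // about the caller, simplified from the scavenge task code; `pm`, `which`
  // and `seed` are per-worker values supplied by that caller):
  //
  //   StarTask p;
  //   while (PSPromotionManager::steal_depth(which, &seed, p)) {
  //     pm->process_popped_location_depth(p);  // handle the stolen entry
  //     pm->drain_stacks_depth(true);          // then drain any work it generated
  //   }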

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }
  OopTaskQueue* claimed_stack_breadth() {
    return &_claimed_stack_breadth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o, bool depth_first);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    if (depth_first()) {
      drain_stacks_depth(totally_drain);
    } else {
      drain_stacks_breadth(totally_drain);
    }
  }
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);
  void drain_stacks_breadth(bool totally_drain);

  bool claimed_stack_empty() {
    if (depth_first()) {
      return claimed_stack_depth()->size() <= 0;
    } else {
      return claimed_stack_breadth()->size() <= 0;
    }
  }
  bool overflow_stack_empty() {
    if (depth_first()) {
      return overflow_stack_depth()->length() <= 0;
    } else {
      return overflow_stack_breadth()->length() <= 0;
    }
  }
  bool stacks_empty() {
    return claimed_stack_empty() && overflow_stack_empty();
  }
  bool depth_first() {
    return _depth_first;
  }

  inline void process_popped_location_depth(oop* p);

  inline void flush_prefetch_queue();

  inline void claim_or_forward_depth(oop* p);
  inline void claim_or_forward_internal_depth(oop* p);

  inline void claim_or_forward_breadth(oop* p);
  inline void claim_or_forward_internal_breadth(oop* p);

#if PS_PM_STATS
  void increment_steals(oop* p = NULL) {
    _total_steals += 1;
    if (p != NULL && is_oop_masked(p)) {
      _masked_steals += 1;
    }
  }
#endif // PS_PM_STATS
};
