Sat, 23 Oct 2010 23:03:49 -0700
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
Summary: Deprecated HandlePromotionFailure, removing the ability to turn off that feature; did away with the one-epoch look-ahead when deciding whether a scavenge is likely to fail, relying instead on current data.
Reviewed-by: jmasa, johnc, poonam
1 /*
2 * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
// Forward declarations for the space types and closure used below.
class EdenSpace;
class ContiguousSpace;
class ScanClosure;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.
32 class DefNewGeneration: public Generation {
33 friend class VMStructs;
35 protected:
36 Generation* _next_gen;
37 int _tenuring_threshold; // Tenuring threshold for next collection.
38 ageTable _age_table;
39 // Size of object to pretenure in words; command line provides bytes
40 size_t _pretenure_size_threshold_words;
42 ageTable* age_table() { return &_age_table; }
43 // Initialize state to optimistically assume no promotion failure will
44 // happen.
45 void init_assuming_no_promotion_failure();
46 // True iff a promotion has failed in the current collection.
47 bool _promotion_failed;
48 bool promotion_failed() { return _promotion_failed; }
50 // Handling promotion failure. A young generation collection
51 // can fail if a live object cannot be copied out of its
52 // location in eden or from-space during the collection. If
53 // a collection fails, the young generation is left in a
54 // consistent state such that it can be collected by a
55 // full collection.
56 // Before the collection
57 // Objects are in eden or from-space
58 // All roots into the young generation point into eden or from-space.
59 //
60 // After a failed collection
61 // Objects may be in eden, from-space, or to-space
62 // An object A in eden or from-space may have a copy B
63 // in to-space. If B exists, all roots that once pointed
64 // to A must now point to B.
65 // All objects in the young generation are unmarked.
66 // Eden, from-space, and to-space will all be collected by
67 // the full collection.
68 void handle_promotion_failure(oop);
70 // In the absence of promotion failure, we wouldn't look at "from-space"
71 // objects after a young-gen collection. When promotion fails, however,
72 // the subsequent full collection will look at from-space objects:
73 // therefore we must remove their forwarding pointers.
74 void remove_forwarding_pointers();
76 // Preserve the mark of "obj", if necessary, in preparation for its mark
77 // word being overwritten with a self-forwarding-pointer.
78 void preserve_mark_if_necessary(oop obj, markOop m);
80 // Together, these keep <object with a preserved mark, mark value> pairs.
81 // They should always contain the same number of elements.
82 Stack<oop> _objs_with_preserved_marks;
83 Stack<markOop> _preserved_marks_of_objs;
85 // Promotion failure handling
86 OopClosure *_promo_failure_scan_stack_closure;
87 void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
88 _promo_failure_scan_stack_closure = scan_stack_closure;
89 }
91 Stack<oop> _promo_failure_scan_stack;
92 void drain_promo_failure_scan_stack(void);
93 bool _promo_failure_drain_in_progress;
95 // Performance Counters
96 GenerationCounters* _gen_counters;
97 CSpaceCounters* _eden_counters;
98 CSpaceCounters* _from_counters;
99 CSpaceCounters* _to_counters;
101 // sizing information
102 size_t _max_eden_size;
103 size_t _max_survivor_size;
105 // Allocation support
106 bool _should_allocate_from_space;
107 bool should_allocate_from_space() const {
108 return _should_allocate_from_space;
109 }
110 void clear_should_allocate_from_space() {
111 _should_allocate_from_space = false;
112 }
113 void set_should_allocate_from_space() {
114 _should_allocate_from_space = true;
115 }
117 protected:
118 // Spaces
119 EdenSpace* _eden_space;
120 ContiguousSpace* _from_space;
121 ContiguousSpace* _to_space;
123 enum SomeProtectedConstants {
124 // Generations are GenGrain-aligned and have size that are multiples of
125 // GenGrain.
126 MinFreeScratchWords = 100
127 };
129 // Return the size of a survivor space if this generation were of size
130 // gen_size.
131 size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
132 size_t n = gen_size / (SurvivorRatio + 2);
133 return n > alignment ? align_size_down(n, alignment) : alignment;
134 }
136 public: // was "protected" but caused compile error on win32
137 class IsAliveClosure: public BoolObjectClosure {
138 Generation* _g;
139 public:
140 IsAliveClosure(Generation* g);
141 void do_object(oop p);
142 bool do_object_b(oop p);
143 };
145 class KeepAliveClosure: public OopClosure {
146 protected:
147 ScanWeakRefClosure* _cl;
148 CardTableRS* _rs;
149 template <class T> void do_oop_work(T* p);
150 public:
151 KeepAliveClosure(ScanWeakRefClosure* cl);
152 virtual void do_oop(oop* p);
153 virtual void do_oop(narrowOop* p);
154 };
156 class FastKeepAliveClosure: public KeepAliveClosure {
157 protected:
158 HeapWord* _boundary;
159 template <class T> void do_oop_work(T* p);
160 public:
161 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
162 virtual void do_oop(oop* p);
163 virtual void do_oop(narrowOop* p);
164 };
166 class EvacuateFollowersClosure: public VoidClosure {
167 GenCollectedHeap* _gch;
168 int _level;
169 ScanClosure* _scan_cur_or_nonheap;
170 ScanClosure* _scan_older;
171 public:
172 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
173 ScanClosure* cur, ScanClosure* older);
174 void do_void();
175 };
177 class FastEvacuateFollowersClosure: public VoidClosure {
178 GenCollectedHeap* _gch;
179 int _level;
180 DefNewGeneration* _gen;
181 FastScanClosure* _scan_cur_or_nonheap;
182 FastScanClosure* _scan_older;
183 public:
184 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
185 DefNewGeneration* gen,
186 FastScanClosure* cur,
187 FastScanClosure* older);
188 void do_void();
189 };
191 public:
192 DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
193 const char* policy="Copy");
195 virtual Generation::Name kind() { return Generation::DefNew; }
197 // Accessing spaces
198 EdenSpace* eden() const { return _eden_space; }
199 ContiguousSpace* from() const { return _from_space; }
200 ContiguousSpace* to() const { return _to_space; }
202 virtual CompactibleSpace* first_compaction_space() const;
204 // Space enquiries
205 size_t capacity() const;
206 size_t used() const;
207 size_t free() const;
208 size_t max_capacity() const;
209 size_t capacity_before_gc() const;
210 size_t unsafe_max_alloc_nogc() const;
211 size_t contiguous_available() const;
213 size_t max_eden_size() const { return _max_eden_size; }
214 size_t max_survivor_size() const { return _max_survivor_size; }
216 bool supports_inline_contig_alloc() const { return true; }
217 HeapWord** top_addr() const;
218 HeapWord** end_addr() const;
220 // Thread-local allocation buffers
221 bool supports_tlab_allocation() const { return true; }
222 size_t tlab_capacity() const;
223 size_t unsafe_max_tlab_alloc() const;
225 // Grow the generation by the specified number of bytes.
226 // The size of bytes is assumed to be properly aligned.
227 // Return true if the expansion was successful.
228 bool expand(size_t bytes);
230 // DefNewGeneration cannot currently expand except at
231 // a GC.
232 virtual bool is_maximal_no_gc() const { return true; }
234 // Iteration
235 void object_iterate(ObjectClosure* blk);
236 void object_iterate_since_last_GC(ObjectClosure* cl);
238 void younger_refs_iterate(OopsInGenClosure* cl);
240 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
242 // Allocation support
243 virtual bool should_allocate(size_t word_size, bool is_tlab) {
244 assert(UseTLAB || !is_tlab, "Should not allocate tlab");
246 size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
248 const bool non_zero = word_size > 0;
249 const bool overflows = word_size >= overflow_limit;
250 const bool check_too_big = _pretenure_size_threshold_words > 0;
251 const bool not_too_big = word_size < _pretenure_size_threshold_words;
252 const bool size_ok = is_tlab || !check_too_big || not_too_big;
254 bool result = !overflows &&
255 non_zero &&
256 size_ok;
258 return result;
259 }
261 HeapWord* allocate(size_t word_size, bool is_tlab);
262 HeapWord* allocate_from_space(size_t word_size);
264 HeapWord* par_allocate(size_t word_size, bool is_tlab);
266 // Prologue & Epilogue
267 virtual void gc_prologue(bool full);
268 virtual void gc_epilogue(bool full);
270 // Save the tops for eden, from, and to
271 virtual void record_spaces_top();
273 // Doesn't require additional work during GC prologue and epilogue
274 virtual bool performs_in_place_marking() const { return false; }
276 // Accessing marks
277 void save_marks();
278 void reset_saved_marks();
279 bool no_allocs_since_save_marks();
281 // Need to declare the full complement of closures, whether we'll
282 // override them or not, or get message from the compiler:
283 // oop_since_save_marks_iterate_nv hides virtual function...
284 #define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
285 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
287 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)
289 #undef DefNew_SINCE_SAVE_MARKS_DECL
291 // For non-youngest collection, the DefNewGeneration can contribute
292 // "to-space".
293 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
294 size_t max_alloc_words);
296 // Reset for contribution of "to-space".
297 virtual void reset_scratch();
299 // GC support
300 virtual void compute_new_size();
302 // Returns true if the collection is likely to be safely
303 // completed. Even if this method returns true, a collection
304 // may not be guaranteed to succeed, and the system should be
305 // able to safely unwind and recover from that failure, albeit
306 // at some additional cost. Override superclass's implementation.
307 virtual bool collection_attempt_is_safe();
309 virtual void collect(bool full,
310 bool clear_all_soft_refs,
311 size_t size,
312 bool is_tlab);
313 HeapWord* expand_and_allocate(size_t size,
314 bool is_tlab,
315 bool parallel = false);
317 oop copy_to_survivor_space(oop old);
318 int tenuring_threshold() { return _tenuring_threshold; }
320 // Performance Counter support
321 void update_counters();
323 // Printing
324 virtual const char* name() const;
325 virtual const char* short_name() const { return "DefNew"; }
327 bool must_be_youngest() const { return true; }
328 bool must_be_oldest() const { return false; }
330 // PrintHeapAtGC support.
331 void print_on(outputStream* st) const;
333 void verify(bool allow_dirty);
335 bool promo_failure_scan_is_complete() const {
336 return _promo_failure_scan_stack.is_empty();
337 }
339 protected:
340 // If clear_space is true, clear the survivor spaces. Eden is
341 // cleared if the minimum size of eden is 0. If mangle_space
342 // is true, also mangle the space in debug mode.
343 void compute_space_boundaries(uintx minimum_eden_size,
344 bool clear_space,
345 bool mangle_space);
346 // Scavenge support
347 void swap_spaces();
348 };