Fri, 16 Oct 2009 02:05:46 -0700
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
6889757: G1: enable card mark elision for initializing writes from compiled code (ReduceInitialCardMarks)
Summary: Defer the (compiler-elided) card-mark upon a slow-path allocation until after the store and before the next subsequent safepoint; G1 now answers yes to can_elide_tlab_write_barriers().
Reviewed-by: jcoomes, kvn, never
/*
 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection.  It is represented as a sequence of Generations.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };

  friend class VM_PopulateDumpSharedSpace;

protected:
  // Fields:
  static GenCollectedHeap* _gch;

private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // If a generation would bail out of an incremental collection,
  // it sets this flag.  If the flag is set, satisfy_failed_allocation
  // will attempt allocating in all generations before doing a full GC.
  bool _incremental_collection_will_fail;
  bool _last_incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) strong roots processing.
  SubTasksDone* _gen_process_strong_tasks;

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  // GC is not allowed during the dump of the shared classes.  Keep track
  // of this in order to provide a reasonable error message when terminating.
  bool _preloading_shared_classes;

protected:
  // Directs each generation up to and including "collectedGen" to recompute
  // its desired size.
  void compute_new_generation_sizes(int collectedGen);

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for two callbacks below.
  // Considers collection of the first max_level+1 generations.
  void do_collection(bool   full,
                     bool   clear_all_soft_refs,
                     size_t size,
                     bool   is_tlab,
                     int    max_level);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the first max_level+1 generations.
  void do_full_collection(bool clear_all_soft_refs, int max_level);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(int level) const;

  // Returns JNI_OK on success
  virtual jint initialize();
  char* allocate(size_t alignment, PermanentGenerationSpec* perm_gen_spec,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Does operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for generations level and lower,
  // and, if perm is true, for perm gen.
  void save_used_regions(int level, bool perm);

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool is_large_noref,
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection activity.  In a generational
  // collector, for example, this is probably the largest allocation that
  // could be supported in the youngest generation.  It is "unsafe" because
  // no locks are taken; the result should be treated as an approximation,
  // not a guarantee.
  size_t unsafe_max_alloc();

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc".  This implies as full a collection as the CollectedHeap
  // supports.  Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread.  It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of the first max_level+1 generations.
  // Mostly used for testing purposes.  Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, int max_level);

  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent their inadvertent
  // use in product JVMs, we restrict their use to assertion checking or
  // verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns "TRUE" iff "p" points into the youngest generation.
  bool is_in_youngest(void* p);

  // Iteration functions.
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  void object_iterate_since_last_GC(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.  Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.  Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
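
  // The block protocol above is enough to walk a region of the heap; the
  // following is a hedged sketch of such a walk, not part of this class.
  // "heap" and "mr" are assumed to be a GenCollectedHeap* and a MemRegion
  // already in scope.
  //
  //   HeapWord* cur = heap->block_start(mr.start()); // first block in mr
  //   while (cur < mr.end()) {
  //     size_t sz = heap->block_size(cur);           // size of this block
  //     if (heap->block_is_obj(cur)) {
  //       // "cur" is the start of a Java object; inspect/verify it here.
  //     }
  //     cur += sz;                                   // step to next block
  //   }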

  // Section on TLABs.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.  This applies to {DefNew,ParNew}+{Tenured,CMS}
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
           "Check can_elide_initializing_store_barrier() for this collector");
    return is_in_youngest((void*)new_obj);
  }
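
  // A hedged illustration of the contract behind the two elision methods
  // above, per the summary of 6888898/6889757: when compiled code elides
  // the card mark for initializing stores, a slow-path allocation must
  // defer that card mark until after the initializing stores complete and
  // before the next safepoint.  The helper names here are hypothetical,
  // for illustration only:
  //
  //   oop obj = slow_path_allocate(klass, size);   // no card mark yet
  //   // ... compiled initializing stores to obj, barriers elided ...
  //   flush_deferred_card_mark(thread, obj);       // hypothetical helper;
  //                                                // must run before the
  //                                                // next safepoint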

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // CMS needs to see all, even intra-generational, ref updates.
    return !UseConcMarkSweepGC;
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say).  Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  size_t large_typearray_limit();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the longest-ago collection in any generation.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all constituent generations
  // to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
    perm_gen()->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  // "level" is the level of the latest collection.
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
    perm_gen()->update_gc_stats(current_level, full);
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool allow_dirty, bool silent, bool /* option */);

  // Override.
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;
  void print_perm_heap_change(size_t perm_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap (not including
  // the permanent generation); "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
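
  // A hedged usage sketch for GenClosure and generation_iterate; the
  // closure type "PrintGenClosure" is hypothetical, and Generation is
  // assumed to expose level() and name():
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //   public:
  //     virtual void do_generation(Generation* gen) {
  //       tty->print_cr("gen %d: %s", gen->level(), gen->name());
  //     }
  //   };
  //   PrintGenClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, true /* old_to_young */);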

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return the generation before "gen", or else NULL.
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    if (l == 0) return NULL;
    else return _gens[l-1];
  }

  // Return the generation after "gen", or else NULL.
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    if (l == _n_gens) return NULL;
    else return _gens[l];
  }

  Generation* get_gen(int i) const {
    if (i >= 0 && i < _n_gens)
      return _gens[i];
    else
      return NULL;
  }

  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  void set_par_threads(int t);

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generation at
  // "level".  (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself.  (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)  If "collecting_perm_gen" is false, then
  // roots that may only contain references to permGen objects are not
  // scanned.  The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
  // "SO_SystemClasses" to all the "system" classes and loaders;
  // "SO_Symbols_and_Strings" applies the closure to all entries in
  // SymbolsTable and StringTable.
  void gen_process_strong_roots(int level,
                                bool younger_gens_as_roots,
                                // The remaining arguments are in an order
                                // consistent with SharedHeap::process_strong_roots:
                                bool activate_scope,
                                bool collecting_perm_gen,
                                SharedHeap::ScanningOption so,
                                OopsInGenClosure* not_older_gens,
                                bool do_code_roots,
                                OopsInGenClosure* older_gens);

  // Apply "root_closure" to all the weak roots of the system.  These
  // include JNI weak roots, the code cache, system dictionary, symbol
  // table, string table, and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure,
                              CodeBlobClosure* code_roots,
                              OopClosure* non_root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level" (including the permanent generation.)  The "cur" closure is
  // applied to references in the generation at "level", and the "older"
  // closure to older (and permanent) generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(int level,                          \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
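
  // For illustration (an assumption about the closure list): each type
  // passed by ALL_SINCE_SAVE_MARKS_CLOSURES expands the macro above into
  // one overload, e.g. for a hypothetical closure type "SomeOopClosure":
  //
  //   void oop_since_save_marks_iterate(int level,
  //                                     SomeOopClosure* cur,
  //                                     SomeOopClosure* older);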

  // Returns "true" iff no allocations have occurred in any generation at
  // "level" or above (including the permanent generation) since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(int level);

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_will_fail() {
    return _incremental_collection_will_fail;
  }
  void set_incremental_collection_will_fail() {
    _incremental_collection_will_fail = true;
  }
  void clear_incremental_collection_will_fail() {
    _incremental_collection_will_fail = false;
  }

  bool last_incremental_collection_failed() const {
    return _last_incremental_collection_failed;
  }
  void set_last_incremental_collection_failed() {
    _last_incremental_collection_failed = true;
  }
  void clear_last_incremental_collection_failed() {
    _last_incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in each generation starting at
  // gen; return the new location of obj if successful.  Otherwise, return NULL.
  oop handle_failed_promotion(Generation* gen,
                              oop obj,
                              size_t obj_size);
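
  // A hedged sketch of the fallback order documented above; this is not
  // the implementation.  Generation::promote() and the final copying step
  // are assumed for illustration:
  //
  //   // 1) try promotion into each higher non-perm generation in turn
  //   for (Generation* g = next_gen(gen); g != NULL; g = next_gen(g)) {
  //     oop res = g->promote(obj, obj_size);
  //     if (res != NULL) return res;
  //   }
  //   // 2) try expand-and-allocate, then copy obj, in each generation
  //   //    starting at "gen"; otherwise give up
  //   return NULL;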

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep.  As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the first max_level+1 generations.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked().  Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, int max_level);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

public:
  virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
};