Tue, 13 Apr 2010 13:52:10 -0700
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
Summary: Ensure a full GC that clears SoftReferences is performed before throwing an out-of-memory error
Reviewed-by: ysr, jcoomes
/*
 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection. It is represented with a sequence of Generation's.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };

  friend class VM_PopulateDumpSharedSpace;

protected:
  // Fields:
  static GenCollectedHeap* _gch;

private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // If a generation would bail out of an incremental collection,
  // it sets this flag. If the flag is set, satisfy_failed_allocation
  // will attempt allocating in all generations before doing a full GC.
  bool _incremental_collection_will_fail;
  bool _last_incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) strong roots processing.
  SubTasksDone* _gen_process_strong_tasks;

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  // GC is not allowed during the dump of the shared classes. Keep track
  // of this in order to provide a reasonable error message when terminating.
  bool _preloading_shared_classes;

protected:
  // Directs each generation up to and including "collectedGen" to recompute
  // its desired size.
  void compute_new_generation_sizes(int collectedGen);

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for two callbacks below.
  // Considers collection of the first max_level+1 generations.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     int max_level);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
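
  // Illustrative sketch (not part of this change): the VM operation named
  // above is expected to drive this callback roughly as follows, assuming
  // fields "_size", "_tlab" and "_res" captured when the operation was
  // queued:
  //
  //   void VM_GenCollectForAllocation::doit() {
  //     GenCollectedHeap* gch = GenCollectedHeap::heap();
  //     _res = gch->satisfy_failed_allocation(_size, _tlab);
  //   }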

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the first max_level+1 generations.
  void do_full_collection(bool clear_all_soft_refs, int max_level);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();
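
  // Illustrative sketch (hypothetical caller, using only the declarations
  // above): a collection request can fold this predicate into the
  // soft-reference decision, so that a GC forced by a cause such as a
  // last-ditch collection clears all SoftReferences before an
  // out-of-memory error is thrown:
  //
  //   bool do_clear_all_soft_refs = clear_all_soft_refs ||
  //                                 must_clear_all_soft_refs();
  //   do_collection(true /* full */, do_clear_all_soft_refs,
  //                 size, is_tlab, max_level);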

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(int level) const;

  // Returns JNI_OK on success
  virtual jint initialize();
  char* allocate(size_t alignment, PermanentGenerationSpec* perm_gen_spec,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Performs the operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for generations level and lower,
  // and, if perm is true, for perm gen.
  void save_used_regions(int level, bool perm);

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool is_large_noref,
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection activity. In a generational
  // collector, for example, this is probably the largest allocation that
  // could be supported in the youngest generation. It is "unsafe" because
  // no locks are taken; the result should be treated as an approximation,
  // not a guarantee.
  size_t unsafe_max_alloc();
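
  // Illustrative use (hypothetical caller): because no locks are taken,
  // the result may only be used as a hint, never as a guarantee:
  //
  //   if (word_size > gch->unsafe_max_alloc()) {
  //     // allocation is likely to trigger a collection; take the slow path
  //   }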

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of the first max_level+1 generations.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, int max_level);

  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product jvm's, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns "TRUE" iff "p" points into the youngest generation.
  bool is_in_youngest(void* p);

  // Iteration functions.
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  void object_iterate_since_last_GC(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
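
  // Illustrative sketch (hypothetical helper): the block protocol above
  // supports a linear walk over a closed range of the heap:
  //
  //   void walk_blocks(GenCollectedHeap* gch, HeapWord* bottom,
  //                    HeapWord* end, ObjectClosure* cl) {
  //     for (HeapWord* p = bottom; p < end; p += gch->block_size(p)) {
  //       if (gch->block_is_obj(p)) {
  //         cl->do_object(oop(p));
  //       }
  //     }
  //   }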

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // We wanted to assert that:-
    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
    //        "Check can_elide_initializing_store_barrier() for this collector");
    // but unfortunately the flag UseSerialGC need not necessarily always
    // be set when DefNew+Tenured are being used.
    return is_in_youngest((void*)new_obj);
  }

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // CMS needs to see all, even intra-generational, ref updates.
    return !UseConcMarkSweepGC;
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion say.) Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
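
  // Illustrative sketch: a requestor might consume the returned list, which
  // is sorted by decreasing size (assuming ScratchBlock carries "num_words"
  // and "next" fields, as elsewhere in this code base):
  //
  //   size_t words_needed = ...;
  //   for (ScratchBlock* sb = gather_scratch(gen, max_alloc_words);
  //        sb != NULL && words_needed > 0; sb = sb->next) {
  //     words_needed -= MIN2(words_needed, sb->num_words);
  //   }
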
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  size_t large_typearray_limit();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the longest time a collector ran
  // in any generation.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all constituent generations
  // to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
    perm_gen()->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  // "level" is the level of the latest collection.
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
    perm_gen()->update_gc_stats(current_level, full);
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool allow_dirty, bool silent, bool /* option */);

  // Override.
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;
  void print_perm_heap_change(size_t perm_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
  public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap (not including
  // the permanent generation). "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
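
  // Illustrative sketch: a caller defines a GenClosure subclass and
  // iterates, e.g. a hypothetical closure that prints every generation:
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //   public:
  //     virtual void do_generation(Generation* gen) { gen->print(); }
  //   };
  //   PrintGenClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, true /* old_to_young */);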

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return the generation before "gen", or else NULL.
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    if (l == 0) return NULL;
    else return _gens[l-1];
  }

  // Return the generation after "gen", or else NULL.
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    if (l == _n_gens) return NULL;
    else return _gens[l];
  }

  Generation* get_gen(int i) const {
    if (i >= 0 && i < _n_gens)
      return _gens[i];
    else
      return NULL;
  }

  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  void set_par_threads(int t);

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generation at
  // "level". (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.) If "collecting_perm_gen" is false, then
  // roots that may only contain references to permGen objects are not
  // scanned. The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
  // "SO_SystemClasses" to all the "system" classes and loaders;
427 // "SO_Symbols_and_Strings" applies the closure to all entries in
428 // SymbolsTable and StringTable.
  void gen_process_strong_roots(int level,
                                bool younger_gens_as_roots,
                                // The remaining arguments are in an order
                                // consistent with SharedHeap::process_strong_roots:
                                bool activate_scope,
                                bool collecting_perm_gen,
                                SharedHeap::ScanningOption so,
                                OopsInGenClosure* not_older_gens,
                                bool do_code_roots,
                                OopsInGenClosure* older_gens);
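
  // Illustrative call (argument values hypothetical): root scanning for a
  // collection of the youngest generation might look like:
  //
  //   gch->gen_process_strong_roots(0 /* level */,
  //                                 true /* younger_gens_as_roots */,
  //                                 true /* activate_scope */,
  //                                 false /* collecting_perm_gen */,
  //                                 SharedHeap::SO_AllClasses,
  //                                 &young_gen_closure,
  //                                 true /* do_code_roots */,
  //                                 &older_gens_closure);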
440 // Apply "blk" to all the weak roots of the system. These include
441 // JNI weak roots, the code cache, system dictionary, symbol table,
442 // string table, and referents of reachable weak refs.
443 void gen_process_weak_roots(OopClosure* root_closure,
444 CodeBlobClosure* code_roots,
445 OopClosure* non_root_closure);
447 // Set the saved marks of generations, if that makes sense.
448 // In particular, if any generation might iterate over the oops
449 // in other generations, it should call this method.
450 void save_marks();
452 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
453 // allocated since the last call to save_marks in generations at or above
454 // "level" (including the permanent generation.) The "cur" closure is
455 // applied to references in the generation at "level", and the "older"
456 // closure to older (and permanent) generations.
457 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
458 void oop_since_save_marks_iterate(int level, \
459 OopClosureType* cur, \
460 OopClosureType* older);
462 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
464 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
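
  // For illustration, a single application of the macro above, say
  // GCH_SINCE_SAVE_MARKS_ITERATE_DECL(ScanClosure, _nv) (the actual
  // (closure type, suffix) pairs are supplied by
  // ALL_SINCE_SAVE_MARKS_CLOSURES), expands to one overloaded declaration:
  //
  //   void oop_since_save_marks_iterate(int level,
  //                                     ScanClosure* cur,
  //                                     ScanClosure* older);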

  // Returns "true" iff no allocations have occurred in any generation at
  // "level" or above (including the permanent generation) since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(int level);

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_will_fail() {
    return _incremental_collection_will_fail;
  }
  void set_incremental_collection_will_fail() {
    _incremental_collection_will_fail = true;
  }
  void clear_incremental_collection_will_fail() {
    _incremental_collection_will_fail = false;
  }

  bool last_incremental_collection_failed() const {
    return _last_incremental_collection_failed;
  }
  void set_last_incremental_collection_failed() {
    _last_incremental_collection_failed = true;
  }
  void clear_last_incremental_collection_failed() {
    _last_incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher non-perm
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in each generation starting at
  // gen; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* gen,
                              oop obj,
                              size_t obj_size);
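
  // Illustrative sketch (names hypothetical): a generation's
  // promotion-failure path would typically fall back to this helper:
  //
  //   oop new_loc = gch->handle_failed_promotion(this, old_obj, obj_size);
  //   if (new_loc == NULL) {
  //     // promotion failed everywhere; caller must deal with the failure
  //   }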

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the first max_level+1 generations.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, int max_level);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

public:
  virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
};