Mon, 22 Aug 2011 12:30:06 -0700
6810861: G1: support -XX:+{PrintClassHistogram,HeapDump}{Before,After}FullGC
Summary: Call {pre,post}_full_gc_dump() before and after a STW full gc of G1CollectedHeap. Also adjusted the prefix messages, including the addition of missing whitespace.
Reviewed-by: brutisso, tonyp

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "services/heapDumper.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.

CollectedHeap::CollectedHeap() : _n_par_threads(0) {
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  // Convert the maximum array length (in ints) to words: dividing by
  // elements_per_word, not multiplying, yields the size in heap words.
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
}
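
// A rough sanity check on _filler_array_max_size (an illustrative sketch,
// not authoritative): on a 64-bit VM, HeapWordSize == 8 and sizeof(jint) == 4,
// so elements_per_word == 2; with max_len near 2^31 ints, the payload is
// about 2^30 words, i.e. roughly 8 GB -- which is why fill_with_objects()
// below only needs its multi-object loop under _LP64.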

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
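
// A hedged sketch of the caller's protocol (the fast path lives in
// CollectedHeap::allocate_from_tlab() in collectedHeap.inline.hpp; treat
// this as illustrative, not authoritative):
//
//   HeapWord* obj = thread->tlab().allocate(size);   // inline fast path
//   if (obj == NULL) {
//     // may retire and refill the TLAB, or fall back to the shared heap
//     obj = allocate_from_tlab_slow(thread, size);
//   }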

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress, an SATB (pre-write-) barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs an RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
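
// An illustrative timeline for the deferred case above (a sketch assembled
// from the comments in this file, not a definitive trace):
//
//   t:   new_store_pre_barrier(thread, obj)  // obj in old gen, mark deferred
//        thread->deferred_card_mark() == mr  // mr covers exactly obj
//   t+1: either this thread's next slow-path allocation calls
//        flush_deferred_store_barrier(thread), or ensure_parsability()
//        flushes it for all threads at the next safepoint/GC prologue;
//        in both cases bs->write_region(mr) finally dirties the cards.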

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}
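
// A worked example of the loop above (numbers are illustrative only):
// suppose min == 2 and max == 100 (in words). For words == 101, taking a
// full max-size array would strand a remainder of 1 < min, so cur becomes
// max - min == 98, leaving 3 words -- still >= min and therefore fillable.
// For words == 250, the loop takes 100 + 100, and the final 50 words go to
// fill_with_object_impl().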

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
    // The deferred store barriers must all have been flushed to the
    // card-table (or other remembered set structure) before GC starts
    // processing the card-table (or other remembered set).
    if (deferred) flush_deferred_store_barrier(thread);
#else
    assert(!deferred, "Should be false");
    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}
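
// A hedged note on usage (inferred from the comments above, not from a
// spec): collectors invoke this from their GC prologue, at a safepoint, so
// that every thread's TLAB is retired or padded with filler objects and any
// deferred card marks are flushed -- roughly:
//
//   // inside a collector's GC prologue, at a safepoint
//   heap->ensure_parsability(true /* retire_tlabs */);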

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection, and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
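
// Per the change summary at the top of this patch, G1CollectedHeap's
// stop-the-world full collection is expected to bracket its work with these
// hooks, roughly as follows (a sketch; the actual call sites are in
// g1CollectedHeap.cpp):
//
//   pre_full_gc_dump();   // -XX:+HeapDumpBeforeFullGC / +PrintClassHistogramBeforeFullGC
//   ... perform the STW full collection ...
//   post_full_gc_dump();  // -XX:+HeapDumpAfterFullGC / +PrintClassHistogramAfterFullGC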