Thu, 12 May 2011 10:30:11 -0700
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
Summary: Add a notification to the GarbageCollectorMXBeans
Reviewed-by: acorn, mchung
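The HotSpot side of this change threads the GC cause into the per-collection memory-manager statistics: in do_collection() below, the TraceMemoryManagerStats RAII object is now constructed with gc_cause(), so the GarbageCollectorMXBean notification machinery added elsewhere in this changeset can report why a collection ran. A rough standalone sketch of that RAII shape follows; the type, fields, and behavior here are invented stand-ins for illustration, not the actual services/ implementation:

    #include <cstdio>

    // Invented stand-ins: the real types live in HotSpot's services/ code.
    enum GCCauseSketch { _java_lang_system_gc, _allocation_failure };

    struct TraceMemoryManagerStatsSketch {
      GCCauseSketch _cause;
      TraceMemoryManagerStatsSketch(GCCauseSketch cause) : _cause(cause) {
        // record "GC begin" state here
      }
      ~TraceMemoryManagerStatsSketch() {
        // record "GC end" state and push a notification carrying the cause
        std::printf("GC notification, cause=%d\n", (int) _cause);
      }
    };

    int main() {
      {
        TraceMemoryManagerStatsSketch tmms(_java_lang_system_gc);
        // ... the collection work happens inside this scope ...
      }                          // destructor fires the notification
      return 0;
    }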
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/compactPermGen.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/permGen.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif
GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address.  This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (We have seen this happen
  // in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}
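The loop above carves one reserved range into per-generation pieces: each generation takes first_part() of the remaining space, and the remainder is re-narrowed with last_part() until only the perm gen's share is left. A minimal standalone model of that carving logic, assuming nothing from HotSpot (the Range type and sizes below are invented for illustration):

    #include <cstddef>
    #include <cstdio>

    struct Range {                   // stand-in for ReservedSpace
      char*  base;
      size_t size;
      Range first_part(size_t n) const { return Range{base, n}; }
      Range last_part(size_t n)  const { return Range{base + n, size - n}; }
    };

    int main() {
      static char backing[96];
      Range heap{backing, sizeof(backing)};
      const size_t gen_sizes[] = {16, 32};   // young, old (toy numbers)
      for (size_t g = 0; g < 2; g++) {
        Range this_rs = heap.first_part(gen_sizes[g]);
        std::printf("gen %zu at %p, %zu bytes\n",
                    g, (void*) this_rs.base, this_rs.size);
        heap = heap.last_part(gen_sizes[g]); // remainder for older gens / perm
      }
      std::printf("perm gets %zu bytes\n", heap.size);
      return 0;
    }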
char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
    "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT, total_reserved, pageSize));
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0,
         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
                 pageSize, perm_gen_spec->max_size()));

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at specified address - the requested memory
        // region is taken already, for example, by 'java' launcher.
        // Try again to reserve the heap at a higher address.
        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}
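allocate() guards each size accumulation with the "sum < addend" idiom: when unsigned arithmetic wraps around, the result necessarily lands below the operand that was just added, so the comparison catches every overflow with no extra state. A self-contained sketch of the same check (the helper name is invented):

    #include <cstddef>
    #include <cstdio>

    // Returns false if adding 'part' to '*total' wrapped around.
    static bool add_checked(size_t* total, size_t part) {
      *total += part;
      return *total >= part;   // a wrapped sum is smaller than the addend
    }

    int main() {
      size_t total = 0;
      if (!add_checked(&total, (size_t)-1) || !add_checked(&total, 2)) {
        std::printf("object heap + VM data exceeds the representable size\n");
      }
      return 0;
    }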
void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}
size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at level and lower,
// and, if perm is true, for perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
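Both overloads above follow the classic monitor protocol: mutate the completed-count under a lock, then notify_all() so threads blocked on FullGCCount_lock (for example, a requester waiting for a concurrent full cycle to finish) can re-check their condition. A self-contained model of the same protocol using standard C++ primitives rather than HotSpot's Monitor type; all names here are invented:

    #include <condition_variable>
    #include <mutex>

    class FullGCCounter {
      std::mutex              _lock;
      std::condition_variable _cv;
      unsigned int            _completed = 0;
     public:
      // Collector side: publish that collection 'count' has completed.
      unsigned int update_completed(unsigned int count) {
        std::lock_guard<std::mutex> guard(_lock);
        if (count > _completed) {   // conditional, so stale callers are no-ops
          _completed = count;
          _cv.notify_all();
        }
        return _completed;
      }
      // Requester side: block until at least 'count' collections completed.
      void wait_for(unsigned int count) {
        std::unique_lock<std::mutex> guard(_lock);
        _cv.wait(guard, [&] { return _completed >= count; });
      }
    };

    int main() {
      FullGCCounter c;
      c.update_completed(1);
      c.wait_for(1);   // returns immediately: collection 1 already completed
      return 0;
    }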
#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that that many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump();    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      post_full_gc_dump();   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}
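do_collection() first picks a starting level: the oldest generation whose full collection also collects everything younger than it. It then walks levels from there up to max_level, collecting each generation that says it should collect, and remembers the oldest level actually collected to decide whether the collection was "complete". A compact standalone model of just that control flow, with invented stand-ins for the generation predicates:

    #include <cstdio>

    struct Gen {
      const char* name;
      bool collects_younger;   // full_collects_younger_generations()
      bool wants_collection;   // should_collect(full, size, is_tlab)
    };

    int main() {
      Gen gens[] = { {"young", false, true}, {"old", true, true} };
      const int n_gens = 2, max_level = 1;

      int starting_level = 0;
      for (int i = max_level; i >= 0; i--) { // oldest self-sufficient gen wins
        if (gens[i].collects_younger) { starting_level = i; break; }
      }

      int max_level_collected = starting_level;
      for (int i = starting_level; i <= max_level; i++) {
        if (gens[i].wants_collection) {
          std::printf("collecting %s generation\n", gens[i].name);
          max_level_collected = i;
        }
      }
      std::printf("complete=%d\n", max_level_collected == n_gens - 1);
      return 0;
    }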
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_n_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, NULL, older_gens);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, &code_roots, older_gens);
  }

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}
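gen_process_strong_roots() uses SubTasksDone::is_task_claimed so that, when several GC worker threads run this code in parallel, exactly one of them performs the younger-gens-as-roots scan. The essence of such a claim is one atomic test-and-set per task; a minimal standalone sketch of the idea (this is not HotSpot's SubTasksDone implementation):

    #include <atomic>
    #include <cstdio>

    // One flag per parallel task; the first thread to flip it owns the task.
    static std::atomic<bool> claimed[1];   // here: just GCH_PS_younger_gens

    static bool try_claim(int task) {
      bool expected = false;
      return claimed[task].compare_exchange_strong(expected, true);
    }

    int main() {
      // Simulate two workers racing for task 0: only one claim succeeds.
      std::printf("worker A claimed: %d\n", try_claim(0));
      std::printf("worker B claimed: %d\n", try_claim(0));
      return 0;
    }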
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}
void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,        // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    report_out_of_shared_space(SharedPermGen);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}
#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}
// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            PrintAssembly    ||
            tty->count() != 0 ||  // already printing
            VerifyAfterGC    ||
            VMError::fatal_error_in_progress(), "too expensive");

  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}
// Returns "TRUE" iff "p" points into the allocated area of the
// youngest generation.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}
bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size   /* size */,
                                  false  /* is_large_noref */,
                                  true   /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}
// Requires "*prev_ptr" to be non-NULL.  Removes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted         = smallest;
  }
  list = sorted;
}
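sort_scratch_list() is a linked-list selection sort: repeatedly unlink the smallest remaining block and push it onto the front of the result, which leaves the list in decreasing size order. A standalone demonstration of the same algorithm, using a minimal stand-in for ScratchBlock with only the two fields the sort actually touches:

    #include <cstddef>
    #include <cstdio>

    struct ScratchBlock { ScratchBlock* next; size_t num_words; };

    int main() {
      // Build the list 10 -> 30 -> 20, then sort it as gather_scratch() would.
      ScratchBlock c{nullptr, 20}, b{&c, 30}, a{&b, 10};
      ScratchBlock* list = &a;

      ScratchBlock* sorted = nullptr;
      while (list != nullptr) {                  // selection sort, as above
        ScratchBlock** smallest_ptr = &list;
        for (ScratchBlock** p = &list; *p != nullptr; p = &(*p)->next) {
          if ((*p)->num_words < (*smallest_ptr)->num_words) smallest_ptr = p;
        }
        ScratchBlock* smallest = *smallest_ptr;
        *smallest_ptr = smallest->next;          // unlink
        smallest->next = sorted;                 // push front
        sorted = smallest;
      }
      for (ScratchBlock* s = sorted; s != nullptr; s = s->next) {
        std::printf("%zu ", s->num_words);       // prints: 30 20 10
      }
      std::printf("\n");
      return 0;
    }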
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify(allow_dirty);
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}
void GenCollectedHeap::print() const { print_on(tty); }
void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}
void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// Print perm gen info for a full GC with the PrintGCDetails flag.
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}
class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
  perm_gen()->gc_prologue(full);
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
  perm_gen()->gc_epilogue(full);

  always_do_update_barrier = UseConcMarkSweepGC;
}
#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
    perm_gen()->record_spaces_top();
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}
oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}
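handle_failed_promotion() tries two escalating strategies: first a plain allocation in each successively older generation, and only then expand-and-allocate starting from the failing generation itself. The same two-phase fallback, modeled standalone (the Gen struct and its flags are invented stand-ins):

    #include <cstdio>

    struct Gen {
      const char* name;
      bool has_room;     // allocate() would succeed
      bool can_expand;   // expand_and_allocate() would succeed
    };

    static const char* promote(Gen* gens, int n, int from) {
      // Phase 1: give each older generation a chance to allocate.
      for (int i = from + 1; i < n; i++) {
        if (gens[i].has_room) return gens[i].name;
      }
      // Phase 2: let 'from' and older generations expand to make room.
      for (int i = from; i < n; i++) {
        if (gens[i].can_expand) return gens[i].name;
      }
      return nullptr;    // promotion failed outright
    }

    int main() {
      Gen gens[] = { {"young", false, false}, {"old", false, true} };
      const char* where = promote(gens, 2, 0);
      std::printf("promoted into: %s\n", where ? where : "nowhere");
      return 0;
    }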
class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  jlong now = os::javaTimeMillis();
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());
  // XXX Since javaTimeMillis() does not guarantee monotonically
  // increasing return values (note, I didn't say "strictly monotonic"),
  // we need to guard against getting back a time later than now. This
  // should be fixed by basing on something like gethrtime(), which
  // guarantees monotonicity. Note that cond_wait() is susceptible to a
  // similar problem, because its interface is based on absolute time in
  // the form of the system time's notion of UTC. See also 4506635 for
  // yet another problem of similar nature. XXX
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}
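The comment above notes that wall-clock time (javaTimeMillis(), i.e. system time in terms of UTC) can move backwards, which is why the negative-result guard exists. A monotonic clock removes the problem entirely; a standalone sketch of the same computation on std::chrono::steady_clock, which C++ guarantees never goes backwards:

    #include <chrono>
    #include <cstdio>

    int main() {
      using clock = std::chrono::steady_clock;   // monotonic by definition
      clock::time_point time_of_last_gc = clock::now();  // pretend a GC just ran

      // ... later, with no risk of a "time warp":
      auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(
                        clock::now() - time_of_last_gc).count();
      std::printf("millis since last gc: %lld\n", (long long) millis);
      return 0;
    }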