Fri, 05 Apr 2013 10:38:08 -0700
Merge
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;
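
  // Carve the single reserved space into one contiguous piece per
  // generation: each generation takes its maximum size from the front of
  // the remaining reservation, youngest first.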
  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}
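

// Compute the total reservation size and the number of card-table covered
// regions for all generations, then reserve the whole heap in one block via
// Universe::reserve_heap(), guarding against size_t overflow along the way.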
char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT, total_reserved, pageSize));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" of each generation at "level" and below.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that the same number of words is
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
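
// First-fit allocation: offer the request to each generation in turn,
// youngest first, and return the first successful allocation (or NULL).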
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}
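
// A concurrent (CMS) full collection is chosen only when CMS is in use and
// the cause is a GC-locker or System.gc() request with the corresponding
// -XX:+GCLockerInvokesConcurrent / -XX:+ExplicitGCInvokesConcurrent flag set.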
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
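
// The main stop-the-world collection driver: pick a starting level, then let
// each generation from that level up to max_level collect itself, handling
// verification, reference processing, and size recomputation along the way.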
void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump();    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify();
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify();
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      post_full_gc_dump();   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_n_threads(t);
}
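
// Process strong roots for a collection at "level": scan the shared strong
// roots, optionally treat all younger generations as roots, and have the
// remembered set scan older-to-younger references in the generations above
// "level". Root tasks are claimed via _gen_process_strong_tasks so that
// parallel GC threads divide the work.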
void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool is_scavenging,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens,
                         KlassClosure* klass_closure) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
                                     not_older_gens, NULL, klass_closure);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
                                     not_older_gens, &code_roots, klass_closure);
  }

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}
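
// The generations are laid out contiguously, youngest first, so an address
// is in the young generation exactly when it lies below the start of the
// old (oldest) generation's reserved region.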
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC    ||
            VerifyDuringGC    ||
            VerifyBeforeExit  ||
            PrintAssembly     ||
            tty->count() != 0 ||   // already printing
            VerifyAfterGC     ||
            VMError::fatal_error_in_progress(), "too expensive");

  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}
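

// The block_start/block_size/block_is_obj queries are delegated to the
// generation whose reserved region contains the given address.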
HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size  /* size */,
                                               true  /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
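// This is a simple selection sort: repeatedly remove the smallest remaining
// block from the unsorted list and push it onto the front of the sorted
// list, so the largest block ends up at the head.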
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}
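
// Gather scratch space on behalf of "requestor": let every generation
// contribute what it can, then hand back the contributions sorted
// largest-first.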
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}
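

// Apply "cl" to every generation, either oldest-to-youngest
// (old_to_young == true) or youngest-to-oldest.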
void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
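

// Set up compaction points for mark-compact: walk the generations from
// oldest to youngest, letting each plan where its live objects will slide,
// starting by compacting the oldest generation into itself.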
void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
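  // Sanity check on the compiled inline-allocation fast path: the gap
  // between the current end of eden and the top of the address space must
  // exceed FastAllocateSizeLimit, otherwise an inline "top + size"
  // computation could wrap around (hence the guarantee message below).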
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}
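
// Promotion failed in "gen": try to place the object in a higher (older)
// generation instead, first by plain allocation and then by expanding the
// heap; on success, copy the object's words to the new location.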
oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}