Tue, 15 Mar 2016 10:11:02 +0100
8151539: Remove duplicate AlwaysTrueClosures
Reviewed-by: tschatzl, mgerdin, kbarrett, drwhite
/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
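
// Illustrative note: during a parallel collection each GC worker races to
// claim these subtasks from the shared SubTasksDone instance, so every
// subtask runs exactly once no matter how many workers participate, e.g.:
//
//   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
//     Universe::oops_do(strong_roots);  // only the claiming worker runs this
//   }
//
// The actual claiming sites are in process_roots() below.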

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t gen_alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs) {
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  assert(alignment % pageSize == 0, "Must be");

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}
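
// Note on the overflow check in the loop above: total_reserved is unsigned,
// so a sum that wraps around the address space yields a result smaller than
// the operand just added. For example, with 32-bit size_t,
// 0xF0000000 + 0x20000000 truncates to 0x10000000, which is less than
// 0x20000000, and the overflow_msg path is taken.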

void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at and below the given level.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
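
// Illustrative sketch (not code from this file): a concurrent collector
// thread that must wait for an in-progress full collection to finish can
// block on FullGCCount_lock until the completed count catches up with a
// previously sampled total; the notify_all() calls above wake such waiters.
// Assuming an accessor for _full_collections_completed, the wait loop would
// look roughly like:
//
//   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//   while (gch->full_collections_completed() < target) {
//     ml.wait(Mutex::_no_safepoint_check_flag);
//   }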

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWord's. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
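
// Worked example: CMS skips two header words (see create_cms_collector()
// below), so with 64-bit HeapWords the first four juint slots of each
// checked block are exempt from the badHeapWordVal assertion; CMS may have
// already written synchronization state there.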

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}
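
// Allocation probes the generations youngest to oldest: every generation
// whose should_allocate() accepts the request gets a chance, and with
// first_only set the search stops after the first willing generation
// (the fast path for ordinary young-gen allocation).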

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
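
// A "mostly concurrent" full collection is only meaningful with CMS:
// -XX:+ExplicitGCInvokesConcurrent routes System.gc() to a concurrent CMS
// cycle, and -XX:+GCLockerInvokesConcurrent does the same for collections
// requested as the last JNI critical section exits.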

void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
        // so we can assume here that the next GC id is what we want.
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  set_n_termination(t);
}

void GenCollectedHeap::set_n_termination(uint t) {
  _process_strong_tasks->set_n_threads(t);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(bool activate_scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker. Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

}

void GenCollectedHeap::gen_process_roots(int level,
                                         bool younger_gens_as_roots,
                                         bool activate_scope,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* not_older_gens,
                                         OopsInGenClosure* older_gens,
                                         CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
  if (level == 0 || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(activate_scope, so,
                not_older_gens, weak_roots,
                cld_closure, weak_cld_closure,
                &mark_code_closure);

  if (younger_gens_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }
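
  // Each worker calls in here when it has finished its share of the root
  // tasks; once all participating workers have checked in, the SubTasksDone
  // instance resets itself so it can be reused by the next collection.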
  _process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(root_closure);
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
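
// ALL_SINCE_SAVE_MARKS_CLOSURES expands the definition above once per
// (closure type, suffix) pair, so one specialized overload of
// oop_since_save_marks_iterate is generated for each closure type; the
// ##nv_suffix token-paste selects the matching non-virtual iterator on
// each generation.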

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
  return result;
}
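
// The cheap address comparison above relies on the heap layout established
// in initialize(): the young generation is carved from the bottom of the
// single reserved range, so anything below the old generation's reserved
// start must be young. The assert cross-checks it against the precise test.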

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||  // already printing
            VerifyAfterGC       ||
            VMError::fatal_error_in_progress(), "too expensive");

#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur = cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}
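
// This is a straightforward selection sort: each pass unlinks the smallest
// remaining block and pushes it onto the front of the sorted list, so the
// list comes out in decreasing size order. Quadratic in the number of
// blocks, which is presumably acceptable because scratch lists stay short.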

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);   // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}

void GenCollectedHeap::prepare_for_compaction() {
  guarantee(_n_gens == 2, "Wrong number of generations");
  Generation* old_gen = _gens[1];
  // Start by compacting into same gen.
  CompactPoint cp(old_gen);
  old_gen->prepare_for_compaction(&cp);
  Generation* young_gen = _gens[0];
  young_gen->prepare_for_compaction(&cp);
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
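  // The compiled fast path bumps the allocation top without an overflow
  // check, so verify that the gap between the current allocation end and
  // the top of the address range exceeds the largest inline-allocatable
  // object; otherwise an inline allocation could wrap the address space.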
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, (int64_t) retVal);)
    return 0;
  }
  return retVal;
}