Mon, 12 Aug 2019 18:30:40 +0300
8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
/*
 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
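// (As the macro name suggests, this mutes gcc's -Wformat warnings for the
// rest of the file; some logging calls below pass values whose types do not
// match their format specifiers exactly.)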
HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;
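
// Keeps discovered referents alive during reference processing: any referent
// still in the scavengeable part of the young gen is copied to to-space (or
// promoted), just like an object reached from a strong root.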
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};
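
// Drains the promotion manager's work stacks until the transitive closure
// over the newly kept-alive objects is complete.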
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};
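
// Used when cleaning up after a promotion failure: iterated over the young
// gen, it resets the header of every forwarded object back to the default
// mark. Non-trivial marks are restored later from the preserved-mark stacks.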
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};
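
// Wraps a reference-processing ProcessTask as a GCTask so it can run on a GC
// worker thread; each proxy handles one worker id's share of the work.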
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}
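
// Same idea for the enqueue phase: adapts an EnqueueTask to the GCTask
// interface so discovered references can be enqueued in parallel.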
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
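
// One proxy task is enqueued per active worker. If the task marks oops alive
// and more than one worker is active, StealTask instances are added as well,
// so workers that finish early can steal work from the promotion managers'
// depth-first queues until the terminator agrees all queues are empty.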
void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();
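
    // (Background: COMPILER2_PRESENT(x) expands to x only in builds that
    // include the C2 compiler. The DerivedPointerTable records pointers in
    // compiled frames that were derived from object addresses so they can be
    // recomputed after objects move; it is cleared here and updated again
    // once the copying work below has finished.)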
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
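
      // Work stealing: once a worker runs out of root-scanning tasks, a
      // StealTask lets it take oops from other workers' promotion queues;
      // the terminator detects when every queue is empty and all workers
      // have finished.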
      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer, _gc_tracer.gc_id());
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            // Use SIZE_FORMAT here: capacity_in_bytes() returns size_t, which
            // %d would truncate on LP64 platforms.
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding on a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout; make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      // SIZE_FORMAT matches the size_t returned by size(); plain %d would be
      // wrong on LP64 platforms.
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks",
                             _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();
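  // Illustrative example (numbers are not from the source): with a padded
  // average of 8 MB promoted per scavenge, 5 MB used in the young gen, and
  // 6 MB free in the old gen, promotion_estimate = MIN2(8 MB, 5 MB) = 5 MB,
  // which is below 6 MB, so the scavenge is attempted.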

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL);                      // header provides liveness info

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}