Mon, 16 Jan 2012 22:10:05 +0100
6976060: G1: humongous object allocations should initiate marking cycles when necessary
Reviewed-by: tonyp, johnc
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jan 16 22:10:05 2012 +0100
@@ -1045,17 +1045,24 @@
       // regions, we'll first try to do the allocation without doing a
       // collection hoping that there's enough space in the heap.
       result = humongous_obj_allocate(word_size);
-      if (result != NULL) {
-        return result;
+
+      if (result == NULL) {
+        if (GC_locker::is_active_and_needs_gc()) {
+          should_try_gc = false;
+        } else {
+          // Read the GC count while still holding the Heap_lock.
+          gc_count_before = SharedHeap::heap()->total_collections();
+          should_try_gc = true;
+        }
       }
-
-      if (GC_locker::is_active_and_needs_gc()) {
-        should_try_gc = false;
-      } else {
-        // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
-        should_try_gc = true;
+    }
+
+    if (result != NULL) {
+      if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
+        // We need to release the Heap_lock before we try to call collect
+        collect(GCCause::_g1_humongous_allocation);
       }
+      return result;
     }
 
     if (should_try_gc) {
@@ -1111,7 +1118,11 @@
     return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                            false /* bot_updates */);
   } else {
-    return humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size);
+    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+      g1_policy()->set_initiate_conc_mark_if_possible();
+    }
+    return result;
   }
 
   ShouldNotReachHere();
@@ -2295,7 +2306,8 @@
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   return
     ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+      cause == GCCause::_g1_humongous_allocation);
 }
 
 #ifndef PRODUCT
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jan 16 22:10:05 2012 +0100
@@ -355,6 +355,7 @@
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "full collections" (i.e., Full GCs or
@@ -1172,6 +1173,10 @@
     _old_set.remove(hr);
   }
 
+  size_t non_young_capacity_bytes() {
+    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jan 16 22:10:05 2012 +0100
@@ -213,8 +213,6 @@
   _survivor_bytes_before_gc(0),
   _capacity_before_gc(0),
 
-  _prev_collection_pause_used_at_end_bytes(0),
-
   _eden_cset_region_length(0),
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
@@ -1140,6 +1138,45 @@
   return ret;
 }
 
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source) {
+  if (_g1->mark_in_progress()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+
+  if (cur_used_bytes > marking_initiating_used_threshold) {
+    if (gcs_are_young()) {
+      ergo_verbose4(ErgoConcCycles,
+                    "request concurrent cycle initiation",
+                    ergo_format_reason("occupancy higher than threshold")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+      return true;
+    } else {
+      ergo_verbose4(ErgoConcCycles,
+                    "do not request concurrent cycle initiation",
+                    ergo_format_reason("still doing mixed collections")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+    }
+  }
+
+  return false;
+}
+
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
@@ -1166,44 +1203,16 @@
 #endif // PRODUCT
 
   last_pause_included_initial_mark = during_initial_mark_pause();
-  if (last_pause_included_initial_mark)
+  if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
-
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-  if (!_g1->mark_in_progress() && !_last_young_gc) {
-    assert(!last_pause_included_initial_mark, "invariant");
-    if (cur_used_bytes > marking_initiating_used_threshold) {
-      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
-        assert(!during_initial_mark_pause(), "we should not see this here");
-
-        ergo_verbose3(ErgoConcCycles,
-                      "request concurrent cycle initiation",
-                      ergo_format_reason("occupancy higher than threshold")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte_perc("threshold"),
-                      cur_used_bytes,
-                      marking_initiating_used_threshold,
-                      (double) InitiatingHeapOccupancyPercent);
-
-        // Note: this might have already been set, if during the last
-        // pause we decided to start a cycle but at the beginning of
-        // this pause we decided to postpone it. That's OK.
-        set_initiate_conc_mark_if_possible();
-      } else {
-        ergo_verbose2(ErgoConcCycles,
-                      "do not request concurrent cycle initiation",
-                      ergo_format_reason("occupancy lower than previous occupancy")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte("previous occupancy"),
-                      cur_used_bytes,
-                      _prev_collection_pause_used_at_end_bytes);
-      }
-    }
   }
 
-  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
+  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    set_initiate_conc_mark_if_possible();
+  }
 
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jan 16 22:10:05 2012 +0100
@@ -177,7 +177,6 @@
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
   size_t _cur_collection_pause_used_regions_at_start;
-  size_t _prev_collection_pause_used_at_end_bytes;
   double _cur_collection_par_time_ms;
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
@@ -800,6 +799,8 @@
 
   GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
 
+  bool need_to_start_conc_mark(const char* source);
+
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Jan 16 22:10:05 2012 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,9 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
-         "only a GC locker or a System.gc() induced GC should start a cycle");
+    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+     _gc_cause == GCCause::_g1_humongous_allocation),
+         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
--- a/src/share/vm/gc_interface/gcCause.cpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_interface/gcCause.cpp	Mon Jan 16 22:10:05 2012 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,9 @@
     case _g1_inc_collection_pause:
       return "G1 Evacuation Pause";
 
+    case _g1_humongous_allocation:
+      return "G1 Humongous Allocation";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/src/share/vm/gc_interface/gcCause.hpp	Mon Jan 16 11:21:21 2012 +0100
+++ b/src/share/vm/gc_interface/gcCause.hpp	Mon Jan 16 22:10:05 2012 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,7 @@
     _adaptive_size_policy,
 
     _g1_inc_collection_pause,
+    _g1_humongous_allocation,
 
     _last_ditch_collection,
     _last_gc_cause