/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS

#ifndef USDT2
HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
HS_DTRACE_PROBE_DECL(hotspot, gc__end);
#endif /* !USDT2 */

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so it's only in one file. We can't add new probes
// to the other file because the dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
#ifndef USDT2
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
  HOTSPOT_GC_BEGIN(
                   full);
#endif /* USDT2 */
}

void VM_GC_Operation::notify_gc_end() {
#ifndef USDT2
  HS_DTRACE_PROBE(hotspot, gc__end);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
  HOTSPOT_GC_END(
);
#endif /* USDT2 */
}
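
// The pending list lock guards java.lang.ref.Reference's pending list, to
// which the GC appends discovered references. GC operations take it before
// the safepoint so the reference handler thread cannot observe the list
// while the VM thread is updating it.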
void VM_GC_Operation::acquire_pending_list_lock() {
  // We may enter this with a pending exception set.
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signalled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}
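
// The prologue runs in the requesting Java thread before the operation is
// handed to the VM thread. It establishes the lock order shared by all GC
// operations: the pending list lock first, then the Heap_lock.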
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " UINTX_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}
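
// The epilogue also runs in the requesting Java thread, after the VM
// operation has completed, and releases the locks taken in the prologue
// in reverse order.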
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}
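
// Attempt a full collection before the inspection; returns false, without
// collecting, when the GC locker is held by a thread in a JNI critical
// section.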
bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}
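
// Run the inspection at a safepoint. When _full_gc is set we first try to
// collect so that the dump mostly contains live objects; if the GC locker
// is held, the collection is skipped and a warning is issued instead.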
void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if a collection
                                               // does not happen (e.g. due to
                                               // GC_locker or _full_gc being false)
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}
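
// Try to satisfy a failed heap allocation by collecting. If the allocation
// still fails and the GC locker is active and needs a GC, mark the operation
// gc-locked so the requesting thread can stall and retry.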
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

static bool is_full_gc(int max_level) {
  // Return true if max_level covers all generations.
  return (max_level == (GenCollectedHeap::heap()->n_gens() - 1));
}

VM_GenCollectFull::VM_GenCollectFull(uint gc_count_before,
                                     uint full_gc_count_before,
                                     GCCause::Cause gc_cause,
                                     int max_level) :
  VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
                  is_full_gc(max_level) /* full */),
  _max_level(max_level) { }
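
// Collect all generations up to and including _max_level, clearing soft
// references if the heap's collector policy requires it.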
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->set_initiate_conc_mark_if_possible();

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

static void log_metaspace_alloc_failure_for_concurrent_GC() {
  if (Verbose && PrintGCDetails) {
    if (UseConcMarkSweepGC) {
      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
    } else if (UseG1GC) {
      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
    }
  }
}
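
// Retry ladder for a failed metadata allocation:
//   1. retry the allocation (another thread's GC may have freed space),
//   2. for a concurrent collector, expand the metaspace and allocate while
//      the concurrent cycle does the unloading,
//   3. collect without clearing soft references, then retry,
//   4. expand the metaspace, then retry,
//   5. last-ditch collection (clears soft references), then retry.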
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1 expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_metaspace_alloc_failure_for_concurrent_GC();
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC try to allocate without expanding. This could fail;
  // expansion is tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for an explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear softrefs. This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // Only report if the operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}