src/share/vm/jfr/recorder/repository/jfrEmergencyDump.cpp

Tue, 02 Jun 2020 14:29:43 +0800

author
ddong
date
Tue, 02 Jun 2020 14:29:43 +0800
changeset 9941
45c8de52649c
parent 9858
b985cbb00e68
permissions
-rw-r--r--

8246310: Clean commented-out code about ModuleEntry and PackageEntry in JFR
Reviewed-by: adinn

apetushkov@9858 1 /*
apetushkov@9858 2 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
apetushkov@9858 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
apetushkov@9858 4 *
apetushkov@9858 5 * This code is free software; you can redistribute it and/or modify it
apetushkov@9858 6 * under the terms of the GNU General Public License version 2 only, as
apetushkov@9858 7 * published by the Free Software Foundation.
apetushkov@9858 8 *
apetushkov@9858 9 * This code is distributed in the hope that it will be useful, but WITHOUT
apetushkov@9858 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
apetushkov@9858 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
apetushkov@9858 12 * version 2 for more details (a copy is included in the LICENSE file that
apetushkov@9858 13 * accompanied this code).
apetushkov@9858 14 *
apetushkov@9858 15 * You should have received a copy of the GNU General Public License version
apetushkov@9858 16 * 2 along with this work; if not, write to the Free Software Foundation,
apetushkov@9858 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
apetushkov@9858 18 *
apetushkov@9858 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
apetushkov@9858 20 * or visit www.oracle.com if you need additional information or have any
apetushkov@9858 21 * questions.
apetushkov@9858 22 *
apetushkov@9858 23 */
apetushkov@9858 24
apetushkov@9858 25 #include "precompiled.hpp"
apetushkov@9858 26 #include "jfr/jfrEvents.hpp"
apetushkov@9858 27 #include "jfr/leakprofiler/leakProfiler.hpp"
apetushkov@9858 28 #include "jfr/recorder/repository/jfrEmergencyDump.hpp"
apetushkov@9858 29 #include "jfr/recorder/service/jfrPostBox.hpp"
apetushkov@9858 30 #include "jfr/recorder/service/jfrRecorderService.hpp"
apetushkov@9858 31 #include "jfr/utilities/jfrTypes.hpp"
apetushkov@9858 32 #include "memory/resourceArea.hpp"
apetushkov@9858 33 #include "runtime/atomic.hpp"
apetushkov@9858 34 #include "runtime/handles.hpp"
apetushkov@9858 35 #include "runtime/globals.hpp"
apetushkov@9858 36 #include "runtime/mutexLocker.hpp"
apetushkov@9858 37 #include "runtime/thread.hpp"
apetushkov@9858 38
apetushkov@9858 39 /*
apetushkov@9858 40 * We are just about to exit the VM, so we will be very aggressive
apetushkov@9858 41 * at this point in order to increase overall success of dumping jfr data:
apetushkov@9858 42 *
apetushkov@9858 43 * 1. if the thread state is not "_thread_in_vm", we will quick transition
apetushkov@9858 44 * it to "_thread_in_vm".
apetushkov@9858 45 * 2. the nesting state for both resource and handle areas are unknown,
apetushkov@9858 46 * so we allocate new fresh arenas, discarding the old ones.
apetushkov@9858 47 * 3. if the thread is the owner of some critical lock(s), unlock them.
apetushkov@9858 48 *
apetushkov@9858 49 * If we end up deadlocking in the attempt of dumping out jfr data,
apetushkov@9858 50 * we rely on the WatcherThread task "is_error_reported()",
apetushkov@9858 51 * to exit the VM after a hard-coded timeout.
apetushkov@9858 52 * This "safety net" somewhat explains the aggressiveness in this attempt.
apetushkov@9858 53 *
apetushkov@9858 54 */
apetushkov@9858 55 static void prepare_for_emergency_dump(Thread* thread) {
apetushkov@9858 56 if (thread->is_Java_thread()) {
apetushkov@9858 57 ((JavaThread*)thread)->set_thread_state(_thread_in_vm);
apetushkov@9858 58 }
apetushkov@9858 59
apetushkov@9858 60 #ifdef ASSERT
apetushkov@9858 61 Monitor* owned_lock = thread->owned_locks();
apetushkov@9858 62 while (owned_lock != NULL) {
apetushkov@9858 63 Monitor* next = owned_lock->next();
apetushkov@9858 64 owned_lock->unlock();
apetushkov@9858 65 owned_lock = next;
apetushkov@9858 66 }
apetushkov@9858 67 #endif // ASSERT
apetushkov@9858 68
apetushkov@9858 69 if (Threads_lock->owned_by_self()) {
apetushkov@9858 70 Threads_lock->unlock();
apetushkov@9858 71 }
apetushkov@9858 72
ddong@9941 73 if (PackageTable_lock->owned_by_self()) {
ddong@9941 74 PackageTable_lock->unlock();
ddong@9941 75 }
apetushkov@9858 76
apetushkov@9858 77 if (Heap_lock->owned_by_self()) {
apetushkov@9858 78 Heap_lock->unlock();
apetushkov@9858 79 }
apetushkov@9858 80
apetushkov@9858 81 if (Safepoint_lock->owned_by_self()) {
apetushkov@9858 82 Safepoint_lock->unlock();
apetushkov@9858 83 }
apetushkov@9858 84
apetushkov@9858 85 if (VMOperationQueue_lock->owned_by_self()) {
apetushkov@9858 86 VMOperationQueue_lock->unlock();
apetushkov@9858 87 }
apetushkov@9858 88
apetushkov@9858 89 if (VMOperationRequest_lock->owned_by_self()) {
apetushkov@9858 90 VMOperationRequest_lock->unlock();
apetushkov@9858 91 }
apetushkov@9858 92
apetushkov@9858 93
apetushkov@9858 94 if (Service_lock->owned_by_self()) {
apetushkov@9858 95 Service_lock->unlock();
apetushkov@9858 96 }
apetushkov@9858 97
apetushkov@9858 98 if (CodeCache_lock->owned_by_self()) {
apetushkov@9858 99 CodeCache_lock->unlock();
apetushkov@9858 100 }
apetushkov@9858 101
apetushkov@9858 102 if (PeriodicTask_lock->owned_by_self()) {
apetushkov@9858 103 PeriodicTask_lock->unlock();
apetushkov@9858 104 }
apetushkov@9858 105
apetushkov@9858 106 if (JfrMsg_lock->owned_by_self()) {
apetushkov@9858 107 JfrMsg_lock->unlock();
apetushkov@9858 108 }
apetushkov@9858 109
apetushkov@9858 110 if (JfrBuffer_lock->owned_by_self()) {
apetushkov@9858 111 JfrBuffer_lock->unlock();
apetushkov@9858 112 }
apetushkov@9858 113
apetushkov@9858 114 if (JfrStream_lock->owned_by_self()) {
apetushkov@9858 115 JfrStream_lock->unlock();
apetushkov@9858 116 }
apetushkov@9858 117
apetushkov@9858 118 if (JfrStacktrace_lock->owned_by_self()) {
apetushkov@9858 119 JfrStacktrace_lock->unlock();
apetushkov@9858 120 }
apetushkov@9858 121 }
apetushkov@9858 122
apetushkov@9858 123 static volatile int jfr_shutdown_lock = 0;
apetushkov@9858 124
apetushkov@9858 125 static bool guard_reentrancy() {
apetushkov@9858 126 return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
apetushkov@9858 127 }
apetushkov@9858 128
apetushkov@9858 129 void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
apetushkov@9858 130 if (!guard_reentrancy()) {
apetushkov@9858 131 return;
apetushkov@9858 132 }
apetushkov@9858 133 // function made non-reentrant
apetushkov@9858 134 Thread* thread = Thread::current();
apetushkov@9858 135 if (exception_handler) {
apetushkov@9858 136 // we are crashing
apetushkov@9858 137 if (thread->is_Watcher_thread()) {
apetushkov@9858 138 // The Watcher thread runs the periodic thread sampling task.
apetushkov@9858 139 // If it has crashed, it is likely that another thread is
apetushkov@9858 140 // left in a suspended state. This would mean the system
apetushkov@9858 141 // will not be able to ever move to a safepoint. We try
apetushkov@9858 142 // to avoid issuing safepoint operations when attempting
apetushkov@9858 143 // an emergency dump, but a safepoint might be already pending.
apetushkov@9858 144 return;
apetushkov@9858 145 }
apetushkov@9858 146 prepare_for_emergency_dump(thread);
apetushkov@9858 147 }
apetushkov@9858 148 EventDumpReason event;
apetushkov@9858 149 if (event.should_commit()) {
apetushkov@9858 150 event.set_reason(exception_handler ? "Crash" : "Out of Memory");
apetushkov@9858 151 event.set_recordingId(-1);
apetushkov@9858 152 event.commit();
apetushkov@9858 153 }
apetushkov@9858 154 if (!exception_handler) {
apetushkov@9858 155 // OOM
apetushkov@9858 156 LeakProfiler::emit_events(max_jlong, false);
apetushkov@9858 157 }
apetushkov@9858 158 const int messages = MSGBIT(MSG_VM_ERROR);
apetushkov@9858 159 ResourceMark rm(thread);
apetushkov@9858 160 HandleMark hm(thread);
apetushkov@9858 161 JfrRecorderService service;
apetushkov@9858 162 service.rotate(messages);
apetushkov@9858 163 }

mercurial