src/share/vm/jfr/leakprofiler/emitEventOperation.cpp

author:      egahlin
date:        Thu, 13 Dec 2018 14:21:04 +0100
changeset:   9874:9c3c8469d9be
parent:      9867:150ab470bf7f
permissions: -rw-r--r--

8215175: Inconsistencies in JFR event metadata
Reviewed-by: mgronlun

/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap, but at least 32 MB.
 * Commit ratio: 1 : 10 (subject to allocation granularities).
 */
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}
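
// Worked example (illustrative numbers, not from the source): for a 4 GB heap
// the reservation is MAX2(4096 MB / 20, 32 MB) = ~205 MB, committed in ~20 MB
// blocks. For a 256 MB heap the 32 MB floor applies, giving 3.2 MB commit
// blocks; hence the 3 MB lower bound asserted in edge_queue_memory_commit_size().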

static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
  if (!(LogJFR && Verbose)) {
    return;
  }
  tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
  tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
  tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
  if (edge_queue.reserved_size() > 0) {
    tty->print_cr("EdgeQueue commit reserve ratio: %f\n",
                  ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
  }
}
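
// Example output when both LogJFR and Verbose are enabled (values are
// illustrative only, derived from the format strings above):
//   EdgeQueue reserved size total: 32768 [KB]
//   EdgeQueue edges total: 125000
//   EdgeQueue liveset total: 2441 [KB]
//   EdgeQueue commit reserve ratio: 0.074493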

void EmitEventOperation::doit() {
  assert(LeakProfiler::is_running(), "invariant");
  _object_sampler = LeakProfiler::object_sampler();
  assert(_object_sampler != NULL, "invariant");

  _vm_thread = VMThread::vm_thread();
  assert(_vm_thread == Thread::current(), "invariant");
  _vm_thread_local = _vm_thread->jfr_thread_local();
  assert(_vm_thread_local != NULL, "invariant");
  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  // VM_Operation::evaluate(), which invoked doit(),
  // contains a top-level ResourceMark.

  // Save the original mark words for the potential leak objects,
  // to be restored on function exit.
  ObjectSampleMarker marker;
  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
    return;
  }

  EdgeStore edge_store;

  GranularTimer::start(_cutoff_ticks, 1000000);
  if (_cutoff_ticks <= 0) {
    // No reference chains are wanted; write the samples flat.
    write_events(&edge_store);
    return;
  }

  assert(_cutoff_ticks > 0, "invariant");

  // The bitset used for marking is dimensioned as a function of the heap size.
  const MemRegion heap_region = Universe::heap()->reserved_region();
  BitSet mark_bits(heap_region);

  // The edge queue is dimensioned as a fraction of the heap size.
  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));

  // The initialize() routines will attempt to reserve and allocate backing storage memory.
  // Failure to accommodate will render root chain processing impossible.
  // As a fallback on failure, just write out the existing samples, flat, without chains.
  if (!(mark_bits.initialize() && edge_queue.initialize())) {
    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
    write_events(&edge_store);
    return;
  }

  // necessary condition for attempting a root set iteration
  Universe::heap()->ensure_parsability(false);

  RootSetClosure::add_to_queue(&edge_queue);
  if (edge_queue.is_full()) {
    // Pathological case where the roots do not fit in the queue.
    // Fall back to a depth-first search, but mark the roots first
    // to avoid walking sideways over them.
    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
  } else {
    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
    bfs.process();
  }
  GranularTimer::stop();
  write_events(&edge_store);
  log_edge_queue_summary(edge_queue);
}
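
// Dispatch sketch (illustrative; the call site lives outside this file and
// the constructor arguments are assumed from emitEventOperation.hpp). The
// operation is evaluated by the VMThread at a safepoint, along the lines of:
//
//   EmitEventOperation op(cutoff_ticks, emit_all);
//   VMThread::execute(&op);  // evaluate() runs doit() at a safepoint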

int EmitEventOperation::write_events(EdgeStore* edge_store) {
  assert(_object_sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread != NULL, "invariant");
  assert(_vm_thread_local != NULL, "invariant");
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");

  // Save the thread id in preparation for thread-local trace data manipulations.
  const traceid vmthread_id = _vm_thread_local->thread_id();
  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
  int count = 0;

  const ObjectSample* current = _object_sampler->first();
  while (current != NULL) {
    ObjectSample* prev = current->prev();
    if (current->is_alive_and_older_than(last_sweep)) {
      write_event(current, edge_store);
      ++count;
    }
    current = prev;
  }

  // Restore the thread-local stack trace and thread id.
  _vm_thread_local->set_thread_id(vmthread_id);
  _vm_thread_local->clear_cached_stack_trace();
  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  if (count > 0) {
    // serialize associated checkpoints
    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
  }
  return count;
}

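// Helper for the event's arrayElements field: returns the element count for
// array objects; min_jint serves as an out-of-band "not an array" marker
// (0 would be indistinguishable from an empty array).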
static int array_size(const oop object) {
  assert(object != NULL, "invariant");
  if (object->is_array()) {
    return arrayOop(object)->length();
  }
  return min_jint;
}

void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
  assert(sample != NULL, "invariant");
  assert(!sample->is_dead(), "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread_local != NULL, "invariant");
  const oop* object_addr = sample->object_addr();
  assert(*object_addr != NULL, "invariant");

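  // During chain processing an object's mark word is overloaded to hold the
  // Edge pointer for that object; the original mark words were saved by the
  // ObjectSampleMarker in doit() and are restored when the operation exits.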
  const Edge* edge = (const Edge*)(*object_addr)->mark();
  traceid gc_root_id = 0;
  if (edge == NULL) {
    // In order to emit an event for this object even though it was
    // unreachable (or too distant to reach within the cutoff), we need
    // to register a top-level edge for it.
    Edge e(NULL, object_addr);
    edge_store->add_chain(&e, 1);
    edge = (const Edge*)(*object_addr)->mark();
  } else {
    gc_root_id = edge_store->get_root_id(edge);
  }

  assert(edge != NULL, "invariant");
  assert(edge->pointee() == *object_addr, "invariant");
  const traceid object_id = edge_store->get_id(edge);
  assert(object_id != 0, "invariant");

  EventOldObjectSample e(UNTIMED);
  e.set_starttime(GranularTimer::start_time());
  e.set_endtime(GranularTimer::end_time());
  e.set_allocationTime(sample->allocation_time());
  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
  e.set_object(object_id);
  e.set_arrayElements(array_size(*object_addr));
  e.set_root(gc_root_id);

  // Temporarily assign both the stack trace id and the thread id to the
  // thread-local data structure of the VMThread (for the duration of the
  // commit() call). This trick provides a means to override the event
  // generation mechanism by injecting externally provided ids. In
  // particular, it allows us to emit an old object event that carries
  // information from where the actual sampling occurred.
  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
  assert(sample->has_thread(), "invariant");
  _vm_thread_local->set_thread_id(sample->thread_id());
  e.commit();
}
