src/share/vm/jfr/leakprofiler/emitEventOperation.cpp

changeset 9885:8e875c964f41 (parent 9874:9c3c8469d9be)
/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap OR at least 32 MB
 * Commit ratio: 1 : 10 (subject to allocation granularities)
 */
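// A sketch of the arithmetic above (illustrative numbers, not from the source):
// a 4 GB heap reserves 4 GB / 20 = ~205 MB, committed in ~20.5 MB blocks (1:10);
// heaps below 640 MB fall back to the 32 MB floor, whose 3.2 MB commit block
// is what the 3*M assert below checks conservatively.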
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}

static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
  if (LogJFR && Verbose) tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
  if (LogJFR && Verbose) tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
  if (LogJFR && Verbose) tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
  if (edge_queue.reserved_size() > 0) {
    if (LogJFR && Verbose) tty->print_cr("EdgeQueue commit reserve ratio: %f",
      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
  }
}

void EmitEventOperation::doit() {
  assert(LeakProfiler::is_running(), "invariant");
  _object_sampler = LeakProfiler::object_sampler();
  assert(_object_sampler != NULL, "invariant");

  _vm_thread = VMThread::vm_thread();
  assert(_vm_thread == Thread::current(), "invariant");
  _vm_thread_local = _vm_thread->jfr_thread_local();
  assert(_vm_thread_local != NULL, "invariant");
  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  // The VM_Operation::evaluate() which invoked doit()
  // contains a top level ResourceMark

  // save the original mark word for the potential leak objects,
  // to be restored on function exit
  ObjectSampleMarker marker;
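  // if no samples could be marked, there is nothing to emit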
  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
    return;
  }

  EdgeStore edge_store;

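  // 1000000 is the timer granularity: roughly how many units of traversal work
  // pass between timer polls (an assumption based on the GranularTimer interface)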
  GranularTimer::start(_cutoff_ticks, 1000000);
  if (_cutoff_ticks <= 0) {
    // no chains
    write_events(&edge_store);
    return;
  }

  assert(_cutoff_ticks > 0, "invariant");

  // The bitset used for marking is dimensioned as a function of the heap size
  const MemRegion heap_region = Universe::heap()->reserved_region();
  BitSet mark_bits(heap_region);
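  // (conceptually one mark bit per heap word in the reserved region; an
  // assumption about the leak profiler's BitSet, which is sized from heap_region)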

  // The edge queue is dimensioned as a fraction of the heap size
  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));

  // The initialize() routines will attempt to reserve and allocate backing storage memory.
  // If that memory cannot be obtained, root chain processing is impossible.
  // As a fallback on failure, just write out the existing samples, flat, without chains.
  if (!(mark_bits.initialize() && edge_queue.initialize())) {
    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
    write_events(&edge_store);
    return;
  }

  // necessary condition for attempting a root set iteration
  Universe::heap()->ensure_parsability(false);
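  // ('false' asks for TLABs to be filled rather than retired, which is enough
  // to make the heap walkable; an assumption about ensure_parsability's parameter)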

  RootSetClosure::add_to_queue(&edge_queue);
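  // A note on intent: BFS tends to find the shortest reference chain from a
  // root to each sample, while DFS needs no queue capacity, so it serves as
  // the fallback when even the root set overflows the queue.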
  if (edge_queue.is_full()) {
    // Pathological case where roots don't fit in queue
    // Do a depth-first search, but mark roots first
    // to avoid walking sideways over roots
    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
  } else {
    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
    bfs.process();
  }
  GranularTimer::stop();
  write_events(&edge_store);
  log_edge_queue_summary(edge_queue);
}

int EmitEventOperation::write_events(EdgeStore* edge_store) {
  assert(_object_sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread != NULL, "invariant");
  assert(_vm_thread_local != NULL, "invariant");
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");

  // save thread id in preparation for thread local trace data manipulations
  const traceid vmthread_id = _vm_thread_local->thread_id();
  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

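  // when emitting all samples, max_jlong makes every live sample pass the
  // age test below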
  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
  int count = 0;

  const ObjectSample* current = _object_sampler->first();
  while (current != NULL) {
    ObjectSample* prev = current->prev();
    if (current->is_alive_and_older_than(last_sweep)) {
      write_event(current, edge_store);
      ++count;
    }
    current = prev;
  }

  // restore thread local stack trace and thread id
  _vm_thread_local->set_thread_id(vmthread_id);
  _vm_thread_local->clear_cached_stack_trace();
  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");

  if (count > 0) {
    // serialize associated checkpoints
    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
  }
  return count;
}

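// Returns the array length, or min_jint as a "not an array" sentinel value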
static int array_size(const oop object) {
  assert(object != NULL, "invariant");
  if (object->is_array()) {
    return arrayOop(object)->length();
  }
  return min_jint;
}

void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
  assert(sample != NULL, "invariant");
  assert(!sample->is_dead(), "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread_local != NULL, "invariant");
  const oop* object_addr = sample->object_addr();
  assert(*object_addr != NULL, "invariant");

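  // The chain-building pass stored a pointer to the object's Edge in its mark
  // word; the original mark words were saved by the ObjectSampleMarker in doit()
  // and are restored on its exit (see the comment there)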
  const Edge* edge = (const Edge*)(*object_addr)->mark();
  traceid gc_root_id = 0;
  if (edge == NULL) {
    // In order to dump out a representation of the object, even though it was
    // not reached (unreachable, or the search hit the cutoff before reaching it),
    // we need to register a top level edge for this object
    Edge e(NULL, object_addr);
    edge_store->add_chain(&e, 1);
    edge = (const Edge*)(*object_addr)->mark();
  } else {
    gc_root_id = edge_store->get_root_id(edge);
  }

  assert(edge != NULL, "invariant");
  assert(edge->pointee() == *object_addr, "invariant");
  const traceid object_id = edge_store->get_id(edge);
  assert(object_id != 0, "invariant");

  EventOldObjectSample e(UNTIMED);
  e.set_starttime(GranularTimer::start_time());
  e.set_endtime(GranularTimer::end_time());
  e.set_allocationTime(sample->allocation_time());
  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
  e.set_object(object_id);
  e.set_arrayElements(array_size(*object_addr));
  e.set_root(gc_root_id);

  // Temporarily assign both the stack trace id and the thread id to the
  // thread local data structure of the VMThread (for the duration of the
  // commit() call). This trick provides a means to override the event
  // generation mechanism by injecting externally provided ids. Here, in
  // particular, it allows us to emit an old object event carrying information
  // from where the actual sampling occurred.
  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
  assert(sample->has_thread(), "invariant");
  _vm_thread_local->set_thread_id(sample->thread_id());
  e.commit();
}
