Mon, 12 Aug 2019 18:30:40 +0300
8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
1 /*
2 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/copyFailedInfo.hpp"
27 #include "gc_implementation/shared/gcHeapSummary.hpp"
28 #include "gc_implementation/shared/gcId.hpp"
29 #include "gc_implementation/shared/gcTimer.hpp"
30 #include "gc_implementation/shared/gcTrace.hpp"
31 #include "gc_implementation/shared/objectCountEventSender.hpp"
32 #include "memory/heapInspection.hpp"
33 #include "memory/referenceProcessorStats.hpp"
34 #include "runtime/os.hpp"
35 #include "utilities/globalDefinitions.hpp"
36 #include "utilities/ticks.hpp"
38 #if INCLUDE_ALL_GCS
39 #include "gc_implementation/g1/evacuationInfo.hpp"
40 #endif
// Sanity-check helpers: a GC cycle is considered "in progress" exactly when
// _shared_gc_info holds a defined gc_id. assert_unset_gc_id() guards entry
// points that must not run while a GC is already being reported;
// assert_set_gc_id() guards reporting calls that require report_gc_start()
// to have run first.
#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
45 void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
46 assert_unset_gc_id();
48 GCId gc_id = GCId::create();
49 _shared_gc_info.set_gc_id(gc_id);
50 _shared_gc_info.set_cause(cause);
51 _shared_gc_info.set_start_timestamp(timestamp);
52 }
// Public entry point for reporting the start of a GC cycle. Asserts that no
// cycle is currently in progress, then delegates to report_gc_start_impl().
void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
  assert_unset_gc_id();

  report_gc_start_impl(cause, timestamp);
}
60 bool GCTracer::has_reported_gc_start() const {
61 return !_shared_gc_info.gc_id().is_undefined();
62 }
// Shared bookkeeping for the end of a GC cycle: records pause statistics and
// the end timestamp, then emits the per-phase events followed by the
// summarizing garbage collection event.
void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  assert_set_gc_id();

  // Record pause data before any events are sent.
  _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
  _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
  _shared_gc_info.set_end_timestamp(timestamp);

  // Phase events first, then the summary event for the whole collection.
  send_phase_events(time_partitions);
  send_garbage_collection_event();
}
// Public entry point for reporting the end of a GC cycle. Runs the
// (subclass-extended) end reporting first, then clears the shared gc_id so
// that a new cycle may be started.
void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
  assert_set_gc_id();

  report_gc_end_impl(timestamp, time_partitions);

  _shared_gc_info.set_gc_id(GCId::undefined());
}
// Emits one reference-statistics event per reference kind (soft, weak,
// final, phantom) with the counts gathered by the reference processor.
void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
  assert_set_gc_id();

  send_reference_stats_event(REF_SOFT, rps.soft_count());
  send_reference_stats_event(REF_WEAK, rps.weak_count());
  send_reference_stats_event(REF_FINAL, rps.final_count());
  send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
}
92 #if INCLUDE_SERVICES
93 class ObjectCountEventSenderClosure : public KlassInfoClosure {
94 const GCId _gc_id;
95 const double _size_threshold_percentage;
96 const size_t _total_size_in_words;
97 const Ticks _timestamp;
99 public:
100 ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, const Ticks& timestamp) :
101 _gc_id(gc_id),
102 _size_threshold_percentage(ObjectCountCutOffPercent / 100),
103 _total_size_in_words(total_size_in_words),
104 _timestamp(timestamp)
105 {}
107 virtual void do_cinfo(KlassInfoEntry* entry) {
108 if (should_send_event(entry)) {
109 ObjectCountEventSender::send(entry, _gc_id, _timestamp);
110 }
111 }
113 private:
114 bool should_send_event(const KlassInfoEntry* entry) const {
115 double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
116 return percentage_of_heap >= _size_threshold_percentage;
117 }
118 };
120 void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
121 assert_set_gc_id();
122 assert(is_alive_cl != NULL, "Must supply function to check liveness");
124 if (ObjectCountEventSender::should_send_event()) {
125 ResourceMark rm;
127 KlassInfoTable cit(false);
128 if (!cit.allocation_failed()) {
129 HeapInspection hi(false, false, false, NULL);
130 hi.populate_table(&cit, is_alive_cl);
131 ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now());
132 cit.iterate(&event_sender);
133 }
134 }
135 }
136 #endif // INCLUDE_SERVICES
// Emits a heap summary event for the given point in the GC cycle ('when').
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
  assert_set_gc_id();

  send_gc_heap_summary_event(when, heap_summary);
}
// Emits metaspace summary events for the given point in the GC cycle: the
// overall summary, the non-class chunk free list summary, and — only when
// compressed class pointers are in use — the class-space chunk free list
// summary.
void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
  assert_set_gc_id();

  send_meta_space_summary_event(when, summary);

  send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
  if (UseCompressedClassPointers) {
    send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary());
  }
}
// Young-collection end reporting. Requires that report_tenuring_threshold()
// was called during this cycle. Runs the shared end reporting, emits the
// young GC event, then resets the threshold for the next cycle.
void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  assert_set_gc_id();
  assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");

  GCTracer::report_gc_end_impl(timestamp, time_partitions);
  send_young_gc_event();

  _tenuring_threshold = UNSET_TENURING_THRESHOLD;
}
// Emits a promotion-failed event carrying the collected failure info.
void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) {
  assert_set_gc_id();

  send_promotion_failed_event(pf_info);
}
// Records the tenuring threshold for this young GC. Must be called before
// report_gc_end_impl(), which asserts the value was set and then resets it.
void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
  _tenuring_threshold = tenuring_threshold;
}
175 bool YoungGCTracer::should_report_promotion_events() const {
176 return should_report_promotion_in_new_plab_event() ||
177 should_report_promotion_outside_plab_event();
178 }
// Whether a promotion-in-new-PLAB event would currently be sent.
bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
  return should_send_promotion_in_new_plab_event();
}
// Whether a promotion-outside-PLAB event would currently be sent.
bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
  return should_send_promotion_outside_plab_event();
}
// Forwards a promotion-in-new-PLAB sample (klass, object size, age, whether
// it was tenured, and the new PLAB's size) to the event sender.
void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                       uint age, bool tenured,
                                                       size_t plab_size) const {
  send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
}
// Forwards a promotion-outside-PLAB sample (klass, object size, age, whether
// it was tenured) to the event sender.
void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                        uint age, bool tenured) const {
  send_promotion_outside_plab_event(klass, obj_size, age, tenured);
}
// Old-collection end reporting: runs the shared end reporting, then emits
// the old GC event.
void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  assert_set_gc_id();

  GCTracer::report_gc_end_impl(timestamp, time_partitions);
  send_old_gc_event();
}
// Parallel-old end reporting: runs the old-GC end reporting, then emits the
// parallel old event.
void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  assert_set_gc_id();

  OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
  send_parallel_old_event();
}
// Records the dense prefix address chosen by the parallel old collector in
// the tracer's parallel-old info.
void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
  assert_set_gc_id();

  _parallel_old_gc_info.report_dense_prefix(dense_prefix);
}
// Emits a concurrent-mode-failure event.
void OldGCTracer::report_concurrent_mode_failure() {
  assert_set_gc_id();

  send_concurrent_mode_failure_event();
}
225 #if INCLUDE_ALL_GCS
226 void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
227 send_g1_mmu_event(time_slice_sec * MILLIUNITS,
228 gc_time_sec * MILLIUNITS,
229 max_time_sec * MILLIUNITS);
230 }
// Records the young collection type for the current G1 young GC.
void G1NewTracer::report_yc_type(G1YCType type) {
  assert_set_gc_id();

  _g1_young_gc_info.set_type(type);
}
// G1 young-collection end reporting: runs the young-GC end reporting, then
// emits the G1-specific young GC event.
void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  assert_set_gc_id();

  YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
  send_g1_young_gc_event();
}
// Emits an evacuation-info event for this G1 collection.
void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
  assert_set_gc_id();

  send_evacuation_info_event(info);
}
// Emits an evacuation-failed event, then resets the failure info. Note the
// order: the event must be sent before the info is cleared.
void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
  assert_set_gc_id();

  send_evacuation_failed_event(ef_info);
  ef_info.reset();
}
257 #endif