Mon, 12 Aug 2019 18:30:40 +0300
8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
apetushkov@9858 | 1 | /* |
apetushkov@9858 | 2 | * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. |
apetushkov@9858 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
apetushkov@9858 | 4 | * |
apetushkov@9858 | 5 | * This code is free software; you can redistribute it and/or modify it |
apetushkov@9858 | 6 | * under the terms of the GNU General Public License version 2 only, as |
apetushkov@9858 | 7 | * published by the Free Software Foundation. |
apetushkov@9858 | 8 | * |
apetushkov@9858 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
apetushkov@9858 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
apetushkov@9858 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
apetushkov@9858 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
apetushkov@9858 | 13 | * accompanied this code). |
apetushkov@9858 | 14 | * |
apetushkov@9858 | 15 | * You should have received a copy of the GNU General Public License version |
apetushkov@9858 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
apetushkov@9858 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
apetushkov@9858 | 18 | * |
apetushkov@9858 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
apetushkov@9858 | 20 | * or visit www.oracle.com if you need additional information or have any |
apetushkov@9858 | 21 | * questions. |
apetushkov@9858 | 22 | * |
apetushkov@9858 | 23 | */ |
apetushkov@9858 | 24 | |
apetushkov@9858 | 25 | #include "precompiled.hpp" |
apetushkov@9858 | 26 | #include "classfile/javaClasses.hpp" |
apetushkov@9858 | 27 | #include "jfr/leakprofiler/chains/edge.hpp" |
apetushkov@9858 | 28 | #include "jfr/leakprofiler/chains/edgeStore.hpp" |
apetushkov@9858 | 29 | #include "jfr/leakprofiler/chains/edgeUtils.hpp" |
apetushkov@9858 | 30 | #include "jfr/leakprofiler/utilities/unifiedOop.hpp" |
apetushkov@9858 | 31 | #include "oops/fieldStreams.hpp" |
apetushkov@9858 | 32 | #include "oops/instanceKlass.hpp" |
apetushkov@9858 | 33 | #include "oops/objArrayOop.hpp" |
apetushkov@9858 | 34 | #include "oops/oopsHierarchy.hpp" |
apetushkov@9858 | 35 | #include "runtime/handles.inline.hpp" |
apetushkov@9858 | 36 | |
apetushkov@9858 | 37 | bool EdgeUtils::is_leak_edge(const Edge& edge) { |
apetushkov@9858 | 38 | return (const Edge*)edge.pointee()->mark() == &edge; |
apetushkov@9858 | 39 | } |
apetushkov@9858 | 40 | |
// Thin delegate: a root edge is one the Edge itself reports as a root
// (i.e. it has no reference owner further up the chain).
bool EdgeUtils::is_root(const Edge& edge) {
  return edge.is_root();
}
apetushkov@9858 | 44 | |
apetushkov@9858 | 45 | static int field_offset(const Edge& edge) { |
apetushkov@9858 | 46 | assert(!edge.is_root(), "invariant"); |
apetushkov@9858 | 47 | const oop ref_owner = edge.reference_owner(); |
apetushkov@9858 | 48 | assert(ref_owner != NULL, "invariant"); |
apetushkov@9858 | 49 | const oop* reference = UnifiedOop::decode(edge.reference()); |
apetushkov@9858 | 50 | assert(reference != NULL, "invariant"); |
apetushkov@9858 | 51 | assert(!UnifiedOop::is_narrow(reference), "invariant"); |
apetushkov@9858 | 52 | assert(!ref_owner->is_array(), "invariant"); |
apetushkov@9858 | 53 | assert(ref_owner->is_instance(), "invariant"); |
apetushkov@9858 | 54 | const int offset = (int)pointer_delta(reference, ref_owner, sizeof(char)); |
apetushkov@9858 | 55 | assert(offset < (ref_owner->size() * HeapWordSize), "invariant"); |
apetushkov@9858 | 56 | return offset; |
apetushkov@9858 | 57 | } |
apetushkov@9858 | 58 | |
// Klass of the object holding the reference field.
// NOTE(review): if edge.is_root() is true the left operand is false, so
// is_array_element() is evaluated — and that helper itself asserts
// !edge.is_root(), tripping inside the callee. The assert thus effectively
// requires a non-root, non-array-element edge; confirm that is the intent.
static const InstanceKlass* field_type(const Edge& edge) {
  assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
  return (const InstanceKlass*)edge.reference_owner_klass();
}
apetushkov@9858 | 63 | |
apetushkov@9858 | 64 | const Symbol* EdgeUtils::field_name_symbol(const Edge& edge) { |
apetushkov@9858 | 65 | assert(!edge.is_root(), "invariant"); |
apetushkov@9858 | 66 | assert(!is_array_element(edge), "invariant"); |
apetushkov@9858 | 67 | const int offset = field_offset(edge); |
apetushkov@9858 | 68 | const InstanceKlass* ik = field_type(edge); |
apetushkov@9858 | 69 | while (ik != NULL) { |
apetushkov@9858 | 70 | JavaFieldStream jfs(ik); |
apetushkov@9858 | 71 | while (!jfs.done()) { |
apetushkov@9858 | 72 | if (offset == jfs.offset()) { |
apetushkov@9858 | 73 | return jfs.name(); |
apetushkov@9858 | 74 | } |
apetushkov@9858 | 75 | jfs.next(); |
apetushkov@9858 | 76 | } |
apetushkov@9858 | 77 | ik = (InstanceKlass*)ik->super(); |
apetushkov@9858 | 78 | } |
apetushkov@9858 | 79 | return NULL; |
apetushkov@9858 | 80 | } |
apetushkov@9858 | 81 | |
apetushkov@9858 | 82 | jshort EdgeUtils::field_modifiers(const Edge& edge) { |
apetushkov@9858 | 83 | const int offset = field_offset(edge); |
apetushkov@9858 | 84 | const InstanceKlass* ik = field_type(edge); |
apetushkov@9858 | 85 | |
apetushkov@9858 | 86 | while (ik != NULL) { |
apetushkov@9858 | 87 | JavaFieldStream jfs(ik); |
apetushkov@9858 | 88 | while (!jfs.done()) { |
apetushkov@9858 | 89 | if (offset == jfs.offset()) { |
apetushkov@9858 | 90 | return jfs.access_flags().as_short(); |
apetushkov@9858 | 91 | } |
apetushkov@9858 | 92 | jfs.next(); |
apetushkov@9858 | 93 | } |
apetushkov@9858 | 94 | ik = (InstanceKlass*)ik->super(); |
apetushkov@9858 | 95 | } |
apetushkov@9858 | 96 | return 0; |
apetushkov@9858 | 97 | } |
apetushkov@9858 | 98 | |
apetushkov@9858 | 99 | bool EdgeUtils::is_array_element(const Edge& edge) { |
apetushkov@9858 | 100 | assert(!edge.is_root(), "invariant"); |
apetushkov@9858 | 101 | const oop ref_owner = edge.reference_owner(); |
apetushkov@9858 | 102 | assert(ref_owner != NULL, "invariant"); |
apetushkov@9858 | 103 | return ref_owner->is_objArray(); |
apetushkov@9858 | 104 | } |
apetushkov@9858 | 105 | |
// Element index (not a byte offset) of the reference within its owning
// object array: the distance from the array's base element, scaled by
// heapOopSize.
static int array_offset(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  const oop* reference = UnifiedOop::decode(edge.reference());
  assert(reference != NULL, "invariant");
  assert(!UnifiedOop::is_narrow(reference), "invariant");
  assert(ref_owner->is_array(), "invariant");
  const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
  // scale by heapOopSize so the delta is an element index
  const int offset = (int)pointer_delta(reference, ref_owner_array->base(), heapOopSize);
  assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
  return offset;
}
apetushkov@9858 | 119 | |
apetushkov@9858 | 120 | int EdgeUtils::array_index(const Edge& edge) { |
apetushkov@9858 | 121 | return is_array_element(edge) ? array_offset(edge) : 0; |
apetushkov@9858 | 122 | } |
apetushkov@9858 | 123 | |
apetushkov@9858 | 124 | int EdgeUtils::array_size(const Edge& edge) { |
apetushkov@9858 | 125 | if (is_array_element(edge)) { |
apetushkov@9858 | 126 | const oop ref_owner = edge.reference_owner(); |
apetushkov@9858 | 127 | assert(ref_owner != NULL, "invariant"); |
apetushkov@9858 | 128 | assert(ref_owner->is_objArray(), "invariant"); |
apetushkov@9858 | 129 | return ((objArrayOop)(ref_owner))->length(); |
apetushkov@9858 | 130 | } |
apetushkov@9858 | 131 | return 0; |
apetushkov@9858 | 132 | } |
apetushkov@9858 | 133 | |
apetushkov@9858 | 134 | const Edge* EdgeUtils::root(const Edge& edge) { |
apetushkov@9858 | 135 | const Edge* current = &edge; |
apetushkov@9858 | 136 | const Edge* parent = current->parent(); |
apetushkov@9858 | 137 | while (parent != NULL) { |
apetushkov@9858 | 138 | current = parent; |
apetushkov@9858 | 139 | parent = current->parent(); |
apetushkov@9858 | 140 | } |
apetushkov@9858 | 141 | return current; |
apetushkov@9858 | 142 | } |
apetushkov@9858 | 143 | |
// The number of references associated with the leak node;
// can be viewed as the leak node "context".
// Used to provide leak context for a "capped/skipped" reference chain.
static const size_t leak_context = 100;

// The number of references associated with the root node;
// can be viewed as the root node "context".
// Used to provide root context for a "capped/skipped" reference chain.
static const size_t root_context = 100;

// A limit on the total reference chain depth to be serialized:
// the leak context plus the root context.
static const size_t max_ref_chain_depth = leak_context + root_context;
apetushkov@9858 | 156 | |
apetushkov@9858 | 157 | const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) { |
apetushkov@9858 | 158 | const RoutableEdge* current = &edge; |
apetushkov@9858 | 159 | const RoutableEdge* parent = current->physical_parent(); |
apetushkov@9858 | 160 | size_t seek = 0; |
apetushkov@9858 | 161 | while (parent != NULL && seek != skip_length) { |
apetushkov@9858 | 162 | seek++; |
apetushkov@9858 | 163 | current = parent; |
apetushkov@9858 | 164 | parent = parent->physical_parent(); |
apetushkov@9858 | 165 | } |
apetushkov@9858 | 166 | return current; |
apetushkov@9858 | 167 | } |
apetushkov@9858 | 168 | |
#ifdef ASSERT
// Debug-only: a valid skip target must be the sentinel edge sitting
// exactly root_context - 1 steps from the root.
static void validate_skip_target(const RoutableEdge* skip_target) {
  assert(skip_target != NULL, "invariant");
  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
  assert(skip_target->is_sentinel(), "invariant");
}

// Debug-only: check a freshly installed skip edge. With a previous skip
// edge present, its target is reached via one extra hop adjusted by
// 'adjustment'; otherwise there is only one level of logical indirection.
static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
  assert(new_skip_edge != NULL, "invariant");
  assert(new_skip_edge->is_skip_edge(), "invariant");
  if (last_skip_edge != NULL) {
    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
    validate_skip_target(target->logical_parent());
    return;
  }
  assert(last_skip_edge == NULL, "invariant");
  // only one level of logical indirection
  validate_skip_target(new_skip_edge->logical_parent());
}
#endif // ASSERT
apetushkov@9858 | 189 | |
// Turn 'new_skip_edge' into a skip edge: locate the edge
// 'skip_target_distance' physical steps towards the root and record it
// as this edge's logical parent (shortcut), bypassing the skipped span.
// NOTE(review): mutates routing state through a const pointer —
// presumably the skip fields are mutable or set via const accessors.
static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
  assert(new_skip_edge != NULL, "invariant");
  assert(!new_skip_edge->is_skip_edge(), "invariant");
  assert(!new_skip_edge->processed(), "invariant");
  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
  assert(skip_target != NULL, "invariant");
  new_skip_edge->set_skip_edge(skip_target);
  new_skip_edge->set_skip_length(skip_target_distance);
  assert(new_skip_edge->is_skip_edge(), "invariant");
  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
}
apetushkov@9858 | 201 | |
apetushkov@9858 | 202 | static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) { |
apetushkov@9858 | 203 | assert(distance == 0, "invariant"); |
apetushkov@9858 | 204 | const RoutableEdge* current = &edge; |
apetushkov@9858 | 205 | while (current != NULL) { |
apetushkov@9858 | 206 | if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) { |
apetushkov@9858 | 207 | return current; |
apetushkov@9858 | 208 | } |
apetushkov@9858 | 209 | current = current->physical_parent(); |
apetushkov@9858 | 210 | ++distance; |
apetushkov@9858 | 211 | } |
apetushkov@9858 | 212 | return current; |
apetushkov@9858 | 213 | } |
apetushkov@9858 | 214 | |
// Collapse a chain that overlaps an already serialized chain:
// 'first_processed_edge' (at 'first_processed_distance' from 'edge') has
// been written before. If the combined depth still fits the context
// budget, nothing is done; otherwise a skip edge is installed one step
// before the processed edge to reroute logically towards the root context.
static void collapse_overlapping_chain(const RoutableEdge& edge,
                                       const RoutableEdge* first_processed_edge,
                                       size_t first_processed_distance) {
  assert(first_processed_edge != NULL, "invariant");
  // first_processed_edge is already processed / written
  assert(first_processed_edge->processed(), "invariant");
  assert(first_processed_distance + 1 <= leak_context, "invariant");

  // from this first processed edge, attempt to fetch the last skip edge
  size_t last_skip_edge_distance = 0;
  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
  // total depth seen so far: leaf-to-processed + processed-to-skip + 1
  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;

  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
    // complete chain can be accommodated without modification
    return;
  }

  // backtrack one edge from existing processed edge
  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
  assert(new_skip_edge != NULL, "invariant");
  assert(!new_skip_edge->processed(), "invariant");
  assert(new_skip_edge->parent() == first_processed_edge, "invariant");

  size_t adjustment = 0;
  if (last_skip_edge != NULL) {
    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
    // shrink the skip span by the part of the leak context not yet used
    adjustment = leak_context - first_processed_distance - 1;
    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
  } else {
    // no prior skip edge: route directly to the root context boundary
    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
  }

  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
}
apetushkov@9858 | 252 | |
// Collapse a chain that shares no serialized prefix: the "processed" edge
// here is merely the edge sitting at the leak-context boundary (and is in
// fact unprocessed). If the whole chain fits within max_ref_chain_depth,
// nothing is done; otherwise that boundary edge becomes a skip edge
// routing straight to the root context.
static void collapse_non_overlapping_chain(const RoutableEdge& edge,
                                           const RoutableEdge* first_processed_edge,
                                           size_t first_processed_distance) {
  assert(first_processed_edge != NULL, "invariant");
  assert(!first_processed_edge->processed(), "invariant");
  // this implies that the first "processed" edge is the leak context relative "leaf"
  assert(first_processed_distance + 1 == leak_context, "invariant");

  const size_t distance_to_root = edge.distance_to_root();
  if (distance_to_root + 1 <= max_ref_chain_depth) {
    // complete chain can be accommodated without constructing a skip edge
    return;
  }

  // skip everything between the leak context and the root context
  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel

  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
}
apetushkov@9858 | 272 | |
apetushkov@9858 | 273 | static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) { |
apetushkov@9858 | 274 | assert(distance == 0, "invariant"); |
apetushkov@9858 | 275 | const RoutableEdge* current = &edge; |
apetushkov@9858 | 276 | while (current != NULL && distance < leak_context - 1) { |
apetushkov@9858 | 277 | if (current->processed()) { |
apetushkov@9858 | 278 | return current; |
apetushkov@9858 | 279 | } |
apetushkov@9858 | 280 | current = current->physical_parent(); |
apetushkov@9858 | 281 | ++distance; |
apetushkov@9858 | 282 | } |
apetushkov@9858 | 283 | assert(distance <= leak_context - 1, "invariant"); |
apetushkov@9858 | 284 | return current; |
apetushkov@9858 | 285 | } |
apetushkov@9858 | 286 | |
/*
 * Some vocabulary:
 * -----------
 * "Context" is an interval in the chain, it is associated with an edge and it signifies a number of connected edges.
 * "Processed / written" means an edge that has already been serialized.
 * "Skip edge" is an edge that contains additional information for logical routing purposes.
 * "Skip target" is an edge used as a destination for a skip edge
 */
// Cap the logical depth of a leak edge's reference chain to
// max_ref_chain_depth, installing skip edges where necessary. Chains
// that overlap an already serialized chain and chains that do not are
// collapsed by separate strategies (see the two helpers above).
void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
  assert(is_leak_edge(edge), "invariant");

  // attempt to locate an already processed edge inside current leak context (if any)
  size_t first_processed_distance = 0;
  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
  if (first_processed_edge == NULL) {
    // chain shorter than the leak context; nothing to collapse
    return;
  }

  if (first_processed_edge->processed()) {
    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
  } else {
    // edge at the context boundary is unprocessed: non-overlapping case
    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
  }

  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
}