1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp Mon Aug 12 18:30:40 2019 +0300 1.3 @@ -0,0 +1,312 @@ 1.4 +/* 1.5 + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/handles.inline.hpp"

// An edge is the "leak edge" for its pointee when the pointee's mark word
// holds the address of this very edge.
// NOTE(review): presumably the chain builder/edge store installs the edge
// address into the mark word of sampled leak candidates — confirm in edgeStore.
bool EdgeUtils::is_leak_edge(const Edge& edge) {
  return (const Edge*)edge.pointee()->mark() == &edge;
}

// Thin forwarding predicate: an edge is a root edge iff it says so itself.
bool EdgeUtils::is_root(const Edge& edge) {
  return edge.is_root();
}

// Byte offset of the reference slot within its owning (non-array) instance.
// Precondition: the edge is not a root and its reference is a decoded,
// non-narrow oop* pointing inside the owner instance.
static int field_offset(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  const oop* reference = UnifiedOop::decode(edge.reference());
  assert(reference != NULL, "invariant");
  assert(!UnifiedOop::is_narrow(reference), "invariant");
  assert(!ref_owner->is_array(), "invariant");
  assert(ref_owner->is_instance(), "invariant");
  // sizeof(char) scaling => plain byte distance from object start to the slot
  const int offset = (int)pointer_delta(reference, ref_owner, sizeof(char));
  assert(offset < (ref_owner->size() * HeapWordSize), "invariant");
  return offset;
}

// Klass of the instance that holds the reference (the field's declaring
// hierarchy is walked separately; this is just the owner's klass).
static const InstanceKlass* field_type(const Edge& edge) {
  assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
  return (const InstanceKlass*)edge.reference_owner_klass();
}

// Resolve the name of the instance field the edge's reference occupies,
// by scanning the Java fields of the owner klass and its superclasses
// for a field at the matching byte offset. Returns NULL if no match.
const Symbol* EdgeUtils::field_name_symbol(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  assert(!is_array_element(edge), "invariant");
  const int offset = field_offset(edge);
  const InstanceKlass* ik = field_type(edge);
  while (ik != NULL) {
    JavaFieldStream jfs(ik);
    while (!jfs.done()) {
      if (offset == jfs.offset()) {
        return jfs.name();
      }
      jfs.next();
    }
    // field not declared here; continue the search in the superclass
    ik = (InstanceKlass*)ik->super();
  }
  return NULL;
}

// Access flags (public/private/final/...) of the field at the edge's offset,
// found by the same klass-hierarchy field scan as field_name_symbol().
// Returns 0 if no field at that offset is found.
jshort EdgeUtils::field_modifiers(const Edge& edge) {
  const int offset = field_offset(edge);
  const InstanceKlass* ik = field_type(edge);

  while (ik != NULL) {
    JavaFieldStream jfs(ik);
    while (!jfs.done()) {
      if (offset == jfs.offset()) {
        return jfs.access_flags().as_short();
      }
      jfs.next();
    }
    ik = (InstanceKlass*)ik->super();
  }
  return 0;
}

// True when the edge's reference lives in an object array (i.e. the edge
// represents an array element rather than an instance field).
bool EdgeUtils::is_array_element(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  return ref_owner->is_objArray();
}

// Element index of the edge's reference within its owning object array.
// Precondition: owner is an array; reference is a decoded non-narrow oop*.
static int array_offset(const Edge& edge) {
  assert(!edge.is_root(), "invariant");
  const oop ref_owner = edge.reference_owner();
  assert(ref_owner != NULL, "invariant");
  const oop* reference = UnifiedOop::decode(edge.reference());
  assert(reference != NULL, "invariant");
  assert(!UnifiedOop::is_narrow(reference), "invariant");
  assert(ref_owner->is_array(), "invariant");
  const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
  // heapOopSize scaling => distance from the array element base in elements
  const int offset = (int)pointer_delta(reference, ref_owner_array->base(), heapOopSize);
  assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
  return offset;
}

// Element index for array-element edges; 0 for non-array edges.
int EdgeUtils::array_index(const Edge& edge) {
  return is_array_element(edge) ? array_offset(edge) : 0;
}

// Length of the owning array for array-element edges; 0 for non-array edges.
int EdgeUtils::array_size(const Edge& edge) {
  if (is_array_element(edge)) {
    const oop ref_owner = edge.reference_owner();
    assert(ref_owner != NULL, "invariant");
    assert(ref_owner->is_objArray(), "invariant");
    return ((objArrayOop)(ref_owner))->length();
  }
  return 0;
}

// Walk parent links to the root of the reference chain containing edge.
const Edge* EdgeUtils::root(const Edge& edge) {
  const Edge* current = &edge;
  const Edge* parent = current->parent();
  while (parent != NULL) {
    current = parent;
    parent = current->parent();
  }
  return current;
}

// The number of references associated with the leak node;
// can be viewed as the leak node "context".
// Used to provide leak context for a "capped/skipped" reference chain.
static const size_t leak_context = 100;

// The number of references associated with the root node;
// can be viewed as the root node "context".
// Used to provide root context for a "capped/skipped" reference chain.
static const size_t root_context = 100;

// A limit on the reference chain depth to be serialized.
static const size_t max_ref_chain_depth = leak_context + root_context;

// Walk at most skip_length physical-parent steps up from edge; returns the
// edge reached (the chain's last edge if the chain is shorter than skip_length).
const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
  const RoutableEdge* current = &edge;
  const RoutableEdge* parent = current->physical_parent();
  size_t seek = 0;
  while (parent != NULL && seek != skip_length) {
    seek++;
    current = parent;
    parent = parent->physical_parent();
  }
  return current;
}

#ifdef ASSERT
// A valid skip target is a sentinel edge sitting exactly root_context - 1
// physical steps from the root (so the root context stays intact).
static void validate_skip_target(const RoutableEdge* skip_target) {
  assert(skip_target != NULL, "invariant");
  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
  assert(skip_target->is_sentinel(), "invariant");
}

// Check that a freshly-installed skip edge routes (possibly via one earlier
// skip edge, offset by 'adjustment') to a valid sentinel skip target.
static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
  assert(new_skip_edge != NULL, "invariant");
  assert(new_skip_edge->is_skip_edge(), "invariant");
  if (last_skip_edge != NULL) {
    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
    validate_skip_target(target->logical_parent());
    return;
  }
  assert(last_skip_edge == NULL, "invariant");
  // only one level of logical indirection
  validate_skip_target(new_skip_edge->logical_parent());
}
#endif // ASSERT

// Turn new_skip_edge into a skip edge whose logical parent is the edge
// skip_target_distance physical steps up the chain.
// NOTE(review): set_skip_edge/set_skip_length mutate through a const pointer —
// presumably RoutableEdge keeps this routing state in mutable members; confirm
// in edge.hpp.
static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
  assert(new_skip_edge != NULL, "invariant");
  assert(!new_skip_edge->is_skip_edge(), "invariant");
  assert(!new_skip_edge->processed(), "invariant");
  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
  assert(skip_target != NULL, "invariant");
  new_skip_edge->set_skip_edge(skip_target);
  new_skip_edge->set_skip_length(skip_target_distance);
  assert(new_skip_edge->is_skip_edge(), "invariant");
  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
}

// Walk physical parents from edge looking for a skip edge whose target is a
// sentinel; 'distance' (must start at 0) accumulates the steps walked.
// Returns NULL (with distance == full chain length) when none exists.
static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
  assert(distance == 0, "invariant");
  const RoutableEdge* current = &edge;
  while (current != NULL) {
    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
      return current;
    }
    current = current->physical_parent();
    ++distance;
  }
  return current;
}

// Collapse a chain that overlaps an already serialized chain: an edge within
// this leak context has been processed before. If the combined depth exceeds
// the budget, install a skip edge just below the processed edge that routes
// logically toward the existing sentinel / root context.
static void collapse_overlapping_chain(const RoutableEdge& edge,
                                       const RoutableEdge* first_processed_edge,
                                       size_t first_processed_distance) {
  assert(first_processed_edge != NULL, "invariant");
  // first_processed_edge is already processed / written
  assert(first_processed_edge->processed(), "invariant");
  assert(first_processed_distance + 1 <= leak_context, "invariant");

  // from this first processed edge, attempt to fetch the last skip edge
  size_t last_skip_edge_distance = 0;
  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;

  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
    // complete chain can be accommodated without modification
    return;
  }

  // backtrack one edge from existing processed edge
  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
  assert(new_skip_edge != NULL, "invariant");
  assert(!new_skip_edge->processed(), "invariant");
  assert(new_skip_edge->parent() == first_processed_edge, "invariant");

  size_t adjustment = 0;
  if (last_skip_edge != NULL) {
    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
    // shorten the route by the part of the leak context not yet consumed
    adjustment = leak_context - first_processed_distance - 1;
    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
  } else {
    // no prior skip edge: route directly past everything but the root context
    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
  }

  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
}

// Collapse a chain with no previously serialized overlap: the "first
// processed" edge is really just the edge at the leak-context boundary.
// If the full chain exceeds the depth budget, make that boundary edge a skip
// edge routing straight to the start of the root context.
static void collapse_non_overlapping_chain(const RoutableEdge& edge,
                                           const RoutableEdge* first_processed_edge,
                                           size_t first_processed_distance) {
  assert(first_processed_edge != NULL, "invariant");
  assert(!first_processed_edge->processed(), "invariant");
  // this implies that the first "processed" edge is the leak context relative "leaf"
  assert(first_processed_distance + 1 == leak_context, "invariant");

  const size_t distance_to_root = edge.distance_to_root();
  if (distance_to_root + 1 <= max_ref_chain_depth) {
    // complete chain can be accommodated without constructing a skip edge
    return;
  }

  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel

  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
}

// Search up to leak_context - 1 physical steps for an already serialized edge.
// If none is processed within the context, returns the edge at the context
// boundary (or NULL if the chain ends first); the caller distinguishes the
// two cases via ->processed().
static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
  assert(distance == 0, "invariant");
  const RoutableEdge* current = &edge;
  while (current != NULL && distance < leak_context - 1) {
    if (current->processed()) {
      return current;
    }
    current = current->physical_parent();
    ++distance;
  }
  assert(distance <= leak_context - 1, "invariant");
  return current;
}

/*
 * Some vocabulary:
 * -----------
 * "Context" is an interval in the chain, it is associated with an edge and it signifies a number of connected edges.
 * "Processed / written" means an edge that has already been serialized.
 * "Skip edge" is an edge that contains additional information for logical routing purposes.
 * "Skip target" is an edge used as a destination for a skip edge.
 */
void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
  assert(is_leak_edge(edge), "invariant");

  // attempt to locate an already processed edge inside current leak context (if any)
  size_t first_processed_distance = 0;
  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
  if (first_processed_edge == NULL) {
    // chain is shorter than the leak context and entirely unprocessed; nothing to collapse
    return;
  }

  if (first_processed_edge->processed()) {
    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
  } else {
    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
  }

  // after collapsing, the logical chain must fit the serialization budget
  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
}