Wed, 09 Oct 2019 16:11:58 +0800
8214542: JFR: Old Object Sample event slow on a deep heap in debug builds
Reviewed-by: egahlin, rwestberg
--- a/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,6 @@
 }
 
 void BFSClosure::process() {
-
   process_root_set();
   process_queue();
 }
@@ -136,7 +135,6 @@
 
     // if we are processinig initial root set, don't add to queue
     if (_current_parent != NULL) {
-      assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
       _edge_queue->add(_current_parent, reference);
     }
 
@@ -149,20 +147,8 @@
 void BFSClosure::add_chain(const oop* reference, const oop pointee) {
   assert(pointee != NULL, "invariant");
   assert(NULL == pointee->mark(), "invariant");
-
-  const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
-  ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
-  size_t idx = 0;
-  chain[idx++] = Edge(NULL, reference);
-  // aggregate from breadth-first search
-  const Edge* current = _current_parent;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
-  }
-  assert(length == idx, "invariant");
-  _edge_store->add_chain(chain, length);
+  Edge leak_edge(_current_parent, reference);
+  _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
 }
 
 void BFSClosure::dfs_fallback() {
@@ -239,3 +225,10 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void BFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  if (!_edge_queue->is_full()) {
+    _edge_queue->add(NULL, ref);
+  }
+}
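What this hunk buys: add_chain() previously materialized the whole discovered reference chain into a NEW_RESOURCE_ARRAY and copied every edge for each sample hit, O(chain depth) work per sample; the removed assert also walked distance_to_root(), itself O(depth), on every queued edge in debug builds, where asserts are compiled in. It now hands EdgeStore a single leak edge plus a precomputed length. A minimal before/after sketch, not patch text, with "store", "parent", "reference" and "frontier_level" standing in for the BFSClosure member state used above:

    // Illustrative sketch only.
    void add_chain_before(EdgeStore* store, const Edge* parent, const oop* reference, size_t length) {
      ResourceMark rm;
      Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length); // O(length) copy per sample
      // ... walk parent links, copying each edge into chain ...
      store->add_chain(chain, length);
    }

    void add_chain_after(EdgeStore* store, const Edge* parent, const oop* reference, size_t frontier_level) {
      Edge leak_edge(parent, reference);                    // O(1) handoff
      store->put_chain(&leak_edge, parent == NULL ? 1 : frontier_level + 2);
    }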
--- a/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -26,7 +26,6 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -65,6 +64,7 @@
  public:
   BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
   void process();
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/share/vm/jfr/leakprofiler/chains/bitset.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/bitset.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -47,7 +47,7 @@
 
   BitMap::idx_t mark_obj(const HeapWord* addr) {
     const BitMap::idx_t bit = addr_to_bit(addr);
-    _bits.par_set_bit(bit);
+    _bits.set_bit(bit);
     return bit;
   }
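Why par_set_bit to set_bit matters here: the leak profiler marks objects from a single thread inside a safepoint operation, so the atomic, CAS-looping par_set_bit is unnecessary and plain set_bit suffices, removing one atomic per marked object on the traversal hot path. A standalone illustration of the cost difference, modeled with std::atomic; this is not JDK code:

    #include <atomic>
    #include <cstdint>

    // par-style: loop until the CAS wins or the bit turns out to be set already
    void par_set_bit(std::atomic<uintptr_t>* word, unsigned bit) {
      const uintptr_t mask = uintptr_t(1) << bit;
      uintptr_t old = word->load(std::memory_order_relaxed);
      while (!(old & mask) && !word->compare_exchange_weak(old, old | mask)) {
        // 'old' was refreshed by the failed CAS; retry
      }
    }

    // serial: a single read-modify-write, safe when marking is single-threaded
    void set_bit(uintptr_t* word, unsigned bit) {
      *word |= uintptr_t(1) << bit;
    }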
--- a/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/utilities/rootType.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
@@ -87,15 +87,15 @@
   // Mark root set, to avoid going sideways
   _max_depth = 1;
   _ignore_root_set = false;
-  DFSClosure dfs1;
-  RootSetClosure::process_roots(&dfs1);
+  DFSClosure dfs;
+  RootSetClosure<DFSClosure> rs(&dfs);
+  rs.process();
 
   // Depth-first search
   _max_depth = max_dfs_depth;
   _ignore_root_set = true;
   assert(_start_edge == NULL, "invariant");
-  DFSClosure dfs2;
-  RootSetClosure::process_roots(&dfs2);
+  rs.process();
 }
 
 void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
@@ -132,30 +132,29 @@
 }
 
 void DFSClosure::add_chain() {
-  const size_t length = _start_edge == NULL ? _depth + 1 :
-                        _start_edge->distance_to_root() + 1 + _depth + 1;
+  const size_t array_length = _depth + 2;
 
   ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
   size_t idx = 0;
 
   // aggregate from depth-first search
   const DFSClosure* c = this;
   while (c != NULL) {
-    chain[idx++] = Edge(NULL, c->reference());
+    const size_t next = idx + 1;
+    chain[idx++] = Edge(&chain[next], c->reference());
     c = c->parent();
   }
-
-  assert(idx == _depth + 1, "invariant");
+  assert(_depth + 1 == idx, "invariant");
+  assert(array_length == idx + 1, "invariant");
 
   // aggregate from breadth-first search
-  const Edge* current = _start_edge;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
+  if (_start_edge != NULL) {
+    chain[idx++] = *_start_edge;
+  } else {
+    chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
   }
-  assert(idx == length, "invariant");
-  _edge_store->add_chain(chain, length);
+  _edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
 }
 
 void DFSClosure::do_oop(oop* ref) {
@@ -175,3 +174,10 @@
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void DFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  const oop pointee = UnifiedOop::dereference(ref);
+  assert(pointee != NULL, "invariant");
+  closure_impl(ref, pointee);
+}
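A note on the add_chain() rewrite above: the temporary chain is now pre-linked in place, with each element's parent pointing at the next array slot, so EdgeStore::put_chain() can walk a DFS-produced chain with the same parent() calls it uses for a BFS-produced one. Schematically (illustration only, not patch text):

    // chain[0] -> chain[1] -> ... -> chain[_depth] -> parent
    //   leaf (leak candidate end)        |
    //                                    +-- *_start_edge if the DFS continued
    //                                    |   from an already stored edge, else
    //                                    +-- NULL (the chain ends at a GC root)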
--- a/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -26,7 +26,6 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
 class BitSet;
 class Edge;
@@ -34,7 +33,7 @@
 class EdgeQueue;
 
 // Class responsible for iterating the heap depth-first
-class DFSClosure: public ExtendedOopClosure { // XXX BasicOopIterateClosure
+class DFSClosure : public ExtendedOopClosure { // XXX BasicOopIterateClosure
  private:
   static EdgeStore* _edge_store;
   static BitSet* _mark_bits;
@@ -57,6 +56,7 @@
  public:
   static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
   static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
+  void do_root(const oop* ref);
 
   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
--- a/src/share/vm/jfr/leakprofiler/chains/edge.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edge.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -29,7 +29,7 @@
 #include "oops/oopsHierarchy.hpp"
 
 class Edge {
- private:
+ protected:
   const Edge* _parent;
   const oop* _reference;
  public:
--- a/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,37 +27,17 @@
 #include "jfr/leakprofiler/chains/edgeUtils.hpp"
 #include "oops/oop.inline.hpp"
 
-RoutableEdge::RoutableEdge() : Edge() {}
-RoutableEdge::RoutableEdge(const Edge* parent, const oop* reference) : Edge(parent, reference),
-                                                                       _skip_edge(NULL),
-                                                                       _skip_length(0),
-                                                                       _processed(false) {}
+StoredEdge::StoredEdge() : Edge() {}
+StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
 
-RoutableEdge::RoutableEdge(const Edge& edge) : Edge(edge),
-                                               _skip_edge(NULL),
-                                               _skip_length(0),
-                                               _processed(false) {}
+StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
 
-RoutableEdge::RoutableEdge(const RoutableEdge& edge) : Edge(edge),
-                                                       _skip_edge(edge._skip_edge),
-                                                       _skip_length(edge._skip_length),
-                                                       _processed(edge._processed) {}
+StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
 
-void RoutableEdge::operator=(const RoutableEdge& edge) {
+void StoredEdge::operator=(const StoredEdge& edge) {
   Edge::operator=(edge);
-  _skip_edge = edge._skip_edge;
+  _gc_root_id = edge._gc_root_id;
   _skip_length = edge._skip_length;
-  _processed = edge._processed;
-}
-
-size_t RoutableEdge::logical_distance_to_root() const {
-  size_t depth = 0;
-  const RoutableEdge* current = logical_parent();
-  while (current != NULL) {
-    depth++;
-    current = current->logical_parent();
-  }
-  return depth;
 }
 
 traceid EdgeStore::_edge_id_counter = 0;
@@ -69,79 +49,12 @@
 EdgeStore::~EdgeStore() {
   assert(_edges != NULL, "invariant");
   delete _edges;
-  _edges = NULL;
-}
-
-const Edge* EdgeStore::get_edge(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  return entry != NULL ? entry->literal_addr() : NULL;
-}
-
-const Edge* EdgeStore::put(const Edge* edge) {
-  assert(edge != NULL, "invariant");
-  const RoutableEdge e = *edge;
-  assert(NULL == _edges->lookup_only(e, (uintptr_t)e.reference()), "invariant");
-  EdgeEntry& entry = _edges->put(e, (uintptr_t)e.reference());
-  return entry.literal_addr();
-}
-
-traceid EdgeStore::get_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
-  assert(entry != NULL, "invariant");
-  return entry->id();
-}
-
-traceid EdgeStore::get_root_id(const Edge* edge) const {
-  assert(edge != NULL, "invariant");
-  const Edge* root = EdgeUtils::root(*edge);
-  assert(root != NULL, "invariant");
-  return get_id(root);
-}
-
-void EdgeStore::add_chain(const Edge* chain, size_t length) {
-  assert(chain != NULL, "invariant");
-  assert(length > 0, "invariant");
-
-  size_t bottom_index = length - 1;
-  const size_t top_index = 0;
-
-  const Edge* stored_parent_edge = NULL;
-
-  // determine level of shared ancestry
-  for (; bottom_index > top_index; --bottom_index) {
-    const Edge* stored_edge = get_edge(&chain[bottom_index]);
-    if (stored_edge != NULL) {
-      stored_parent_edge = stored_edge;
-      continue;
-    }
-    break;
-  }
-
-  // insertion of new Edges
-  for (int i = (int)bottom_index; i >= (int)top_index; --i) {
-    Edge edge(stored_parent_edge, chain[i].reference());
-    stored_parent_edge = put(&edge);
-  }
-
-  const oop sample_object = stored_parent_edge->pointee();
-  assert(sample_object != NULL, "invariant");
-  assert(NULL == sample_object->mark(), "invariant");
-
-  // Install the "top" edge of the chain into the sample object mark oop.
-  // This associates the sample object with its navigable reference chain.
-  sample_object->set_mark(markOop(stored_parent_edge));
 }
 
 bool EdgeStore::is_empty() const {
   return !_edges->has_entries();
 }
 
-size_t EdgeStore::number_of_entries() const {
-  return _edges->cardinality();
-}
-
 void EdgeStore::assign_id(EdgeEntry* entry) {
   assert(entry != NULL, "invariant");
   assert(entry->id() == 0, "invariant");
@@ -153,3 +66,254 @@
   assert(entry->hash() == hash, "invariant");
   return true;
 }
+
+#ifdef ASSERT
+bool EdgeStore::contains(const oop* reference) const {
+  return get(reference) != NULL;
+}
+#endif
+
+StoredEdge* EdgeStore::get(const oop* reference) const {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference);
+  return entry != NULL ? entry->literal_addr() : NULL;
+}
+
+StoredEdge* EdgeStore::put(const oop* reference) {
+  assert(reference != NULL, "invariant");
+  const StoredEdge e(NULL, reference);
+  assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant");
+  EdgeEntry& entry = _edges->put(e, (uintptr_t)reference);
+  return entry.literal_addr();
+}
+
+traceid EdgeStore::get_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference());
+  assert(entry != NULL, "invariant");
+  return entry->id();
+}
+
+traceid EdgeStore::gc_root_id(const Edge* edge) const {
+  assert(edge != NULL, "invariant");
+  const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
+  if (gc_root_id != 0) {
+    return gc_root_id;
+  }
+  // not cached
+  assert(edge != NULL, "invariant");
+  const Edge* const root = EdgeUtils::root(*edge);
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  return get_id(root);
+}
+
+static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_root, size_t* skip_length) {
+  assert(distance_to_root >= EdgeUtils::root_context, "invariant");
+  assert(*skip_length == 0, "invariant");
+  *skip_length = distance_to_root - (EdgeUtils::root_context - 1);
+  const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
+  assert(target != NULL, "invariant");
+  assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
+  return target;
+}
+
+bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert((*current)->distance_to_root() == distance_to_root, "invariant");
+
+  if (distance_to_root < EdgeUtils::root_context) {
+    // nothing to skip
+    return false;
+  }
+
+  size_t skip_length = 0;
+  const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
+  assert(skip_ancestor != NULL, "invariant");
+  (*previous)->set_skip_length(skip_length);
+
+  // lookup target
+  StoredEdge* stored_target = get(skip_ancestor->reference());
+  if (stored_target != NULL) {
+    (*previous)->set_parent(stored_target);
+    // linked to existing, complete
+    return true;
+  }
+
+  assert(stored_target == NULL, "invariant");
+  stored_target = put(skip_ancestor->reference());
+  assert(stored_target != NULL, "invariant");
+  (*previous)->set_parent(stored_target);
+  *previous = stored_target;
+  *current = skip_ancestor->parent();
+  return false;
+}
+
+static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
+  assert(current_stored != NULL, "invariant");
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  (*previous)->set_parent(current_stored);
+}
+
+static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
+  assert(edge != NULL, "invariant");
+  assert(distance != NULL, "invariant");
+  const StoredEdge* current = edge;
+  *distance = 1;
+  while (current != NULL && !current->is_skip_edge()) {
+    ++(*distance);
+    current = current->parent();
+  }
+  return current;
+}
+
+void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
+  assert(current_stored != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  size_t distance_to_skip_edge; // including the skip edge itself
+  const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
+  if (closest_skip_edge == NULL) {
+    // no found skip edge implies root
+    if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
+      link_edge(current_stored, previous);
+      return;
+    }
+    assert(current_stored->distance_to_root() == distance_to_skip_edge - 2, "invariant");
+    put_skip_edge(previous, reinterpret_cast<const Edge**>(&current_stored), distance_to_skip_edge - 2);
+    return;
+  }
+  assert(closest_skip_edge->is_skip_edge(), "invariant");
+  if (distance_to_skip_edge + previous_length <= EdgeUtils::leak_context) {
+    link_edge(current_stored, previous);
+    return;
+  }
+  // create a new skip edge with derived information from closest skip edge
+  (*previous)->set_skip_length(distance_to_skip_edge + closest_skip_edge->skip_length());
+  (*previous)->set_parent(closest_skip_edge->parent());
+}
+
+StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
+  assert(*previous != NULL, "invariant");
+  assert((*previous)->parent() == NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  assert(!contains((*current)->reference()), "invariant");
+  StoredEdge* const stored_edge = put((*current)->reference());
+  assert(stored_edge != NULL, "invariant");
+  link_edge(stored_edge, previous);
+  return stored_edge;
+}
+
+bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
+  assert(*previous != NULL, "invariant");
+  assert(*current != NULL, "invariant");
+  size_t depth = 1;
+  while (*current != NULL && depth < limit) {
+    StoredEdge* stored_edge = get((*current)->reference());
+    if (stored_edge != NULL) {
+      link_with_existing_chain(stored_edge, previous, depth);
+      return true;
+    }
+    stored_edge = link_new_edge(previous, current);
+    assert((*previous)->parent() != NULL, "invariant");
+    *previous = stored_edge;
+    *current = (*current)->parent();
+    ++depth;
+  }
+  return NULL == *current;
+}
+
+// Install the immediate edge into the mark word of the leak candidate object
+StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
+  assert(edge != NULL, "invariant");
+  assert(!contains(edge->reference()), "invariant");
+  StoredEdge* const leak_context_edge = put(edge->reference());
+  oop sample_object = edge->pointee();
+  assert(sample_object != NULL, "invariant");
+  assert(NULL == sample_object->mark(), "invariant");
+  sample_object->set_mark(markOop(leak_context_edge));
+  return leak_context_edge;
+}
+
+/*
+ * The purpose of put_chain() is to reify the edge sequence
+ * discovered during heap traversal with a normalized logical copy.
+ * This copy consist of two sub-sequences and a connecting link (skip edge).
+ *
+ * "current" can be thought of as the cursor (search) edge, it is not in the edge store.
+ * "previous" is always an edge in the edge store.
+ * The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
+ */
+void EdgeStore::put_chain(const Edge* chain, size_t length) {
+  assert(chain != NULL, "invariant");
+  assert(chain->distance_to_root() + 1 == length, "invariant");
+  StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->parent() == NULL, "invariant");
+
+  if (1 == length) {
+    return;
+  }
+
+  const Edge* current = chain->parent();
+  assert(current != NULL, "invariant");
+  StoredEdge* previous = leak_context_edge;
+
+  // a leak context is the sequence of (limited) edges reachable from the leak candidate
+  if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+    return;
+  }
+
+  const size_t distance_to_root = length > EdgeUtils::leak_context ? length - 1 - EdgeUtils::leak_context : length - 1;
+  assert(current->distance_to_root() == distance_to_root, "invariant");
+
+  // a skip edge is the logical link
+  // connecting the leak context sequence with the root context sequence
+  if (put_skip_edge(&previous, &current, distance_to_root)) {
+    // complete
+    assert(previous != NULL, "invariant");
+    assert(previous->is_skip_edge(), "invariant");
+    assert(previous->parent() != NULL, "invariant");
+    put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
+    return;
+  }
+
+  assert(current->distance_to_root() < EdgeUtils::root_context, "invariant");
+
+  // a root context is the sequence of (limited) edges reachable from the root
+  put_edges(&previous, &current, EdgeUtils::root_context);
+  assert(previous != NULL, "invariant");
+  put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
+}
+
+void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(root != NULL, "invariant");
+  store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
+  assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
+}
+
+// To avoid another traversal to resolve the root edge id later,
+// cache it in the immediate leak context edge for fast retrieval.
+void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
+  assert(leak_context_edge != NULL, "invariant");
+  assert(leak_context_edge->gc_root_id() == 0, "invariant");
+  assert(root != NULL, "invariant");
+  assert(root->parent() == NULL, "invariant");
+  assert(root->distance_to_root() == 0, "invariant");
+  const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
+  traceid root_id = stored_root->gc_root_id();
+  if (root_id == 0) {
+    root_id = get_id(root);
+    stored_root->set_gc_root_id(root_id);
+  }
+  assert(root_id != 0, "invariant");
+  leak_context_edge->set_gc_root_id(root_id);
+  assert(leak_context_edge->gc_root_id() == stored_root->gc_root_id(), "invariant");
+}
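Taken together, put_chain() stores at most EdgeUtils::leak_context edges next to the leak candidate and at most EdgeUtils::root_context edges next to the GC root, bridged by one skip edge whose _skip_length records how many interior edges were elided; a deep chain therefore costs bounded storage while its original depth stays recoverable. A rough sketch of that recovery, using only the StoredEdge accessors from the edgeStore.hpp hunk below (illustrative, not patch text):

    // A skip edge's parent sits skip_length() hops away in the traversal-time
    // chain instead of one, so summing gives roughly the original depth.
    static size_t logical_depth(const StoredEdge* leaf) {
      size_t depth = 0;
      for (const StoredEdge* e = leaf; e != NULL; e = e->parent()) {
        depth += e->is_skip_edge() ? e->skip_length() : 1;
      }
      return depth;
    }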
--- a/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -25,64 +25,40 @@
 #ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 #define SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 
+#include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
-#include "jfr/leakprofiler/chains/edge.hpp"
 #include "memory/allocation.hpp"
 
 typedef u8 traceid;
 
-class RoutableEdge : public Edge {
+class StoredEdge : public Edge {
  private:
-  mutable const RoutableEdge* _skip_edge;
-  mutable size_t _skip_length;
-  mutable bool _processed;
+  mutable traceid _gc_root_id;
+  size_t _skip_length;
 
  public:
-  RoutableEdge();
-  RoutableEdge(const Edge* parent, const oop* reference);
-  RoutableEdge(const Edge& edge);
-  RoutableEdge(const RoutableEdge& edge);
-  void operator=(const RoutableEdge& edge);
+  StoredEdge();
+  StoredEdge(const Edge* parent, const oop* reference);
+  StoredEdge(const Edge& edge);
+  StoredEdge(const StoredEdge& edge);
+  void operator=(const StoredEdge& edge);
 
-  const RoutableEdge* skip_edge() const { return _skip_edge; }
+  traceid gc_root_id() const { return _gc_root_id; }
+  void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
+
+  bool is_skip_edge() const { return _skip_length != 0; }
   size_t skip_length() const { return _skip_length; }
+  void set_skip_length(size_t length) { _skip_length = length; }
 
-  bool is_skip_edge() const { return _skip_edge != NULL; }
-  bool processed() const { return _processed; }
-  bool is_sentinel() const {
-    return _skip_edge == NULL && _skip_length == 1;
+  void set_parent(const Edge* edge) { this->_parent = edge; }
+
+  StoredEdge* parent() const {
+    return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
   }
-
-  void set_skip_edge(const RoutableEdge* edge) const {
-    assert(!is_skip_edge(), "invariant");
-    assert(edge != this, "invariant");
-    _skip_edge = edge;
-  }
-
-  void set_skip_length(size_t length) const {
-    _skip_length = length;
-  }
-
-  void set_processed() const {
-    assert(!_processed, "invariant");
-    _processed = true;
-  }
-
-  // true navigation according to physical tree representation
-  const RoutableEdge* physical_parent() const {
-    return static_cast<const RoutableEdge*>(parent());
-  }
-
-  // logical navigation taking skip levels into account
-  const RoutableEdge* logical_parent() const {
-    return is_skip_edge() ? skip_edge() : physical_parent();
-  }
-
-  size_t logical_distance_to_root() const;
 };
 
 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename,
             typename,
@@ -90,6 +66,9 @@
             typename,
             size_t>
   friend class HashTableHost;
+  friend class EventEmitter;
+  friend class ObjectSampleWriter;
+  friend class ObjectSampleCheckpoint;
 private:
   static traceid _edge_id_counter;
   EdgeHashTable* _edges;
@@ -98,22 +77,31 @@
   void assign_id(EdgeEntry* entry);
   bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);
 
-  const Edge* get_edge(const Edge* edge) const;
-  const Edge* put(const Edge* edge);
+  StoredEdge* get(const oop* reference) const;
+  StoredEdge* put(const oop* reference);
+  traceid gc_root_id(const Edge* edge) const;
+
+  bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
+  bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
+  void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
+
+  StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
+  void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
+  StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
+  void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
+
+  template <typename T>
+  void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
+
+  DEBUG_ONLY(bool contains(const oop* reference) const;)
 
 public:
   EdgeStore();
   ~EdgeStore();
 
-  void add_chain(const Edge* chain, size_t length);
   bool is_empty() const;
-  size_t number_of_entries() const;
-
   traceid get_id(const Edge* edge) const;
-  traceid get_root_id(const Edge* edge) const;
-
-  template <typename T>
-  void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
+  void put_chain(const Edge* chain, size_t length);
 };
 
 #endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
--- a/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,11 +38,7 @@
   return (const Edge*)edge.pointee()->mark() == &edge;
 }
 
-bool EdgeUtils::is_root(const Edge& edge) {
-  return edge.is_root();
-}
-
-static int field_offset(const Edge& edge) {
+static int field_offset(const StoredEdge& edge) {
   assert(!edge.is_root(), "invariant");
   const oop ref_owner = edge.reference_owner();
   assert(ref_owner != NULL, "invariant");
@@ -56,7 +52,7 @@
   return offset;
 }
 
-static const InstanceKlass* field_type(const Edge& edge) {
+static const InstanceKlass* field_type(const StoredEdge& edge) {
   assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
   return (const InstanceKlass*)edge.reference_owner_klass();
 }
@@ -138,175 +134,18 @@
     current = parent;
     parent = current->parent();
   }
+  assert(current != NULL, "invariant");
   return current;
 }
 
-// The number of references associated with the leak node;
-// can be viewed as the leak node "context".
-// Used to provide leak context for a "capped/skipped" reference chain.
-static const size_t leak_context = 100;
-
-// The number of references associated with the root node;
-// can be viewed as the root node "context".
-// Used to provide root context for a "capped/skipped" reference chain.
-static const size_t root_context = 100;
-
-// A limit on the reference chain depth to be serialized,
-static const size_t max_ref_chain_depth = leak_context + root_context;
-
-const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
-  const RoutableEdge* current = &edge;
-  const RoutableEdge* parent = current->physical_parent();
+const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
+  const Edge* current = &edge;
+  const Edge* parent = current->parent();
   size_t seek = 0;
-  while (parent != NULL && seek != skip_length) {
+  while (parent != NULL && seek != distance) {
     seek++;
     current = parent;
-    parent = parent->physical_parent();
+    parent = parent->parent();
   }
   return current;
 }
-
-#ifdef ASSERT
-static void validate_skip_target(const RoutableEdge* skip_target) {
-  assert(skip_target != NULL, "invariant");
-  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
-  assert(skip_target->is_sentinel(), "invariant");
-}
-
-static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  if (last_skip_edge != NULL) {
-    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
-    validate_skip_target(target->logical_parent());
-    return;
-  }
-  assert(last_skip_edge == NULL, "invariant");
-  // only one level of logical indirection
-  validate_skip_target(new_skip_edge->logical_parent());
-}
-#endif // ASSERT
-
-static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->is_skip_edge(), "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
-  assert(skip_target != NULL, "invariant");
-  new_skip_edge->set_skip_edge(skip_target);
-  new_skip_edge->set_skip_length(skip_target_distance);
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
-}
-
-static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  return current;
-}
-
-static void collapse_overlapping_chain(const RoutableEdge& edge,
-                                       const RoutableEdge* first_processed_edge,
-                                       size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  // first_processed_edge is already processed / written
-  assert(first_processed_edge->processed(), "invariant");
-  assert(first_processed_distance + 1 <= leak_context, "invariant");
-
-  // from this first processed edge, attempt to fetch the last skip edge
-  size_t last_skip_edge_distance = 0;
-  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
-  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
-
-  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
-    // complete chain can be accommodated without modification
-    return;
-  }
-
-  // backtrack one edge from existing processed edge
-  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
-
-  size_t adjustment = 0;
-  if (last_skip_edge != NULL) {
-    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
-    adjustment = leak_context - first_processed_distance - 1;
-    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
-  } else {
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
-    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
-  }
-
-  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
-}
-
-static void collapse_non_overlapping_chain(const RoutableEdge& edge,
-                                           const RoutableEdge* first_processed_edge,
-                                           size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  assert(!first_processed_edge->processed(), "invariant");
-  // this implies that the first "processed" edge is the leak context relative "leaf"
-  assert(first_processed_distance + 1 == leak_context, "invariant");
-
-  const size_t distance_to_root = edge.distance_to_root();
-  if (distance_to_root + 1 <= max_ref_chain_depth) {
-    // complete chain can be accommodated without constructing a skip edge
-    return;
-  }
-
-  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
-  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
-
-  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
-}
-
-static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL && distance < leak_context - 1) {
-    if (current->processed()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  assert(distance <= leak_context - 1, "invariant");
-  return current;
-}
-
-/*
- * Some vocabulary:
- * -----------
- * "Context" is an interval in the chain, it is associcated with an edge and it signifies a number of connected edges.
- * "Processed / written" means an edge that has already been serialized.
- * "Skip edge" is an edge that contains additional information for logical routing purposes.
- * "Skip target" is an edge used as a destination for a skip edge
- */
-void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
-  assert(is_leak_edge(edge), "invariant");
-
-  // attempt to locate an already processed edge inside current leak context (if any)
-  size_t first_processed_distance = 0;
-  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
-  if (first_processed_edge == NULL) {
-    return;
-  }
-
-  if (first_processed_edge->processed()) {
-    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  } else {
-    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  }
-
-  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
-}
--- a/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -28,15 +28,17 @@
 #include "memory/allocation.hpp"
 
 class Edge;
-class RoutableEdge;
 class Symbol;
 
 class EdgeUtils : public AllStatic {
  public:
+  static const size_t leak_context = 100;
+  static const size_t root_context = 100;
+  static const size_t max_ref_chain_depth = leak_context + root_context;
+
   static bool is_leak_edge(const Edge& edge);
-
   static const Edge* root(const Edge& edge);
-  static bool is_root(const Edge& edge);
+  static const Edge* ancestor(const Edge& edge, size_t distance);
 
   static bool is_array_element(const Edge& edge);
   static int array_index(const Edge& edge);
@@ -44,8 +46,6 @@
 
   static const Symbol* field_name_symbol(const Edge& edge);
   static jshort field_modifiers(const Edge& edge);
-
-  static void collapse_chain(const RoutableEdge& edge);
 };
 
 #endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
+#include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/leakprofiler/chains/edgeQueue.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
+#include "memory/universe.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
+  _sampler(sampler),_edge_store(edge_store), _cutoff_ticks(cutoff), _emit_all(emit_all) {}
+
+/* The EdgeQueue is backed by directly managed virtual memory.
+ * We will attempt to dimension an initial reservation
+ * in proportion to the size of the heap (represented by heap_region).
+ * Initial memory reservation: 5% of the heap OR at least 32 Mb
+ * Commit ratio: 1 : 10 (subject to allocation granularties)
+ */
+static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
+  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
+  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
+  return memory_reservation_bytes;
+}
+
+static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
+  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
+  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
+  return memory_commit_block_size_bytes;
+}
+
+static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
+  if (LogJFR && Verbose) tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
+  if (edge_queue.reserved_size() > 0) {
+    if (LogJFR && Verbose) tty->print_cr("EdgeQueue commit reserve ratio: %f\n",
+      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
+  }
+}
+
+void PathToGcRootsOperation::doit() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_cutoff_ticks > 0, "invariant");
+
+  // The bitset used for marking is dimensioned as a function of the heap size
+  const MemRegion heap_region = Universe::heap()->reserved_region();
+  BitSet mark_bits(heap_region);
+
+  // The edge queue is dimensioned as a fraction of the heap size
+  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
+  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
+
+  // The initialize() routines will attempt to reserve and allocate backing storage memory.
+  // Failure to accommodate will render root chain processing impossible.
+  // As a fallback on failure, just write out the existing samples, flat, without chains.
+  if (!(mark_bits.initialize() && edge_queue.initialize())) {
+    if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
+    return;
+  }
+
+  // Save the original markWord for the potential leak objects,
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
+    // no valid samples to process
+    return;
+  }
+
+  // Necessary condition for attempting a root set iteration
+  Universe::heap()->ensure_parsability(false);
+
+  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
+  RootSetClosure<BFSClosure> roots(&bfs);
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  roots.process();
+  if (edge_queue.is_full()) {
+    // Pathological case where roots don't fit in queue
+    // Do a depth-first search, but mark roots first
+    // to avoid walking sideways over roots
+    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
+  } else {
+    bfs.process();
+  }
+  GranularTimer::stop();
+  log_edge_queue_summary(edge_queue);
+
+  // Emit old objects including their reference chains as events
+  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
+  emitter.write_events(_sampler, _edge_store, _emit_all);
+}
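A quick arithmetic check of the sizing policy in edge_queue_memory_reservation() above. This is a standalone illustration assuming a 64-bit size_t; HotSpot's MAX2 and M are modeled directly:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t heap_bytes = 4096 * M;                           // e.g. a 4 GB heap
      const size_t reservation = std::max(heap_bytes / 20, 32 * M); // 5% of heap, floor 32 MB
      const size_t commit_block = reservation / 10;                 // 1 : 10 commit ratio
      std::printf("reservation: %zu MB, commit block: %zu MB\n",
                  reservation / M, commit_block / M);               // -> 204 MB, 20 MB
      return 0;
    }

Small heaps bottom out at the 32 MB reservation floor, giving roughly 3 MB commit blocks, which is exactly what the asserts in the two helpers above encode.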
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
+
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
+
+class EdgeStore;
+class ObjectSampler;
+
+// Safepoint operation for finding paths to gc roots
+class PathToGcRootsOperation : public OldObjectVMOperation {
+ private:
+  ObjectSampler* _sampler;
+  EdgeStore* const _edge_store;
+  const int64_t _cutoff_ticks;
+  const bool _emit_all;
+
+ public:
+  PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
+  virtual void doit();
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
--- a/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -25,11 +25,14 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
 #include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/saveRestore.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
 #include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
@@ -37,11 +40,11 @@
 #include "services/management.hpp"
 #include "utilities/align.hpp"
 
-RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
-  _edge_queue(edge_queue) {
-}
+template <typename Delegate>
+RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}
 
-void RootSetClosure::do_oop(oop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(oop* ref) {
   assert(ref != NULL, "invariant");
   // We discard unaligned root references because
   // our reference tagging scheme will use
@@ -55,48 +58,38 @@
   }
 
   assert(is_aligned(ref, HeapWordSize), "invariant");
-  const oop pointee = *ref;
-  if (pointee != NULL) {
-    closure_impl(ref, pointee);
+  if (*ref != NULL) {
+    _delegate->do_root(ref);
   }
 }
 
-void RootSetClosure::do_oop(narrowOop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
   assert(ref != NULL, "invariant");
   assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
   const oop pointee = oopDesc::load_decode_heap_oop(ref);
   if (pointee != NULL) {
-    closure_impl(UnifiedOop::encode(ref), pointee);
+    _delegate->do_root(UnifiedOop::encode(ref));
   }
 }
 
-void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
-  if (!_edge_queue->is_full()) {
-    _edge_queue->add(NULL, reference);
-  }
+class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope {};
+
+template <typename Delegate>
+void RootSetClosure<Delegate>::process() {
+  RootSetClosureMarkScope mark_scope;
+  CLDToOopClosure cldt_closure(this);
+  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
+  CodeBlobToOopClosure blobs(this, false);
+  Threads::oops_do(this, NULL, &blobs); // XXX set CLDClosure to NULL
+  ObjectSynchronizer::oops_do(this);
+  Universe::oops_do(this);
+  JNIHandles::oops_do(this);
+  JvmtiExport::oops_do(this);
+  SystemDictionary::oops_do(this);
+  Management::oops_do(this);
+  StringTable::oops_do(this);
 }
 
-void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
-  RootSetClosure rs(edge_queue);
-  process_roots(&rs);
-}
-
-class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope {
-};
-
-void RootSetClosure::process_roots(OopClosure* closure) {
-  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
-  RootSetClosureMarkScope mark_scope;
-
-  CLDToOopClosure cldt_closure(closure);
-  ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
-  CodeBlobToOopClosure blobs(closure, false);
-  Threads::oops_do(closure, NULL, &blobs); // XXX set CLDClosure to NULL
-  ObjectSynchronizer::oops_do(closure);
-  Universe::oops_do(closure);
-  JNIHandles::oops_do(closure);
-  JvmtiExport::oops_do(closure);
-  SystemDictionary::oops_do(closure);
-  Management::oops_do(closure);
-  StringTable::oops_do(closure);
-}
+template class RootSetClosure<BFSClosure>;
+template class RootSetClosure<DFSClosure>;
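The structural change above: RootSetClosure is now a template over its consumer, so each discovered root is delivered through a statically bound do_root() on the Delegate rather than the former virtual OopClosure indirection, and the explicit instantiations at the bottom keep the definitions in the .cpp file. A standalone sketch of the pattern, with invented names and HotSpot's oop modeled as a plain pointer:

    #include <cstddef>

    typedef const void* oop; // stand-in for HotSpot's oop, illustration only

    struct CountingDelegate {                  // plays the role of BFSClosure/DFSClosure
      size_t count;
      CountingDelegate() : count(0) {}
      void do_root(const oop* /*ref*/) { ++count; }
    };

    template <typename Delegate>
    class RootIterator {
      Delegate* const _delegate;
     public:
      explicit RootIterator(Delegate* delegate) : _delegate(delegate) {}
      void process(const oop* roots, size_t n) {
        for (size_t i = 0; i < n; i++) {
          if (roots[i] != NULL) {
            _delegate->do_root(&roots[i]);     // direct, inlinable call
          }
        }
      }
    };

    template class RootIterator<CountingDelegate>; // mirrors the instantiations above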
--- a/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp  Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp  Wed Oct 09 16:11:58 2019 +0800
@@ -26,18 +26,14 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
 
 #include "memory/iterator.hpp"
-#include "oops/oop.hpp"
 
-class EdgeQueue;
-
+template <typename Delegate>
 class RootSetClosure: public ExtendedOopClosure { // BasicOopIterateClosure
  private:
-  RootSetClosure(EdgeQueue* edge_queue);
-  EdgeQueue* _edge_queue;
-  void closure_impl(const oop* reference, const oop pointee);
+  Delegate* const _delegate;
  public:
-  static void add_to_queue(EdgeQueue* edge_queue);
-  static void process_roots(OopClosure* closure);
+  RootSetClosure(Delegate* delegate);
+  void process();
 
   virtual void do_oop(oop* reference);
   virtual void do_oop(narrowOop* reference);
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.cpp  Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
+#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/vmThread.hpp"
+
+EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
+  _start_time(start_time),
+  _end_time(end_time),
+  _thread(Thread::current()),
+  _jfr_thread_local(_thread->jfr_thread_local()),
+  _thread_id(_thread->jfr_thread_local()->thread_id()) {}
+
+EventEmitter::~EventEmitter() {
+  // restore / reset thread local stack trace and thread id
+  _jfr_thread_local->set_thread_id(_thread_id);
+  _jfr_thread_local->clear_cached_stack_trace();
+}
+
+void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
+  assert(sampler != NULL, "invariant");
+
+  ResourceMark rm;
+  EdgeStore edge_store;
+  if (cutoff_ticks <= 0) {
+    // no reference chains
+    JfrTicks time_stamp = JfrTicks::now();
+    EventEmitter emitter(time_stamp, time_stamp);
+    emitter.write_events(sampler, &edge_store, emit_all);
+    return;
+  }
+  // events emitted with reference chains require a safepoint operation
+  PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
+  VMThread::execute(&op);
+}
+
+size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
+  assert(_thread == Thread::current(), "invariant");
+  assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
+  assert(object_sampler != NULL, "invariant");
+  assert(edge_store != NULL, "invariant");
+
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  size_t count = 0;
+
+  const ObjectSample* current = object_sampler->first();
+  while (current != NULL) {
+    ObjectSample* prev = current->prev();
+    if (current->is_alive_and_older_than(last_sweep)) {
+      write_event(current, edge_store);
+      ++count;
+    }
+    current = prev;
+  }
+
+  if (count > 0) {
+    // serialize associated checkpoints and potential chains
+    ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
+  }
+  return count;
+}
+
+static int array_size(const oop object) {
+  assert(object != NULL, "invariant");
+  if (object->is_array()) {
+    return arrayOop(object)->length();
+  }
+  return min_jint;
+}
+
+void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
+  assert(sample != NULL, "invariant");
+  assert(!sample->is_dead(), "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(_jfr_thread_local != NULL, "invariant");
+
+  const oop* object_addr = sample->object_addr();
+  traceid gc_root_id = 0;
+  const Edge* edge = NULL;
+  if (SafepointSynchronize::is_at_safepoint()) {
+    edge = (const Edge*)(*object_addr)->mark();
+  }
+  if (edge == NULL) {
+    // In order to dump out a representation of the event
+    // even though it was not reachable / too long to reach,
+    // we need to register a top level edge for this object.
+    edge = edge_store->put(object_addr);
+  } else {
+    gc_root_id = edge_store->gc_root_id(edge);
+  }
+
+  assert(edge != NULL, "invariant");
+  const traceid object_id = edge_store->get_id(edge);
+  assert(object_id != 0, "invariant");
+
+  EventOldObjectSample e(UNTIMED);
+  e.set_starttime(_start_time);
+  e.set_endtime(_end_time);
+  e.set_allocationTime(sample->allocation_time());
+  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
+  e.set_object(object_id);
+  e.set_arrayElements(array_size(edge->pointee()));
+  e.set_root(gc_root_id);
+
+  // Temporarily assigning both the stack trace id and thread id
+  // onto the thread local data structure of the emitter thread (for the duration
+  // of the commit() call). This trick provides a means to override
+  // the event generation mechanism by injecting externally provided id's.
+  // At this particular location, it allows us to emit an old object event
+  // supplying information from where the actual sampling occurred.
+  _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
+  assert(sample->has_thread(), "invariant");
+  _jfr_thread_local->set_thread_id(sample->thread_id());
+  e.commit();
+}
16.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 16.2 +++ b/src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.hpp Wed Oct 09 16:11:58 2019 +0800 16.3 @@ -0,0 +1,58 @@ 16.4 +/* 16.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 16.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 16.7 + * 16.8 + * This code is free software; you can redistribute it and/or modify it 16.9 + * under the terms of the GNU General Public License version 2 only, as 16.10 + * published by the Free Software Foundation. 16.11 + * 16.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 16.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16.15 + * version 2 for more details (a copy is included in the LICENSE file that 16.16 + * accompanied this code). 16.17 + * 16.18 + * You should have received a copy of the GNU General Public License version 16.19 + * 2 along with this work; if not, write to the Free Software Foundation, 16.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 16.21 + * 16.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 16.23 + * or visit www.oracle.com if you need additional information or have any 16.24 + * questions. 16.25 + * 16.26 + */ 16.27 + 16.28 +#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP 16.29 +#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP 16.30 + 16.31 +#include "memory/allocation.hpp" 16.32 +#include "jfr/utilities/jfrTime.hpp" 16.33 + 16.34 +typedef u8 traceid; 16.35 + 16.36 +class EdgeStore; 16.37 +class JfrThreadLocal; 16.38 +class ObjectSample; 16.39 +class ObjectSampler; 16.40 +class Thread; 16.41 + 16.42 +class EventEmitter : public CHeapObj<mtTracing> { 16.43 + friend class LeakProfiler; 16.44 + friend class PathToGcRootsOperation; 16.45 + private: 16.46 + const JfrTicks& _start_time; 16.47 + const JfrTicks& _end_time; 16.48 + Thread* _thread; 16.49 + JfrThreadLocal* _jfr_thread_local; 16.50 + traceid _thread_id; 16.51 + 16.52 + EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time); 16.53 + ~EventEmitter(); 16.54 + 16.55 + void write_event(const ObjectSample* sample, EdgeStore* edge_store); 16.56 + size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all); 16.57 + 16.58 + static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all); 16.59 +}; 16.60 + 16.61 +#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
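Taken together, eventEmitter.cpp and eventEmitter.hpp show the lifecycle behind the "temporarily assigning both the stack trace id and thread id" comment: the constructor snapshots the emitting thread's id, write_event() injects the sampled thread's ids before commit(), and the destructor restores the saved id and clears the cached stack trace once per emission batch. A hedged standalone RAII sketch of that save/inject/restore idea, with invented names (ThreadLocalIds, ScopedIdInjection):

#include <cstdint>

// Hypothetical stand-in for JfrThreadLocal: only the two ids the trick touches.
struct ThreadLocalIds {
  uint64_t thread_id;
  uint64_t cached_stack_trace_id;
};

class ScopedIdInjection {
 private:
  ThreadLocalIds* const _tl;
  const uint64_t _saved_thread_id;
 public:
  explicit ScopedIdInjection(ThreadLocalIds* tl)
    : _tl(tl), _saved_thread_id(tl->thread_id) {}
  // Attribute the next committed event to the sampled thread and its
  // recorded stack trace instead of the emitting thread.
  void inject(uint64_t thread_id, uint64_t stack_trace_id) {
    _tl->thread_id = thread_id;
    _tl->cached_stack_trace_id = stack_trace_id;
  }
  ~ScopedIdInjection() {
    // Mirrors EventEmitter::~EventEmitter(): restore the emitter's own
    // thread id and drop the cached stack trace.
    _tl->thread_id = _saved_thread_id;
    _tl->cached_stack_trace_id = 0;
  }
};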
17.1 --- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp Fri Sep 27 13:23:32 2019 +0800 17.2 +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp Wed Oct 09 16:11:58 2019 +0800 17.3 @@ -181,102 +181,89 @@ 17.4 } 17.5 }; 17.6 17.7 -void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) { 17.8 - assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant"); 17.9 - 17.10 +void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) { 17.11 if (!writer.has_data()) { 17.12 - if (!class_unload) { 17.13 - LeakProfiler::resume(); 17.14 - } 17.15 - assert(LeakProfiler::is_running(), "invariant"); 17.16 return; 17.17 } 17.18 17.19 assert(writer.has_data(), "invariant"); 17.20 const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob(); 17.21 + CheckpointInstall install(h_cp); 17.22 17.23 - const ObjectSampler* const object_sampler = LeakProfiler::object_sampler(); 17.24 + // Class unload implies a safepoint. 17.25 + // Not class unload implies the object sampler is locked, because it was claimed exclusively earlier. 17.26 + // Therefore: direct access the object sampler instance is safe. 17.27 + ObjectSampler* const object_sampler = ObjectSampler::sampler(); 17.28 assert(object_sampler != NULL, "invariant"); 17.29 17.30 ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); 17.31 const ObjectSample* const last_resolved = object_sampler->last_resolved(); 17.32 - CheckpointInstall install(h_cp); 17.33 17.34 - if (class_unload) { 17.35 - if (last != NULL) { 17.36 - // all samples need the class unload information 17.37 - do_samples(last, NULL, install); 17.38 - } 17.39 - assert(LeakProfiler::is_running(), "invariant"); 17.40 - return; 17.41 - } 17.42 - 17.43 - // only new samples since last resolved checkpoint 17.44 + // install only to new samples since last resolved checkpoint 17.45 if (last != last_resolved) { 17.46 do_samples(last, last_resolved, install); 17.47 - if (resume) { 17.48 - const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last); 17.49 + if (class_unload) { 17.50 + return; 17.51 } 17.52 - } 17.53 - assert(LeakProfiler::is_suspended(), "invariant"); 17.54 - if (resume) { 17.55 - LeakProfiler::resume(); 17.56 - assert(LeakProfiler::is_running(), "invariant"); 17.57 + if (type_set) { 17.58 + object_sampler->set_last_resolved(last); 17.59 + } 17.60 } 17.61 } 17.62 17.63 -void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) { 17.64 +void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) { 17.65 + assert(sampler != NULL, "invariant"); 17.66 assert(edge_store != NULL, "invariant"); 17.67 assert(thread != NULL, "invariant"); 17.68 + 17.69 static bool types_registered = false; 17.70 if (!types_registered) { 17.71 JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType()); 17.72 JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType()); 17.73 types_registered = true; 17.74 } 17.75 - const ObjectSampler* const object_sampler = LeakProfiler::object_sampler(); 17.76 - assert(object_sampler != NULL, "invariant"); 17.77 - const jlong last_sweep = emit_all ? 
max_jlong : object_sampler->last_sweep().value(); 17.78 - ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); 17.79 + 17.80 + const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value(); 17.81 + ObjectSample* const last = const_cast<ObjectSample*>(sampler->last()); 17.82 { 17.83 JfrCheckpointWriter writer(false, false, thread); 17.84 CheckpointWrite checkpoint_write(writer, last_sweep); 17.85 do_samples(last, NULL, checkpoint_write); 17.86 } 17.87 + 17.88 CheckpointStateReset state_reset(last_sweep); 17.89 do_samples(last, NULL, state_reset); 17.90 + 17.91 if (!edge_store->is_empty()) { 17.92 // java object and chain representations 17.93 JfrCheckpointWriter writer(false, true, thread); 17.94 ObjectSampleWriter osw(writer, edge_store); 17.95 - edge_store->iterate_edges(osw); 17.96 + edge_store->iterate(osw); 17.97 } 17.98 } 17.99 17.100 -WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) : 17.101 - _stack_trace_repo(repo) { 17.102 +int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) { 17.103 + assert(object_sampler != NULL, "invariant"); 17.104 + ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); 17.105 + if (last == NULL) { 17.106 + return 0; 17.107 + } 17.108 + const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value(); 17.109 + SampleMark mark(marker, last_sweep); 17.110 + do_samples(last, NULL, mark); 17.111 + return mark.count(); 17.112 } 17.113 17.114 +WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) : 17.115 + _sampler(sampler), _stack_trace_repo(repo) {} 17.116 + 17.117 bool WriteObjectSampleStacktrace::process() { 17.118 - assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 17.119 - if (!LeakProfiler::is_running()) { 17.120 - return true; 17.121 - } 17.122 - // Suspend the LeakProfiler subsystem 17.123 - // to ensure stable samples even 17.124 - // after we return from the safepoint. 
17.125 - LeakProfiler::suspend(); 17.126 - assert(!LeakProfiler::is_running(), "invariant"); 17.127 - assert(LeakProfiler::is_suspended(), "invariant"); 17.128 + assert(LeakProfiler::is_running(), "invariant"); 17.129 + assert(_sampler != NULL, "invariant"); 17.130 17.131 - const ObjectSampler* object_sampler = LeakProfiler::object_sampler(); 17.132 - assert(object_sampler != NULL, "invariant"); 17.133 - assert(LeakProfiler::is_suspended(), "invariant"); 17.134 - 17.135 - ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); 17.136 - const ObjectSample* const last_resolved = object_sampler->last_resolved(); 17.137 + ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last()); 17.138 + const ObjectSample* const last_resolved = _sampler->last_resolved(); 17.139 if (last == last_resolved) { 17.140 - assert(LeakProfiler::is_suspended(), "invariant"); 17.141 return true; 17.142 } 17.143 17.144 @@ -294,27 +281,13 @@ 17.145 } 17.146 if (count == 0) { 17.147 writer.set_context(ctx); 17.148 - assert(LeakProfiler::is_suspended(), "invariant"); 17.149 return true; 17.150 } 17.151 assert(count > 0, "invariant"); 17.152 writer.write_count((u4)count, count_offset); 17.153 JfrStackTraceRepository::write_metadata(writer); 17.154 17.155 + // install the stacktrace checkpoint information to the candidates 17.156 ObjectSampleCheckpoint::install(writer, false, false); 17.157 - assert(LeakProfiler::is_suspended(), "invariant"); 17.158 return true; 17.159 } 17.160 - 17.161 -int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) { 17.162 - const ObjectSampler* object_sampler = LeakProfiler::object_sampler(); 17.163 - assert(object_sampler != NULL, "invariant"); 17.164 - ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); 17.165 - if (last == NULL) { 17.166 - return 0; 17.167 - } 17.168 - const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value(); 17.169 - SampleMark mark(marker, last_sweep); 17.170 - do_samples(last, NULL, mark); 17.171 - return mark.count(); 17.172 -}
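install(), write() and mark() above all funnel their per-sample work through the do_samples() helper defined earlier in this file (outside the hunks shown), pairing it with small functors such as CheckpointInstall, CheckpointWrite and SampleMark. A sketch of that traversal shape; the Sample link direction and the functor call syntax are assumptions modeled on the do_samples(last, NULL, functor) and do_samples(last, last_resolved, install) call sites:

#include <cstddef>

// Assumed shape: samples are singly linked from the newest one ('last')
// toward older samples; NULL terminates the list.
struct Sample {
  Sample* next;
};

template <typename Functor>
void do_samples_sketch(Sample* current, const Sample* end, Functor& f) {
  // end == NULL visits every sample; end == last_resolved visits only
  // the samples added since the previous resolved checkpoint.
  while (current != end) {
    f(current);
    current = current->next;
  }
}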
18.1 --- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp Fri Sep 27 13:23:32 2019 +0800 18.2 +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp Wed Oct 09 16:11:58 2019 +0800 18.3 @@ -26,25 +26,26 @@ 18.4 #define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP 18.5 18.6 #include "memory/allocation.hpp" 18.7 -#include "utilities/exceptions.hpp" 18.8 18.9 class EdgeStore; 18.10 +class JfrCheckpointWriter; 18.11 class JfrStackTraceRepository; 18.12 -class JfrCheckpointWriter; 18.13 class ObjectSampleMarker; 18.14 +class ObjectSampler; 18.15 18.16 class ObjectSampleCheckpoint : AllStatic { 18.17 public: 18.18 - static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume); 18.19 - static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread); 18.20 - static int mark(ObjectSampleMarker& marker, bool emit_all); 18.21 + static void install(JfrCheckpointWriter& writer, bool class_unload, bool type_set); 18.22 + static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread); 18.23 + static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all); 18.24 }; 18.25 18.26 class WriteObjectSampleStacktrace : public StackObj { 18.27 private: 18.28 + ObjectSampler* const _sampler; 18.29 JfrStackTraceRepository& _stack_trace_repo; 18.30 public: 18.31 - WriteObjectSampleStacktrace(JfrStackTraceRepository& repo); 18.32 + WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo); 18.33 bool process(); 18.34 }; 18.35
19.1 --- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp Fri Sep 27 13:23:32 2019 +0800 19.2 +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp Wed Oct 09 16:11:58 2019 +0800 19.3 @@ -1,5 +1,5 @@ 19.4 /* 19.5 - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 19.6 + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. 19.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 19.8 * 19.9 * This code is free software; you can redistribute it and/or modify it 19.10 @@ -350,7 +350,7 @@ 19.11 return 1; 19.12 } 19.13 19.14 -static traceid get_root_description_info_id(const Edge& edge, traceid id) { 19.15 +static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) { 19.16 assert(edge.is_root(), "invariant"); 19.17 if (EdgeUtils::is_leak_edge(edge)) { 19.18 return 0; 19.19 @@ -518,7 +518,7 @@ 19.20 } 19.21 } 19.22 19.23 -static void add_old_object_sample_info(const Edge* current, traceid id) { 19.24 +static void add_old_object_sample_info(const StoredEdge* current, traceid id) { 19.25 assert(current != NULL, "invariant"); 19.26 if (sample_infos == NULL) { 19.27 sample_infos = new SampleInfo(); 19.28 @@ -528,11 +528,11 @@ 19.29 assert(oosi != NULL, "invariant"); 19.30 oosi->_id = id; 19.31 oosi->_data._object = current->pointee(); 19.32 - oosi->_data._reference_id = current->is_root() ? (traceid)0 : id; 19.33 + oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id; 19.34 sample_infos->store(oosi); 19.35 } 19.36 19.37 -static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) { 19.38 +static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) { 19.39 assert(current != NULL, "invariant"); 19.40 if (ref_infos == NULL) { 19.41 ref_infos = new RefInfo(); 19.42 @@ -544,37 +544,43 @@ 19.43 19.44 ri->_id = id; 19.45 ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0; 19.46 - ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? 19.47 - get_field_info_id(*current) : (traceid)0; 19.48 + ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? 
get_field_info_id(*current) : (traceid)0; 19.49 ri->_data._old_object_sample_id = parent_id; 19.50 ri->_data._skip = current->skip_length(); 19.51 ref_infos->store(ri); 19.52 } 19.53 19.54 -static traceid add_root_info(const Edge* root, traceid id) { 19.55 - assert(root != NULL, "invariant"); 19.56 - assert(root->is_root(), "invariant"); 19.57 - return get_root_description_info_id(*root, id); 19.58 +static bool is_gc_root(const StoredEdge* current) { 19.59 + assert(current != NULL, "invariant"); 19.60 + return current->parent() == NULL && current->gc_root_id() != 0; 19.61 } 19.62 19.63 -void ObjectSampleWriter::write(const RoutableEdge* edge) { 19.64 +static traceid add_gc_root_info(const StoredEdge* root, traceid id) { 19.65 + assert(root != NULL, "invariant"); 19.66 + assert(is_gc_root(root), "invariant"); 19.67 + return get_gc_root_description_info_id(*root, id); 19.68 +} 19.69 + 19.70 +void ObjectSampleWriter::write(const StoredEdge* edge) { 19.71 assert(edge != NULL, "invariant"); 19.72 const traceid id = _store->get_id(edge); 19.73 add_old_object_sample_info(edge, id); 19.74 - const RoutableEdge* parent = edge->logical_parent(); 19.75 + const StoredEdge* const parent = edge->parent(); 19.76 if (parent != NULL) { 19.77 add_reference_info(edge, id, _store->get_id(parent)); 19.78 } else { 19.79 - assert(edge->is_root(), "invariant"); 19.80 - add_root_info(edge, id); 19.81 + if (is_gc_root(edge)) { 19.82 + assert(edge->gc_root_id() == id, "invariant"); 19.83 + add_gc_root_info(edge, id); 19.84 + } 19.85 } 19.86 } 19.87 19.88 -ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) : 19.89 +ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) : 19.90 _writer(writer), 19.91 _store(store) { 19.92 assert(store != NULL, "invariant"); 19.93 - assert(store->number_of_entries() > 0, "invariant"); 19.94 + assert(!store->is_empty(), "invariant"); 19.95 sample_infos = NULL; 19.96 ref_infos = NULL; 19.97 array_infos = NULL; 19.98 @@ -590,26 +596,7 @@ 19.99 write_root_descriptors(_writer); 19.100 } 19.101 19.102 -void ObjectSampleWriter::write_chain(const RoutableEdge& edge) { 19.103 - assert(EdgeUtils::is_leak_edge(edge), "invariant"); 19.104 - if (edge.processed()) { 19.105 - return; 19.106 - } 19.107 - EdgeUtils::collapse_chain(edge); 19.108 - const RoutableEdge* current = &edge; 19.109 - while (current != NULL) { 19.110 - if (current->processed()) { 19.111 - return; 19.112 - } 19.113 - write(current); 19.114 - current->set_processed(); 19.115 - current = current->logical_parent(); 19.116 - } 19.117 -} 19.118 - 19.119 -bool ObjectSampleWriter::operator()(const RoutableEdge& edge) { 19.120 - if (EdgeUtils::is_leak_edge(edge)) { 19.121 - write_chain(edge); 19.122 - } 19.123 +bool ObjectSampleWriter::operator()(StoredEdge& e) { 19.124 + write(&e); 19.125 return true; 19.126 }
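With RoutableEdge and the chain-collapsing writer gone, each StoredEdge the iterator hands to operator() is classified from two stored fields alone: a non-NULL parent marks an interior reference, and a chain top is a GC root only when its gc_root_id is non-zero (an unreachable sample registered via EdgeStore::put() has a zero id). A compact sketch of that dispatch; StoredEdgeView and the EdgeKind enum are mine, the predicate mirrors is_gc_root() above:

#include <cstddef>
#include <cstdint>
typedef uint64_t traceid;

// Minimal stand-in for StoredEdge: only the two fields the dispatch reads.
struct StoredEdgeView {
  const StoredEdgeView* parent;  // NULL at the top of a chain
  traceid gc_root_id;            // non-zero only for rooted chains
};

enum EdgeKind { INTERIOR_REFERENCE, GC_ROOT, UNREACHABLE_TOP };

static EdgeKind classify(const StoredEdgeView* e) {
  if (e->parent != NULL) {
    return INTERIOR_REFERENCE;          // handled by add_reference_info()
  }
  return e->gc_root_id != 0 ? GC_ROOT   // handled by add_gc_root_info()
                            : UNREACHABLE_TOP;  // emitted without a root
}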
20.1 --- a/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp Fri Sep 27 13:23:32 2019 +0800 20.2 +++ b/src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp Wed Oct 09 16:11:58 2019 +0800 20.3 @@ -30,21 +30,17 @@ 20.4 class Edge; 20.5 class EdgeStore; 20.6 class JfrCheckpointWriter; 20.7 -class RoutableEdge; 20.8 +class StoredEdge; 20.9 20.10 class ObjectSampleWriter : public StackObj { 20.11 private: 20.12 JfrCheckpointWriter& _writer; 20.13 - const EdgeStore* const _store; 20.14 - 20.15 - void write(const RoutableEdge* edge); 20.16 - void write_chain(const RoutableEdge& edge); 20.17 - 20.18 + EdgeStore* const _store; 20.19 + void write(const StoredEdge* edge); 20.20 public: 20.21 - ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store); 20.22 + ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store); 20.23 ~ObjectSampleWriter(); 20.24 - 20.25 - bool operator()(const RoutableEdge& edge); 20.26 + bool operator()(StoredEdge& edge); 20.27 }; 20.28 20.29 #endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
21.1 --- a/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp Fri Sep 27 13:23:32 2019 +0800 21.2 +++ b/src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp Wed Oct 09 16:11:58 2019 +0800 21.3 @@ -25,8 +25,8 @@ 21.4 #ifndef SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP 21.5 #define SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP 21.6 21.7 +#include "jfr/leakprofiler/utilities/rootType.hpp" 21.8 #include "memory/allocation.hpp" 21.9 -#include "jfr/leakprofiler/utilities/rootType.hpp" 21.10 #include "oops/oopsHierarchy.hpp" 21.11 21.12 struct RootCallbackInfo {
22.1 --- a/src/share/vm/jfr/leakprofiler/emitEventOperation.cpp Fri Sep 27 13:23:32 2019 +0800 22.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 22.3 @@ -1,235 +0,0 @@ 22.4 -/* 22.5 - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 22.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 22.7 - * 22.8 - * This code is free software; you can redistribute it and/or modify it 22.9 - * under the terms of the GNU General Public License version 2 only, as 22.10 - * published by the Free Software Foundation. 22.11 - * 22.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 22.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 22.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 22.15 - * version 2 for more details (a copy is included in the LICENSE file that 22.16 - * accompanied this code). 22.17 - * 22.18 - * You should have received a copy of the GNU General Public License version 22.19 - * 2 along with this work; if not, write to the Free Software Foundation, 22.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 22.21 - * 22.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22.23 - * or visit www.oracle.com if you need additional information or have any 22.24 - * questions. 22.25 - * 22.26 - */ 22.27 -#include "precompiled.hpp" 22.28 -#include "gc_interface/collectedHeap.hpp" 22.29 -#include "jfr/jfrEvents.hpp" 22.30 -#include "jfr/leakprofiler/utilities/granularTimer.hpp" 22.31 -#include "jfr/leakprofiler/chains/rootSetClosure.hpp" 22.32 -#include "jfr/leakprofiler/chains/edge.hpp" 22.33 -#include "jfr/leakprofiler/chains/edgeQueue.hpp" 22.34 -#include "jfr/leakprofiler/chains/edgeStore.hpp" 22.35 -#include "jfr/leakprofiler/chains/bitset.hpp" 22.36 -#include "jfr/leakprofiler/sampling/objectSample.hpp" 22.37 -#include "jfr/leakprofiler/leakProfiler.hpp" 22.38 -#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" 22.39 -#include "jfr/leakprofiler/sampling/objectSampler.hpp" 22.40 -#include "jfr/leakprofiler/emitEventOperation.hpp" 22.41 -#include "jfr/leakprofiler/chains/bfsClosure.hpp" 22.42 -#include "jfr/leakprofiler/chains/dfsClosure.hpp" 22.43 -#include "jfr/leakprofiler/chains/objectSampleMarker.hpp" 22.44 -#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" 22.45 -#include "jfr/support/jfrThreadId.hpp" 22.46 -#include "memory/resourceArea.hpp" 22.47 -#include "memory/universe.hpp" 22.48 -#include "oops/markOop.hpp" 22.49 -#include "oops/oop.inline.hpp" 22.50 -#include "runtime/safepoint.hpp" 22.51 -#include "runtime/vmThread.hpp" 22.52 -#include "utilities/globalDefinitions.hpp" 22.53 - 22.54 -/* The EdgeQueue is backed by directly managed virtual memory. 22.55 - * We will attempt to dimension an initial reservation 22.56 - * in proportion to the size of the heap (represented by heap_region). 
22.57 - * Initial memory reservation: 5% of the heap OR at least 32 Mb 22.58 - * Commit ratio: 1 : 10 (subject to allocation granularties) 22.59 - */ 22.60 -static size_t edge_queue_memory_reservation(const MemRegion& heap_region) { 22.61 - const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M); 22.62 - assert(memory_reservation_bytes >= (size_t)32*M, "invariant"); 22.63 - return memory_reservation_bytes; 22.64 -} 22.65 - 22.66 -static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) { 22.67 - const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10; 22.68 - assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant"); 22.69 - return memory_commit_block_size_bytes; 22.70 -} 22.71 - 22.72 -static void log_edge_queue_summary(const EdgeQueue& edge_queue) { 22.73 - if (LogJFR && Verbose) tty->print_cr("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K); 22.74 - if (LogJFR && Verbose) tty->print_cr("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top()); 22.75 - if (LogJFR && Verbose) tty->print_cr("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K); 22.76 - if (edge_queue.reserved_size() > 0) { 22.77 - if (LogJFR && Verbose) tty->print_cr("EdgeQueue commit reserve ratio: %f\n", 22.78 - ((double)edge_queue.live_set() / (double)edge_queue.reserved_size())); 22.79 - } 22.80 -} 22.81 - 22.82 -void EmitEventOperation::doit() { 22.83 - assert(LeakProfiler::is_running(), "invariant"); 22.84 - _object_sampler = LeakProfiler::object_sampler(); 22.85 - assert(_object_sampler != NULL, "invariant"); 22.86 - 22.87 - _vm_thread = VMThread::vm_thread(); 22.88 - assert(_vm_thread == Thread::current(), "invariant"); 22.89 - _vm_thread_local = _vm_thread->jfr_thread_local(); 22.90 - assert(_vm_thread_local != NULL, "invariant"); 22.91 - assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant"); 22.92 - 22.93 - // The VM_Operation::evaluate() which invoked doit() 22.94 - // contains a top level ResourceMark 22.95 - 22.96 - // save the original markWord for the potential leak objects 22.97 - // to be restored on function exit 22.98 - ObjectSampleMarker marker; 22.99 - if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) { 22.100 - return; 22.101 - } 22.102 - 22.103 - EdgeStore edge_store; 22.104 - 22.105 - GranularTimer::start(_cutoff_ticks, 1000000); 22.106 - if (_cutoff_ticks <= 0) { 22.107 - // no chains 22.108 - write_events(&edge_store); 22.109 - return; 22.110 - } 22.111 - 22.112 - assert(_cutoff_ticks > 0, "invariant"); 22.113 - 22.114 - // The bitset used for marking is dimensioned as a function of the heap size 22.115 - const MemRegion heap_region = Universe::heap()->reserved_region(); 22.116 - BitSet mark_bits(heap_region); 22.117 - 22.118 - // The edge queue is dimensioned as a fraction of the heap size 22.119 - const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region); 22.120 - EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size)); 22.121 - 22.122 - // The initialize() routines will attempt to reserve and allocate backing storage memory. 22.123 - // Failure to accommodate will render root chain processing impossible. 22.124 - // As a fallback on failure, just write out the existing samples, flat, without chains. 
22.125 - if (!(mark_bits.initialize() && edge_queue.initialize())) { 22.126 - if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing"); 22.127 - write_events(&edge_store); 22.128 - return; 22.129 - } 22.130 - 22.131 - // necessary condition for attempting a root set iteration 22.132 - Universe::heap()->ensure_parsability(false); 22.133 - 22.134 - RootSetClosure::add_to_queue(&edge_queue); 22.135 - if (edge_queue.is_full()) { 22.136 - // Pathological case where roots don't fit in queue 22.137 - // Do a depth-first search, but mark roots first 22.138 - // to avoid walking sideways over roots 22.139 - DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits); 22.140 - } else { 22.141 - BFSClosure bfs(&edge_queue, &edge_store, &mark_bits); 22.142 - bfs.process(); 22.143 - } 22.144 - GranularTimer::stop(); 22.145 - write_events(&edge_store); 22.146 - log_edge_queue_summary(edge_queue); 22.147 -} 22.148 - 22.149 -int EmitEventOperation::write_events(EdgeStore* edge_store) { 22.150 - assert(_object_sampler != NULL, "invariant"); 22.151 - assert(edge_store != NULL, "invariant"); 22.152 - assert(_vm_thread != NULL, "invariant"); 22.153 - assert(_vm_thread_local != NULL, "invariant"); 22.154 - assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 22.155 - 22.156 - // save thread id in preparation for thread local trace data manipulations 22.157 - const traceid vmthread_id = _vm_thread_local->thread_id(); 22.158 - assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant"); 22.159 - 22.160 - const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value(); 22.161 - int count = 0; 22.162 - 22.163 - const ObjectSample* current = _object_sampler->first(); 22.164 - while (current != NULL) { 22.165 - ObjectSample* prev = current->prev(); 22.166 - if (current->is_alive_and_older_than(last_sweep)) { 22.167 - write_event(current, edge_store); 22.168 - ++count; 22.169 - } 22.170 - current = prev; 22.171 - } 22.172 - 22.173 - // restore thread local stack trace and thread id 22.174 - _vm_thread_local->set_thread_id(vmthread_id); 22.175 - _vm_thread_local->clear_cached_stack_trace(); 22.176 - assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant"); 22.177 - 22.178 - if (count > 0) { 22.179 - // serialize assoicated checkpoints 22.180 - ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread); 22.181 - } 22.182 - return count; 22.183 -} 22.184 - 22.185 -static int array_size(const oop object) { 22.186 - assert(object != NULL, "invariant"); 22.187 - if (object->is_array()) { 22.188 - return arrayOop(object)->length(); 22.189 - } 22.190 - return min_jint; 22.191 -} 22.192 - 22.193 -void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) { 22.194 - assert(sample != NULL, "invariant"); 22.195 - assert(!sample->is_dead(), "invariant"); 22.196 - assert(edge_store != NULL, "invariant"); 22.197 - assert(_vm_thread_local != NULL, "invariant"); 22.198 - const oop* object_addr = sample->object_addr(); 22.199 - assert(*object_addr != NULL, "invariant"); 22.200 - 22.201 - const Edge* edge = (const Edge*)(*object_addr)->mark(); 22.202 - traceid gc_root_id = 0; 22.203 - if (edge == NULL) { 22.204 - // In order to dump out a representation of the event 22.205 - // even though it was not reachable / too long to reach, 22.206 - // we need to register a top level edge for this object 22.207 - Edge e(NULL, object_addr); 22.208 - edge_store->add_chain(&e, 1); 22.209 - edge = (const 
Edge*)(*object_addr)->mark(); 22.210 - } else { 22.211 - gc_root_id = edge_store->get_root_id(edge); 22.212 - } 22.213 - 22.214 - assert(edge != NULL, "invariant"); 22.215 - assert(edge->pointee() == *object_addr, "invariant"); 22.216 - const traceid object_id = edge_store->get_id(edge); 22.217 - assert(object_id != 0, "invariant"); 22.218 - 22.219 - EventOldObjectSample e(UNTIMED); 22.220 - e.set_starttime(GranularTimer::start_time()); 22.221 - e.set_endtime(GranularTimer::end_time()); 22.222 - e.set_allocationTime(sample->allocation_time()); 22.223 - e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc()); 22.224 - e.set_object(object_id); 22.225 - e.set_arrayElements(array_size(*object_addr)); 22.226 - e.set_root(gc_root_id); 22.227 - 22.228 - // Temporarily assigning both the stack trace id and thread id 22.229 - // onto the thread local data structure of the VMThread (for the duration 22.230 - // of the commit() call). This trick provides a means to override 22.231 - // the event generation mechanism by injecting externally provided id's. 22.232 - // Here, in particular, this allows us to emit an old object event 22.233 - // supplying information from where the actual sampling occurred. 22.234 - _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id()); 22.235 - assert(sample->has_thread(), "invariant"); 22.236 - _vm_thread_local->set_thread_id(sample->thread_id()); 22.237 - e.commit(); 22.238 -}
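The deleted file's sizing helpers encode the policy its comment states: reserve MAX2(heap / 20, 32 MB) for the edge queue and commit in blocks of one tenth of the reservation (the replacement presumably moved with the new PathToGcRootsOperation, which is not part of this section). Worked numbers: a 64 GB heap reserves about 3.2 GB and commits in roughly 328 MB blocks, while any heap below 640 MB hits the 32 MB floor. A standalone check of that arithmetic, assuming a 64-bit size_t:

#include <cassert>
#include <cstddef>

static const size_t M = 1024 * 1024;
static const size_t G = 1024 * M;

static size_t reservation_bytes(size_t heap_bytes) {
  const size_t five_percent = heap_bytes / 20;           // 5% of the heap
  return five_percent > 32 * M ? five_percent : 32 * M;  // MAX2(heap/20, 32*M)
}

static size_t commit_block_bytes(size_t reservation) {
  return reservation / 10;                               // 1:10 commit ratio
}

int main() {
  assert(reservation_bytes(512 * M) == 32 * M);  // small heap: 32 MB floor
  assert(reservation_bytes(64 * G) > 3 * G);     // 64 GB heap: ~3.2 GB reserved
  assert(commit_block_bytes(32 * M) >= 3 * M);   // matches the deleted >= 3M assert
  return 0;
}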
23.1 --- a/src/share/vm/jfr/leakprofiler/emitEventOperation.hpp Fri Sep 27 13:23:32 2019 +0800 23.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 23.3 @@ -1,69 +0,0 @@ 23.4 -/* 23.5 - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 23.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 23.7 - * 23.8 - * This code is free software; you can redistribute it and/or modify it 23.9 - * under the terms of the GNU General Public License version 2 only, as 23.10 - * published by the Free Software Foundation. 23.11 - * 23.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 23.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 23.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 23.15 - * version 2 for more details (a copy is included in the LICENSE file that 23.16 - * accompanied this code). 23.17 - * 23.18 - * You should have received a copy of the GNU General Public License version 23.19 - * 2 along with this work; if not, write to the Free Software Foundation, 23.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 23.21 - * 23.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 23.23 - * or visit www.oracle.com if you need additional information or have any 23.24 - * questions. 23.25 - * 23.26 - */ 23.27 - 23.28 -#ifndef SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP 23.29 -#define SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP 23.30 - 23.31 -#include "runtime/vm_operations.hpp" 23.32 - 23.33 -class BFSClosure; 23.34 -class EdgeStore; 23.35 -class EdgeQueue; 23.36 -class JfrThreadData; 23.37 -class ObjectSample; 23.38 -class ObjectSampler; 23.39 - 23.40 -// Safepoint operation for emitting object sample events 23.41 -class EmitEventOperation : public VM_Operation { 23.42 - private: 23.43 - jlong _cutoff_ticks; 23.44 - bool _emit_all; 23.45 - VMThread* _vm_thread; 23.46 - JfrThreadLocal* _vm_thread_local; 23.47 - ObjectSampler* _object_sampler; 23.48 - 23.49 - void write_event(const ObjectSample* sample, EdgeStore* edge_store); 23.50 - int write_events(EdgeStore* edge_store); 23.51 - 23.52 - public: 23.53 - EmitEventOperation(jlong cutoff_ticks, bool emit_all) : 23.54 - _cutoff_ticks(cutoff_ticks), 23.55 - _emit_all(emit_all), 23.56 - _vm_thread(NULL), 23.57 - _vm_thread_local(NULL), 23.58 - _object_sampler(NULL) { 23.59 - } 23.60 - 23.61 - VMOp_Type type() const { 23.62 - return VMOp_GC_HeapInspection; 23.63 - } 23.64 - 23.65 - Mode evaluation_mode() const { 23.66 - return _safepoint; 23.67 - } 23.68 - 23.69 - virtual void doit(); 23.70 -}; 23.71 - 23.72 -#endif // SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
24.1 --- a/src/share/vm/jfr/leakprofiler/leakProfiler.cpp Fri Sep 27 13:23:32 2019 +0800 24.2 +++ b/src/share/vm/jfr/leakprofiler/leakProfiler.cpp Wed Oct 09 16:11:58 2019 +0800 24.3 @@ -1,5 +1,5 @@ 24.4 /* 24.5 - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 24.6 + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. 24.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 24.8 * 24.9 * This code is free software; you can redistribute it and/or modify it 24.10 @@ -23,68 +23,80 @@ 24.11 */ 24.12 24.13 #include "precompiled.hpp" 24.14 -#include "jfr/leakprofiler/emitEventOperation.hpp" 24.15 #include "jfr/leakprofiler/leakProfiler.hpp" 24.16 #include "jfr/leakprofiler/startOperation.hpp" 24.17 #include "jfr/leakprofiler/stopOperation.hpp" 24.18 +#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp" 24.19 #include "jfr/leakprofiler/sampling/objectSampler.hpp" 24.20 #include "jfr/recorder/service/jfrOptionSet.hpp" 24.21 #include "memory/iterator.hpp" 24.22 -#include "oops/oop.hpp" 24.23 -#include "runtime/atomic.hpp" 24.24 -#include "runtime/orderAccess.hpp" 24.25 #include "runtime/thread.inline.hpp" 24.26 #include "runtime/vmThread.hpp" 24.27 -#include "utilities/ostream.hpp" 24.28 24.29 -// Only to be updated during safepoint 24.30 -ObjectSampler* LeakProfiler::_object_sampler = NULL; 24.31 +bool LeakProfiler::is_running() { 24.32 + return ObjectSampler::is_created(); 24.33 +} 24.34 24.35 -static volatile jbyte suspended = 0; 24.36 -bool LeakProfiler::start(jint sample_count) { 24.37 - if (_object_sampler != NULL) { 24.38 - // already started 24.39 +bool LeakProfiler::start(int sample_count) { 24.40 + if (is_running()) { 24.41 return true; 24.42 } 24.43 + 24.44 // Allows user to disable leak profiler on command line by setting queue size to zero. 
24.45 - if (sample_count > 0) { 24.46 - StartOperation op(sample_count); 24.47 - VMThread::execute(&op); 24.48 - return _object_sampler != NULL; 24.49 + if (sample_count == 0) { 24.50 + return false; 24.51 } 24.52 - return false; 24.53 + 24.54 + assert(!is_running(), "invariant"); 24.55 + assert(sample_count > 0, "invariant"); 24.56 + 24.57 + // schedule the safepoint operation for installing the object sampler 24.58 + StartOperation op(sample_count); 24.59 + VMThread::execute(&op); 24.60 + 24.61 + if (!is_running()) { 24.62 + if (LogJFR && Verbose) tty->print_cr("Object sampling could not be started because the sampler could not be allocated"); 24.63 + return false; 24.64 + } 24.65 + assert(is_running(), "invariant"); 24.66 + if (LogJFR && Verbose) tty->print_cr("Object sampling started"); 24.67 + return true; 24.68 } 24.69 24.70 bool LeakProfiler::stop() { 24.71 - if (_object_sampler == NULL) { 24.72 - // already stopped/not started 24.73 - return true; 24.74 + if (!is_running()) { 24.75 + return false; 24.76 } 24.77 + 24.78 + // schedule the safepoint operation for uninstalling and destroying the object sampler 24.79 StopOperation op; 24.80 VMThread::execute(&op); 24.81 - return _object_sampler == NULL; 24.82 + 24.83 + assert(!is_running(), "invariant"); 24.84 + if (LogJFR && Verbose) tty->print_cr("Object sampling stopped"); 24.85 + return true; 24.86 } 24.87 24.88 -void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) { 24.89 +void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) { 24.90 if (!is_running()) { 24.91 return; 24.92 } 24.93 - EmitEventOperation op(cutoff_ticks, emit_all); 24.94 - VMThread::execute(&op); 24.95 + // exclusive access to object sampler instance 24.96 + ObjectSampler* const sampler = ObjectSampler::acquire(); 24.97 + assert(sampler != NULL, "invariant"); 24.98 + EventEmitter::emit(sampler, cutoff_ticks, emit_all); 24.99 + ObjectSampler::release(); 24.100 } 24.101 24.102 void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) { 24.103 assert(SafepointSynchronize::is_at_safepoint(), 24.104 "Leak Profiler::oops_do(...) 
may only be called during safepoint"); 24.105 - 24.106 - if (_object_sampler != NULL) { 24.107 - _object_sampler->oops_do(is_alive, f); 24.108 + if (is_running()) { 24.109 + ObjectSampler::oops_do(is_alive, f); 24.110 } 24.111 } 24.112 24.113 -void LeakProfiler::sample(HeapWord* object, 24.114 - size_t size, 24.115 - JavaThread* thread) { 24.116 +void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) { 24.117 assert(is_running(), "invariant"); 24.118 assert(thread != NULL, "invariant"); 24.119 assert(thread->thread_state() == _thread_in_vm, "invariant"); 24.120 @@ -94,39 +106,5 @@ 24.121 return; 24.122 } 24.123 24.124 - _object_sampler->add(object, size, thread); 24.125 + ObjectSampler::sample(object, size, thread); 24.126 } 24.127 - 24.128 -ObjectSampler* LeakProfiler::object_sampler() { 24.129 - assert(is_suspended() || SafepointSynchronize::is_at_safepoint(), 24.130 - "Leak Profiler::object_sampler() may only be called during safepoint"); 24.131 - return _object_sampler; 24.132 -} 24.133 - 24.134 -void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) { 24.135 - assert(SafepointSynchronize::is_at_safepoint(), 24.136 - "Leak Profiler::set_object_sampler() may only be called during safepoint"); 24.137 - _object_sampler = object_sampler; 24.138 -} 24.139 - 24.140 -bool LeakProfiler::is_running() { 24.141 - return _object_sampler != NULL && !suspended; 24.142 -} 24.143 - 24.144 -bool LeakProfiler::is_suspended() { 24.145 - return _object_sampler != NULL && suspended; 24.146 -} 24.147 - 24.148 -void LeakProfiler::resume() { 24.149 - assert(is_suspended(), "invariant"); 24.150 - OrderAccess::storestore(); 24.151 - Atomic::store((jbyte)0, &suspended); 24.152 - assert(is_running(), "invariant"); 24.153 -} 24.154 - 24.155 -void LeakProfiler::suspend() { 24.156 - assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 24.157 - assert(_object_sampler != NULL, "invariant"); 24.158 - assert(!is_suspended(), "invariant"); 24.159 - suspended = (jbyte)1; // safepoint visible 24.160 -}
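emit_events() now brackets emission with ObjectSampler::acquire()/ObjectSampler::release() instead of suspending the profiler, and the patch pairs those calls manually around EventEmitter::emit(). A hedged sketch of an RAII variant (my own wrapper, not part of the patch) that would make the release unconditional on every exit path; it assumes only the acquire()/release() statics introduced in this changeset:

// Context sketch, not standalone: ObjectSampler is the class patched below.
class SamplerHandle {
 private:
  ObjectSampler* const _sampler;
 public:
  SamplerHandle() : _sampler(ObjectSampler::acquire()) {}
  ~SamplerHandle() { ObjectSampler::release(); }
  ObjectSampler* get() const { return _sampler; }
};

// Usage mirroring LeakProfiler::emit_events():
//   if (!is_running()) return;
//   SamplerHandle sampler;                        // spins until exclusive
//   EventEmitter::emit(sampler.get(), cutoff_ticks, emit_all);
//                                                 // released in the destructor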
25.1 --- a/src/share/vm/jfr/leakprofiler/leakProfiler.hpp Fri Sep 27 13:23:32 2019 +0800 25.2 +++ b/src/share/vm/jfr/leakprofiler/leakProfiler.hpp Wed Oct 09 16:11:58 2019 +0800 25.3 @@ -28,35 +28,15 @@ 25.4 #include "memory/allocation.hpp" 25.5 25.6 class BoolObjectClosure; 25.7 -class ObjectSampler; 25.8 class OopClosure; 25.9 -class Thread; 25.10 25.11 class LeakProfiler : public AllStatic { 25.12 - friend class ClassUnloadTypeSet; 25.13 - friend class EmitEventOperation; 25.14 - friend class ObjectSampleCheckpoint; 25.15 - friend class StartOperation; 25.16 - friend class StopOperation; 25.17 - friend class TypeSet; 25.18 - friend class WriteObjectSampleStacktrace; 25.19 - 25.20 - private: 25.21 - static ObjectSampler* _object_sampler; 25.22 - 25.23 - static void set_object_sampler(ObjectSampler* object_sampler); 25.24 - static ObjectSampler* object_sampler(); 25.25 - 25.26 - static void suspend(); 25.27 - static void resume(); 25.28 - static bool is_suspended(); 25.29 - 25.30 public: 25.31 - static bool start(jint sample_count); 25.32 + static bool start(int sample_count); 25.33 static bool stop(); 25.34 - static void emit_events(jlong cutoff_ticks, bool emit_all); 25.35 static bool is_running(); 25.36 25.37 + static void emit_events(int64_t cutoff_ticks, bool emit_all); 25.38 static void sample(HeapWord* object, size_t size, JavaThread* thread); 25.39 25.40 // Called by GC
26.1 --- a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp Fri Sep 27 13:23:32 2019 +0800 26.2 +++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp Wed Oct 09 16:11:58 2019 +0800 26.3 @@ -1,5 +1,5 @@ 26.4 /* 26.5 - * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 26.6 + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. 26.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 26.8 * 26.9 * This code is free software; you can redistribute it and/or modify it 26.10 @@ -34,8 +34,18 @@ 26.11 #include "jfr/utilities/jfrTryLock.hpp" 26.12 #include "memory/universe.hpp" 26.13 #include "oops/oop.inline.hpp" 26.14 +#include "runtime/atomic.hpp" 26.15 +#include "runtime/orderAccess.hpp" 26.16 +#include "runtime/safepoint.hpp" 26.17 #include "runtime/thread.hpp" 26.18 26.19 +static ObjectSampler* _instance = NULL; 26.20 + 26.21 +static ObjectSampler& instance() { 26.22 + assert(_instance != NULL, "invariant"); 26.23 + return *_instance; 26.24 +} 26.25 + 26.26 ObjectSampler::ObjectSampler(size_t size) : 26.27 _priority_queue(new SamplePriorityQueue(size)), 26.28 _list(new SampleList(size)), 26.29 @@ -43,7 +53,6 @@ 26.30 _total_allocated(0), 26.31 _threshold(0), 26.32 _size(size), 26.33 - _tryLock(0), 26.34 _dead_samples(false) {} 26.35 26.36 ObjectSampler::~ObjectSampler() { 26.37 @@ -53,32 +62,109 @@ 26.38 _list = NULL; 26.39 } 26.40 26.41 -void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) { 26.42 +bool ObjectSampler::create(size_t size) { 26.43 + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 26.44 + assert(_instance == NULL, "invariant"); 26.45 + _instance = new ObjectSampler(size); 26.46 + return _instance != NULL; 26.47 +} 26.48 + 26.49 +bool ObjectSampler::is_created() { 26.50 + return _instance != NULL; 26.51 +} 26.52 + 26.53 +ObjectSampler* ObjectSampler::sampler() { 26.54 + assert(is_created(), "invariant"); 26.55 + return _instance; 26.56 +} 26.57 + 26.58 +void ObjectSampler::destroy() { 26.59 + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 26.60 + if (_instance != NULL) { 26.61 + ObjectSampler* const sampler = _instance; 26.62 + _instance = NULL; 26.63 + delete sampler; 26.64 + } 26.65 +} 26.66 + 26.67 +static volatile int _lock = 0; 26.68 + 26.69 +ObjectSampler* ObjectSampler::acquire() { 26.70 + assert(is_created(), "invariant"); 26.71 + while (Atomic::cmpxchg(1, &_lock, 0) == 1) {} 26.72 + return _instance; 26.73 +} 26.74 + 26.75 +void ObjectSampler::release() { 26.76 + assert(is_created(), "invariant"); 26.77 + OrderAccess::fence(); 26.78 + _lock = 0; 26.79 +} 26.80 + 26.81 +static traceid get_thread_id(JavaThread* thread) { 26.82 assert(thread != NULL, "invariant"); 26.83 - const traceid thread_id = thread->threadObj() != NULL ? 
thread->jfr_thread_local()->thread_id() : 0; 26.84 + if (thread->threadObj() == NULL) { 26.85 + return 0; 26.86 + } 26.87 + const JfrThreadLocal* const tl = thread->jfr_thread_local(); 26.88 + assert(tl != NULL, "invariant"); 26.89 + if (!tl->has_thread_checkpoint()) { 26.90 + JfrCheckpointManager::create_thread_checkpoint(thread); 26.91 + } 26.92 + assert(tl->has_thread_checkpoint(), "invariant"); 26.93 + return tl->thread_id(); 26.94 +} 26.95 + 26.96 +// Populates the thread local stack frames, but does not add them 26.97 +// to the stacktrace repository (...yet, see stacktrace_id() below) 26.98 +// 26.99 +void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) { 26.100 + assert(stacktrace != NULL, "invariant"); 26.101 + assert(thread != NULL, "invariant"); 26.102 + if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) { 26.103 + JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0); 26.104 + } 26.105 +} 26.106 + 26.107 +// We were successful in acquiring the try lock and have been selected for adding a sample. 26.108 +// Go ahead with installing our previously taken stacktrace into the stacktrace repository. 26.109 +// 26.110 +traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) { 26.111 + assert(stacktrace != NULL, "invariant"); 26.112 + assert(stacktrace->hash() != 0, "invariant"); 26.113 + const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread); 26.114 + thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash()); 26.115 + return stacktrace_id; 26.116 +} 26.117 + 26.118 +void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) { 26.119 + assert(thread != NULL, "invariant"); 26.120 + assert(is_created(), "invariant"); 26.121 + 26.122 + const traceid thread_id = get_thread_id(thread); 26.123 if (thread_id == 0) { 26.124 return; 26.125 } 26.126 - assert(thread_id != 0, "invariant"); 26.127 + const JfrThreadLocal* const tl = thread->jfr_thread_local(); 26.128 + JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth()); 26.129 + fill_stacktrace(&stacktrace, thread); 26.130 26.131 - if (!thread->jfr_thread_local()->has_thread_checkpoint()) { 26.132 - JfrCheckpointManager::create_thread_checkpoint(thread); 26.133 - assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant"); 26.134 - } 26.135 - 26.136 - traceid stack_trace_id = 0; 26.137 - unsigned int stack_trace_hash = 0; 26.138 - if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) { 26.139 - stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash); 26.140 - thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash); 26.141 - } 26.142 - 26.143 - JfrTryLock tryLock(&_tryLock); 26.144 + // try enter critical section 26.145 + JfrTryLock tryLock(&_lock); 26.146 if (!tryLock.has_lock()) { 26.147 if (LogJFR && Verbose) tty->print_cr("Skipping old object sample due to lock contention"); 26.148 return; 26.149 } 26.150 26.151 + instance().add(obj, allocated, thread_id, &stacktrace, thread); 26.152 +} 26.153 + 26.154 +void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) { 26.155 + assert(stacktrace != NULL, "invariant"); 26.156 + assert(thread_id != 0, "invariant"); 26.157 + assert(thread != NULL, "invariant"); 26.158 + assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant"); 26.159 + 26.160 if (_dead_samples) { 
26.161 scavenge(); 26.162 assert(!_dead_samples, "invariant"); 26.163 @@ -100,13 +186,13 @@ 26.164 } 26.165 26.166 assert(sample != NULL, "invariant"); 26.167 - assert(thread_id != 0, "invariant"); 26.168 sample->set_thread_id(thread_id); 26.169 sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint()); 26.170 26.171 - if (stack_trace_id != 0) { 26.172 - sample->set_stack_trace_id(stack_trace_id); 26.173 - sample->set_stack_trace_hash(stack_trace_hash); 26.174 + const unsigned int stacktrace_hash = stacktrace->hash(); 26.175 + if (stacktrace_hash != 0) { 26.176 + sample->set_stack_trace_id(stacktrace_id(stacktrace, thread)); 26.177 + sample->set_stack_trace_hash(stacktrace_hash); 26.178 } 26.179 26.180 sample->set_span(allocated); 26.181 @@ -117,6 +203,53 @@ 26.182 _priority_queue->push(sample); 26.183 } 26.184 26.185 +void ObjectSampler::scavenge() { 26.186 + ObjectSample* current = _list->last(); 26.187 + while (current != NULL) { 26.188 + ObjectSample* next = current->next(); 26.189 + if (current->is_dead()) { 26.190 + remove_dead(current); 26.191 + } 26.192 + current = next; 26.193 + } 26.194 + _dead_samples = false; 26.195 +} 26.196 + 26.197 +void ObjectSampler::remove_dead(ObjectSample* sample) { 26.198 + assert(sample != NULL, "invariant"); 26.199 + assert(sample->is_dead(), "invariant"); 26.200 + ObjectSample* const previous = sample->prev(); 26.201 + // push span on to previous 26.202 + if (previous != NULL) { 26.203 + _priority_queue->remove(previous); 26.204 + previous->add_span(sample->span()); 26.205 + _priority_queue->push(previous); 26.206 + } 26.207 + _priority_queue->remove(sample); 26.208 + _list->release(sample); 26.209 +} 26.210 + 26.211 +void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) { 26.212 + assert(is_created(), "invariant"); 26.213 + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 26.214 + ObjectSampler& sampler = instance(); 26.215 + ObjectSample* current = sampler._list->last(); 26.216 + while (current != NULL) { 26.217 + ObjectSample* next = current->next(); 26.218 + if (!current->is_dead()) { 26.219 + if (is_alive->do_object_b(current->object())) { 26.220 + // The weakly referenced object is alive, update pointer 26.221 + f->do_oop(const_cast<oop*>(current->object_addr())); 26.222 + } else { 26.223 + current->set_dead(); 26.224 + sampler._dead_samples = true; 26.225 + } 26.226 + } 26.227 + current = next; 26.228 + } 26.229 + sampler._last_sweep = JfrTicks::now(); 26.230 +} 26.231 + 26.232 const ObjectSample* ObjectSampler::last() const { 26.233 return _list->last(); 26.234 } 26.235 @@ -133,50 +266,6 @@ 26.236 _list->set_last_resolved(sample); 26.237 } 26.238 26.239 -void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) { 26.240 - ObjectSample* current = _list->last(); 26.241 - while (current != NULL) { 26.242 - ObjectSample* next = current->next(); 26.243 - if (!current->is_dead()) { 26.244 - if (is_alive->do_object_b(current->object())) { 26.245 - // The weakly referenced object is alive, update pointer 26.246 - f->do_oop(const_cast<oop*>(current->object_addr())); 26.247 - } else { 26.248 - current->set_dead(); 26.249 - _dead_samples = true; 26.250 - } 26.251 - } 26.252 - current = next; 26.253 - } 26.254 - _last_sweep = JfrTicks::now(); 26.255 -} 26.256 - 26.257 -void ObjectSampler::remove_dead(ObjectSample* sample) { 26.258 - assert(sample != NULL, "invariant"); 26.259 - assert(sample->is_dead(), "invariant"); 26.260 - ObjectSample* const previous = sample->prev(); 26.261 - // push 
span on to previous 26.262 - if (previous != NULL) { 26.263 - _priority_queue->remove(previous); 26.264 - previous->add_span(sample->span()); 26.265 - _priority_queue->push(previous); 26.266 - } 26.267 - _priority_queue->remove(sample); 26.268 - _list->release(sample); 26.269 -} 26.270 - 26.271 -void ObjectSampler::scavenge() { 26.272 - ObjectSample* current = _list->last(); 26.273 - while (current != NULL) { 26.274 - ObjectSample* next = current->next(); 26.275 - if (current->is_dead()) { 26.276 - remove_dead(current); 26.277 - } 26.278 - current = next; 26.279 - } 26.280 - _dead_samples = false; 26.281 -} 26.282 - 26.283 int ObjectSampler::item_count() const { 26.284 return _priority_queue->count(); 26.285 } 26.286 @@ -188,7 +277,7 @@ 26.287 ObjectSample* ObjectSampler::item_at(int index) { 26.288 return const_cast<ObjectSample*>( 26.289 const_cast<const ObjectSampler*>(this)->item_at(index) 26.290 - ); 26.291 + ); 26.292 } 26.293 26.294 const JfrTicks& ObjectSampler::last_sweep() const {
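The new acquire()/release() pair in objectSampler.cpp is a minimal spin lock over a single word: acquire() loops on Atomic::cmpxchg(1, &_lock, 0) until it installs the 1, release() issues a full fence before storing 0, and the allocation path keeps using the non-blocking JfrTryLock on the same word so sampling never blocks behind an emitter. A runnable standard-C++ analogue of the same protocol (the patch's full fence is at least as strong as the release ordering used here):

#include <atomic>

static std::atomic<int> lock_word(0);

// Blocking path (emitters), in the spirit of:
//   while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
void acquire_blocking() {
  int expected = 0;
  while (!lock_word.compare_exchange_weak(expected, 1,
                                          std::memory_order_acquire)) {
    expected = 0;  // compare_exchange rewrites 'expected' on failure
  }
}

// Non-blocking path (the sampler's JfrTryLock): give up under contention
// rather than stall an allocating Java thread.
bool try_acquire() {
  int expected = 0;
  return lock_word.compare_exchange_strong(expected, 1,
                                           std::memory_order_acquire);
}

// Release: publish all prior writes before dropping the lock, mirroring
//   OrderAccess::fence(); _lock = 0;
void release() {
  lock_word.store(0, std::memory_order_release);
}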
27.1 --- a/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp Fri Sep 27 13:23:32 2019 +0800 27.2 +++ b/src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp Wed Oct 09 16:11:58 2019 +0800 27.3 @@ -28,7 +28,10 @@ 27.4 #include "memory/allocation.hpp" 27.5 #include "jfr/utilities/jfrTime.hpp" 27.6 27.7 +typedef u8 traceid; 27.8 + 27.9 class BoolObjectClosure; 27.10 +class JfrStackTrace; 27.11 class OopClosure; 27.12 class ObjectSample; 27.13 class ObjectSampler; 27.14 @@ -40,11 +43,13 @@ 27.15 // making sure the samples are evenly distributed as 27.16 // new entries are added and removed. 27.17 class ObjectSampler : public CHeapObj<mtTracing> { 27.18 + friend class EventEmitter; 27.19 + friend class JfrRecorderService; 27.20 friend class LeakProfiler; 27.21 - friend class ObjectSampleCheckpoint; 27.22 friend class StartOperation; 27.23 friend class StopOperation; 27.24 - friend class EmitEventOperation; 27.25 + friend class ObjectSampleCheckpoint; 27.26 + friend class WriteObjectSampleStacktrace; 27.27 private: 27.28 SamplePriorityQueue* _priority_queue; 27.29 SampleList* _list; 27.30 @@ -52,20 +57,33 @@ 27.31 size_t _total_allocated; 27.32 size_t _threshold; 27.33 size_t _size; 27.34 - volatile int _tryLock; 27.35 bool _dead_samples; 27.36 27.37 + // Lifecycle 27.38 explicit ObjectSampler(size_t size); 27.39 ~ObjectSampler(); 27.40 + static bool create(size_t size); 27.41 + static bool is_created(); 27.42 + static ObjectSampler* sampler(); 27.43 + static void destroy(); 27.44 27.45 - void add(HeapWord* object, size_t size, JavaThread* thread); 27.46 + // For operations that require exclusive access (non-safepoint) 27.47 + static ObjectSampler* acquire(); 27.48 + static void release(); 27.49 + 27.50 + // Stacktrace 27.51 + static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread); 27.52 + traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread); 27.53 + 27.54 + // Sampling 27.55 + static void sample(HeapWord* object, size_t size, JavaThread* thread); 27.56 + void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread); 27.57 + void scavenge(); 27.58 void remove_dead(ObjectSample* sample); 27.59 - void scavenge(); 27.60 27.61 // Called by GC 27.62 - void oops_do(BoolObjectClosure* is_alive, OopClosure* f); 27.63 + static void oops_do(BoolObjectClosure* is_alive, OopClosure* f); 27.64 27.65 - public: 27.66 const ObjectSample* item_at(int index) const; 27.67 ObjectSample* item_at(int index); 27.68 int item_count() const;
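One data-structure detail worth calling out from remove_dead() above: when a dead sample is unlinked, its span (the bytes of allocation it stood for) is pushed onto its predecessor, so the cumulative allocation weight tracked by the priority queue is preserved even as individual samples die. A toy model of just that arithmetic; ToySample is invented, and the real code additionally re-heapifies the priority queue and releases the node:

#include <cassert>
#include <cstddef>

struct ToySample {
  size_t span;
  ToySample* prev;
};

void remove_dead_toy(ToySample* s) {
  // "push span on to previous": fold the dead sample's weight into its
  // predecessor so the total span across live samples is unchanged.
  if (s->prev != NULL) {
    s->prev->span += s->span;
  }
}

int main() {
  ToySample a = { 10, NULL };
  ToySample b = { 6, &a };
  remove_dead_toy(&b);
  assert(a.span == 10 + 6);
  return 0;
}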
28.1 --- a/src/share/vm/jfr/leakprofiler/startOperation.hpp Fri Sep 27 13:23:32 2019 +0800 28.2 +++ b/src/share/vm/jfr/leakprofiler/startOperation.hpp Wed Oct 09 16:11:58 2019 +0800 28.3 @@ -25,34 +25,17 @@ 28.4 #ifndef SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP 28.5 #define SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP 28.6 28.7 -#include "jfr/recorder/jfrRecorder.hpp" 28.8 -#include "jfr/leakprofiler/leakProfiler.hpp" 28.9 #include "jfr/leakprofiler/sampling/objectSampler.hpp" 28.10 -#include "jfr/recorder/service/jfrOptionSet.hpp" 28.11 -#include "runtime/vm_operations.hpp" 28.12 +#include "jfr/leakprofiler/utilities/vmOperation.hpp" 28.13 28.14 -// Safepoint operation for starting leak profiler object sampler 28.15 -class StartOperation : public VM_Operation { 28.16 +// Safepoint operation for creating and starting the leak profiler object sampler 28.17 +class StartOperation : public OldObjectVMOperation { 28.18 private: 28.19 - jlong _sample_count; 28.20 + int _sample_count; 28.21 public: 28.22 - StartOperation(jlong sample_count) : 28.23 - _sample_count(sample_count) { 28.24 - } 28.25 - 28.26 - Mode evaluation_mode() const { 28.27 - return _safepoint; 28.28 - } 28.29 - 28.30 - VMOp_Type type() const { 28.31 - return VMOp_GC_HeapInspection; 28.32 - } 28.33 - 28.34 + StartOperation(int sample_count) : _sample_count(sample_count) {} 28.35 virtual void doit() { 28.36 - assert(!LeakProfiler::is_running(), "invariant"); 28.37 - jint queue_size = JfrOptionSet::old_object_queue_size(); 28.38 - LeakProfiler::set_object_sampler(new ObjectSampler(queue_size)); 28.39 - if (LogJFR && Verbose) tty->print_cr( "Object sampling started"); 28.40 + ObjectSampler::create(_sample_count); 28.41 } 28.42 }; 28.43
--- a/src/share/vm/jfr/leakprofiler/stopOperation.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/leakprofiler/stopOperation.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -25,30 +25,14 @@
 #ifndef SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
 #define SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
 
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/recorder/service/jfrOptionSet.hpp"
-#include "runtime/vm_operations.hpp"
+#include "jfr/leakprofiler/utilities/vmOperation.hpp"
 
-// Safepoint operation for stopping leak profiler object sampler
-class StopOperation : public VM_Operation {
+// Safepoint operation for stopping and destroying the leak profiler object sampler
+class StopOperation : public OldObjectVMOperation {
  public:
-  StopOperation() {}
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
   virtual void doit() {
-    assert(LeakProfiler::is_running(), "invariant");
-    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-    delete object_sampler;
-    LeakProfiler::set_object_sampler(NULL);
-    if (LogJFR && Verbose) tty->print_cr( "Object sampling stopped");
+    ObjectSampler::destroy();
   }
 };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/jfr/leakprofiler/utilities/vmOperation.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
+
+#include "runtime/vm_operations.hpp"
+
+class OldObjectVMOperation : public VM_Operation {
+ public:
+  Mode evaluation_mode() const {
+    return _safepoint;
+  }
+
+  VMOp_Type type() const {
+    return VMOp_JFROldObject;
+  }
+};
+
+#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
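OldObjectVMOperation factors the evaluation_mode() and type() boilerplate out of StartOperation and StopOperation, and gives the leak profiler its own VMOp_JFROldObject type instead of piggybacking on VMOp_GC_HeapInspection. A subclass only supplies doit(); a minimal sketch with a hypothetical subclass name:

    class ExampleOldObjectOperation : public OldObjectVMOperation { // hypothetical
     public:
      virtual void doit() {
        // work performed by the VM thread at a safepoint
      }
    };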
--- a/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -316,7 +316,7 @@
 
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(false);
-  if (LeakProfiler::is_suspended()) {
+  if (LeakProfiler::is_running()) {
     JfrCheckpointWriter leakp_writer(false, true, Thread::current());
     type_set.write(writer, &leakp_writer);
     ObjectSampleCheckpoint::install(leakp_writer, false, true);
--- a/src/share/vm/jfr/recorder/service/jfrRecorderService.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/service/jfrRecorderService.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -24,7 +24,9 @@
 
 #include "precompiled.hpp"
 #include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSampler.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
@@ -334,6 +336,7 @@
     open_new_chunk(true);
   }
   _checkpoint_manager.register_service_thread(Thread::current());
+  JfrMetadataEvent::lock();
 }
 
 void JfrRecorderService::open_new_chunk(bool vm_error) {
@@ -397,6 +400,11 @@
   write_stack_trace_checkpoint.process();
 }
 
+static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
+  WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
+  object_sample_stacktrace.process();
+}
+
 static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
   WriteStringPool write_string_pool(string_pool);
   WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
@@ -417,8 +425,9 @@
 // write checkpoint epoch transition list->
 // write stack trace checkpoint ->
 // write string pool checkpoint ->
-// write storage ->
-// release stream lock
+// write object sample stacktraces ->
+// write storage ->
+// release stream lock
 //
 void JfrRecorderService::pre_safepoint_write() {
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
@@ -427,6 +436,13 @@
   _checkpoint_manager.write_epoch_transition_mspace();
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
   write_stringpool_checkpoint(_string_pool, _chunkwriter);
+  if (LeakProfiler::is_running()) {
+    // Exclusive access to the object sampler instance.
+    // The sampler is released (unlocked) later in post_safepoint_write.
+    ObjectSampler* const sampler = ObjectSampler::acquire();
+    assert(sampler != NULL, "invariant");
+    write_object_sample_stacktrace(sampler, _stack_trace_repository);
+  }
   _storage.write();
 }
 
@@ -435,16 +451,10 @@
   VMThread::execute(&safepoint_task);
 }
 
-static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
-  WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
-  object_sample_stacktrace.process();
-}
-
 //
 // safepoint write sequence
 //
 // lock stream lock ->
-// write object sample stacktraces ->
 // write stacktrace repository ->
 // write string pool ->
 // write safepoint dependent types ->
@@ -457,7 +467,6 @@
 void JfrRecorderService::safepoint_write() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
-  write_object_sample_stacktrace(_stack_trace_repository);
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
   write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
   _checkpoint_manager.write_safepoint_types();
@@ -477,13 +486,14 @@
 //
 // post-safepoint write sequence
 //
-// lock stream lock ->
-// write type set ->
-// write checkpoints ->
-// write metadata event ->
-// write chunk header ->
-// close chunk fd ->
-// release stream lock
+// write type set ->
+// release object sampler ->
+// lock stream lock ->
+// write checkpoints ->
+// write metadata event ->
+// write chunk header ->
+// close chunk fd ->
+// release stream lock
 //
 void JfrRecorderService::post_safepoint_write() {
   assert(_chunkwriter.is_valid(), "invariant");
@@ -492,7 +502,11 @@
   // already tagged artifacts for the previous epoch. We can accomplish this concurrently
   // with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
   _checkpoint_manager.write_type_set();
-  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
+  if (LeakProfiler::is_running()) {
+    // The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
+    // Note: There is a dependency on write_type_set() above, ensure the release is subsequent.
+    ObjectSampler::release();
+  }
+  MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
   // serialize any outstanding checkpoint memory
   _checkpoint_manager.write();
   // serialize the metadata descriptor event and close out the chunk
@@ -511,11 +525,9 @@
 void JfrRecorderService::finalize_current_chunk_on_vm_error() {
   assert(_chunkwriter.is_valid(), "invariant");
   pre_safepoint_write();
-  JfrMetadataEvent::lock();
   // Do not attempt safepoint dependent operations during emergency dump.
   // Optimistically write tagged artifacts.
   _checkpoint_manager.shift_epoch();
-  _checkpoint_manager.write_type_set();
   // update time
   _chunkwriter.time_stamp_chunk_now();
   post_safepoint_write();
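The acquire in pre_safepoint_write() and the release in post_safepoint_write() bracket the whole chunk rotation, so the sampler stays locked across the safepoint itself. Reduced to a control-flow sketch (the driver function name is hypothetical, standing in for the actual rotation sequence):

    void rotate() {
      pre_safepoint_write();      // ObjectSampler::acquire() when the leak profiler is running
      execute_safepoint_write();  // a VM operation runs safepoint_write() at a safepoint
      post_safepoint_write();     // write_type_set(), then ObjectSampler::release()
    }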
--- a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -164,7 +164,13 @@
 }
 
 traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
-  return instance().add_trace(stacktrace);
+  traceid tid = instance().add_trace(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = instance().add_trace(stacktrace);
+  }
+  assert(tid != 0, "invariant");
+  return tid;
 }
 
 traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
@@ -187,54 +193,29 @@
   return instance().record_for((JavaThread*)thread, skip,frames, tl->stackdepth());
 }
 
-traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
+traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
+  JfrStackTrace stacktrace(frames, max_frames);
+  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
+}
+
+traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  return add(*stacktrace);
+}
+
+bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
   assert(thread == Thread::current(), "invariant");
+  assert(stacktrace != NULL, "invariant");
   JfrThreadLocal* const tl = thread->jfr_thread_local();
   assert(tl != NULL, "invariant");
-
-  if (tl->has_cached_stack_trace()) {
-    *hash = tl->cached_stack_trace_hash();
-    return tl->cached_stack_trace_id();
+  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
+  if (cached_stacktrace_hash != 0) {
+    stacktrace->set_hash(cached_stacktrace_hash);
+    return true;
   }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
-}
-
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  return tid;
-}
-
-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames, unsigned int* hash) {
-  assert(hash != NULL && *hash == 0, "invariant");
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip, true)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  *hash = stacktrace._hash;
-  return tid;
+  return stacktrace->record_safe(thread, skip, true);
 }
 
 size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
@@ -363,7 +344,7 @@
   return trace;
 }
 
-void JfrStackFrame::resolve_lineno() {
+void JfrStackFrame::resolve_lineno() const {
   assert(_method, "no method pointer");
   assert(_line == 0, "already have linenumber");
   _line = _method->line_number_from_bci(_bci);
@@ -375,7 +356,7 @@
   _frames[frame_pos] = frame;
 }
 
-void JfrStackTrace::resolve_linenos() {
+void JfrStackTrace::resolve_linenos() const {
   for(unsigned int i = 0; i < _nr_of_frames; i++) {
     _frames[i].resolve_lineno();
   }
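add() now retries after resolve_linenos() when the first insertion returns no id, and capture is split from storage: fill_stacktrace_for() records a trace (or adopts the thread-locally cached hash) without storing it, while add(const JfrStackTrace*, JavaThread*) stores a previously filled trace. A sketch of how a caller with access might combine the two; these entry points are private and reached via friendship, and the variable names here are illustrative, not changeset code:

    JfrThreadLocal* const tl = thread->jfr_thread_local();
    JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
    if (JfrStackTraceRepository::fill_stacktrace_for(thread, &stacktrace, 0)) {
      const traceid id = JfrStackTraceRepository::add(&stacktrace, thread);
      // id can now be attached to the object sample
    }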
--- a/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -36,9 +36,9 @@
 
 class JfrStackFrame {
  private:
-  const Method* _method;
+  mutable const Method* _method;
   traceid _methodid;
-  int _line;
+  mutable int _line;
   int _bci;
   u1 _type;
 
@@ -58,7 +58,7 @@
   bool equals(const JfrStackFrame& rhs) const;
   void write(JfrChunkWriter& cw) const;
   void write(JfrCheckpointWriter& cpw) const;
-  void resolve_lineno();
+  void resolve_lineno() const;
 };
 
 class JfrStackTrace : public StackObj {
@@ -70,7 +70,7 @@
   unsigned int _hash;
   const u4 _max_frames;
   bool _reached_root;
-  bool _lineno;
+  mutable bool _lineno;
 
  public:
   JfrStackTrace(JfrStackFrame* frames, u4 max_frames) : _frames(frames),
@@ -82,9 +82,10 @@
     _lineno(false) {}
   bool record_thread(JavaThread& thread, frame& frame);
   bool record_safe(JavaThread* thread, int skip, bool leakp = false);
-  void resolve_linenos();
+  void resolve_linenos() const;
   void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
   void set_hash(unsigned int hash) { _hash = hash; }
+  unsigned int hash() const { return _hash; }
   void set_frame(u4 frame_pos, JfrStackFrame& frame);
   void set_reached_root(bool reached_root) { _reached_root = reached_root; }
   bool full_stacktrace() const { return _reached_root; }
@@ -128,23 +129,26 @@
   traceid _next_id;
   u4 _entries;
 
+  traceid add_trace(const JfrStackTrace& stacktrace);
+  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
+
   size_t write_impl(JfrChunkWriter& cw, bool clear);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
-  traceid add_trace(const JfrStackTrace& stacktrace);
   const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
-
   static void write_metadata(JfrCheckpointWriter& cpw);
 
+  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);
+
   JfrStackTraceRepository();
-  static JfrStackTraceRepository& instance();
- public:
   static JfrStackTraceRepository* create();
   bool initialize();
   static void destroy();
+
+  static JfrStackTraceRepository& instance();
+
+ public:
   static traceid add(const JfrStackTrace& stacktrace);
   static traceid record(Thread* thread, int skip = 0);
-  static traceid record(Thread* thread, int skip, unsigned int* hash);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
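The mutable qualifiers are what allow resolve_lineno() and resolve_linenos() to become const: the line number is a lazily computed cache rather than observable state, so const readers may fill it in on demand. The same idiom in isolation (a standalone sketch, not changeset code):

    class CachedLine {
      mutable int _line; // lazily resolved cache, hence writable from const members
     public:
      CachedLine() : _line(0) {}
      int line() const {
        if (_line == 0) {
          _line = 42; // stand-in for Method::line_number_from_bci(_bci)
        }
        return _line;
      }
    };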
--- a/src/share/vm/jfr/support/jfrFlush.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/support/jfrFlush.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -48,10 +48,12 @@
 
 template <typename Event>
 class JfrConditionalFlush {
+ protected:
+  bool _enabled;
  public:
   typedef JfrBuffer Type;
-  JfrConditionalFlush(Thread* t) {
-    if (jfr_is_event_enabled(Event::eventId)) {
+  JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
+    if (_enabled) {
       jfr_conditional_flush(Event::eventId, sizeof(Event), t);
     }
   }
@@ -63,7 +65,7 @@
   bool _owner;
  public:
   JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
-    if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
+    if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
       _owner = jfr_save_stacktrace(t);
     }
   }
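Caching the enablement decision in _enabled lets the stacktrace variant skip jfr_save_stacktrace() entirely when the event type is disabled. The this-> qualification is required because _enabled lives in the dependent base JfrConditionalFlush<Event>, so unqualified lookup would not find it at template definition time. Typical guard usage, sketched with a hypothetical generated event type:

    // EventExample stands in for a generated JFR event class.
    JfrConditionalFlushWithStacktrace<EventExample> flush(Thread::current());
    EventExample event;
    // ... set event fields and commit; the guard has flushed the thread-local
    // buffer and saved a stacktrace only if the event is actually enabled.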
--- a/src/share/vm/jfr/support/jfrThreadLocal.cpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/support/jfrThreadLocal.cpp	Wed Oct 09 16:11:58 2019 +0800
@@ -150,9 +150,7 @@
 
 JfrStackFrame* JfrThreadLocal::install_stackframes() const {
   assert(_stackframes == NULL, "invariant");
-  _stackdepth = (u4)JfrOptionSet::stackdepth();
-  guarantee(_stackdepth > 0, "Stackdepth must be > 0");
-  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
+  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
   return _stackframes;
 }
 
@@ -163,3 +161,7 @@
 ByteSize JfrThreadLocal::java_event_writer_offset() {
   return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
 }
+
+u4 JfrThreadLocal::stackdepth() const {
+  return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
+}
--- a/src/share/vm/jfr/support/jfrThreadLocal.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/jfr/support/jfrThreadLocal.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -113,9 +113,7 @@
     _stackframes = frames;
   }
 
-  u4 stackdepth() const {
-    return _stackdepth;
-  }
+  u4 stackdepth() const;
 
   void set_stackdepth(u4 depth) {
     _stackdepth = depth;
--- a/src/share/vm/runtime/vm_operations.hpp	Fri Sep 27 13:23:32 2019 +0800
+++ b/src/share/vm/runtime/vm_operations.hpp	Wed Oct 09 16:11:58 2019 +0800
@@ -98,6 +98,7 @@
   template(RotateGCLog)                          \
   template(WhiteBoxOperation)                    \
   template(ClassLoaderStatsOperation)            \
+  template(JFROldObject)                         \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public: