Sun, 03 Feb 2013 22:28:08 +0400
8002048: Protocol to discovery of manageable Java processes on a network
Summary: Introduce a protocol, JDP, to discover manageable Java instances across a network subnet
Reviewed-by: sla, dfuchs
zgu@3900 | 1 | /* |
zgu@3900 | 2 | * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. |
zgu@3900 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@3900 | 4 | * |
zgu@3900 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@3900 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@3900 | 7 | * published by the Free Software Foundation. |
zgu@3900 | 8 | * |
zgu@3900 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@3900 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@3900 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@3900 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@3900 | 13 | * accompanied this code). |
zgu@3900 | 14 | * |
zgu@3900 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@3900 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@3900 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@3900 | 18 | * |
zgu@3900 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@3900 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@3900 | 21 | * questions. |
zgu@3900 | 22 | * |
zgu@3900 | 23 | */ |
zgu@3900 | 24 | |
zgu@3900 | 25 | #ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP |
zgu@3900 | 26 | #define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP |
zgu@3900 | 27 | |
zgu@3900 | 28 | #include "memory/allocation.hpp" |
zgu@3900 | 29 | #include "runtime/mutex.hpp" |
zgu@3900 | 30 | #include "runtime/mutexLocker.hpp" |
zgu@3900 | 31 | #include "services/memBaseline.hpp" |
zgu@3900 | 32 | #include "services/memPtrArray.hpp" |
zgu@3900 | 33 | |
zgu@3900 | 34 | // Snapshot pointer array iterator |
zgu@3900 | 35 | |
zgu@3900 | 36 | // The pointer array contains malloc-ed pointers |
zgu@3900 | 37 | class MemPointerIterator : public MemPointerArrayIteratorImpl { |
zgu@3900 | 38 | public: |
zgu@3900 | 39 | MemPointerIterator(MemPointerArray* arr): |
zgu@3900 | 40 | MemPointerArrayIteratorImpl(arr) { |
zgu@3900 | 41 | assert(arr != NULL, "null array"); |
zgu@3900 | 42 | } |
zgu@3900 | 43 | |
zgu@3900 | 44 | #ifdef ASSERT |
zgu@3900 | 45 | virtual bool is_dup_pointer(const MemPointer* ptr1, |
zgu@3900 | 46 | const MemPointer* ptr2) const { |
zgu@3900 | 47 | MemPointerRecord* p1 = (MemPointerRecord*)ptr1; |
zgu@3900 | 48 | MemPointerRecord* p2 = (MemPointerRecord*)ptr2; |
zgu@3900 | 49 | |
zgu@3900 | 50 | if (p1->addr() != p2->addr()) return false; |
zgu@3900 | 51 | if ((p1->flags() & MemPointerRecord::tag_masks) != |
zgu@3900 | 52 | (p2->flags() & MemPointerRecord::tag_masks)) { |
zgu@3900 | 53 | return false; |
zgu@3900 | 54 | } |
zgu@3900 | 55 | // we do see multiple commit/uncommit on the same memory, it is ok |
zgu@3900 | 56 | return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc || |
zgu@3900 | 57 | (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; |
zgu@3900 | 58 | } |
zgu@3900 | 59 | |
zgu@3900 | 60 | virtual bool insert(MemPointer* ptr) { |
zgu@3900 | 61 | if (_pos > 0) { |
zgu@3900 | 62 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 63 | MemPointer* p2 = (MemPointer*)_array->at(_pos - 1); |
zgu@3900 | 64 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 65 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 66 | } |
zgu@3900 | 67 | if (_pos < _array->length() -1) { |
zgu@3900 | 68 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 69 | MemPointer* p2 = (MemPointer*)_array->at(_pos + 1); |
zgu@3900 | 70 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 71 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 72 | } |
zgu@3900 | 73 | return _array->insert_at(ptr, _pos); |
zgu@3900 | 74 | } |
zgu@3900 | 75 | |
zgu@3900 | 76 | virtual bool insert_after(MemPointer* ptr) { |
zgu@3900 | 77 | if (_pos > 0) { |
zgu@3900 | 78 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 79 | MemPointer* p2 = (MemPointer*)_array->at(_pos - 1); |
zgu@3900 | 80 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 81 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 82 | } |
zgu@3900 | 83 | if (_pos < _array->length() - 1) { |
zgu@3900 | 84 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 85 | MemPointer* p2 = (MemPointer*)_array->at(_pos + 1); |
zgu@3900 | 86 | |
zgu@3900 | 87 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 88 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 89 | } |
zgu@3900 | 90 | if (_array->insert_at(ptr, _pos + 1)) { |
zgu@3900 | 91 | _pos ++; |
zgu@3900 | 92 | return true; |
zgu@3900 | 93 | } |
zgu@3900 | 94 | return false; |
zgu@3900 | 95 | } |
zgu@3900 | 96 | #endif |
zgu@3900 | 97 | |
zgu@3900 | 98 | virtual MemPointer* locate(address addr) { |
zgu@3900 | 99 | MemPointer* cur = current(); |
zgu@3900 | 100 | while (cur != NULL && cur->addr() < addr) { |
zgu@3900 | 101 | cur = next(); |
zgu@3900 | 102 | } |
zgu@3900 | 103 | return cur; |
zgu@3900 | 104 | } |
zgu@3900 | 105 | }; |
zgu@3900 | 106 | |
zgu@3900 | 107 | class VMMemPointerIterator : public MemPointerIterator { |
zgu@3900 | 108 | public: |
zgu@3900 | 109 | VMMemPointerIterator(MemPointerArray* arr): |
zgu@3900 | 110 | MemPointerIterator(arr) { |
zgu@3900 | 111 | } |
zgu@3900 | 112 | |
zgu@4193 | 113 | // locate an existing reserved memory region that contains specified address, |
zgu@4193 | 114 | // or the reserved region just above this address, where the incoming |
zgu@4193 | 115 | // reserved region should be inserted. |
zgu@3900 | 116 | virtual MemPointer* locate(address addr) { |
zgu@4193 | 117 | reset(); |
zgu@4193 | 118 | VMMemRegion* reg = (VMMemRegion*)current(); |
zgu@4193 | 119 | while (reg != NULL) { |
zgu@4193 | 120 | if (reg->is_reserved_region()) { |
zgu@4193 | 121 | if (reg->contains_address(addr) || addr < reg->base()) { |
zgu@4193 | 122 | return reg; |
zgu@3900 | 123 | } |
zgu@3900 | 124 | } |
zgu@4193 | 125 | reg = (VMMemRegion*)next(); |
zgu@4193 | 126 | } |
zgu@4053 | 127 | return NULL; |
zgu@4053 | 128 | } |
zgu@3900 | 129 | |
zgu@4193 | 130 | // following methods update virtual memory in the context |
zgu@4193 | 131 | // of 'current' position, which is properly positioned by |
zgu@4193 | 132 | // callers via locate method. |
zgu@4193 | 133 | bool add_reserved_region(MemPointerRecord* rec); |
zgu@4193 | 134 | bool add_committed_region(MemPointerRecord* rec); |
zgu@4193 | 135 | bool remove_uncommitted_region(MemPointerRecord* rec); |
zgu@4193 | 136 | bool remove_released_region(MemPointerRecord* rec); |
zgu@4193 | 137 | |
zgu@4193 | 138 | // split a reserved region to create a new memory region with specified base and size |
zgu@4193 | 139 | bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size); |
zgu@4193 | 140 | private: |
zgu@4193 | 141 | bool insert_record(MemPointerRecord* rec); |
zgu@4193 | 142 | bool insert_record_after(MemPointerRecord* rec); |
zgu@4193 | 143 | |
zgu@4193 | 144 | bool insert_reserved_region(MemPointerRecord* rec); |
zgu@4193 | 145 | |
zgu@4193 | 146 | // reset current position |
zgu@4193 | 147 | inline void reset() { _pos = 0; } |
zgu@3900 | 148 | #ifdef ASSERT |
zgu@4285 | 149 | // check integrity of records on current reserved memory region. |
zgu@4285 | 150 | bool check_reserved_region() { |
zgu@4285 | 151 | VMMemRegion* reserved_region = (VMMemRegion*)current(); |
zgu@4285 | 152 | assert(reserved_region != NULL && reserved_region->is_reserved_region(), |
zgu@4285 | 153 | "Sanity check"); |
zgu@4285 | 154 | // all committed regions that follow current reserved region, should all |
zgu@4285 | 155 | // belong to the reserved region. |
zgu@4285 | 156 | VMMemRegion* next_region = (VMMemRegion*)next(); |
zgu@4285 | 157 | for (; next_region != NULL && next_region->is_committed_region(); |
zgu@4285 | 158 | next_region = (VMMemRegion*)next() ) { |
zgu@4285 | 159 | if(!reserved_region->contains_region(next_region)) { |
zgu@4285 | 160 | return false; |
zgu@4285 | 161 | } |
zgu@4285 | 162 | } |
zgu@4285 | 163 | return true; |
zgu@4285 | 164 | } |
zgu@4285 | 165 | |
zgu@3900 | 166 | virtual bool is_dup_pointer(const MemPointer* ptr1, |
zgu@3900 | 167 | const MemPointer* ptr2) const { |
zgu@3900 | 168 | VMMemRegion* p1 = (VMMemRegion*)ptr1; |
zgu@3900 | 169 | VMMemRegion* p2 = (VMMemRegion*)ptr2; |
zgu@3900 | 170 | |
zgu@3900 | 171 | if (p1->addr() != p2->addr()) return false; |
zgu@3900 | 172 | if ((p1->flags() & MemPointerRecord::tag_masks) != |
zgu@3900 | 173 | (p2->flags() & MemPointerRecord::tag_masks)) { |
zgu@3900 | 174 | return false; |
zgu@3900 | 175 | } |
zgu@3900 | 176 | // we do see multiple commit/uncommit on the same memory, it is ok |
zgu@3900 | 177 | return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc || |
zgu@3900 | 178 | (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; |
zgu@3900 | 179 | } |
zgu@3900 | 180 | #endif |
zgu@3900 | 181 | }; |
zgu@3900 | 182 | |
// Iterator over the staged malloc records. In addition to plain iteration,
// next() filters out stale arena memory (size) records - see the comments
// there for the two scenarios.
class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;



 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  // Return the record at the current position. Debug builds also verify
  // that the neighboring records preserve the address-sorted order.
  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }

  // Advance and return the next record, skipping an arena memory (size)
  // record that is older (lower sequence number) than its owning arena
  // record.
  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // arena memory record is a special case, which we have to compare
    // sequence number against its associated arena record.
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // if there is an associated arena record, it has to be previous
      // record because of sorting order (by address) - NMT generates a pseudo address
      // for arena's size record by offsetting arena's address, that guarantees
      // the order of arena record and its size record.
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
        next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record
          // Two scenarios:
          //   - if the arena record is an allocation record, this early
          //     size record must be leftover by previous arena,
          //     and the last size record should have size = 0.
          //   - if the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, arena always resets
          //     its size before it is gone (see Arena's destructor)
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
            "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  // random access and mutation are not supported on this iterator
  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
zgu@4053 | 244 | |
// collapse duplicated records. Eliminating duplicated records here, is much
// cheaper than during promotion phase. However, it does have limitation - it
// can only eliminate duplicated records within the generation, there are
// still chances seeing duplicated records during promotion.
// We want to use the record with higher sequence number, because it has
// more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  // underlying iterator over the staged virtual memory records
  MemPointerArrayIteratorImpl  _itr;

 public:
  // Position the iterator on the first record, consuming any duplicates of
  // the head record up front so current() is immediately valid.
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      // records are pre-sorted, so sequence numbers must be strictly increasing
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        // NOTE(review): unlike next() below, 'cur' is not advanced here, so
        // subsequent duplicates are compared against the original head
        // record - confirm this asymmetry is intended.
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  // record at the current position
  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get next record, but skip the duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      // sequence numbers must keep increasing along the pre-sorted array
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        // keep the later duplicate - it carries the higher sequence number
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  // random access and mutation are not supported on this iterator
  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  // duplicates share address, size and the full flag set; duplicated
  // thread-stack records are never expected
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
zgu@4193 | 310 | |
zgu@4053 | 311 | class StagingArea : public _ValueObj { |
zgu@4053 | 312 | private: |
zgu@4053 | 313 | MemPointerArray* _malloc_data; |
zgu@4053 | 314 | MemPointerArray* _vm_data; |
zgu@4053 | 315 | |
zgu@4053 | 316 | public: |
zgu@4053 | 317 | StagingArea() : _malloc_data(NULL), _vm_data(NULL) { |
zgu@4053 | 318 | init(); |
zgu@3900 | 319 | } |
zgu@3900 | 320 | |
zgu@4053 | 321 | ~StagingArea() { |
zgu@4053 | 322 | if (_malloc_data != NULL) delete _malloc_data; |
zgu@4053 | 323 | if (_vm_data != NULL) delete _vm_data; |
zgu@3900 | 324 | } |
zgu@3900 | 325 | |
zgu@4053 | 326 | MallocRecordIterator malloc_record_walker() { |
zgu@4053 | 327 | return MallocRecordIterator(malloc_data()); |
zgu@3900 | 328 | } |
zgu@3900 | 329 | |
zgu@4193 | 330 | VMRecordIterator virtual_memory_record_walker(); |
zgu@4193 | 331 | |
zgu@4053 | 332 | bool init(); |
zgu@4053 | 333 | void clear() { |
zgu@4053 | 334 | assert(_malloc_data != NULL && _vm_data != NULL, "Just check"); |
zgu@4053 | 335 | _malloc_data->shrink(); |
zgu@4053 | 336 | _malloc_data->clear(); |
zgu@4053 | 337 | _vm_data->clear(); |
zgu@3900 | 338 | } |
zgu@3900 | 339 | |
zgu@4053 | 340 | inline MemPointerArray* malloc_data() { return _malloc_data; } |
zgu@4053 | 341 | inline MemPointerArray* vm_data() { return _vm_data; } |
zgu@3900 | 342 | }; |
zgu@3900 | 343 | |
zgu@3900 | 344 | class MemBaseline; |
// A snapshot of all known live memory blocks, built by merging per-thread
// recorders into the staging area and then promoting the staged records.
class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // the following two arrays contain records of all known lived memory blocks
  // live malloc-ed memory pointers
  MemPointerArray* _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray* _vm_ptrs;

  // records merged from per-thread recorders, awaiting promotion
  StagingArea _staging_area;

  // the lock to protect this snapshot
  Monitor* _lock;

  // the number of instance classes
  int _number_of_classes;

  // number of untracked pointers (non-product builds only)
  NOT_PRODUCT(size_t _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // if we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
      _staging_area.malloc_data() == NULL ||
      _staging_area.vm_data() == NULL ||
      _vm_ptrs == NULL || _lock == NULL ||
      _alloc_ptrs->out_of_memory() ||
      _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into staging area
  bool merge(MemRecorder* rec);
  // promote staged data to snapshot
  bool promote(int number_of_classes);

  // number of instance classes; set via promote()
  int number_of_classes() const { return _number_of_classes; }

  // block on the snapshot's monitor for up to 'timeout'
  // NOTE(review): the 'true' argument presumably suppresses the safepoint
  // check - confirm against Monitor::wait before relying on it.
  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in snapshot
  DEBUG_ONLY( void dump_all_vm_pointers();)

 private:
  // copy sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to non-sequenced pointer
  void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
zgu@3900 | 407 | |
zgu@3900 | 408 | #endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP |