Wed, 27 Aug 2014 09:36:55 +0200
Merge
zgu@3900 | 1 | /* |
zgu@5272 | 2 | * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. |
zgu@3900 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@3900 | 4 | * |
zgu@3900 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@3900 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@3900 | 7 | * published by the Free Software Foundation. |
zgu@3900 | 8 | * |
zgu@3900 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@3900 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@3900 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@3900 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@3900 | 13 | * accompanied this code). |
zgu@3900 | 14 | * |
zgu@3900 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@3900 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@3900 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@3900 | 18 | * |
zgu@3900 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@3900 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@3900 | 21 | * questions. |
zgu@3900 | 22 | * |
zgu@3900 | 23 | */ |
zgu@3900 | 24 | |
zgu@3900 | 25 | #ifndef SHARE_VM_SERVICES_MEM_PTR_HPP |
zgu@3900 | 26 | #define SHARE_VM_SERVICES_MEM_PTR_HPP |
zgu@3900 | 27 | |
zgu@3900 | 28 | #include "memory/allocation.hpp" |
zgu@3900 | 29 | #include "runtime/atomic.hpp" |
zgu@3900 | 30 | #include "runtime/os.hpp" |
zgu@3900 | 31 | #include "runtime/safepoint.hpp" |
zgu@3900 | 32 | |
zgu@3900 | 33 | /* |
zgu@3900 | 34 | * global sequence generator that generates sequence numbers to serialize |
zgu@3900 | 35 | * memory records. |
zgu@3900 | 36 | */ |
zgu@3900 | 37 | class SequenceGenerator : AllStatic { |
zgu@3900 | 38 | public: |
zgu@3900 | 39 | static jint next(); |
zgu@3900 | 40 | |
zgu@3900 | 41 | // peek last sequence number |
zgu@3900 | 42 | static jint peek() { |
zgu@3900 | 43 | return _seq_number; |
zgu@3900 | 44 | } |
zgu@3900 | 45 | |
zgu@3900 | 46 | // reset sequence number |
zgu@3900 | 47 | static void reset() { |
zgu@3900 | 48 | assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); |
zgu@3900 | 49 | _seq_number = 1; |
ctornqvi@4512 | 50 | _generation ++; |
zgu@3900 | 51 | }; |
zgu@3900 | 52 | |
ctornqvi@4512 | 53 | static unsigned long current_generation() { return _generation; } |
zgu@3994 | 54 | NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; }) |
zgu@3900 | 55 | |
zgu@3900 | 56 | private: |
ctornqvi@4512 | 57 | static volatile jint _seq_number; |
ctornqvi@4512 | 58 | static volatile unsigned long _generation; |
ctornqvi@4512 | 59 | NOT_PRODUCT(static jint _max_seq_number; ) |
zgu@3900 | 60 | }; |
zgu@3900 | 61 | |
/*
 * The following are the classes that are used to hold memory activity
 * records in different stages.
 *   MemPointer
 *     |--------MemPointerRecord
 *                     |
 *                     |----MemPointerRecordEx
 *                     |           |
 *                     |           |-------SeqMemPointerRecordEx
 *                     |
 *                     |----SeqMemPointerRecord
 *                     |
 *                     |----VMMemRegion
 *                     |           |
 *                     |-----------VMMemRegionEx
 *
 *
 *  prefix 'Seq' - sequenced, the record contains a sequence number
 *  suffix 'Ex'  - extension, the record contains a caller's pc
 *
 *  per-thread recorder : SeqMemPointerRecord(Ex)
 *  snapshot staging    : SeqMemPointerRecord(Ex)
 *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
 *
 */
zgu@3900 | 86 | |
zgu@3900 | 87 | /* |
zgu@3900 | 88 | * class that wraps an address to a memory block, |
zgu@3900 | 89 | * the memory pointer either points to a malloc'd |
zgu@3900 | 90 | * memory block, or a mmap'd memory block |
zgu@3900 | 91 | */ |
zgu@4959 | 92 | class MemPointer VALUE_OBJ_CLASS_SPEC { |
zgu@3900 | 93 | public: |
zgu@3900 | 94 | MemPointer(): _addr(0) { } |
zgu@3900 | 95 | MemPointer(address addr): _addr(addr) { } |
zgu@3900 | 96 | |
zgu@3900 | 97 | MemPointer(const MemPointer& copy_from) { |
zgu@3900 | 98 | _addr = copy_from.addr(); |
zgu@3900 | 99 | } |
zgu@3900 | 100 | |
zgu@3900 | 101 | inline address addr() const { |
zgu@3900 | 102 | return _addr; |
zgu@3900 | 103 | } |
zgu@3900 | 104 | |
zgu@3900 | 105 | inline operator address() const { |
zgu@3900 | 106 | return addr(); |
zgu@3900 | 107 | } |
zgu@3900 | 108 | |
zgu@3900 | 109 | inline bool operator == (const MemPointer& other) const { |
zgu@3900 | 110 | return addr() == other.addr(); |
zgu@3900 | 111 | } |
zgu@3900 | 112 | |
zgu@3900 | 113 | inline MemPointer& operator = (const MemPointer& other) { |
zgu@3900 | 114 | _addr = other.addr(); |
zgu@3900 | 115 | return *this; |
zgu@3900 | 116 | } |
zgu@3900 | 117 | |
zgu@3900 | 118 | protected: |
zgu@3900 | 119 | inline void set_addr(address addr) { _addr = addr; } |
zgu@3900 | 120 | |
zgu@3900 | 121 | protected: |
zgu@3900 | 122 | // memory address |
zgu@3900 | 123 | address _addr; |
zgu@3900 | 124 | }; |
zgu@3900 | 125 | |
zgu@3900 | 126 | /* MemPointerRecord records an activityand associated |
zgu@3900 | 127 | * attributes on a memory block. |
zgu@3900 | 128 | */ |
zgu@3900 | 129 | class MemPointerRecord : public MemPointer { |
zgu@3900 | 130 | private: |
zgu@3900 | 131 | MEMFLAGS _flags; |
zgu@3900 | 132 | size_t _size; |
zgu@3900 | 133 | |
zgu@3900 | 134 | public: |
zgu@3900 | 135 | /* extension of MemoryType enum |
zgu@3900 | 136 | * see share/vm/memory/allocation.hpp for details. |
zgu@3900 | 137 | * |
zgu@3900 | 138 | * The tag values are associated to sorting orders, so be |
zgu@3900 | 139 | * careful if changes are needed. |
zgu@3900 | 140 | * The allocation records should be sorted ahead of tagging |
zgu@3900 | 141 | * records, which in turn ahead of deallocation records |
zgu@3900 | 142 | */ |
zgu@3900 | 143 | enum MemPointerTags { |
zgu@3900 | 144 | tag_alloc = 0x0001, // malloc or reserve record |
zgu@3900 | 145 | tag_commit = 0x0002, // commit record |
zgu@3900 | 146 | tag_type = 0x0003, // tag virtual memory to a memory type |
zgu@3900 | 147 | tag_uncommit = 0x0004, // uncommit record |
zgu@3900 | 148 | tag_release = 0x0005, // free or release record |
zgu@3900 | 149 | tag_size = 0x0006, // arena size |
zgu@3900 | 150 | tag_masks = 0x0007, // all tag bits |
zgu@3900 | 151 | vmBit = 0x0008 |
zgu@3900 | 152 | }; |
zgu@3900 | 153 | |
zgu@3900 | 154 | /* helper functions to interpret the tagging flags */ |
zgu@3900 | 155 | |
zgu@3900 | 156 | inline static bool is_allocation_record(MEMFLAGS flags) { |
zgu@3900 | 157 | return (flags & tag_masks) == tag_alloc; |
zgu@3900 | 158 | } |
zgu@3900 | 159 | |
zgu@3900 | 160 | inline static bool is_deallocation_record(MEMFLAGS flags) { |
zgu@3900 | 161 | return (flags & tag_masks) == tag_release; |
zgu@3900 | 162 | } |
zgu@3900 | 163 | |
zgu@3900 | 164 | inline static bool is_arena_record(MEMFLAGS flags) { |
zgu@3900 | 165 | return (flags & (otArena | tag_size)) == otArena; |
zgu@3900 | 166 | } |
zgu@3900 | 167 | |
zgu@4274 | 168 | inline static bool is_arena_memory_record(MEMFLAGS flags) { |
zgu@3900 | 169 | return (flags & (otArena | tag_size)) == (otArena | tag_size); |
zgu@3900 | 170 | } |
zgu@3900 | 171 | |
zgu@3900 | 172 | inline static bool is_virtual_memory_record(MEMFLAGS flags) { |
zgu@3900 | 173 | return (flags & vmBit) != 0; |
zgu@3900 | 174 | } |
zgu@3900 | 175 | |
zgu@3900 | 176 | inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) { |
zgu@3900 | 177 | return (flags & 0x0F) == (tag_alloc | vmBit); |
zgu@3900 | 178 | } |
zgu@3900 | 179 | |
zgu@3900 | 180 | inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) { |
zgu@3900 | 181 | return (flags & 0x0F) == (tag_commit | vmBit); |
zgu@3900 | 182 | } |
zgu@3900 | 183 | |
zgu@3900 | 184 | inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) { |
zgu@3900 | 185 | return (flags & 0x0F) == (tag_uncommit | vmBit); |
zgu@3900 | 186 | } |
zgu@3900 | 187 | |
zgu@3900 | 188 | inline static bool is_virtual_memory_release_record(MEMFLAGS flags) { |
zgu@3900 | 189 | return (flags & 0x0F) == (tag_release | vmBit); |
zgu@3900 | 190 | } |
zgu@3900 | 191 | |
zgu@3900 | 192 | inline static bool is_virtual_memory_type_record(MEMFLAGS flags) { |
zgu@3900 | 193 | return (flags & 0x0F) == (tag_type | vmBit); |
zgu@3900 | 194 | } |
zgu@3900 | 195 | |
zgu@3900 | 196 | /* tagging flags */ |
zgu@3900 | 197 | inline static MEMFLAGS malloc_tag() { return tag_alloc; } |
zgu@3900 | 198 | inline static MEMFLAGS free_tag() { return tag_release; } |
zgu@3900 | 199 | inline static MEMFLAGS arena_size_tag() { return tag_size | otArena; } |
zgu@3900 | 200 | inline static MEMFLAGS virtual_memory_tag() { return vmBit; } |
zgu@3900 | 201 | inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); } |
zgu@3900 | 202 | inline static MEMFLAGS virtual_memory_commit_tag() { return (tag_commit | vmBit); } |
zgu@3900 | 203 | inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); } |
zgu@3900 | 204 | inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); } |
zgu@3900 | 205 | inline static MEMFLAGS virtual_memory_type_tag() { return (tag_type | vmBit); } |
zgu@3900 | 206 | |
zgu@3900 | 207 | public: |
zgu@3900 | 208 | MemPointerRecord(): _size(0), _flags(mtNone) { } |
zgu@3900 | 209 | |
zgu@3900 | 210 | MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0): |
zgu@3900 | 211 | MemPointer(addr), _flags(memflags), _size(size) { } |
zgu@3900 | 212 | |
zgu@3900 | 213 | MemPointerRecord(const MemPointerRecord& copy_from): |
zgu@3900 | 214 | MemPointer(copy_from), _flags(copy_from.flags()), |
zgu@3900 | 215 | _size(copy_from.size()) { |
zgu@3900 | 216 | } |
zgu@3900 | 217 | |
zgu@3900 | 218 | /* MemPointerRecord is not sequenced, it always return |
zgu@3900 | 219 | * 0 to indicate non-sequenced |
zgu@3900 | 220 | */ |
zgu@3900 | 221 | virtual jint seq() const { return 0; } |
zgu@3900 | 222 | |
zgu@3900 | 223 | inline size_t size() const { return _size; } |
zgu@3900 | 224 | inline void set_size(size_t size) { _size = size; } |
zgu@3900 | 225 | |
zgu@3900 | 226 | inline MEMFLAGS flags() const { return _flags; } |
zgu@3900 | 227 | inline void set_flags(MEMFLAGS flags) { _flags = flags; } |
zgu@3900 | 228 | |
zgu@3900 | 229 | MemPointerRecord& operator= (const MemPointerRecord& ptr) { |
zgu@3900 | 230 | MemPointer::operator=(ptr); |
zgu@3900 | 231 | _flags = ptr.flags(); |
zgu@3900 | 232 | #ifdef ASSERT |
zgu@3900 | 233 | if (IS_ARENA_OBJ(_flags)) { |
zgu@3900 | 234 | assert(!is_vm_pointer(), "wrong flags"); |
zgu@3900 | 235 | assert((_flags & ot_masks) == otArena, "wrong flags"); |
zgu@3900 | 236 | } |
zgu@3900 | 237 | #endif |
zgu@3900 | 238 | _size = ptr.size(); |
zgu@3900 | 239 | return *this; |
zgu@3900 | 240 | } |
zgu@3900 | 241 | |
zgu@3900 | 242 | // if the pointer represents a malloc-ed memory address |
zgu@3900 | 243 | inline bool is_malloced_pointer() const { |
zgu@3900 | 244 | return !is_vm_pointer(); |
zgu@3900 | 245 | } |
zgu@3900 | 246 | |
zgu@3900 | 247 | // if the pointer represents a virtual memory address |
zgu@3900 | 248 | inline bool is_vm_pointer() const { |
zgu@3900 | 249 | return is_virtual_memory_record(_flags); |
zgu@3900 | 250 | } |
zgu@3900 | 251 | |
zgu@3900 | 252 | // if this record records a 'malloc' or virtual memory |
zgu@3900 | 253 | // 'reserve' call |
zgu@3900 | 254 | inline bool is_allocation_record() const { |
zgu@3900 | 255 | return is_allocation_record(_flags); |
zgu@3900 | 256 | } |
zgu@3900 | 257 | |
zgu@3900 | 258 | // if this record records a size information of an arena |
zgu@4274 | 259 | inline bool is_arena_memory_record() const { |
zgu@4274 | 260 | return is_arena_memory_record(_flags); |
zgu@3900 | 261 | } |
zgu@3900 | 262 | |
zgu@3900 | 263 | // if this pointer represents an address to an arena object |
zgu@3900 | 264 | inline bool is_arena_record() const { |
zgu@3900 | 265 | return is_arena_record(_flags); |
zgu@3900 | 266 | } |
zgu@3900 | 267 | |
zgu@3900 | 268 | // if this record represents a size information of specific arena |
zgu@4274 | 269 | inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) { |
zgu@4274 | 270 | assert(is_arena_memory_record(), "not size record"); |
zgu@3900 | 271 | assert(arena_rc->is_arena_record(), "not arena record"); |
zgu@3900 | 272 | return (arena_rc->addr() + sizeof(void*)) == addr(); |
zgu@3900 | 273 | } |
zgu@3900 | 274 | |
zgu@3900 | 275 | // if this record records a 'free' or virtual memory 'free' call |
zgu@3900 | 276 | inline bool is_deallocation_record() const { |
zgu@3900 | 277 | return is_deallocation_record(_flags); |
zgu@3900 | 278 | } |
zgu@3900 | 279 | |
zgu@3900 | 280 | // if this record records a virtual memory 'commit' call |
zgu@3900 | 281 | inline bool is_commit_record() const { |
zgu@3900 | 282 | return is_virtual_memory_commit_record(_flags); |
zgu@3900 | 283 | } |
zgu@3900 | 284 | |
zgu@3900 | 285 | // if this record records a virtual memory 'uncommit' call |
zgu@3900 | 286 | inline bool is_uncommit_record() const { |
zgu@3900 | 287 | return is_virtual_memory_uncommit_record(_flags); |
zgu@3900 | 288 | } |
zgu@3900 | 289 | |
zgu@3900 | 290 | // if this record is a tagging record of a virtual memory block |
zgu@3900 | 291 | inline bool is_type_tagging_record() const { |
zgu@3900 | 292 | return is_virtual_memory_type_record(_flags); |
zgu@3900 | 293 | } |
zgu@4193 | 294 | |
zgu@4193 | 295 | // if the two memory pointer records actually represent the same |
zgu@4193 | 296 | // memory block |
zgu@4193 | 297 | inline bool is_same_region(const MemPointerRecord* other) const { |
zgu@4193 | 298 | return (addr() == other->addr() && size() == other->size()); |
zgu@4193 | 299 | } |
zgu@4193 | 300 | |
zgu@4193 | 301 | // if this memory region fully contains another one |
zgu@4193 | 302 | inline bool contains_region(const MemPointerRecord* other) const { |
zgu@4193 | 303 | return contains_region(other->addr(), other->size()); |
zgu@4193 | 304 | } |
zgu@4193 | 305 | |
zgu@4193 | 306 | // if this memory region fully contains specified memory range |
zgu@4193 | 307 | inline bool contains_region(address add, size_t sz) const { |
zgu@4193 | 308 | return (addr() <= add && addr() + size() >= add + sz); |
zgu@4193 | 309 | } |
zgu@4193 | 310 | |
zgu@4193 | 311 | inline bool contains_address(address add) const { |
zgu@4193 | 312 | return (addr() <= add && addr() + size() > add); |
zgu@4193 | 313 | } |
zgu@4248 | 314 | |
zgu@4248 | 315 | // if this memory region overlaps another region |
zgu@4248 | 316 | inline bool overlaps_region(const MemPointerRecord* other) const { |
zgu@4248 | 317 | assert(other != NULL, "Just check"); |
zgu@4248 | 318 | assert(size() > 0 && other->size() > 0, "empty range"); |
zgu@4248 | 319 | return contains_address(other->addr()) || |
zgu@4248 | 320 | contains_address(other->addr() + other->size() - 1) || // exclude end address |
zgu@4248 | 321 | other->contains_address(addr()) || |
zgu@4248 | 322 | other->contains_address(addr() + size() - 1); // exclude end address |
zgu@4248 | 323 | } |
zgu@4248 | 324 | |
zgu@3900 | 325 | }; |
zgu@3900 | 326 | |
zgu@3900 | 327 | // MemPointerRecordEx also records callsite pc, from where |
zgu@3900 | 328 | // the memory block is allocated |
zgu@3900 | 329 | class MemPointerRecordEx : public MemPointerRecord { |
zgu@3900 | 330 | private: |
zgu@3900 | 331 | address _pc; // callsite pc |
zgu@3900 | 332 | |
zgu@3900 | 333 | public: |
zgu@3900 | 334 | MemPointerRecordEx(): _pc(0) { } |
zgu@3900 | 335 | |
zgu@3900 | 336 | MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0): |
zgu@3900 | 337 | MemPointerRecord(addr, memflags, size), _pc(pc) {} |
zgu@3900 | 338 | |
zgu@3900 | 339 | MemPointerRecordEx(const MemPointerRecordEx& copy_from): |
zgu@3900 | 340 | MemPointerRecord(copy_from), _pc(copy_from.pc()) {} |
zgu@3900 | 341 | |
zgu@3900 | 342 | inline address pc() const { return _pc; } |
zgu@3900 | 343 | |
zgu@3900 | 344 | void init(const MemPointerRecordEx* mpe) { |
zgu@3900 | 345 | MemPointerRecord::operator=(*mpe); |
zgu@3900 | 346 | _pc = mpe->pc(); |
zgu@3900 | 347 | } |
zgu@3900 | 348 | |
zgu@3900 | 349 | void init(const MemPointerRecord* mp) { |
zgu@3900 | 350 | MemPointerRecord::operator=(*mp); |
zgu@3900 | 351 | _pc = 0; |
zgu@3900 | 352 | } |
zgu@3900 | 353 | }; |
zgu@3900 | 354 | |
zgu@4193 | 355 | // a virtual memory region. The region can represent a reserved |
zgu@4193 | 356 | // virtual memory region or a committed memory region |
zgu@3900 | 357 | class VMMemRegion : public MemPointerRecord { |
zgu@3900 | 358 | public: |
zgu@4193 | 359 | VMMemRegion() { } |
zgu@3900 | 360 | |
zgu@3900 | 361 | void init(const MemPointerRecord* mp) { |
zgu@4193 | 362 | assert(mp->is_vm_pointer(), "Sanity check"); |
zgu@3900 | 363 | _addr = mp->addr(); |
zgu@3900 | 364 | set_size(mp->size()); |
zgu@3900 | 365 | set_flags(mp->flags()); |
zgu@3900 | 366 | } |
zgu@3900 | 367 | |
zgu@3900 | 368 | VMMemRegion& operator=(const VMMemRegion& other) { |
zgu@3900 | 369 | MemPointerRecord::operator=(other); |
zgu@3900 | 370 | return *this; |
zgu@3900 | 371 | } |
zgu@3900 | 372 | |
zgu@4193 | 373 | inline bool is_reserved_region() const { |
zgu@4193 | 374 | return is_allocation_record(); |
zgu@3900 | 375 | } |
zgu@3900 | 376 | |
zgu@4193 | 377 | inline bool is_committed_region() const { |
zgu@4193 | 378 | return is_commit_record(); |
zgu@3900 | 379 | } |
zgu@3900 | 380 | |
zgu@3900 | 381 | /* base address of this virtual memory range */ |
zgu@3900 | 382 | inline address base() const { |
zgu@3900 | 383 | return addr(); |
zgu@3900 | 384 | } |
zgu@3900 | 385 | |
zgu@3900 | 386 | /* tag this virtual memory range to the specified memory type */ |
zgu@3900 | 387 | inline void tag(MEMFLAGS f) { |
zgu@3900 | 388 | set_flags(flags() | (f & mt_masks)); |
zgu@3900 | 389 | } |
zgu@3900 | 390 | |
zgu@4193 | 391 | // expand this region to also cover specified range. |
zgu@4193 | 392 | // The range has to be on either end of the memory region. |
zgu@4193 | 393 | void expand_region(address addr, size_t sz) { |
zgu@4193 | 394 | if (addr < base()) { |
zgu@4193 | 395 | assert(addr + sz == base(), "Sanity check"); |
zgu@4193 | 396 | _addr = addr; |
zgu@4193 | 397 | set_size(size() + sz); |
zgu@4193 | 398 | } else { |
zgu@4193 | 399 | assert(base() + size() == addr, "Sanity check"); |
zgu@4193 | 400 | set_size(size() + sz); |
zgu@4193 | 401 | } |
zgu@4193 | 402 | } |
zgu@4193 | 403 | |
zgu@4193 | 404 | // exclude the specified address range from this region. |
zgu@4193 | 405 | // The excluded memory range has to be on either end of this memory |
zgu@4193 | 406 | // region. |
zgu@4193 | 407 | inline void exclude_region(address add, size_t sz) { |
zgu@4193 | 408 | assert(is_reserved_region() || is_committed_region(), "Sanity check"); |
zgu@4193 | 409 | assert(addr() != NULL && size() != 0, "Sanity check"); |
zgu@4193 | 410 | assert(add >= addr() && add < addr() + size(), "Sanity check"); |
zgu@3900 | 411 | assert(add == addr() || (add + sz) == (addr() + size()), |
zgu@4193 | 412 | "exclude in the middle"); |
zgu@3900 | 413 | if (add == addr()) { |
zgu@3900 | 414 | set_addr(add + sz); |
zgu@3900 | 415 | set_size(size() - sz); |
zgu@3900 | 416 | } else { |
zgu@3900 | 417 | set_size(size() - sz); |
zgu@3900 | 418 | } |
zgu@3900 | 419 | } |
zgu@3900 | 420 | }; |
zgu@3900 | 421 | |
zgu@3900 | 422 | class VMMemRegionEx : public VMMemRegion { |
zgu@3900 | 423 | private: |
zgu@3900 | 424 | jint _seq; // sequence number |
zgu@3900 | 425 | |
zgu@3900 | 426 | public: |
zgu@3900 | 427 | VMMemRegionEx(): _pc(0) { } |
zgu@3900 | 428 | |
zgu@3900 | 429 | void init(const MemPointerRecordEx* mpe) { |
zgu@3900 | 430 | VMMemRegion::init(mpe); |
zgu@3900 | 431 | _pc = mpe->pc(); |
zgu@3900 | 432 | } |
zgu@3900 | 433 | |
zgu@3900 | 434 | void init(const MemPointerRecord* mpe) { |
zgu@3900 | 435 | VMMemRegion::init(mpe); |
zgu@3900 | 436 | _pc = 0; |
zgu@3900 | 437 | } |
zgu@3900 | 438 | |
zgu@3900 | 439 | VMMemRegionEx& operator=(const VMMemRegionEx& other) { |
zgu@3900 | 440 | VMMemRegion::operator=(other); |
zgu@3900 | 441 | _pc = other.pc(); |
zgu@3900 | 442 | return *this; |
zgu@3900 | 443 | } |
zgu@3900 | 444 | |
zgu@3900 | 445 | inline address pc() const { return _pc; } |
zgu@3900 | 446 | private: |
zgu@3900 | 447 | address _pc; |
zgu@3900 | 448 | }; |
zgu@3900 | 449 | |
zgu@3900 | 450 | /* |
zgu@3900 | 451 | * Sequenced memory record |
zgu@3900 | 452 | */ |
zgu@3900 | 453 | class SeqMemPointerRecord : public MemPointerRecord { |
zgu@3900 | 454 | private: |
zgu@3900 | 455 | jint _seq; // sequence number |
zgu@3900 | 456 | |
zgu@3900 | 457 | public: |
zgu@3900 | 458 | SeqMemPointerRecord(): _seq(0){ } |
zgu@3900 | 459 | |
zgu@5272 | 460 | SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq) |
zgu@5272 | 461 | : MemPointerRecord(addr, flags, size), _seq(seq) { |
zgu@3900 | 462 | } |
zgu@3900 | 463 | |
zgu@3900 | 464 | SeqMemPointerRecord(const SeqMemPointerRecord& copy_from) |
zgu@3900 | 465 | : MemPointerRecord(copy_from) { |
zgu@3900 | 466 | _seq = copy_from.seq(); |
zgu@3900 | 467 | } |
zgu@3900 | 468 | |
zgu@3900 | 469 | SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) { |
zgu@3900 | 470 | MemPointerRecord::operator=(ptr); |
zgu@3900 | 471 | _seq = ptr.seq(); |
zgu@3900 | 472 | return *this; |
zgu@3900 | 473 | } |
zgu@3900 | 474 | |
zgu@3900 | 475 | inline jint seq() const { |
zgu@3900 | 476 | return _seq; |
zgu@3900 | 477 | } |
zgu@3900 | 478 | }; |
zgu@3900 | 479 | |
zgu@3900 | 480 | |
zgu@3900 | 481 | |
zgu@3900 | 482 | class SeqMemPointerRecordEx : public MemPointerRecordEx { |
zgu@3900 | 483 | private: |
zgu@3900 | 484 | jint _seq; // sequence number |
zgu@3900 | 485 | |
zgu@3900 | 486 | public: |
zgu@3900 | 487 | SeqMemPointerRecordEx(): _seq(0) { } |
zgu@3900 | 488 | |
zgu@3900 | 489 | SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size, |
zgu@5272 | 490 | jint seq, address pc): |
zgu@5272 | 491 | MemPointerRecordEx(addr, flags, size, pc), _seq(seq) { |
zgu@3900 | 492 | } |
zgu@3900 | 493 | |
zgu@3900 | 494 | SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from) |
zgu@3900 | 495 | : MemPointerRecordEx(copy_from) { |
zgu@3900 | 496 | _seq = copy_from.seq(); |
zgu@3900 | 497 | } |
zgu@3900 | 498 | |
zgu@3900 | 499 | SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) { |
zgu@3900 | 500 | MemPointerRecordEx::operator=(ptr); |
zgu@3900 | 501 | _seq = ptr.seq(); |
zgu@3900 | 502 | return *this; |
zgu@3900 | 503 | } |
zgu@3900 | 504 | |
zgu@3900 | 505 | inline jint seq() const { |
zgu@3900 | 506 | return _seq; |
zgu@3900 | 507 | } |
zgu@3900 | 508 | }; |
zgu@3900 | 509 | |
zgu@3900 | 510 | #endif // SHARE_VM_SERVICES_MEM_PTR_HPP |