--- a/src/share/vm/services/memSnapshot.cpp	Mon Nov 05 13:55:31 2012 -0800
+++ b/src/share/vm/services/memSnapshot.cpp	Fri Nov 09 19:24:31 2012 -0500
@@ -50,7 +50,7 @@
       tty->print_cr(" (tag)");
     }
   } else {
-    if (rec->is_arena_size_record()) {
+    if (rec->is_arena_memory_record()) {
       tty->print_cr(" (arena size)");
     } else if (rec->is_allocation_record()) {
       tty->print_cr(" (malloc)");
@@ -390,21 +390,31 @@
   }
 }
 
-void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+
+void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
   assert(dest != NULL && src != NULL, "Just check");
   assert(dest->addr() == src->addr(), "Just check");
+  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
 
-  MEMFLAGS flags = dest->flags();
+  if (MemTracker::track_callsite()) {
+    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
+  } else {
+    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
+  }
+}
+
+void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
+  assert(src != NULL && dest != NULL, "Just check");
+  assert(dest->seq() == 0 && src->seq() >0, "cast away sequence");
 
   if (MemTracker::track_callsite()) {
     *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
   } else {
-    *dest = *src;
+    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
   }
 }
 
-
-// merge a per-thread memory recorder to the staging area
+// merge a recorder to the staging area
 bool MemSnapshot::merge(MemRecorder* rec) {
   assert(rec != NULL && !rec->out_of_memory(), "Just check");
 
@@ -412,71 +422,45 @@
 
   MutexLockerEx lock(_lock, true);
   MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
-  MemPointerRecord *p1, *p2;
-  p1 = (MemPointerRecord*) itr.current();
-  while (p1 != NULL) {
-    if (p1->is_vm_pointer()) {
+  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
+  MemPointerRecord* matched_rec;
+
+  while (incoming_rec != NULL) {
+    if (incoming_rec->is_vm_pointer()) {
       // we don't do anything with virtual memory records during merge
-      if (!_staging_area.vm_data()->append(p1)) {
+      if (!_staging_area.vm_data()->append(incoming_rec)) {
         return false;
       }
     } else {
       // locate matched record and/or also position the iterator to proper
       // location for this incoming record.
-      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
-      // we have not seen this memory block, so just add to staging area
-      if (p2 == NULL) {
-        if (!malloc_staging_itr.insert(p1)) {
+      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
+      // we have not seen this memory block in this generation,
+      // so just add to staging area
+      if (matched_rec == NULL) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
           return false;
         }
-      } else if (p1->addr() == p2->addr()) {
-        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-        // a memory block can have many tagging records, find right one to replace or
-        // right position to insert
-        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
-          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
-            (p1->flags() & MemPointerRecord::tag_masks)) {
-            p2 = (MemPointerRecord*)malloc_staging_itr.next();
-            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-          } else {
-            break;
-          }
+      } else if (incoming_rec->addr() == matched_rec->addr()) {
+        // whoever has higher sequence number wins
+        if (incoming_rec->seq() > matched_rec->seq()) {
+          copy_seq_pointer(matched_rec, incoming_rec);
         }
-        int df = (p1->flags() & MemPointerRecord::tag_masks) -
-          (p2->flags() & MemPointerRecord::tag_masks);
-        if (df == 0) {
-          assert(p1->seq() > 0, "not sequenced");
-          assert(p2->seq() > 0, "not sequenced");
-          if (p1->seq() > p2->seq()) {
-            copy_pointer(p2, p1);
-          }
-        } else if (df < 0) {
-          if (!malloc_staging_itr.insert(p1)) {
-            return false;
-          }
-        } else {
-          if (!malloc_staging_itr.insert_after(p1)) {
-            return false;
-          }
-        }
-      } else if (p1->addr() < p2->addr()) {
-        if (!malloc_staging_itr.insert(p1)) {
+      } else if (incoming_rec->addr() < matched_rec->addr()) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
-        if (!malloc_staging_itr.insert_after(p1)) {
-          return false;
-        }
+        ShouldNotReachHere();
      }
    }
-    p1 = (MemPointerRecord*)itr.next();
+    incoming_rec = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(void check_staging_data();)
  return true;
 }
 
 
-
 // promote data to next generation
 bool MemSnapshot::promote() {
   assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
@@ -507,20 +491,25 @@
     // found matched memory block
     if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
       // snapshot already contains 'live' records
-      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
         "Sanity check");
       // update block states
-      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
-        copy_pointer(matched_rec, new_rec);
+      if (new_rec->is_allocation_record()) {
+        assign_pointer(matched_rec, new_rec);
+      } else if (new_rec->is_arena_memory_record()) {
+        if (new_rec->size() == 0) {
+          // remove size record once size drops to 0
+          malloc_snapshot_itr.remove();
+        } else {
+          assign_pointer(matched_rec, new_rec);
+        }
       } else {
         // a deallocation record
         assert(new_rec->is_deallocation_record(), "Sanity check");
         // an arena record can be followed by a size record, we need to remove both
         if (matched_rec->is_arena_record()) {
           MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
-          if (next->is_arena_size_record()) {
-            // it has to match the arena record
-            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+          if (next->is_arena_memory_record() && next->is_memory_record_of_arena(matched_rec)) {
             malloc_snapshot_itr.remove();
           }
         }
@@ -528,17 +517,13 @@
         malloc_snapshot_itr.remove();
       }
     } else {
-      // it is a new record, insert into snapshot
-      if (new_rec->is_arena_size_record()) {
-        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
-        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
-          // no matched arena record, ignore the size record
-          new_rec = NULL;
-        }
+      // don't insert size 0 record
+      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
+        new_rec = NULL;
       }
-      // only 'live' record can go into snapshot
+
      if (new_rec != NULL) {
-        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;