src/share/vm/services/memSnapshot.cpp

changeset 4193:716c64bda5ba
parent    4053:33143ee07800
child     4248:69ad7823b1ca
--- a/src/share/vm/services/memSnapshot.cpp	Thu Oct 18 13:09:47 2012 -0400
+++ b/src/share/vm/services/memSnapshot.cpp	Fri Oct 19 21:40:07 2012 -0400
@@ -31,6 +31,220 @@
 #include "services/memSnapshot.hpp"
 #include "services/memTracker.hpp"
 
+
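+// Helpers for the iterator: wrap the incoming MemPointerRecord in a
+// VMMemRegion(Ex), carrying the callsite pc when callsite tracking is on,
+// then insert it at (insert_record) or right after (insert_record_after)
+// the iterator's current position.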
+bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
+  VMMemRegionEx new_rec;
+  assert(rec->is_allocation_record() || rec->is_commit_record(),
+    "Sanity check");
+  if (MemTracker::track_callsite()) {
+    new_rec.init((MemPointerRecordEx*)rec);
+  } else {
+    new_rec.init(rec);
+  }
+  return insert(&new_rec);
+}
+
+bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
+  VMMemRegionEx new_rec;
+  assert(rec->is_allocation_record() || rec->is_commit_record(),
+    "Sanity check");
+  if (MemTracker::track_callsite()) {
+    new_rec.init((MemPointerRecordEx*)rec);
+  } else {
+    new_rec.init(rec);
+  }
+  return insert_after(&new_rec);
+}
+
+// we don't consolidate reserved regions, since they may be tagged with
+// different memory types.
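+// For example, two adjacent reserved regions, one tagged mtThreadStack and
+// the other mtCode, stay as two separate records.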
+bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
+  assert(rec->is_allocation_record(), "Sanity check");
+  VMMemRegion* cur = (VMMemRegion*)current();
+
+  // we don't have anything yet
+  if (cur == NULL) {
+    return insert_record(rec);
+  }
+
+  assert(cur->is_reserved_region(), "Sanity check");
+  // duplicated records
+  if (cur->is_same_region(rec)) {
+    return true;
+  }
+  assert(cur->base() > rec->addr(), "Just check: locate()");
+  assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+  return insert_record(rec);
+}
+
+// we do consolidate committed regions
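+// For example, within reserved region [0x1000, 0x3000), committing
+// [0x1000, 0x1800) and then [0x1800, 0x2000) leaves a single committed
+// record covering [0x1000, 0x2000). (Addresses are illustrative only.)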
+bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
+  assert(rec->is_commit_record(), "Sanity check");
+  VMMemRegion* cur;
+  cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+
+  // thread's native stack is always marked as "committed", ignore
+  // the "commit" operation for creating stack guard pages
+  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
+    return true;
+  }
+
+  cur = (VMMemRegion*)next();
+  while (cur != NULL && cur->is_committed_region()) {
+    // duplicated commit records
+    if (cur->contains_region(rec)) {
+      return true;
+    }
+    if (cur->base() > rec->addr()) {
+      // committed regions can not overlap
+      assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+      if (rec->addr() + rec->size() == cur->base()) {
+        cur->expand_region(rec->addr(), rec->size());
+        return true;
+      } else {
+        return insert_record(rec);
+      }
+    } else if (cur->base() + cur->size() == rec->addr()) {
+      cur->expand_region(rec->addr(), rec->size());
+      VMMemRegion* next_reg = (VMMemRegion*)next();
+      // see if we can consolidate the next committed region
+      if (next_reg != NULL && next_reg->is_committed_region() &&
+          next_reg->base() == cur->base() + cur->size()) {
+        cur->expand_region(next_reg->base(), next_reg->size());
+        remove();
+      }
+      return true;
+    }
+    cur = (VMMemRegion*)next();
+  }
+  return insert_record(rec);
+}
+
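+// Uncommitting the middle of a committed region splits it in two. For
+// example, uncommitting [0x1800, 0x2000) from committed region
+// [0x1000, 0x3000) leaves [0x1000, 0x1800) and [0x2000, 0x3000); the high
+// half is inserted as a new record. (Addresses are illustrative only.)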
+bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
+  assert(rec->is_uncommit_record(), "Sanity check");
+  VMMemRegion* cur;
+  cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+  // thread's native stack is always marked as "committed", ignore
+  // the "uncommit" operation for removing stack guard pages
+  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
+    return true;
+  }
+
+  cur = (VMMemRegion*)next();
+  while (cur != NULL && cur->is_committed_region()) {
+    // the region is already uncommitted, which must be due to a duplicated
+    // record
+    if (cur->addr() >= rec->addr() + rec->size()) {
+      break;
+    } else if (cur->contains_region(rec)) {
+      // uncommit the whole region
+      if (cur->is_same_region(rec)) {
+        remove();
+        break;
+      } else if (rec->addr() == cur->addr() ||
+        rec->addr() + rec->size() == cur->addr() + cur->size()) {
+        // uncommit from either end of the current memory region.
+        cur->exclude_region(rec->addr(), rec->size());
+        break;
+      } else { // split the committed region and uncommit the middle
+        address high_addr = cur->addr() + cur->size();
+        size_t sz = high_addr - rec->addr();
+        cur->exclude_region(rec->addr(), sz);
+        sz = high_addr - (rec->addr() + rec->size());
+        if (MemTracker::track_callsite()) {
+          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
+             ((VMMemRegionEx*)cur)->pc());
+          return insert_record_after(&tmp);
+        } else {
+          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
+          return insert_record_after(&tmp);
+        }
+      }
+    }
+    cur = (VMMemRegion*)next();
+  }
+
+  // we may not find a committed record due to duplicated records
+  return true;
+}
+
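+// Releasing the middle of a reserved region splits it in two. For example,
+// releasing [0x2000, 0x3000) from reserved region [0x1000, 0x4000) leaves
+// [0x1000, 0x2000) and [0x3000, 0x4000). (Addresses are illustrative only.)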
+bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
+  assert(rec->is_deallocation_record(), "Sanity check");
+  VMMemRegion* cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+#ifdef ASSERT
+  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
+  // should not have any committed memory in this reserved region
+  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
+#endif
+  if (rec->is_same_region(cur)) {
+    remove();
+  } else if (rec->addr() == cur->addr() ||
+    rec->addr() + rec->size() == cur->addr() + cur->size()) {
+    // released region is at either end of this region
+    cur->exclude_region(rec->addr(), rec->size());
+  } else { // split the reserved region and release the middle
+    address high_addr = cur->addr() + cur->size();
+    size_t sz = high_addr - rec->addr();
+    cur->exclude_region(rec->addr(), sz);
+    sz = high_addr - rec->addr() - rec->size();
+    if (MemTracker::track_callsite()) {
+      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
+        ((VMMemRegionEx*)cur)->pc());
+      return insert_reserved_region(&tmp);
+    } else {
+      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
+      return insert_reserved_region(&tmp);
+    }
+  }
+  return true;
+}
+
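+// A reserved region's record is followed by the records of its committed
+// sub-regions, so a new reserved region has to be inserted after any
+// committed records that belong to the preceding reserved region.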
+bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
+  // skip all 'commit' records associated with the previous reserved region
+  VMMemRegion* p = (VMMemRegion*)next();
+  while (p != NULL && p->is_committed_region() &&
+         p->base() + p->size() < rec->addr()) {
+    p = (VMMemRegion*)next();
+  }
+  return insert_record(rec);
+}
+
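+// For example, splitting reserved region [0x1000, 0x4000) around a new
+// region [0x2000, 0x3000) yields three reserved records: [0x1000, 0x2000),
+// [0x2000, 0x3000) and [0x3000, 0x4000), all keeping the original flags
+// and callsite pc. (Addresses are illustrative only.)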
+bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
+  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
+  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
+  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the original region
+    size_t sz = rgn->size() - new_rgn_size;
+    // the original region becomes the 'new' region
+    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
+    // the remaining area becomes the next region
+    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
+    return insert_reserved_region(&next_rgn);
+  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
+    rgn->exclude_region(new_rgn_addr, new_rgn_size);
+    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
+    return insert_reserved_region(&next_rgn);
+  } else {
+    // the original region will be split into three
+    address rgn_high_addr = rgn->base() + rgn->size();
+    // first region
+    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
+    // the second region is the new region
+    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
+    if (!insert_reserved_region(&new_rgn)) return false;
+    // the remaining region
+    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
+      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
+    return insert_reserved_region(&rem_rgn);
+  }
+}
+
 static int sort_in_seq_order(const void* p1, const void* p2) {
   assert(p1 != NULL && p2 != NULL, "Sanity check");
   const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
@@ -61,11 +275,11 @@
 }
 
 
-MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
+VMRecordIterator StagingArea::virtual_memory_record_walker() {
   MemPointerArray* arr = vm_data();
   // sort into seq number order
   arr->sort((FN_SORT)sort_in_seq_order);
-  return MemPointerArrayIteratorImpl(arr);
+  return VMRecordIterator(arr);
 }
 
 
@@ -135,6 +349,8 @@
         return false;
       }
     } else {
+      // locate the matched record and/or position the iterator at the
+      // proper location for this incoming record.
       p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
       // we have not seen this memory block, so just add to staging area
       if (p2 == NULL) {
@@ -199,7 +415,7 @@
   MallocRecordIterator  malloc_itr = _staging_area.malloc_record_walker();
   bool promoted = false;
   if (promote_malloc_records(&malloc_itr)) {
-    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
+    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
     if (promote_virtual_memory_records(&vm_itr)) {
       promoted = true;
     }
@@ -218,7 +434,7 @@
     matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
     // found matched memory block
     if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
-      // snapshot already contains 'lived' records
+      // snapshot already contains 'live' records
       assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
              "Sanity check");
       // update block states
@@ -277,87 +493,60 @@
 bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
   VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
   MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  VMMemRegionEx new_vm_rec;
-  VMMemRegion*  matched_rec;
+  VMMemRegion*  reserved_rec;
   while (new_rec != NULL) {
     assert(new_rec->is_vm_pointer(), "Sanity check");
-    if (MemTracker::track_callsite()) {
-      new_vm_rec.init((MemPointerRecordEx*)new_rec);
-    } else {
-      new_vm_rec.init(new_rec);
-    }
-    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
-    if (matched_rec != NULL &&
-        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
+
+    // locate a reserved region that contains the specified address, or the
+    // nearest reserved region whose base address is just above the
+    // specified address
+    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
+    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
       // snapshot can only have 'live' records
-      assert(matched_rec->is_reserve_record(), "Sanity check");
-      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
-        // resize reserved virtual memory range
-        // resize has to cover committed area
-        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
-        matched_rec->set_reserved_size(new_vm_rec.size());
-      } else if (new_vm_rec.is_commit_record()) {
-        // commit memory inside reserved memory range
-        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
-        // thread stacks are marked committed, so we ignore 'commit' record for creating
-        // stack guard pages
-        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
-          matched_rec->commit(new_vm_rec.committed_size());
-        }
-      } else if (new_vm_rec.is_uncommit_record()) {
-        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
-          // ignore 'uncommit' record from removing stack guard pages, uncommit
-          // thread stack as whole
-          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
-            matched_rec->uncommit(new_vm_rec.committed_size());
-          }
-        } else {
-          // uncommit memory inside reserved memory range
-          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
-                "Sanity check");
-          matched_rec->uncommit(new_vm_rec.committed_size());
-        }
-      } else if (new_vm_rec.is_type_tagging_record()) {
-        // tag this virtual memory range to a memory type
-        // can not re-tag a memory range to different type
-        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
-               FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
-               "Sanity check");
-        matched_rec->tag(new_vm_rec.flags());
-      } else if (new_vm_rec.is_release_record()) {
-        // release part or whole memory range
-        if (new_vm_rec.base() == matched_rec->base() &&
-            new_vm_rec.size() == matched_rec->size()) {
-          // release whole virtual memory range
-          assert(matched_rec->committed_size() == 0, "Sanity check");
-          vm_snapshot_itr.remove();
-        } else {
-          // partial release
-          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
-        }
-      } else {
-        // multiple reserve/commit on the same virtual memory range
-        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
-          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
-          "Sanity check");
-        matched_rec->tag(new_vm_rec.flags());
-      }
-    } else {
-      // no matched record
-      if (new_vm_rec.is_reserve_record()) {
-        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
-          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
-            return false;
-          }
-        } else {
-          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
+      assert(reserved_rec->is_reserved_region(), "Sanity check");
+      if (new_rec->is_allocation_record()) {
+        if (!reserved_rec->is_same_region(new_rec)) {
+          // only deal with splitting a bigger reserved region into
+          // smaller regions. So far, CDS is the only use case.
+          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
             return false;
           }
         }
-      } else {
-        // throw out obsolete records, which are the commit/uncommit/release/tag records
-        // on memory regions that are already released.
-      }
+      } else if (new_rec->is_uncommit_record()) {
+        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_commit_record()) {
+        // insert a new committed region, or expand an existing one to
+        // cover the newly committed area
+        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_deallocation_record()) {
+        // release part or all of the memory region
+        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_type_tagging_record()) {
+        // tag this reserved virtual memory range with a memory type; a
+        // range cannot be re-tagged to a different type.
+        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
+               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
+               "Sanity check");
+        reserved_rec->tag(new_rec->flags());
+      } else {
+        ShouldNotReachHere();
+      }
+    } else {
+      /*
+       * An assertion failure here indicates mismatched virtual memory
+       * records. The likely scenario is that some virtual memory operations
+       * did not go through the os::xxxx_memory() API and therefore have to
+       * be tracked manually (perfMemory is an example).
+       */
+      assert(new_rec->is_allocation_record(), "Sanity check");
+      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
+        return false;
+      }
+    }
     new_rec = (MemPointerRecord*)itr->next();
   }
@@ -433,5 +622,33 @@
     cur = (MemPointerRecord*)vm_itr.next();
   }
 }
+
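+// Debug-only dump of the virtual memory snapshot: reserved regions print
+// at column zero and committed regions are indented, each as
+// [base - end] [flags], followed by the reserving callsite's symbol when
+// callsite tracking is on and the pc can be resolved.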
+void MemSnapshot::dump_all_vm_pointers() {
+  MemPointerArrayIteratorImpl itr(_vm_ptrs);
+  VMMemRegion* ptr = (VMMemRegion*)itr.current();
+  tty->print_cr("dump virtual memory pointers:");
+  while (ptr != NULL) {
+    if (ptr->is_committed_region()) {
+      tty->print("\t");
+    }
+    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
+      (ptr->addr() + ptr->size()), ptr->flags());
+
+    if (MemTracker::track_callsite()) {
+      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
+      if (ex->pc() != NULL) {
+        char buf[1024];
+        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
+          tty->print_cr("\t%s", buf);
+        } else {
+          tty->print_cr("");
+        }
+      }
+    }
+
+    ptr = (VMMemRegion*)itr.next();
+  }
+  tty->flush();
+}
 #endif // ASSERT
 
