changeset:   7074:833b0f92429a
parent:      7073:4d3a43351904
child:       7075:ac12996df59b
author:      zgu
date:        Wed, 27 Aug 2014 08:19:12 -0400

8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill

make/excludeSrc.make
src/os/bsd/vm/os_bsd.cpp
src/os/bsd/vm/perfMemory_bsd.cpp
src/os/linux/vm/os_linux.cpp
src/os/linux/vm/os_linux.hpp
src/os/linux/vm/perfMemory_linux.cpp
src/os/posix/vm/os_posix.cpp
src/os/solaris/vm/perfMemory_solaris.cpp
src/os/windows/vm/os_windows.cpp
src/os/windows/vm/perfMemory_windows.cpp
src/share/vm/asm/codeBuffer.cpp
src/share/vm/c1/c1_Compiler.cpp
src/share/vm/ci/ciEnv.cpp
src/share/vm/ci/ciObjectFactory.cpp
src/share/vm/classfile/symbolTable.cpp
src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
src/share/vm/memory/allocation.cpp
src/share/vm/memory/allocation.hpp
src/share/vm/memory/allocation.inline.hpp
src/share/vm/memory/cardTableRS.cpp
src/share/vm/memory/collectorPolicy.cpp
src/share/vm/memory/heapInspection.cpp
src/share/vm/memory/memRegion.cpp
src/share/vm/memory/resourceArea.hpp
src/share/vm/opto/compile.cpp
src/share/vm/opto/type.cpp
src/share/vm/precompiled/precompiled.hpp
src/share/vm/prims/jni.cpp
src/share/vm/prims/whitebox.cpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/globals.hpp
src/share/vm/runtime/handles.hpp
src/share/vm/runtime/init.cpp
src/share/vm/runtime/java.cpp
src/share/vm/runtime/os.cpp
src/share/vm/runtime/os.hpp
src/share/vm/runtime/safepoint.cpp
src/share/vm/runtime/thread.cpp
src/share/vm/runtime/thread.hpp
src/share/vm/services/allocationSite.hpp
src/share/vm/services/mallocSiteTable.cpp
src/share/vm/services/mallocSiteTable.hpp
src/share/vm/services/mallocTracker.cpp
src/share/vm/services/mallocTracker.hpp
src/share/vm/services/mallocTracker.inline.hpp
src/share/vm/services/memBaseline.cpp
src/share/vm/services/memBaseline.hpp
src/share/vm/services/memPtr.cpp
src/share/vm/services/memPtr.hpp
src/share/vm/services/memPtrArray.hpp
src/share/vm/services/memRecorder.cpp
src/share/vm/services/memRecorder.hpp
src/share/vm/services/memReporter.cpp
src/share/vm/services/memReporter.hpp
src/share/vm/services/memSnapshot.cpp
src/share/vm/services/memSnapshot.hpp
src/share/vm/services/memTrackWorker.cpp
src/share/vm/services/memTrackWorker.hpp
src/share/vm/services/memTracker.cpp
src/share/vm/services/memTracker.hpp
src/share/vm/services/nmtCommon.cpp
src/share/vm/services/nmtCommon.hpp
src/share/vm/services/nmtDCmd.cpp
src/share/vm/services/nmtDCmd.hpp
src/share/vm/services/virtualMemoryTracker.cpp
src/share/vm/services/virtualMemoryTracker.hpp
src/share/vm/utilities/linkedlist.cpp
src/share/vm/utilities/linkedlist.hpp
src/share/vm/utilities/nativeCallStack.cpp
src/share/vm/utilities/nativeCallStack.hpp
src/share/vm/utilities/vmError.cpp
test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
     1.1 --- a/make/excludeSrc.make	Wed Aug 27 09:36:55 2014 +0200
     1.2 +++ b/make/excludeSrc.make	Wed Aug 27 08:19:12 2014 -0400
     1.3 @@ -118,8 +118,8 @@
     1.4        CFLAGS += -DINCLUDE_NMT=0
     1.5  
     1.6        Src_Files_EXCLUDE += \
     1.7 -	 memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
     1.8 -	 memTracker.cpp nmtDCmd.cpp
     1.9 +	 memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
    1.10 +	 memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
    1.11  endif
    1.12  
    1.13  -include $(HS_ALT_MAKE)/excludeSrc.make
     2.1 --- a/src/os/bsd/vm/os_bsd.cpp	Wed Aug 27 09:36:55 2014 +0200
     2.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Wed Aug 27 08:19:12 2014 -0400
     2.3 @@ -2434,23 +2434,25 @@
     2.4    }
     2.5  
     2.6    // The memory is committed
     2.7 -  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
     2.8 +  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
     2.9  
    2.10    return addr;
    2.11  }
    2.12  
    2.13  bool os::release_memory_special(char* base, size_t bytes) {
    2.14 -  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    2.15 -  // detaching the SHM segment will also delete it, see reserve_memory_special()
    2.16 -  int rslt = shmdt(base);
    2.17 -  if (rslt == 0) {
    2.18 -    tkr.record((address)base, bytes);
    2.19 -    return true;
    2.20 +  if (MemTracker::tracking_level() > NMT_minimal) {
    2.21 +    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    2.22 +    // detaching the SHM segment will also delete it, see reserve_memory_special()
    2.23 +    int rslt = shmdt(base);
    2.24 +    if (rslt == 0) {
    2.25 +      tkr.record((address)base, bytes);
    2.26 +      return true;
    2.27 +    } else {
    2.28 +      return false;
    2.29 +    }
    2.30    } else {
    2.31 -    tkr.discard();
    2.32 -    return false;
    2.33 +    return shmdt(base) == 0;
    2.34    }
    2.35 -
    2.36  }
    2.37  
    2.38  size_t os::large_page_size() {
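
The os_bsd.cpp hunk above shows the release pattern this changeset uses wherever virtual memory is freed: consult MemTracker::tracking_level() first, construct a Tracker only when tracking is above NMT_minimal, and call record() only after the OS operation succeeds, so the old discard() path disappears. A minimal sketch of the same pattern, using the names from the diff but with a hypothetical unmap_region() helper standing in for the OS-specific call (shmdt, munmap, remove_file_mapping, ...):

    #include "services/memTracker.hpp"

    static bool unmap_region(char* base, size_t bytes);  // hypothetical OS-level unmap

    bool release_region(char* base, size_t bytes) {
      if (MemTracker::tracking_level() > NMT_minimal) {
        // Take the Tracker before the OS call so the release is recorded
        // against a consistent tracking level.
        Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
        if (unmap_region(base, bytes)) {
          tkr.record((address)base, bytes);   // record only successful releases
          return true;
        }
        return false;                         // nothing recorded; no discard() needed
      }
      // Tracking is off or minimal: perform the release without NMT bookkeeping.
      return unmap_region(base, bytes);
    }
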
     3.1 --- a/src/os/bsd/vm/perfMemory_bsd.cpp	Wed Aug 27 09:36:55 2014 +0200
     3.2 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Wed Aug 27 08:19:12 2014 -0400
     3.3 @@ -753,7 +753,7 @@
     3.4    (void)::memset((void*) mapAddress, 0, size);
     3.5  
     3.6    // it does not go through os api, the operation has to record from here
     3.7 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
     3.8 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
     3.9  
    3.10    return mapAddress;
    3.11  }
    3.12 @@ -918,7 +918,7 @@
    3.13    }
    3.14  
    3.15    // it does not go through os api, the operation has to record from here
    3.16 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
    3.17 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
    3.18  
    3.19    *addr = mapAddress;
    3.20    *sizep = size;
     4.1 --- a/src/os/linux/vm/os_linux.cpp	Wed Aug 27 09:36:55 2014 +0200
     4.2 +++ b/src/os/linux/vm/os_linux.cpp	Wed Aug 27 08:19:12 2014 -0400
     4.3 @@ -3501,9 +3501,12 @@
     4.4  
     4.5    assert(is_ptr_aligned(start, alignment), "Must be");
     4.6  
     4.7 -  // os::reserve_memory_special will record this memory area.
     4.8 -  // Need to release it here to prevent overlapping reservations.
     4.9 -  MemTracker::record_virtual_memory_release((address)start, bytes);
    4.10 +  if (MemTracker::tracking_level() > NMT_minimal) {
    4.11 +    // os::reserve_memory_special will record this memory area.
    4.12 +    // Need to release it here to prevent overlapping reservations.
    4.13 +    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    4.14 +    tkr.record((address)start, bytes);
    4.15 +  }
    4.16  
    4.17    char* end = start + bytes;
    4.18  
    4.19 @@ -3598,7 +3601,7 @@
    4.20      }
    4.21  
    4.22      // The memory is committed
    4.23 -    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
    4.24 +    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
    4.25    }
    4.26  
    4.27    return addr;
    4.28 @@ -3614,24 +3617,30 @@
    4.29  }
    4.30  
    4.31  bool os::release_memory_special(char* base, size_t bytes) {
    4.32 +  bool res;
    4.33 +  if (MemTracker::tracking_level() > NMT_minimal) {
    4.34 +    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    4.35 +    res = os::Linux::release_memory_special_impl(base, bytes);
    4.36 +    if (res) {
    4.37 +      tkr.record((address)base, bytes);
    4.38 +    }
    4.39 +
    4.40 +  } else {
    4.41 +    res = os::Linux::release_memory_special_impl(base, bytes);
    4.42 +  }
    4.43 +  return res;
    4.44 +}
    4.45 +
    4.46 +bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
    4.47    assert(UseLargePages, "only for large pages");
    4.48 -
    4.49 -  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    4.50 -
    4.51    bool res;
    4.52 +
    4.53    if (UseSHM) {
    4.54      res = os::Linux::release_memory_special_shm(base, bytes);
    4.55    } else {
    4.56      assert(UseHugeTLBFS, "must be");
    4.57      res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
    4.58    }
    4.59 -
    4.60 -  if (res) {
    4.61 -    tkr.record((address)base, bytes);
    4.62 -  } else {
    4.63 -    tkr.discard();
    4.64 -  }
    4.65 -
    4.66    return res;
    4.67  }
    4.68  
     5.1 --- a/src/os/linux/vm/os_linux.hpp	Wed Aug 27 09:36:55 2014 +0200
     5.2 +++ b/src/os/linux/vm/os_linux.hpp	Wed Aug 27 08:19:12 2014 -0400
     5.3 @@ -108,6 +108,7 @@
     5.4    static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
     5.5    static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
     5.6  
     5.7 +  static bool release_memory_special_impl(char* base, size_t bytes);
     5.8    static bool release_memory_special_shm(char* base, size_t bytes);
     5.9    static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
    5.10  
     6.1 --- a/src/os/linux/vm/perfMemory_linux.cpp	Wed Aug 27 09:36:55 2014 +0200
     6.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp	Wed Aug 27 08:19:12 2014 -0400
     6.3 @@ -753,7 +753,7 @@
     6.4    (void)::memset((void*) mapAddress, 0, size);
     6.5  
     6.6    // it does not go through os api, the operation has to record from here
     6.7 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
     6.8 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
     6.9  
    6.10    return mapAddress;
    6.11  }
    6.12 @@ -924,7 +924,7 @@
    6.13    }
    6.14  
    6.15    // it does not go through os api, the operation has to record from here
    6.16 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
    6.17 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
    6.18  
    6.19    *addr = mapAddress;
    6.20    *sizep = size;
     7.1 --- a/src/os/posix/vm/os_posix.cpp	Wed Aug 27 09:36:55 2014 +0200
     7.2 +++ b/src/os/posix/vm/os_posix.cpp	Wed Aug 27 08:19:12 2014 -0400
     7.3 @@ -74,21 +74,41 @@
     7.4    VMError::report_coredump_status(buffer, success);
     7.5  }
     7.6  
     7.7 -address os::get_caller_pc(int n) {
     7.8 +int os::get_native_stack(address* stack, int frames, int toSkip) {
     7.9  #ifdef _NMT_NOINLINE_
    7.10 -  n ++;
    7.11 +  toSkip++;
    7.12  #endif
    7.13 +
    7.14 +  int frame_idx = 0;
    7.15 +  int num_of_frames;  // number of frames captured
    7.16    frame fr = os::current_frame();
    7.17 -  while (n > 0 && fr.pc() &&
    7.18 -    !os::is_first_C_frame(&fr) && fr.sender_pc()) {
    7.19 -    fr = os::get_sender_for_C_frame(&fr);
    7.20 -    n --;
    7.21 +  while (fr.pc() && frame_idx < frames) {
    7.22 +    if (toSkip > 0) {
    7.23 +      toSkip --;
    7.24 +    } else {
    7.25 +      stack[frame_idx ++] = fr.pc();
    7.26 +    }
    7.27 +    if (fr.fp() == NULL || os::is_first_C_frame(&fr)
    7.28 +        ||fr.sender_pc() == NULL || fr.cb() != NULL) break;
    7.29 +
    7.30 +    if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
    7.31 +      fr = os::get_sender_for_C_frame(&fr);
    7.32 +    } else {
    7.33 +      break;
    7.34 +    }
    7.35    }
    7.36 -  if (n == 0) {
    7.37 -    return fr.pc();
    7.38 -  } else {
    7.39 -    return NULL;
    7.40 +  num_of_frames = frame_idx;
    7.41 +  for (; frame_idx < frames; frame_idx ++) {
    7.42 +    stack[frame_idx] = NULL;
    7.43    }
    7.44 +
    7.45 +  return num_of_frames;
    7.46 +}
    7.47 +
    7.48 +
    7.49 +bool os::unsetenv(const char* name) {
    7.50 +  assert(name != NULL, "Null pointer");
    7.51 +  return (::unsetenv(name) == 0);
    7.52  }
    7.53  
    7.54  int os::get_last_error() {
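
The os_posix.cpp hunk above replaces os::get_caller_pc(int), which returned a single PC, with os::get_native_stack(address*, int, int): it walks C frames from os::current_frame(), skips toSkip frames, stores up to frames PCs, NULL-pads the unused slots, and returns how many frames were captured. A minimal caller sketch, assuming a depth of 4 chosen only for illustration:

    address stack[4];
    int captured = os::get_native_stack(stack, 4, 0 /* toSkip */);
    for (int i = 0; i < captured; i++) {
      // stack[i] is the PC of the i-th captured native frame;
      // slots captured..3 were set to NULL by get_native_stack().
    }
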
     8.1 --- a/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Aug 27 09:36:55 2014 +0200
     8.2 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Aug 27 08:19:12 2014 -0400
     8.3 @@ -770,7 +770,8 @@
     8.4    (void)::memset((void*) mapAddress, 0, size);
     8.5  
     8.6    // it does not go through os api, the operation has to record from here
     8.7 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
     8.8 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
     8.9 +    size, CURRENT_PC, mtInternal);
    8.10  
    8.11    return mapAddress;
    8.12  }
    8.13 @@ -941,7 +942,8 @@
    8.14    }
    8.15  
    8.16    // it does not go through os api, the operation has to record from here
    8.17 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
    8.18 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
    8.19 +    size, CURRENT_PC, mtInternal);
    8.20  
    8.21    *addr = mapAddress;
    8.22    *sizep = size;
     9.1 --- a/src/os/windows/vm/os_windows.cpp	Wed Aug 27 09:36:55 2014 +0200
     9.2 +++ b/src/os/windows/vm/os_windows.cpp	Wed Aug 27 08:19:12 2014 -0400
     9.3 @@ -131,6 +131,12 @@
     9.4      case DLL_PROCESS_DETACH:
     9.5        if(ForceTimeHighResolution)
     9.6          timeEndPeriod(1L);
     9.7 +
     9.8 +       // Workaround for issue when a custom launcher doesn't call
     9.9 +       // DestroyJavaVM and NMT is trying to track memory when free is
    9.10 +       // called from a static destructor
    9.11 +       MemTracker::shutdown();
    9.12 +
    9.13        break;
    9.14      default:
    9.15        break;
    9.16 @@ -153,6 +159,10 @@
    9.17   return result > 0 && result < len;
    9.18  }
    9.19  
    9.20 +bool os::unsetenv(const char* name) {
    9.21 +  assert(name != NULL, "Null pointer");
    9.22 +  return (SetEnvironmentVariable(name, NULL) == TRUE);
    9.23 +}
    9.24  
    9.25  // No setuid programs under Windows.
    9.26  bool os::have_special_privileges() {
    9.27 @@ -311,15 +321,17 @@
    9.28   * So far, this method is only used by Native Memory Tracking, which is
    9.29   * only supported on Windows XP or later.
    9.30   */
    9.31 -address os::get_caller_pc(int n) {
    9.32 +
    9.33 +int os::get_native_stack(address* stack, int frames, int toSkip) {
    9.34  #ifdef _NMT_NOINLINE_
    9.35 -  n ++;
    9.36 +  toSkip ++;
    9.37  #endif
    9.38 -  address pc;
    9.39 -  if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
    9.40 -    return pc;
    9.41 -  }
    9.42 -  return NULL;
    9.43 +  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
    9.44 +    (PVOID*)stack, NULL);
    9.45 +  for (int index = captured; index < frames; index ++) {
    9.46 +    stack[index] = NULL;
    9.47 +  }
    9.48 +  return captured;
    9.49  }
    9.50  
    9.51  
    9.52 @@ -2904,7 +2916,7 @@
    9.53                                  PAGE_READWRITE);
    9.54    // If reservation failed, return NULL
    9.55    if (p_buf == NULL) return NULL;
    9.56 -  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
    9.57 +  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
    9.58    os::release_memory(p_buf, bytes + chunk_size);
    9.59  
    9.60    // we still need to round up to a page boundary (in case we are using large pages)
    9.61 @@ -2970,7 +2982,7 @@
    9.62          // need to create a dummy 'reserve' record to match
    9.63          // the release.
    9.64          MemTracker::record_virtual_memory_reserve((address)p_buf,
    9.65 -          bytes_to_release, mtNone, CALLER_PC);
    9.66 +          bytes_to_release, CALLER_PC);
    9.67          os::release_memory(p_buf, bytes_to_release);
    9.68        }
    9.69  #ifdef ASSERT
    9.70 @@ -2989,11 +3001,10 @@
    9.71    }
    9.72    // Although the memory is allocated individually, it is returned as one.
    9.73    // NMT records it as one block.
    9.74 -  address pc = CALLER_PC;
    9.75    if ((flags & MEM_COMMIT) != 0) {
    9.76 -    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
    9.77 +    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
    9.78    } else {
    9.79 -    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
    9.80 +    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
    9.81    }
    9.82  
    9.83    // made it this far, success
    9.84 @@ -3191,8 +3202,7 @@
    9.85      DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    9.86      char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    9.87      if (res != NULL) {
    9.88 -      address pc = CALLER_PC;
    9.89 -      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
    9.90 +      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    9.91      }
    9.92  
    9.93      return res;
    10.1 --- a/src/os/windows/vm/perfMemory_windows.cpp	Wed Aug 27 09:36:55 2014 +0200
    10.2 +++ b/src/os/windows/vm/perfMemory_windows.cpp	Wed Aug 27 08:19:12 2014 -0400
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    10.6 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -1498,7 +1498,8 @@
   10.11    (void)memset(mapAddress, '\0', size);
   10.12  
   10.13    // it does not go through os api, the operation has to record from here
   10.14 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   10.15 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
   10.16 +    size, CURRENT_PC, mtInternal);
   10.17  
   10.18    return (char*) mapAddress;
   10.19  }
   10.20 @@ -1680,7 +1681,8 @@
   10.21    }
   10.22  
   10.23    // it does not go through os api, the operation has to record from here
   10.24 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   10.25 +  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
   10.26 +    CURRENT_PC, mtInternal);
   10.27  
   10.28  
   10.29    *addrp = (char*)mapAddress;
   10.30 @@ -1834,10 +1836,14 @@
   10.31      return;
   10.32    }
   10.33  
   10.34 -  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   10.35 -  remove_file_mapping(addr);
   10.36 -  // it does not go through os api, the operation has to record from here
   10.37 -  tkr.record((address)addr, bytes);
   10.38 +  if (MemTracker::tracking_level() > NMT_minimal) {
   10.39 +    // it does not go through os api, the operation has to record from here
   10.40 +    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   10.41 +    remove_file_mapping(addr);
   10.42 +    tkr.record((address)addr, bytes);
   10.43 +  } else {
   10.44 +    remove_file_mapping(addr);
   10.45 +  }
   10.46  }
   10.47  
   10.48  char* PerfMemory::backing_store_filename() {
    11.1 --- a/src/share/vm/asm/codeBuffer.cpp	Wed Aug 27 09:36:55 2014 +0200
    11.2 +++ b/src/share/vm/asm/codeBuffer.cpp	Wed Aug 27 08:19:12 2014 -0400
    11.3 @@ -268,7 +268,7 @@
    11.4  
    11.5  GrowableArray<int>* CodeBuffer::create_patch_overflow() {
    11.6    if (_overflow_arena == NULL) {
    11.7 -    _overflow_arena = new (mtCode) Arena();
    11.8 +    _overflow_arena = new (mtCode) Arena(mtCode);
    11.9    }
   11.10    return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
   11.11  }
    12.1 --- a/src/share/vm/c1/c1_Compiler.cpp	Wed Aug 27 09:36:55 2014 +0200
    12.2 +++ b/src/share/vm/c1/c1_Compiler.cpp	Wed Aug 27 08:19:12 2014 -0400
    12.3 @@ -47,7 +47,7 @@
    12.4  
    12.5  void Compiler::init_c1_runtime() {
    12.6    BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
    12.7 -  Arena* arena = new (mtCompiler) Arena();
    12.8 +  Arena* arena = new (mtCompiler) Arena(mtCompiler);
    12.9    Runtime1::initialize(buffer_blob);
   12.10    FrameMap::initialize();
   12.11    // initialize data structures
    13.1 --- a/src/share/vm/ci/ciEnv.cpp	Wed Aug 27 09:36:55 2014 +0200
    13.2 +++ b/src/share/vm/ci/ciEnv.cpp	Wed Aug 27 08:19:12 2014 -0400
    13.3 @@ -86,7 +86,8 @@
    13.4  
    13.5  // ------------------------------------------------------------------
    13.6  // ciEnv::ciEnv
    13.7 -ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
    13.8 +ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
    13.9 +  : _ciEnv_arena(mtCompiler) {
   13.10    VM_ENTRY_MARK;
   13.11  
   13.12    // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
   13.13 @@ -139,7 +140,7 @@
   13.14    _the_min_jint_string = NULL;
   13.15  }
   13.16  
   13.17 -ciEnv::ciEnv(Arena* arena) {
   13.18 +ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
   13.19    ASSERT_IN_VM;
   13.20  
   13.21    // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
    14.1 --- a/src/share/vm/ci/ciObjectFactory.cpp	Wed Aug 27 09:36:55 2014 +0200
    14.2 +++ b/src/share/vm/ci/ciObjectFactory.cpp	Wed Aug 27 08:19:12 2014 -0400
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    14.6 + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -112,7 +112,7 @@
   14.11    // This Arena is long lived and exists in the resource mark of the
   14.12    // compiler thread that initializes the initial ciObjectFactory which
   14.13    // creates the shared ciObjects that all later ciObjectFactories use.
   14.14 -  Arena* arena = new (mtCompiler) Arena();
   14.15 +  Arena* arena = new (mtCompiler) Arena(mtCompiler);
   14.16    ciEnv initial(arena);
   14.17    ciEnv* env = ciEnv::current();
   14.18    env->_factory->init_shared_objects();
    15.1 --- a/src/share/vm/classfile/symbolTable.cpp	Wed Aug 27 09:36:55 2014 +0200
    15.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Wed Aug 27 08:19:12 2014 -0400
    15.3 @@ -74,9 +74,9 @@
    15.4  void SymbolTable::initialize_symbols(int arena_alloc_size) {
    15.5    // Initialize the arena for global symbols, size passed in depends on CDS.
    15.6    if (arena_alloc_size == 0) {
    15.7 -    _arena = new (mtSymbol) Arena();
    15.8 +    _arena = new (mtSymbol) Arena(mtSymbol);
    15.9    } else {
   15.10 -    _arena = new (mtSymbol) Arena(arena_alloc_size);
   15.11 +    _arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
   15.12    }
   15.13  }
   15.14  
    16.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Aug 27 09:36:55 2014 +0200
    16.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Aug 27 08:19:12 2014 -0400
    16.3 @@ -1,5 +1,5 @@
    16.4  /*
    16.5 - * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
    16.6 + * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
    16.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.8   *
    16.9   * This code is free software; you can redistribute it and/or modify it
   16.10 @@ -53,7 +53,8 @@
   16.11  }
   16.12  
   16.13  void ConcurrentMarkSweepPolicy::initialize_generations() {
   16.14 -  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
   16.15 +  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
   16.16 +    CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   16.17    if (_generations == NULL)
   16.18      vm_exit_during_initialization("Unable to allocate gen spec");
   16.19  
    17.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Aug 27 09:36:55 2014 +0200
    17.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Aug 27 08:19:12 2014 -0400
    17.3 @@ -288,7 +288,7 @@
    17.4    }
    17.5  
    17.6    _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
    17.7 -                        mtGC, 0, AllocFailStrategy::RETURN_NULL);
    17.8 +                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
    17.9  
   17.10    if (_fine_grain_regions == NULL) {
   17.11      vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
    18.1 --- a/src/share/vm/memory/allocation.cpp	Wed Aug 27 09:36:55 2014 +0200
    18.2 +++ b/src/share/vm/memory/allocation.cpp	Wed Aug 27 08:19:12 2014 -0400
    18.3 @@ -438,24 +438,22 @@
    18.4  }
    18.5  
    18.6  //------------------------------Arena------------------------------------------
    18.7 -NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
    18.8 -
    18.9 -Arena::Arena(size_t init_size) {
   18.10 +Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0)  {
   18.11    size_t round_size = (sizeof (char *)) - 1;
   18.12    init_size = (init_size+round_size) & ~round_size;
   18.13    _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   18.14    _hwm = _chunk->bottom();      // Save the cached hwm, max
   18.15    _max = _chunk->top();
   18.16 +  MemTracker::record_new_arena(flag);
   18.17    set_size_in_bytes(init_size);
   18.18 -  NOT_PRODUCT(Atomic::inc(&_instance_count);)
   18.19  }
   18.20  
   18.21 -Arena::Arena() {
   18.22 +Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
   18.23    _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   18.24    _hwm = _chunk->bottom();      // Save the cached hwm, max
   18.25    _max = _chunk->top();
   18.26 +  MemTracker::record_new_arena(flag);
   18.27    set_size_in_bytes(Chunk::init_size);
   18.28 -  NOT_PRODUCT(Atomic::inc(&_instance_count);)
   18.29  }
   18.30  
   18.31  Arena *Arena::move_contents(Arena *copy) {
   18.32 @@ -477,7 +475,7 @@
   18.33  
   18.34  Arena::~Arena() {
   18.35    destruct_contents();
   18.36 -  NOT_PRODUCT(Atomic::dec(&_instance_count);)
   18.37 +  MemTracker::record_arena_free(_flags);
   18.38  }
   18.39  
   18.40  void* Arena::operator new(size_t size) throw() {
   18.41 @@ -493,21 +491,21 @@
   18.42    // dynamic memory type binding
   18.43  void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
   18.44  #ifdef ASSERT
   18.45 -  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
   18.46 +  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
   18.47    if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   18.48    return p;
   18.49  #else
   18.50 -  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
   18.51 +  return (void *) AllocateHeap(size, flags, CALLER_PC);
   18.52  #endif
   18.53  }
   18.54  
   18.55  void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
   18.56  #ifdef ASSERT
   18.57 -  void* p = os::malloc(size, flags|otArena, CALLER_PC);
   18.58 +  void* p = os::malloc(size, flags, CALLER_PC);
   18.59    if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   18.60    return p;
   18.61  #else
   18.62 -  return os::malloc(size, flags|otArena, CALLER_PC);
   18.63 +  return os::malloc(size, flags, CALLER_PC);
   18.64  #endif
   18.65  }
   18.66  
   18.67 @@ -532,8 +530,9 @@
   18.68  // change the size
   18.69  void Arena::set_size_in_bytes(size_t size) {
   18.70    if (_size_in_bytes != size) {
   18.71 +    long delta = (long)(size - size_in_bytes());
   18.72      _size_in_bytes = size;
   18.73 -    MemTracker::record_arena_size((address)this, size);
   18.74 +    MemTracker::record_arena_size_change(delta, _flags);
   18.75    }
   18.76  }
   18.77  
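
The allocation.cpp hunk above replaces the debug-only Arena instance counter with MEMFLAGS-based accounting: the constructors call MemTracker::record_new_arena(), the destructor calls record_arena_free(), and set_size_in_bytes() reports a signed delta. A small illustration of the resulting bookkeeping (the 1024/4096 byte sizes are invented for the example):

    Arena* a = new (mtCompiler) Arena(mtCompiler);  // record_new_arena(mtCompiler)
    // If the arena later grows from 1024 to 4096 bytes, set_size_in_bytes(4096)
    // computes delta = +3072 and calls
    //   MemTracker::record_arena_size_change(3072, mtCompiler);
    // a shrink back to 1024 reports -3072, keeping the per-flag total balanced.
    delete a;                                       // record_arena_free(mtCompiler)
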
    19.1 --- a/src/share/vm/memory/allocation.hpp	Wed Aug 27 09:36:55 2014 +0200
    19.2 +++ b/src/share/vm/memory/allocation.hpp	Wed Aug 27 08:19:12 2014 -0400
    19.3 @@ -1,5 +1,5 @@
    19.4  /*
    19.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    19.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    19.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.8   *
    19.9   * This code is free software; you can redistribute it and/or modify it
   19.10 @@ -133,51 +133,34 @@
   19.11  
   19.12  
   19.13  /*
   19.14 - * MemoryType bitmap layout:
   19.15 - * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
   19.16 - * |      memory type        |   object    | reserved    |
   19.17 - * |                         |     type    |             |
   19.18 + * Memory types
   19.19   */
   19.20  enum MemoryType {
   19.21    // Memory type by sub systems. It occupies lower byte.
   19.22 -  mtNone              = 0x0000,  // undefined
   19.23 -  mtClass             = 0x0100,  // memory class for Java classes
   19.24 -  mtThread            = 0x0200,  // memory for thread objects
   19.25 -  mtThreadStack       = 0x0300,
   19.26 -  mtCode              = 0x0400,  // memory for generated code
   19.27 -  mtGC                = 0x0500,  // memory for GC
   19.28 -  mtCompiler          = 0x0600,  // memory for compiler
   19.29 -  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
   19.30 +  mtJavaHeap          = 0x00,  // Java heap
   19.31 +  mtClass             = 0x01,  // memory class for Java classes
   19.32 +  mtThread            = 0x02,  // memory for thread objects
   19.33 +  mtThreadStack       = 0x03,
   19.34 +  mtCode              = 0x04,  // memory for generated code
   19.35 +  mtGC                = 0x05,  // memory for GC
   19.36 +  mtCompiler          = 0x06,  // memory for compiler
   19.37 +  mtInternal          = 0x07,  // memory used by VM, but does not belong to
   19.38                                   // any of above categories, and not used for
   19.39                                   // native memory tracking
   19.40 -  mtOther             = 0x0800,  // memory not used by VM
   19.41 -  mtSymbol            = 0x0900,  // symbol
   19.42 -  mtNMT               = 0x0A00,  // memory used by native memory tracking
   19.43 -  mtChunk             = 0x0B00,  // chunk that holds content of arenas
   19.44 -  mtJavaHeap          = 0x0C00,  // Java heap
   19.45 -  mtClassShared       = 0x0D00,  // class data sharing
   19.46 -  mtTest              = 0x0E00,  // Test type for verifying NMT
   19.47 -  mtTracing           = 0x0F00,  // memory used for Tracing
   19.48 -  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
   19.49 +  mtOther             = 0x08,  // memory not used by VM
   19.50 +  mtSymbol            = 0x09,  // symbol
   19.51 +  mtNMT               = 0x0A,  // memory used by native memory tracking
   19.52 +  mtClassShared       = 0x0B,  // class data sharing
   19.53 +  mtChunk             = 0x0C,  // chunk that holds content of arenas
   19.54 +  mtTest              = 0x0D,  // Test type for verifying NMT
   19.55 +  mtTracing           = 0x0E,  // memory used for Tracing
   19.56 +  mtNone              = 0x0F,  // undefined
   19.57 +  mt_number_of_types  = 0x10   // number of memory types (mtDontTrack
   19.58                                   // is not included as validate type)
   19.59 -  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
   19.60 -  mt_masks            = 0x7F00,
   19.61 -
   19.62 -  // object type mask
   19.63 -  otArena             = 0x0010, // an arena object
   19.64 -  otNMTRecorder       = 0x0020, // memory recorder object
   19.65 -  ot_masks            = 0x00F0
   19.66  };
   19.67  
   19.68 -#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
   19.69 -#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
   19.70 -#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
   19.71 +typedef MemoryType MEMFLAGS;
   19.72  
   19.73 -#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
   19.74 -#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
   19.75 -#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
   19.76 -
   19.77 -typedef unsigned short MEMFLAGS;
   19.78  
   19.79  #if INCLUDE_NMT
   19.80  
   19.81 @@ -189,27 +172,23 @@
   19.82  
   19.83  #endif // INCLUDE_NMT
   19.84  
   19.85 -// debug build does not inline
   19.86 -#if defined(_NMT_NOINLINE_)
   19.87 -  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
   19.88 -  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
   19.89 -  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
   19.90 -#else
   19.91 -  #define CURRENT_PC      (NMT_track_callsite? os::get_caller_pc(0) : 0)
   19.92 -  #define CALLER_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
   19.93 -  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
   19.94 -#endif
   19.95 -
   19.96 +class NativeCallStack;
   19.97  
   19.98  
   19.99  template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  19.100   public:
  19.101 -  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
  19.102 +  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
  19.103 +  _NOINLINE_ void* operator new(size_t size) throw();
  19.104    _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
  19.105 -                               address caller_pc = 0) throw();
  19.106 -  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
  19.107 +                               const NativeCallStack& stack) throw();
  19.108 +  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
  19.109 +                               throw();
  19.110 +  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
  19.111 +  _NOINLINE_ void* operator new [](size_t size) throw();
  19.112    _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
  19.113 -                               address caller_pc = 0) throw();
  19.114 +                               const NativeCallStack& stack) throw();
  19.115 +  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
  19.116 +                               throw();
  19.117    void  operator delete(void* p);
  19.118    void  operator delete [] (void* p);
  19.119  };
  19.120 @@ -384,13 +363,15 @@
  19.121  
  19.122  //------------------------------Arena------------------------------------------
  19.123  // Fast allocation of memory
  19.124 -class Arena : public CHeapObj<mtNone|otArena> {
  19.125 +class Arena : public CHeapObj<mtNone> {
  19.126  protected:
  19.127    friend class ResourceMark;
  19.128    friend class HandleMark;
  19.129    friend class NoHandleMark;
  19.130    friend class VMStructs;
  19.131  
  19.132 +  MEMFLAGS    _flags;           // Memory tracking flags
  19.133 +
  19.134    Chunk *_first;                // First chunk
  19.135    Chunk *_chunk;                // current chunk
  19.136    char *_hwm, *_max;            // High water mark and max in current chunk
  19.137 @@ -418,8 +399,8 @@
  19.138   }
  19.139  
  19.140   public:
  19.141 -  Arena();
  19.142 -  Arena(size_t init_size);
  19.143 +  Arena(MEMFLAGS memflag);
  19.144 +  Arena(MEMFLAGS memflag, size_t init_size);
  19.145    ~Arena();
  19.146    void  destruct_contents();
  19.147    char* hwm() const             { return _hwm; }
  19.148 @@ -518,8 +499,6 @@
  19.149    static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
  19.150    static void free_all(char** start, char** end)                                     PRODUCT_RETURN;
  19.151  
  19.152 -  // how many arena instances
  19.153 -  NOT_PRODUCT(static volatile jint _instance_count;)
  19.154  private:
  19.155    // Reset this Arena to empty, access will trigger grow if necessary
  19.156    void   reset(void) {
  19.157 @@ -681,7 +660,7 @@
  19.158    NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
  19.159  
  19.160  #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  19.161 -  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
  19.162 +  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)
  19.163  
  19.164  #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  19.165    (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
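
The CHeapObj operators declared above now come in pairs: a plain form that captures CALLER_PC internally, and a form taking an explicit NativeCallStack so a caller can attribute the allocation to a stack captured elsewhere (the NativeCallStack(&pc, 1) constructor is used the same way in the whitebox.cpp hunk further down). A hedged sketch, with the SampleEntry class and the pc value invented for illustration:

    class SampleEntry : public CHeapObj<mtNMT> {
      int _value;
    };

    SampleEntry* e1 = new SampleEntry();          // callsite recorded via CALLER_PC

    address pc = (address)0x1234;                 // illustrative PC only
    NativeCallStack stack(&pc, 1);                // one-frame pseudo call stack
    SampleEntry* e2 = new (stack) SampleEntry();  // recorded against 'stack'
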
    20.1 --- a/src/share/vm/memory/allocation.inline.hpp	Wed Aug 27 09:36:55 2014 +0200
    20.2 +++ b/src/share/vm/memory/allocation.inline.hpp	Wed Aug 27 08:19:12 2014 -0400
    20.3 @@ -1,5 +1,5 @@
    20.4  /*
    20.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    20.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    20.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.8   *
    20.9   * This code is free software; you can redistribute it and/or modify it
   20.10 @@ -27,6 +27,7 @@
   20.11  
   20.12  #include "runtime/atomic.inline.hpp"
   20.13  #include "runtime/os.hpp"
   20.14 +#include "services/memTracker.hpp"
   20.15  
   20.16  // Explicit C-heap memory management
   20.17  
   20.18 @@ -49,12 +50,10 @@
   20.19  #endif
   20.20  
   20.21  // allocate using malloc; will fail if no memory available
   20.22 -inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
   20.23 +inline char* AllocateHeap(size_t size, MEMFLAGS flags,
   20.24 +    const NativeCallStack& stack,
   20.25      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
   20.26 -  if (pc == 0) {
   20.27 -    pc = CURRENT_PC;
   20.28 -  }
   20.29 -  char* p = (char*) os::malloc(size, flags, pc);
   20.30 +  char* p = (char*) os::malloc(size, flags, stack);
   20.31    #ifdef ASSERT
   20.32    if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
   20.33    #endif
   20.34 @@ -63,10 +62,14 @@
   20.35    }
   20.36    return p;
   20.37  }
   20.38 +inline char* AllocateHeap(size_t size, MEMFLAGS flags,
   20.39 +    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
   20.40 +  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
   20.41 +}
   20.42  
   20.43 -inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
   20.44 +inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
   20.45      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
   20.46 -  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
   20.47 +  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
   20.48    #ifdef ASSERT
   20.49    if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
   20.50    #endif
   20.51 @@ -85,8 +88,22 @@
   20.52  
   20.53  
   20.54  template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
   20.55 -      address caller_pc) throw() {
   20.56 -    void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
   20.57 +      const NativeCallStack& stack) throw() {
   20.58 +  void* p = (void*)AllocateHeap(size, F, stack);
   20.59 +#ifdef ASSERT
   20.60 +  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
   20.61 +#endif
   20.62 +  return p;
   20.63 +}
   20.64 +
   20.65 +template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
   20.66 +  return CHeapObj<F>::operator new(size, CALLER_PC);
   20.67 +}
   20.68 +
   20.69 +template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
   20.70 +  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
   20.71 +  void* p = (void*)AllocateHeap(size, F, stack,
   20.72 +      AllocFailStrategy::RETURN_NULL);
   20.73  #ifdef ASSERT
   20.74      if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
   20.75  #endif
   20.76 @@ -94,23 +111,28 @@
   20.77    }
   20.78  
   20.79  template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
   20.80 -  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
   20.81 -  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
   20.82 -      AllocFailStrategy::RETURN_NULL);
   20.83 -#ifdef ASSERT
   20.84 -    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
   20.85 -#endif
   20.86 -    return p;
   20.87 +  const std::nothrow_t& nothrow_constant) throw() {
   20.88 +  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
   20.89  }
   20.90  
   20.91  template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
   20.92 -      address caller_pc) throw() {
   20.93 -    return CHeapObj<F>::operator new(size, caller_pc);
   20.94 +      const NativeCallStack& stack) throw() {
   20.95 +  return CHeapObj<F>::operator new(size, stack);
   20.96 +}
   20.97 +
   20.98 +template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
   20.99 +  throw() {
  20.100 +  return CHeapObj<F>::operator new(size, CALLER_PC);
  20.101  }
  20.102  
  20.103  template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  20.104 -  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
  20.105 -    return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
  20.106 +  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
  20.107 +  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
  20.108 +}
  20.109 +
  20.110 +template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  20.111 +  const std::nothrow_t& nothrow_constant) throw() {
  20.112 +  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
  20.113  }
  20.114  
  20.115  template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
    21.1 --- a/src/share/vm/memory/cardTableRS.cpp	Wed Aug 27 09:36:55 2014 +0200
    21.2 +++ b/src/share/vm/memory/cardTableRS.cpp	Wed Aug 27 08:19:12 2014 -0400
    21.3 @@ -56,7 +56,7 @@
    21.4    _ct_bs->initialize();
    21.5    set_bs(_ct_bs);
    21.6    _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
    21.7 -                         mtGC, 0, AllocFailStrategy::RETURN_NULL);
    21.8 +                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
    21.9    if (_last_cur_val_in_gen == NULL) {
   21.10      vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
   21.11    }
    22.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Wed Aug 27 09:36:55 2014 +0200
    22.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Aug 27 08:19:12 2014 -0400
    22.3 @@ -969,7 +969,8 @@
    22.4  }
    22.5  
    22.6  void MarkSweepPolicy::initialize_generations() {
    22.7 -  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
    22.8 +  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
    22.9 +    AllocFailStrategy::RETURN_NULL);
   22.10    if (_generations == NULL) {
   22.11      vm_exit_during_initialization("Unable to allocate gen spec");
   22.12    }
    23.1 --- a/src/share/vm/memory/heapInspection.cpp	Wed Aug 27 09:36:55 2014 +0200
    23.2 +++ b/src/share/vm/memory/heapInspection.cpp	Wed Aug 27 08:19:12 2014 -0400
    23.3 @@ -135,7 +135,7 @@
    23.4    _ref = (HeapWord*) Universe::boolArrayKlassObj();
    23.5    _buckets =
    23.6      (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
    23.7 -                                            mtInternal, 0, AllocFailStrategy::RETURN_NULL);
    23.8 +       mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
    23.9    if (_buckets != NULL) {
   23.10      _size = _num_buckets;
   23.11      for (int index = 0; index < _size; index++) {
    24.1 --- a/src/share/vm/memory/memRegion.cpp	Wed Aug 27 09:36:55 2014 +0200
    24.2 +++ b/src/share/vm/memory/memRegion.cpp	Wed Aug 27 08:19:12 2014 -0400
    24.3 @@ -1,5 +1,5 @@
    24.4  /*
    24.5 - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    24.6 + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
    24.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    24.8   *
    24.9   * This code is free software; you can redistribute it and/or modify it
   24.10 @@ -103,11 +103,13 @@
   24.11  }
   24.12  
   24.13  void* MemRegion::operator new(size_t size) throw() {
   24.14 -  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
   24.15 +  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
   24.16 +    AllocFailStrategy::RETURN_NULL);
   24.17  }
   24.18  
   24.19  void* MemRegion::operator new [](size_t size) throw() {
   24.20 -  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
   24.21 +  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
   24.22 +    AllocFailStrategy::RETURN_NULL);
   24.23  }
   24.24  void  MemRegion::operator delete(void* p) {
   24.25    FreeHeap(p, mtGC);
    25.1 --- a/src/share/vm/memory/resourceArea.hpp	Wed Aug 27 09:36:55 2014 +0200
    25.2 +++ b/src/share/vm/memory/resourceArea.hpp	Wed Aug 27 08:19:12 2014 -0400
    25.3 @@ -1,5 +1,5 @@
    25.4  /*
    25.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    25.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    25.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8   *
    25.9   * This code is free software; you can redistribute it and/or modify it
   25.10 @@ -49,11 +49,11 @@
   25.11    debug_only(static int _warned;)       // to suppress multiple warnings
   25.12  
   25.13  public:
   25.14 -  ResourceArea() {
   25.15 +  ResourceArea() : Arena(mtThread) {
   25.16      debug_only(_nesting = 0;)
   25.17    }
   25.18  
   25.19 -  ResourceArea(size_t init_size) : Arena(init_size) {
   25.20 +  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
   25.21      debug_only(_nesting = 0;);
   25.22    }
   25.23  
   25.24 @@ -64,7 +64,7 @@
   25.25      if (UseMallocOnly) {
   25.26        // use malloc, but save pointer in res. area for later freeing
   25.27        char** save = (char**)internal_malloc_4(sizeof(char*));
   25.28 -      return (*save = (char*)os::malloc(size, mtThread));
   25.29 +      return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
   25.30      }
   25.31  #endif
   25.32      return (char*)Amalloc(size, alloc_failmode);
    26.1 --- a/src/share/vm/opto/compile.cpp	Wed Aug 27 09:36:55 2014 +0200
    26.2 +++ b/src/share/vm/opto/compile.cpp	Wed Aug 27 08:19:12 2014 -0400
    26.3 @@ -665,6 +665,10 @@
    26.4                    _printer(IdealGraphPrinter::printer()),
    26.5  #endif
    26.6                    _congraph(NULL),
    26.7 +                  _comp_arena(mtCompiler),
    26.8 +                  _node_arena(mtCompiler),
    26.9 +                  _old_arena(mtCompiler),
   26.10 +                  _Compile_types(mtCompiler),
   26.11                    _replay_inline_data(NULL),
   26.12                    _late_inlines(comp_arena(), 2, 0, NULL),
   26.13                    _string_late_inlines(comp_arena(), 2, 0, NULL),
   26.14 @@ -972,6 +976,10 @@
   26.15      _in_dump_cnt(0),
   26.16      _printer(NULL),
   26.17  #endif
   26.18 +    _comp_arena(mtCompiler),
   26.19 +    _node_arena(mtCompiler),
   26.20 +    _old_arena(mtCompiler),
   26.21 +    _Compile_types(mtCompiler),
   26.22      _dead_node_list(comp_arena()),
   26.23      _dead_node_count(0),
   26.24      _congraph(NULL),
    27.1 --- a/src/share/vm/opto/type.cpp	Wed Aug 27 09:36:55 2014 +0200
    27.2 +++ b/src/share/vm/opto/type.cpp	Wed Aug 27 08:19:12 2014 -0400
    27.3 @@ -265,7 +265,7 @@
    27.4    // locking.
    27.5  
    27.6    Arena* save = current->type_arena();
    27.7 -  Arena* shared_type_arena = new (mtCompiler)Arena();
    27.8 +  Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);
    27.9  
   27.10    current->set_type_arena(shared_type_arena);
   27.11    _shared_type_dict =
    28.1 --- a/src/share/vm/precompiled/precompiled.hpp	Wed Aug 27 09:36:55 2014 +0200
    28.2 +++ b/src/share/vm/precompiled/precompiled.hpp	Wed Aug 27 08:19:12 2014 -0400
    28.3 @@ -1,5 +1,5 @@
    28.4  /*
    28.5 - * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
    28.6 + * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
    28.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.8   *
    28.9   * This code is free software; you can redistribute it and/or modify it
   28.10 @@ -220,10 +220,17 @@
   28.11  # include "runtime/vmThread.hpp"
   28.12  # include "runtime/vm_operations.hpp"
   28.13  # include "runtime/vm_version.hpp"
   28.14 +# include "services/allocationSite.hpp"
   28.15  # include "services/lowMemoryDetector.hpp"
   28.16 +# include "services/mallocTracker.hpp"
   28.17 +# include "services/memBaseline.hpp"
   28.18  # include "services/memoryPool.hpp"
   28.19  # include "services/memoryService.hpp"
   28.20  # include "services/memoryUsage.hpp"
   28.21 +# include "services/memReporter.hpp"
   28.22 +# include "services/memTracker.hpp"
   28.23 +# include "services/nmtCommon.hpp"
   28.24 +# include "services/virtualMemoryTracker.hpp"
   28.25  # include "utilities/accessFlags.hpp"
   28.26  # include "utilities/array.hpp"
   28.27  # include "utilities/bitMap.hpp"
   28.28 @@ -237,6 +244,7 @@
   28.29  # include "utilities/hashtable.hpp"
   28.30  # include "utilities/histogram.hpp"
   28.31  # include "utilities/macros.hpp"
   28.32 +# include "utilities/nativeCallStack.hpp"
   28.33  # include "utilities/numberSeq.hpp"
   28.34  # include "utilities/ostream.hpp"
   28.35  # include "utilities/preserveException.hpp"
    29.1 --- a/src/share/vm/prims/jni.cpp	Wed Aug 27 09:36:55 2014 +0200
    29.2 +++ b/src/share/vm/prims/jni.cpp	Wed Aug 27 08:19:12 2014 -0400
    29.3 @@ -73,6 +73,7 @@
    29.4  #include "runtime/signature.hpp"
    29.5  #include "runtime/thread.inline.hpp"
    29.6  #include "runtime/vm_operations.hpp"
    29.7 +#include "services/memTracker.hpp"
    29.8  #include "services/runtimeService.hpp"
    29.9  #include "trace/tracing.hpp"
   29.10  #include "utilities/defaultStream.hpp"
   29.11 @@ -3582,6 +3583,7 @@
   29.12      if (bad_address != NULL) {
   29.13        os::protect_memory(bad_address, size, os::MEM_PROT_READ,
   29.14                           /*is_committed*/false);
   29.15 +      MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
   29.16      }
   29.17    }
   29.18    return bad_address;
   29.19 @@ -5077,6 +5079,7 @@
   29.20  void TestVirtualSpaceNode_test();
   29.21  void TestNewSize_test();
   29.22  void TestKlass_test();
   29.23 +void Test_linked_list();
   29.24  #if INCLUDE_ALL_GCS
   29.25  void TestOldFreeSpaceCalculation_test();
   29.26  void TestG1BiasedArray_test();
   29.27 @@ -5104,6 +5107,7 @@
   29.28      run_unit_test(test_loggc_filename());
   29.29      run_unit_test(TestNewSize_test());
   29.30      run_unit_test(TestKlass_test());
   29.31 +    run_unit_test(Test_linked_list());
   29.32  #if INCLUDE_VM_STRUCTS
   29.33      run_unit_test(VMStructs::test());
   29.34  #endif
    30.1 --- a/src/share/vm/prims/whitebox.cpp	Wed Aug 27 09:36:55 2014 +0200
    30.2 +++ b/src/share/vm/prims/whitebox.cpp	Wed Aug 27 08:19:12 2014 -0400
    30.3 @@ -49,8 +49,10 @@
    30.4  #include "gc_implementation/g1/heapRegionRemSet.hpp"
    30.5  #endif // INCLUDE_ALL_GCS
    30.6  
    30.7 -#ifdef INCLUDE_NMT
    30.8 +#if INCLUDE_NMT
    30.9 +#include "services/mallocSiteTable.hpp"
   30.10  #include "services/memTracker.hpp"
   30.11 +#include "utilities/nativeCallStack.hpp"
   30.12  #endif // INCLUDE_NMT
   30.13  
   30.14  #include "compiler/compileBroker.hpp"
   30.15 @@ -276,12 +278,16 @@
   30.16  // NMT picks it up correctly
   30.17  WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
   30.18    jlong addr = 0;
    30.19 +  addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
   30.20 +  return addr;
   30.21 +WB_END
   30.22  
   30.23 -  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
   30.24 -    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
   30.25 -  }
   30.26 -
   30.27 -  return addr;
    30.28 +// Allocate memory with a pseudo call stack. The test can create pseudo malloc
    30.29 +// allocation sites to stress the malloc tracking.
   30.30 +WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
   30.31 +  address pc = (address)(size_t)pseudo_stack;
   30.32 +  NativeCallStack stack(&pc, 1);
   30.33 +  return (jlong)os::malloc(size, mtTest, stack);
   30.34  WB_END
   30.35  
   30.36  // Free the memory allocated by NMTAllocTest
   30.37 @@ -292,10 +298,8 @@
   30.38  WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
   30.39    jlong addr = 0;
   30.40  
   30.41 -  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
   30.42      addr = (jlong)(uintptr_t)os::reserve_memory(size);
   30.43      MemTracker::record_virtual_memory_type((address)addr, mtTest);
   30.44 -  }
   30.45  
   30.46    return addr;
   30.47  WB_END
   30.48 @@ -314,20 +318,20 @@
   30.49    os::release_memory((char *)(uintptr_t)addr, size);
   30.50  WB_END
   30.51  
   30.52 -// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
   30.53 -WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
   30.54 -
   30.55 -  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
   30.56 -    return false;
   30.57 -  }
   30.58 -
   30.59 -  return MemTracker::wbtest_wait_for_data_merge();
   30.60 +WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
   30.61 +  return MemTracker::tracking_level() == NMT_detail;
   30.62  WB_END
   30.63  
   30.64 -WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
   30.65 -  return MemTracker::tracking_level() == MemTracker::NMT_detail;
   30.66 +WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
   30.67 +  address pc = (address)1;
   30.68 +  for (jlong index = 0; index < num; index ++) {
   30.69 +    NativeCallStack stack(&pc, 1);
   30.70 +    os::malloc(0, mtTest, stack);
   30.71 +    pc += MallocSiteTable::hash_buckets();
   30.72 +  }
   30.73  WB_END
   30.74  
   30.75 +
   30.76  #endif // INCLUDE_NMT
   30.77  
   30.78  static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
   30.79 @@ -861,12 +865,13 @@
   30.80  #endif // INCLUDE_ALL_GCS
   30.81  #if INCLUDE_NMT
   30.82    {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
   30.83 +  {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
   30.84    {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
   30.85    {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
   30.86    {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
   30.87    {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   30.88    {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
   30.89 -  {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
   30.90 +  {CC"NMTOverflowHashBucket", CC"(J)V",               (void*)&WB_NMTOverflowHashBucket},
   30.91    {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
   30.92  #endif // INCLUDE_NMT
   30.93    {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
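
For reference on WB_NMTOverflowHashBucket above: it steps the fake pc by MallocSiteTable::hash_buckets() on each iteration, so, assuming a one-frame NativeCallStack hashes to essentially its pc value and hash_to_index() is a simple modulo over the bucket count (neither is shown in this hunk), every synthesized call site maps to the same bucket and that bucket's chain keeps growing. A minimal standalone sketch of that arithmetic:

    #include <cstdio>

    int main() {
      const unsigned long buckets = 511;   // stand-in for MallocSiteTable::hash_buckets()
      unsigned long pc = 1;                // same fake starting pc the whitebox helper uses
      for (int i = 0; i < 5; i++) {
        std::printf("pc=%lu -> bucket %lu\n", pc, pc % buckets);
        pc += buckets;                     // stepping by the bucket count leaves pc % buckets unchanged
      }
      return 0;
    }
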
    31.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Aug 27 09:36:55 2014 +0200
    31.2 +++ b/src/share/vm/runtime/arguments.cpp	Wed Aug 27 08:19:12 2014 -0400
    31.3 @@ -294,6 +294,7 @@
    31.4    { "UseMPSS",                       JDK_Version::jdk(8), JDK_Version::jdk(9) },
    31.5    { "UseStringCache",                JDK_Version::jdk(8), JDK_Version::jdk(9) },
    31.6    { "UseOldInlining",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
    31.7 +  { "AutoShutdownNMT",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
    31.8  #ifdef PRODUCT
    31.9    { "DesiredMethodLimit",
   31.10                             JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
   31.11 @@ -2343,7 +2344,7 @@
   31.12  
   31.13    if (PrintNMTStatistics) {
   31.14  #if INCLUDE_NMT
   31.15 -    if (MemTracker::tracking_level() == MemTracker::NMT_off) {
   31.16 +    if (MemTracker::tracking_level() == NMT_off) {
   31.17  #endif // INCLUDE_NMT
   31.18        warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
   31.19        PrintNMTStatistics = false;
   31.20 @@ -3533,15 +3534,24 @@
   31.21        CommandLineFlags::printFlags(tty, false);
   31.22        vm_exit(0);
   31.23      }
   31.24 +#if INCLUDE_NMT
   31.25      if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
   31.26 -#if INCLUDE_NMT
   31.27 -      MemTracker::init_tracking_options(tail);
   31.28 -#else
   31.29 -      jio_fprintf(defaultStream::error_stream(),
   31.30 -        "Native Memory Tracking is not supported in this VM\n");
   31.31 -      return JNI_ERR;
    31.32 +      // The launcher did not set up the NMT environment variable properly.
   31.33 +//      if (!MemTracker::check_launcher_nmt_support(tail)) {
   31.34 +//        warning("Native Memory Tracking did not setup properly, using wrong launcher?");
   31.35 +//      }
   31.36 +
    31.37 +      // Verify that the NMT option is valid.
   31.38 +      if (MemTracker::verify_nmt_option()) {
   31.39 +        // Late initialization, still in single-threaded mode.
   31.40 +        if (MemTracker::tracking_level() >= NMT_summary) {
   31.41 +          MemTracker::init();
   31.42 +        }
   31.43 +      } else {
   31.44 +        vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
   31.45 +      }
   31.46 +    }
   31.47  #endif
   31.48 -    }
   31.49  
   31.50  
   31.51  #ifndef PRODUCT
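
The hunk above now validates and initializes NMT while arguments are still parsed in single-threaded mode; the accepted values are the ones listed in the error message. A typical invocation (the application name and pid are placeholders) might look like:

    java -XX:NativeMemoryTracking=summary \
         -XX:+UnlockDiagnosticVMOptions -XX:+PrintNMTStatistics MyApp
    jcmd <pid> VM.native_memory summary
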
    32.1 --- a/src/share/vm/runtime/globals.hpp	Wed Aug 27 09:36:55 2014 +0200
    32.2 +++ b/src/share/vm/runtime/globals.hpp	Wed Aug 27 08:19:12 2014 -0400
    32.3 @@ -943,11 +943,6 @@
    32.4    diagnostic(bool, PrintNMTStatistics, false,                               \
    32.5            "Print native memory tracking summary data if it is on")          \
    32.6                                                                              \
    32.7 -  diagnostic(bool, AutoShutdownNMT, true,                                   \
    32.8 -          "Automatically shutdown native memory tracking under stress "     \
    32.9 -          "situations. When set to false, native memory tracking tries to " \
   32.10 -          "stay alive at the expense of JVM performance")                   \
   32.11 -                                                                            \
   32.12    diagnostic(bool, LogCompilation, false,                                   \
   32.13            "Log compilation activity in detail to LogFile")                  \
   32.14                                                                              \
    33.1 --- a/src/share/vm/runtime/handles.hpp	Wed Aug 27 09:36:55 2014 +0200
    33.2 +++ b/src/share/vm/runtime/handles.hpp	Wed Aug 27 08:19:12 2014 -0400
    33.3 @@ -1,5 +1,5 @@
    33.4  /*
    33.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    33.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    33.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    33.8   *
    33.9   * This code is free software; you can redistribute it and/or modify it
   33.10 @@ -227,7 +227,7 @@
   33.11    HandleArea* _prev;          // link to outer (older) area
   33.12   public:
   33.13    // Constructor
   33.14 -  HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
   33.15 +  HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
   33.16      debug_only(_handle_mark_nesting    = 0);
   33.17      debug_only(_no_handle_mark_nesting = 0);
   33.18      _prev = prev;
    34.1 --- a/src/share/vm/runtime/init.cpp	Wed Aug 27 09:36:55 2014 +0200
    34.2 +++ b/src/share/vm/runtime/init.cpp	Wed Aug 27 08:19:12 2014 -0400
    34.3 @@ -1,5 +1,5 @@
    34.4  /*
    34.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    34.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    34.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.8   *
    34.9   * This code is free software; you can redistribute it and/or modify it
   34.10 @@ -34,8 +34,10 @@
   34.11  #include "runtime/init.hpp"
   34.12  #include "runtime/safepoint.hpp"
   34.13  #include "runtime/sharedRuntime.hpp"
   34.14 +#include "services/memTracker.hpp"
   34.15  #include "utilities/macros.hpp"
   34.16  
   34.17 +
   34.18  // Initialization done by VM thread in vm_init_globals()
   34.19  void check_ThreadShadow();
   34.20  void eventlog_init();
   34.21 @@ -131,6 +133,12 @@
   34.22    javaClasses_init();   // must happen after vtable initialization
   34.23    stubRoutines_init2(); // note: StubRoutines need 2-phase init
   34.24  
   34.25 +#if INCLUDE_NMT
    34.26 +  // The Solaris stack is walkable only after stubRoutines are set up.
    34.27 +  // On other platforms, the stack is always walkable.
   34.28 +  NMT_stack_walkable = true;
   34.29 +#endif // INCLUDE_NMT
   34.30 +
   34.31    // All the flags that get adjusted by VM_Version_init and os::init_2
   34.32    // have been set so dump the flags now.
   34.33    if (PrintFlagsFinal) {
    35.1 --- a/src/share/vm/runtime/java.cpp	Wed Aug 27 09:36:55 2014 +0200
    35.2 +++ b/src/share/vm/runtime/java.cpp	Wed Aug 27 08:19:12 2014 -0400
    35.3 @@ -57,7 +57,6 @@
    35.4  #include "runtime/thread.inline.hpp"
    35.5  #include "runtime/timer.hpp"
    35.6  #include "runtime/vm_operations.hpp"
    35.7 -#include "services/memReporter.hpp"
    35.8  #include "services/memTracker.hpp"
    35.9  #include "trace/tracing.hpp"
   35.10  #include "utilities/dtrace.hpp"
   35.11 @@ -364,12 +363,7 @@
   35.12  #endif // ENABLE_ZAP_DEAD_LOCALS
   35.13    // Native memory tracking data
   35.14    if (PrintNMTStatistics) {
   35.15 -    if (MemTracker::is_on()) {
   35.16 -      BaselineTTYOutputer outputer(tty);
   35.17 -      MemTracker::print_memory_usage(outputer, K, false);
   35.18 -    } else {
   35.19 -      tty->print_cr("%s", MemTracker::reason());
   35.20 -    }
   35.21 +    MemTracker::final_report(tty);
   35.22    }
   35.23  }
   35.24  
   35.25 @@ -401,12 +395,7 @@
   35.26  
   35.27    // Native memory tracking data
   35.28    if (PrintNMTStatistics) {
   35.29 -    if (MemTracker::is_on()) {
   35.30 -      BaselineTTYOutputer outputer(tty);
   35.31 -      MemTracker::print_memory_usage(outputer, K, false);
   35.32 -    } else {
   35.33 -      tty->print_cr("%s", MemTracker::reason());
   35.34 -    }
   35.35 +    MemTracker::final_report(tty);
   35.36    }
   35.37  }
   35.38  
   35.39 @@ -555,10 +544,6 @@
   35.40      BeforeExit_lock->notify_all();
   35.41    }
   35.42  
   35.43 -  // Shutdown NMT before exit. Otherwise,
   35.44 -  // it will run into trouble when system destroys static variables.
   35.45 -  MemTracker::shutdown(MemTracker::NMT_normal);
   35.46 -
   35.47    if (VerifyStringTableAtExit) {
   35.48      int fail_cnt = 0;
   35.49      {
    36.1 --- a/src/share/vm/runtime/os.cpp	Wed Aug 27 09:36:55 2014 +0200
    36.2 +++ b/src/share/vm/runtime/os.cpp	Wed Aug 27 08:19:12 2014 -0400
    36.3 @@ -49,6 +49,7 @@
    36.4  #include "runtime/stubRoutines.hpp"
    36.5  #include "runtime/thread.inline.hpp"
    36.6  #include "services/attachListener.hpp"
    36.7 +#include "services/nmtCommon.hpp"
    36.8  #include "services/memTracker.hpp"
    36.9  #include "services/threadService.hpp"
   36.10  #include "utilities/defaultStream.hpp"
   36.11 @@ -561,7 +562,11 @@
   36.12    return ptr;
   36.13  }
   36.14  
   36.15 -void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
   36.16 +void* os::malloc(size_t size, MEMFLAGS flags) {
   36.17 +  return os::malloc(size, flags, CALLER_PC);
   36.18 +}
   36.19 +
   36.20 +void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
   36.21    NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   36.22    NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   36.23  
   36.24 @@ -587,11 +592,15 @@
   36.25      size = 1;
   36.26    }
   36.27  
   36.28 +  // NMT support
   36.29 +  NMT_TrackingLevel level = MemTracker::tracking_level();
   36.30 +  size_t            nmt_header_size = MemTracker::malloc_header_size(level);
   36.31 +
   36.32  #ifndef ASSERT
   36.33 -  const size_t alloc_size = size;
   36.34 +  const size_t alloc_size = size + nmt_header_size;
   36.35  #else
   36.36 -  const size_t alloc_size = GuardedMemory::get_total_size(size);
   36.37 -  if (size > alloc_size) { // Check for rollover.
   36.38 +  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
   36.39 +  if (size + nmt_header_size > alloc_size) { // Check for rollover.
   36.40      return NULL;
   36.41    }
   36.42  #endif
   36.43 @@ -610,7 +619,7 @@
   36.44      return NULL;
   36.45    }
   36.46    // Wrap memory with guard
   36.47 -  GuardedMemory guarded(ptr, size);
   36.48 +  GuardedMemory guarded(ptr, size + nmt_header_size);
   36.49    ptr = guarded.get_user_ptr();
   36.50  #endif
   36.51    if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
   36.52 @@ -623,48 +632,50 @@
   36.53    }
   36.54  
   36.55    // we do not track guard memory
   36.56 -  MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);
   36.57 -
   36.58 -  return ptr;
   36.59 +  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
   36.60  }
   36.61  
   36.62 +void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
   36.63 +  return os::realloc(memblock, size, flags, CALLER_PC);
   36.64 +}
   36.65  
   36.66 -void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
   36.67 +void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
   36.68  #ifndef ASSERT
   36.69    NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   36.70    NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   36.71 -  MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
   36.72 -  void* ptr = ::realloc(memblock, size);
   36.73 -  if (ptr != NULL) {
   36.74 -    tkr.record((address)memblock, (address)ptr, size, memflags,
   36.75 -     caller == 0 ? CALLER_PC : caller);
   36.76 -  } else {
   36.77 -    tkr.discard();
   36.78 -  }
   36.79 -  return ptr;
    36.80 +  // NMT support
   36.81 +  void* membase = MemTracker::record_free(memblock);
   36.82 +  NMT_TrackingLevel level = MemTracker::tracking_level();
   36.83 +  size_t  nmt_header_size = MemTracker::malloc_header_size(level);
   36.84 +  void* ptr = ::realloc(membase, size + nmt_header_size);
   36.85 +  return MemTracker::record_malloc(ptr, size, memflags, stack, level);
   36.86  #else
   36.87    if (memblock == NULL) {
   36.88 -    return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
   36.89 +    return os::malloc(size, memflags, stack);
   36.90    }
   36.91    if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
   36.92      tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
   36.93      breakpoint();
   36.94    }
   36.95 -  verify_memory(memblock);
   36.96 +  // NMT support
   36.97 +  void* membase = MemTracker::malloc_base(memblock);
   36.98 +  verify_memory(membase);
   36.99    NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
  36.100    if (size == 0) {
  36.101      return NULL;
  36.102    }
  36.103    // always move the block
  36.104 -  void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
  36.105 +  void* ptr = os::malloc(size, memflags, stack);
  36.106    if (PrintMalloc) {
  36.107      tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
  36.108    }
  36.109    // Copy to new memory if malloc didn't fail
  36.110    if ( ptr != NULL ) {
  36.111 -    GuardedMemory guarded(memblock);
  36.112 -    memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
  36.113 -    if (paranoid) verify_memory(ptr);
  36.114 +    GuardedMemory guarded(MemTracker::malloc_base(memblock));
  36.115 +    // Guard's user data contains NMT header
  36.116 +    size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
  36.117 +    memcpy(ptr, memblock, MIN2(size, memblock_size));
  36.118 +    if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
  36.119      if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
  36.120        tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
  36.121        breakpoint();
  36.122 @@ -677,7 +688,6 @@
  36.123  
  36.124  
  36.125  void  os::free(void *memblock, MEMFLAGS memflags) {
  36.126 -  address trackp = (address) memblock;
  36.127    NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
  36.128  #ifdef ASSERT
  36.129    if (memblock == NULL) return;
  36.130 @@ -685,20 +695,22 @@
  36.131      if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
  36.132      breakpoint();
  36.133    }
  36.134 -  verify_memory(memblock);
  36.135 +  void* membase = MemTracker::record_free(memblock);
  36.136 +  verify_memory(membase);
  36.137    NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
  36.138  
  36.139 -  GuardedMemory guarded(memblock);
  36.140 +  GuardedMemory guarded(membase);
  36.141    size_t size = guarded.get_user_size();
  36.142    inc_stat_counter(&free_bytes, size);
  36.143 -  memblock = guarded.release_for_freeing();
  36.144 +  membase = guarded.release_for_freeing();
  36.145    if (PrintMalloc && tty != NULL) {
  36.146 -      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
  36.147 +      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
  36.148    }
  36.149 +  ::free(membase);
  36.150 +#else
  36.151 +  void* membase = MemTracker::record_free(memblock);
  36.152 +  ::free(membase);
  36.153  #endif
  36.154 -  MemTracker::record_free(trackp, memflags);
  36.155 -
  36.156 -  ::free(memblock);
  36.157  }
  36.158  
  36.159  void os::init_random(long initval) {
  36.160 @@ -1412,7 +1424,7 @@
  36.161  char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  36.162    char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  36.163    if (result != NULL) {
  36.164 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  36.165 +    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  36.166    }
  36.167  
  36.168    return result;
  36.169 @@ -1422,7 +1434,7 @@
  36.170     MEMFLAGS flags) {
  36.171    char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  36.172    if (result != NULL) {
  36.173 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  36.174 +    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  36.175      MemTracker::record_virtual_memory_type((address)result, flags);
  36.176    }
  36.177  
  36.178 @@ -1432,7 +1444,7 @@
  36.179  char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
  36.180    char* result = pd_attempt_reserve_memory_at(bytes, addr);
  36.181    if (result != NULL) {
  36.182 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  36.183 +    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  36.184    }
  36.185    return result;
  36.186  }
  36.187 @@ -1472,23 +1484,29 @@
  36.188  }
  36.189  
  36.190  bool os::uncommit_memory(char* addr, size_t bytes) {
  36.191 -  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
  36.192 -  bool res = pd_uncommit_memory(addr, bytes);
  36.193 -  if (res) {
  36.194 -    tkr.record((address)addr, bytes);
  36.195 +  bool res;
  36.196 +  if (MemTracker::tracking_level() > NMT_minimal) {
  36.197 +    Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
  36.198 +    res = pd_uncommit_memory(addr, bytes);
  36.199 +    if (res) {
  36.200 +      tkr.record((address)addr, bytes);
  36.201 +    }
  36.202    } else {
  36.203 -    tkr.discard();
  36.204 +    res = pd_uncommit_memory(addr, bytes);
  36.205    }
  36.206    return res;
  36.207  }
  36.208  
  36.209  bool os::release_memory(char* addr, size_t bytes) {
  36.210 -  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  36.211 -  bool res = pd_release_memory(addr, bytes);
  36.212 -  if (res) {
  36.213 -    tkr.record((address)addr, bytes);
  36.214 +  bool res;
  36.215 +  if (MemTracker::tracking_level() > NMT_minimal) {
  36.216 +    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  36.217 +    res = pd_release_memory(addr, bytes);
  36.218 +    if (res) {
  36.219 +      tkr.record((address)addr, bytes);
  36.220 +    }
  36.221    } else {
  36.222 -    tkr.discard();
  36.223 +    res = pd_release_memory(addr, bytes);
  36.224    }
  36.225    return res;
  36.226  }
  36.227 @@ -1499,7 +1517,7 @@
  36.228                             bool allow_exec) {
  36.229    char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  36.230    if (result != NULL) {
  36.231 -    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
  36.232 +    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  36.233    }
  36.234    return result;
  36.235  }
  36.236 @@ -1512,12 +1530,15 @@
  36.237  }
  36.238  
  36.239  bool os::unmap_memory(char *addr, size_t bytes) {
  36.240 -  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  36.241 -  bool result = pd_unmap_memory(addr, bytes);
  36.242 -  if (result) {
  36.243 -    tkr.record((address)addr, bytes);
  36.244 +  bool result;
  36.245 +  if (MemTracker::tracking_level() > NMT_minimal) {
  36.246 +    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  36.247 +    result = pd_unmap_memory(addr, bytes);
  36.248 +    if (result) {
  36.249 +      tkr.record((address)addr, bytes);
  36.250 +    }
  36.251    } else {
  36.252 -    tkr.discard();
  36.253 +    result = pd_unmap_memory(addr, bytes);
  36.254    }
  36.255    return result;
  36.256  }
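
The os::malloc()/os::realloc()/os::free() changes above prepend an NMT header of MemTracker::malloc_header_size(level) bytes to each block: record_malloc() returns the user pointer just past the header, while malloc_base() and record_free() recover the raw pointer before it is handed back to ::realloc() or ::free(). A minimal standalone sketch of that layout, using hypothetical stand-in types rather than HotSpot's MemTracker:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct NmtHeader {                     // stand-in for the real NMT malloc header
      size_t   size;                       // user-visible size
      unsigned flags;                      // memory type tag
    };

    static void* record_malloc(void* base, size_t size, unsigned flags) {
      if (base == NULL) return NULL;
      NmtHeader* h = static_cast<NmtHeader*>(base);
      h->size = size; h->flags = flags;
      return h + 1;                        // user pointer follows the header
    }

    static void* malloc_base(void* user) { // recover raw pointer for ::free()/::realloc()
      return static_cast<NmtHeader*>(user) - 1;
    }

    int main() {
      size_t user_size = 32;
      void* base = std::malloc(user_size + sizeof(NmtHeader));
      void* user = record_malloc(base, user_size, /* mtTest */ 0);
      if (user != NULL) {
        std::memset(user, 0, user_size);   // the caller only ever sees the user region
        std::printf("base=%p user=%p header=%zu bytes\n", base, user, sizeof(NmtHeader));
        std::free(malloc_base(user));
      }
      return 0;
    }
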
    37.1 --- a/src/share/vm/runtime/os.hpp	Wed Aug 27 09:36:55 2014 +0200
    37.2 +++ b/src/share/vm/runtime/os.hpp	Wed Aug 27 08:19:12 2014 -0400
    37.3 @@ -66,6 +66,8 @@
    37.4  class Event;
    37.5  class DLL;
    37.6  class FileHandle;
    37.7 +class NativeCallStack;
    37.8 +
    37.9  template<class E> class GrowableArray;
   37.10  
   37.11  // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
   37.12 @@ -97,9 +99,11 @@
   37.13  // Typedef for structured exception handling support
   37.14  typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
   37.15  
   37.16 +class MallocTracker;
   37.17 +
   37.18  class os: AllStatic {
   37.19    friend class VMStructs;
   37.20 -
   37.21 +  friend class MallocTracker;
   37.22   public:
   37.23    enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
   37.24  
   37.25 @@ -161,7 +165,10 @@
   37.26    // Override me as needed
   37.27    static int    file_name_strcmp(const char* s1, const char* s2);
   37.28  
   37.29 +  // get/unset environment variable
   37.30    static bool getenv(const char* name, char* buffer, int len);
   37.31 +  static bool unsetenv(const char* name);
   37.32 +
   37.33    static bool have_special_privileges();
   37.34  
   37.35    static jlong  javaTimeMillis();
   37.36 @@ -207,8 +214,13 @@
   37.37  
   37.38    // Interface for detecting multiprocessor system
   37.39    static inline bool is_MP() {
   37.40 +#if !INCLUDE_NMT
   37.41      assert(_processor_count > 0, "invalid processor count");
   37.42      return _processor_count > 1 || AssumeMP;
   37.43 +#else
   37.44 +    // NMT needs atomic operations before this initialization.
   37.45 +    return true;
   37.46 +#endif
   37.47    }
   37.48    static julong available_memory();
   37.49    static julong physical_memory();
   37.50 @@ -651,12 +663,20 @@
   37.51    static void* thread_local_storage_at(int index);
   37.52    static void  free_thread_local_storage(int index);
   37.53  
   37.54 -  // Stack walk
   37.55 -  static address get_caller_pc(int n = 0);
   37.56 +  // Retrieve native stack frames.
    37.57 +  // Parameters:
    37.58 +  //   stack:  an array to store stack frame pointers.
    37.59 +  //   size:   size of the above array.
   37.60 +  //   toSkip: number of stack frames to skip at the beginning.
   37.61 +  // Return: number of stack frames captured.
   37.62 +  static int get_native_stack(address* stack, int size, int toSkip = 0);
   37.63  
   37.64    // General allocation (must be MT-safe)
   37.65 -  static void* malloc  (size_t size, MEMFLAGS flags, address caller_pc = 0);
   37.66 -  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
   37.67 +  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
   37.68 +  static void* malloc  (size_t size, MEMFLAGS flags);
   37.69 +  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
   37.70 +  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
   37.71 +
   37.72    static void  free    (void *memblock, MEMFLAGS flags = mtNone);
   37.73    static bool  check_heap(bool force = false);      // verify C heap integrity
   37.74    static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
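
os::get_native_stack() replaces get_caller_pc(); its platform implementations are not part of this hunk. As a rough standalone analog of the interface documented above (Linux/glibc only, using backtrace(3), which is an assumption and not HotSpot's actual mechanism):

    #include <execinfo.h>
    #include <cstdio>

    // Capture up to 'size' return addresses, skipping the first 'to_skip' frames.
    static int get_native_stack(void** stack, int size, int to_skip) {
      void* raw[32];
      int total = backtrace(raw, 32);
      int copied = 0;
      for (int i = to_skip; i < total && copied < size; i++) {
        stack[copied++] = raw[i];
      }
      return copied;
    }

    int main() {
      void* frames[4];
      int n = get_native_stack(frames, 4, /* to_skip */ 1);
      for (int i = 0; i < n; i++) {
        std::printf("frame[%d] = %p\n", i, frames[i]);
      }
      return 0;
    }
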
    38.1 --- a/src/share/vm/runtime/safepoint.cpp	Wed Aug 27 09:36:55 2014 +0200
    38.2 +++ b/src/share/vm/runtime/safepoint.cpp	Wed Aug 27 08:19:12 2014 -0400
    38.3 @@ -50,7 +50,6 @@
    38.4  #include "runtime/sweeper.hpp"
    38.5  #include "runtime/synchronizer.hpp"
    38.6  #include "runtime/thread.inline.hpp"
    38.7 -#include "services/memTracker.hpp"
    38.8  #include "services/runtimeService.hpp"
    38.9  #include "utilities/events.hpp"
   38.10  #include "utilities/macros.hpp"
   38.11 @@ -547,10 +546,6 @@
   38.12      TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
   38.13      ClassLoaderDataGraph::purge_if_needed();
   38.14    }
   38.15 -
   38.16 -  if (MemTracker::is_on()) {
   38.17 -    MemTracker::sync();
   38.18 -  }
   38.19  }
   38.20  
   38.21  
    39.1 --- a/src/share/vm/runtime/thread.cpp	Wed Aug 27 09:36:55 2014 +0200
    39.2 +++ b/src/share/vm/runtime/thread.cpp	Wed Aug 27 08:19:12 2014 -0400
    39.3 @@ -331,8 +331,7 @@
    39.4  #if INCLUDE_NMT
    39.5    // record thread's native stack, stack grows downward
    39.6    address stack_low_addr = stack_base() - stack_size();
    39.7 -  MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
    39.8 -      CURRENT_PC);
    39.9 +  MemTracker::record_thread_stack(stack_low_addr, stack_size());
   39.10  #endif // INCLUDE_NMT
   39.11  }
   39.12  
   39.13 @@ -350,7 +349,7 @@
   39.14  #if INCLUDE_NMT
   39.15    if (_stack_base != NULL) {
   39.16      address low_stack_addr = stack_base() - stack_size();
   39.17 -    MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
   39.18 +    MemTracker::release_thread_stack(low_stack_addr, stack_size());
   39.19  #ifdef ASSERT
   39.20      set_stack_base(NULL);
   39.21  #endif
   39.22 @@ -1442,9 +1441,6 @@
   39.23    set_monitor_chunks(NULL);
   39.24    set_next(NULL);
   39.25    set_thread_state(_thread_new);
   39.26 -#if INCLUDE_NMT
   39.27 -  set_recorder(NULL);
   39.28 -#endif
   39.29    _terminated = _not_terminated;
   39.30    _privileged_stack_top = NULL;
   39.31    _array_for_gc = NULL;
   39.32 @@ -1519,7 +1515,6 @@
   39.33      _jni_attach_state = _not_attaching_via_jni;
   39.34    }
   39.35    assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
   39.36 -  _safepoint_visible = false;
   39.37  }
   39.38  
   39.39  bool JavaThread::reguard_stack(address cur_sp) {
   39.40 @@ -1582,7 +1577,6 @@
   39.41    thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
   39.42                                                       os::java_thread;
   39.43    os::create_thread(this, thr_type, stack_sz);
   39.44 -  _safepoint_visible = false;
   39.45    // The _osthread may be NULL here because we ran out of memory (too many threads active).
   39.46    // We need to throw and OutOfMemoryError - however we cannot do this here because the caller
   39.47    // may hold a lock and all locks must be unlocked before throwing the exception (throwing
   39.48 @@ -1600,13 +1594,6 @@
   39.49        tty->print_cr("terminate thread %p", this);
   39.50    }
   39.51  
   39.52 -  // By now, this thread should already be invisible to safepoint,
   39.53 -  // and its per-thread recorder also collected.
   39.54 -  assert(!is_safepoint_visible(), "wrong state");
   39.55 -#if INCLUDE_NMT
   39.56 -  assert(get_recorder() == NULL, "Already collected");
   39.57 -#endif // INCLUDE_NMT
   39.58 -
   39.59    // JSR166 -- return the parker to the free list
   39.60    Parker::Release(_parker);
   39.61    _parker = NULL ;
   39.62 @@ -3370,11 +3357,6 @@
    39.63    // initialize TLS
   39.64    ThreadLocalStorage::init();
   39.65  
   39.66 -  // Bootstrap native memory tracking, so it can start recording memory
   39.67 -  // activities before worker thread is started. This is the first phase
   39.68 -  // of bootstrapping, VM is currently running in single-thread mode.
   39.69 -  MemTracker::bootstrap_single_thread();
   39.70 -
   39.71    // Initialize output stream logging
   39.72    ostream_init_log();
   39.73  
   39.74 @@ -3425,9 +3407,6 @@
   39.75    // Initialize Java-Level synchronization subsystem
   39.76    ObjectMonitor::Initialize() ;
   39.77  
   39.78 -  // Second phase of bootstrapping, VM is about entering multi-thread mode
   39.79 -  MemTracker::bootstrap_multi_thread();
   39.80 -
   39.81    // Initialize global modules
   39.82    jint status = init_globals();
   39.83    if (status != JNI_OK) {
   39.84 @@ -3449,9 +3428,6 @@
   39.85    // real raw monitor. VM is setup enough here for raw monitor enter.
   39.86    JvmtiExport::transition_pending_onload_raw_monitors();
   39.87  
   39.88 -  // Fully start NMT
   39.89 -  MemTracker::start();
   39.90 -
   39.91    // Create the VMThread
   39.92    { TraceTime timer("Start VMThread", TraceStartupTime);
   39.93      VMThread::create();
   39.94 @@ -4089,8 +4065,6 @@
   39.95      daemon = false;
   39.96    }
   39.97  
   39.98 -  p->set_safepoint_visible(true);
   39.99 -
  39.100    ThreadService::add_thread(p, daemon);
  39.101  
  39.102    // Possible GC point.
  39.103 @@ -4136,13 +4110,6 @@
  39.104      // to do callbacks into the safepoint code. However, the safepoint code is not aware
  39.105      // of this thread since it is removed from the queue.
  39.106      p->set_terminated_value();
  39.107 -
  39.108 -    // Now, this thread is not visible to safepoint
  39.109 -    p->set_safepoint_visible(false);
  39.110 -    // once the thread becomes safepoint invisible, we can not use its per-thread
  39.111 -    // recorder. And Threads::do_threads() no longer walks this thread, so we have
  39.112 -    // to release its per-thread recorder here.
  39.113 -    MemTracker::thread_exiting(p);
  39.114    } // unlock Threads_lock
  39.115  
  39.116    // Since Events::log uses a lock, we grab it outside the Threads_lock
    40.1 --- a/src/share/vm/runtime/thread.hpp	Wed Aug 27 09:36:55 2014 +0200
    40.2 +++ b/src/share/vm/runtime/thread.hpp	Wed Aug 27 08:19:12 2014 -0400
    40.3 @@ -43,10 +43,6 @@
    40.4  #include "runtime/unhandledOops.hpp"
    40.5  #include "utilities/macros.hpp"
    40.6  
    40.7 -#if INCLUDE_NMT
    40.8 -#include "services/memRecorder.hpp"
    40.9 -#endif // INCLUDE_NMT
   40.10 -
   40.11  #include "trace/traceBackend.hpp"
   40.12  #include "trace/traceMacros.hpp"
   40.13  #include "utilities/exceptions.hpp"
   40.14 @@ -1059,16 +1055,6 @@
   40.15    bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
   40.16    void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
   40.17  
   40.18 -#if INCLUDE_NMT
   40.19 -  // native memory tracking
   40.20 -  inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
   40.21 -  inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
   40.22 -
   40.23 - private:
   40.24 -  // per-thread memory recorder
   40.25 -  MemRecorder* volatile _recorder;
   40.26 -#endif // INCLUDE_NMT
   40.27 -
   40.28    // Suspend/resume support for JavaThread
   40.29   private:
   40.30    void set_ext_suspended()       { set_suspend_flag (_ext_suspended);  }
   40.31 @@ -1511,19 +1497,6 @@
   40.32       return result;
   40.33     }
   40.34  
   40.35 - // NMT (Native memory tracking) support.
   40.36 - // This flag helps NMT to determine if this JavaThread will be blocked
   40.37 - // at safepoint. If not, ThreadCritical is needed for writing memory records.
   40.38 - // JavaThread is only safepoint visible when it is in Threads' thread list,
   40.39 - // it is not visible until it is added to the list and becomes invisible
   40.40 - // once it is removed from the list.
   40.41 - public:
   40.42 -  bool is_safepoint_visible() const { return _safepoint_visible; }
   40.43 -  void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
   40.44 - private:
   40.45 -  bool _safepoint_visible;
   40.46 -
   40.47 -  // Static operations
   40.48   public:
   40.49    // Returns the running thread as a JavaThread
   40.50    static inline JavaThread* current();
    41.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    41.2 +++ b/src/share/vm/services/allocationSite.hpp	Wed Aug 27 08:19:12 2014 -0400
    41.3 @@ -0,0 +1,57 @@
    41.4 +/*
    41.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    41.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    41.7 + *
    41.8 + * This code is free software; you can redistribute it and/or modify it
    41.9 + * under the terms of the GNU General Public License version 2 only, as
   41.10 + * published by the Free Software Foundation.
   41.11 + *
   41.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   41.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   41.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   41.15 + * version 2 for more details (a copy is included in the LICENSE file that
   41.16 + * accompanied this code).
   41.17 + *
   41.18 + * You should have received a copy of the GNU General Public License version
   41.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   41.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   41.21 + *
   41.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   41.23 + * or visit www.oracle.com if you need additional information or have any
   41.24 + * questions.
   41.25 + *
   41.26 + */
   41.27 +
   41.28 +#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
   41.29 +#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
   41.30 +
   41.31 +#include "memory/allocation.hpp"
   41.32 +#include "utilities/nativeCallStack.hpp"
   41.33 +
   41.34 +// Allocation site represents a code path that makes a memory
   41.35 +// allocation
   41.36 +template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
   41.37 + private:
   41.38 +  NativeCallStack  _call_stack;
   41.39 +  E                e;
   41.40 + public:
   41.41 +  AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { }
   41.42 +  int hash() const { return _call_stack.hash(); }
   41.43 +  bool equals(const NativeCallStack& stack) const {
   41.44 +    return _call_stack.equals(stack);
   41.45 +  }
   41.46 +
   41.47 +  bool equals(const AllocationSite<E>& other) const {
   41.48 +    return other.equals(_call_stack);
   41.49 +  }
   41.50 +
   41.51 +  const NativeCallStack* call_stack() const {
   41.52 +    return &_call_stack;
   41.53 +  }
   41.54 +
   41.55 +  // Information regarding this allocation
   41.56 +  E* data()             { return &e; }
   41.57 +  const E* peek() const { return &e; }
   41.58 +};
   41.59 +
   41.60 +#endif  // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
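
AllocationSite<E> above binds an immutable call stack (the lookup key) to a mutable payload E that aggregates data for that code path; MallocSite later instantiates it with a memory counter. A minimal standalone sketch of the same shape, using hypothetical stand-in types instead of NativeCallStack and MemoryCounter:

    #include <cstddef>
    #include <cstdio>

    struct FakeStack {                     // stand-in for NativeCallStack
      const void* pc;
      bool equals(const FakeStack& o) const { return pc == o.pc; }
    };

    struct Counter {                       // stand-in for MemoryCounter
      size_t bytes;
      size_t calls;
      Counter() : bytes(0), calls(0) { }
      void allocate(size_t s) { bytes += s; calls++; }
    };

    template <class E> class Site {        // same shape as AllocationSite<E>
      FakeStack _stack;                    // immutable key
      E         _e;                        // mutable payload
     public:
      explicit Site(const FakeStack& s) : _stack(s) { }
      bool equals(const FakeStack& s) const { return _stack.equals(s); }
      E* data()             { return &_e; }
      const E* peek() const { return &_e; }
    };

    int main() {
      int anchor = 0;
      FakeStack s = { &anchor };
      Site<Counter> site(s);
      if (site.equals(s)) site.data()->allocate(64);
      std::printf("bytes=%zu calls=%zu\n", site.peek()->bytes, site.peek()->calls);
      return 0;
    }
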
    42.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    42.2 +++ b/src/share/vm/services/mallocSiteTable.cpp	Wed Aug 27 08:19:12 2014 -0400
    42.3 @@ -0,0 +1,261 @@
    42.4 +/*
    42.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    42.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    42.7 + *
    42.8 + * This code is free software; you can redistribute it and/or modify it
    42.9 + * under the terms of the GNU General Public License version 2 only, as
   42.10 + * published by the Free Software Foundation.
   42.11 + *
   42.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   42.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   42.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   42.15 + * version 2 for more details (a copy is included in the LICENSE file that
   42.16 + * accompanied this code).
   42.17 + *
   42.18 + * You should have received a copy of the GNU General Public License version
   42.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   42.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   42.21 + *
   42.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   42.23 + * or visit www.oracle.com if you need additional information or have any
   42.24 + * questions.
   42.25 + *
   42.26 + */
   42.27 +#include "precompiled.hpp"
   42.28 +
   42.29 +
   42.30 +#include "memory/allocation.inline.hpp"
   42.31 +#include "runtime/atomic.hpp"
   42.32 +#include "services/mallocSiteTable.hpp"
   42.33 +
   42.34 +/*
   42.35 + * Early os::malloc() calls come from initializations of static variables, long before entering any
    42.36 + * VM code. Upon the arrival of the first os::malloc() call, the malloc site hashtable has to be
    42.37 + * initialized, along with the allocation site for the hashtable entries.
    42.38 + * To ensure that the malloc site hashtable can be initialized without triggering any additional os::malloc()
    42.39 + * call, the hashtable bucket array and the hashtable entry allocation site have to be static.
    42.40 + * This is not a problem for the hashtable buckets, since they form an array of pointer type; the C runtime just
    42.41 + * allocates a block of memory and zeroes it.
    42.42 + * But for the hashtable entry allocation site object, things get tricky. The C runtime not only allocates
    42.43 + * memory for it, but also calls its constructor at some later time. If we initialized the allocation site
    42.44 + * at the first os::malloc() call, the object would be reinitialized when its constructor is called
    42.45 + * by the C runtime.
    42.46 + * To work around the above issue, we declare a static size_t array with the size of MallocSiteHashtableEntry;
    42.47 + * that memory is used to instantiate the MallocSiteHashtableEntry for the hashtable entry allocation site.
    42.48 + * Given it is a primitive-type array, the C runtime does nothing other than assign the memory block to the variable,
    42.49 + * which is exactly what we want.
    42.50 + * The same trick is also applied to create the NativeCallStack object used for MallocSiteHashtableEntry memory allocation.
    42.51 + *
    42.52 + * Note: a C++ object is usually aligned to a particular boundary, depending on the compiler implementation; we declare
    42.53 + * the memory as size_t arrays to ensure it is aligned to the native machine word.
   42.54 + */
   42.55 +
   42.56 +// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
   42.57 +size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
   42.58 +size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
   42.59 +
   42.60 +// Malloc site hashtable buckets
   42.61 +MallocSiteHashtableEntry*  MallocSiteTable::_table[MallocSiteTable::table_size];
   42.62 +
   42.63 +// concurrent access counter
   42.64 +volatile int MallocSiteTable::_access_count = 0;
   42.65 +
   42.66 +// Tracking hashtable contention
   42.67 +NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
   42.68 +
   42.69 +
   42.70 +/*
   42.71 + * Initialize malloc site table.
    42.72 + * Hashtable entries are malloc'd, so allocating them can cause infinite recursion.
    42.73 + * To avoid this problem, we pre-initialize a hash entry for
    42.74 + * this allocation site.
    42.75 + * The method is called during C runtime static variable initialization
    42.76 + * time, so it runs in single-threaded mode from the JVM's perspective.
   42.77 + */
   42.78 +bool MallocSiteTable::initialize() {
   42.79 +  assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
   42.80 +  assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
   42.81 +    "Sanity Check");
   42.82 +  assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
   42.83 +
   42.84 +  // Fake the call stack for hashtable entry allocation
   42.85 +  assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
   42.86 +
   42.87 +  // Create pseudo call stack for hashtable entry allocation
   42.88 +  address pc[3];
   42.89 +  if (NMT_TrackingStackDepth >= 3) {
   42.90 +    pc[2] = (address)MallocSiteTable::allocation_at;
   42.91 +  }
   42.92 +  if (NMT_TrackingStackDepth >= 2) {
   42.93 +    pc[1] = (address)MallocSiteTable::lookup_or_add;
   42.94 +  }
   42.95 +  pc[0] = (address)MallocSiteTable::new_entry;
   42.96 +
   42.97 +  // Instantiate NativeCallStack object, have to use placement new operator. (see comments above)
   42.98 +  NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
   42.99 +    NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
  42.100 +
  42.101 +  // Instantiate hash entry for hashtable entry allocation callsite
  42.102 +  MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
  42.103 +    MallocSiteHashtableEntry(*stack);
  42.104 +
  42.105 +  // Add the allocation site to hashtable.
  42.106 +  int index = hash_to_index(stack->hash());
  42.107 +  _table[index] = entry;
  42.108 +
  42.109 +  return true;
  42.110 +}
  42.111 +
  42.112 +// Walks entries in the hashtable.
   42.113 +// It stops the walk if the walker returns false.
  42.114 +bool MallocSiteTable::walk(MallocSiteWalker* walker) {
  42.115 +  MallocSiteHashtableEntry* head;
  42.116 +  for (int index = 0; index < table_size; index ++) {
  42.117 +    head = _table[index];
  42.118 +    while (head != NULL) {
  42.119 +      if (!walker->do_malloc_site(head->peek())) {
  42.120 +        return false;
  42.121 +      }
  42.122 +      head = (MallocSiteHashtableEntry*)head->next();
  42.123 +    }
  42.124 +  }
  42.125 +  return true;
  42.126 +}
  42.127 +
  42.128 +/*
   42.129 + *  The hashtable has no deletion policy for individual entries,
   42.130 + *  and each linked list node is inserted via compare-and-swap,
   42.131 + *  so each linked list is stable; contention only happens
   42.132 + *  at the end of a linked list.
   42.133 + *  This method should not return NULL under normal circumstances.
   42.134 + *  If NULL is returned, it indicates one of the following:
   42.135 + *    1. Out of memory: a new hash entry cannot be allocated.
   42.136 + *    2. The hash bucket has overflowed.
   42.137 + *  Under either circumstance, the caller should handle the situation.
  42.138 + */
  42.139 +MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
  42.140 +  size_t* pos_idx) {
  42.141 +  int index = hash_to_index(key.hash());
  42.142 +  assert(index >= 0, "Negative index");
  42.143 +  *bucket_idx = (size_t)index;
  42.144 +  *pos_idx = 0;
  42.145 +
  42.146 +  // First entry for this hash bucket
  42.147 +  if (_table[index] == NULL) {
  42.148 +    MallocSiteHashtableEntry* entry = new_entry(key);
  42.149 +    // OOM check
  42.150 +    if (entry == NULL) return NULL;
  42.151 +
  42.152 +    // swap in the head
  42.153 +    if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
  42.154 +      return entry->data();
  42.155 +    }
  42.156 +
  42.157 +    delete entry;
  42.158 +  }
  42.159 +
  42.160 +  MallocSiteHashtableEntry* head = _table[index];
  42.161 +  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
  42.162 +    MallocSite* site = head->data();
  42.163 +    if (site->equals(key)) {
  42.164 +      // found matched entry
  42.165 +      return head->data();
  42.166 +    }
  42.167 +
  42.168 +    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
  42.169 +      MallocSiteHashtableEntry* entry = new_entry(key);
  42.170 +      // OOM check
  42.171 +      if (entry == NULL) return NULL;
  42.172 +      if (head->atomic_insert(entry)) {
  42.173 +        (*pos_idx) ++;
  42.174 +        return entry->data();
  42.175 +      }
  42.176 +      // contended, other thread won
  42.177 +      delete entry;
  42.178 +    }
  42.179 +    head = (MallocSiteHashtableEntry*)head->next();
  42.180 +    (*pos_idx) ++;
  42.181 +  }
  42.182 +  return NULL;
  42.183 +}
  42.184 +
  42.185 +// Access malloc site
  42.186 +MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
  42.187 +  assert(bucket_idx < table_size, "Invalid bucket index");
  42.188 +  MallocSiteHashtableEntry* head = _table[bucket_idx];
  42.189 +  for (size_t index = 0; index < pos_idx && head != NULL;
  42.190 +    index ++, head = (MallocSiteHashtableEntry*)head->next());
  42.191 +  assert(head != NULL, "Invalid position index");
  42.192 +  return head->data();
  42.193 +}
  42.194 +
   42.196 +// Allocates a MallocSiteHashtableEntry object. The special call stack
   42.197 +// (pre-installed allocation site) has to be used to avoid infinite
  42.197 +// recursion.
  42.198 +MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
  42.199 +  void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
  42.200 +    *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
  42.201 +  return ::new (p) MallocSiteHashtableEntry(key);
  42.202 +}
  42.203 +
  42.204 +void MallocSiteTable::reset() {
  42.205 +  for (int index = 0; index < table_size; index ++) {
  42.206 +    MallocSiteHashtableEntry* head = _table[index];
  42.207 +    _table[index] = NULL;
  42.208 +    delete_linked_list(head);
  42.209 +  }
  42.210 +}
  42.211 +
  42.212 +void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
  42.213 +  MallocSiteHashtableEntry* p;
  42.214 +  while (head != NULL) {
  42.215 +    p = head;
  42.216 +    head = (MallocSiteHashtableEntry*)head->next();
  42.217 +    if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
  42.218 +      delete p;
  42.219 +    }
  42.220 +  }
  42.221 +}
  42.222 +
  42.223 +void MallocSiteTable::shutdown() {
  42.224 +  AccessLock locker(&_access_count);
  42.225 +  locker.exclusiveLock();
  42.226 +  reset();
  42.227 +}
  42.228 +
  42.229 +bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
   42.231 +  assert(walker != NULL, "NULL walker");
  42.231 +  AccessLock locker(&_access_count);
  42.232 +  if (locker.sharedLock()) {
  42.233 +    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
  42.234 +    return walk(walker);
  42.235 +  }
  42.236 +  return false;
  42.237 +}
  42.238 +
  42.239 +
  42.240 +void MallocSiteTable::AccessLock::exclusiveLock() {
  42.241 +  jint target;
  42.242 +  jint val;
  42.243 +
  42.244 +  assert(_lock_state != ExclusiveLock, "Can only call once");
   42.246 +  assert(*_lock >= 0, "Cannot contend exclusive lock");
  42.246 +
  42.247 +  // make counter negative to block out shared locks
  42.248 +  do {
  42.249 +    val = *_lock;
  42.250 +    target = _MAGIC_ + *_lock;
  42.251 +  } while (Atomic::cmpxchg(target, _lock, val) != val);
  42.252 +
  42.253 +  // wait for all readers to exit
  42.254 +  while (*_lock != _MAGIC_) {
  42.255 +#ifdef _WINDOWS
  42.256 +    os::naked_short_sleep(1);
  42.257 +#else
  42.258 +    os::NakedYield();
  42.259 +#endif
  42.260 +  }
  42.261 +  _lock_state = ExclusiveLock;
  42.262 +}
  42.263 +
  42.264 +
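
AccessLock::exclusiveLock() above makes the shared counter deeply negative so that new readers bounce off, then spins until existing readers have drained; the lock is one-shot and never released. A standalone sketch of the same idea using std::atomic instead of HotSpot's Atomic class:

    #include <atomic>
    #include <climits>
    #include <thread>

    static std::atomic<int> lock_count(0);
    static const int MAGIC = INT_MIN;      // very large negative sentinel, as _MAGIC_ above

    static bool shared_lock() {            // grant read access unless exclusive lock arrived
      if (lock_count.fetch_add(1) < 0) {
        lock_count.fetch_sub(1);           // back off: counter is already negative
        return false;
      }
      return true;
    }

    static void shared_unlock() { lock_count.fetch_sub(1); }

    static void exclusive_lock() {         // called once during shutdown, never released
      lock_count.fetch_add(MAGIC);         // push the counter deeply negative to block new readers
      while (lock_count.load() != MAGIC) {
        std::this_thread::yield();         // wait until existing readers have drained
      }
    }

    int main() {
      bool before = shared_lock();         // succeeds while the table is live
      if (before) shared_unlock();
      exclusive_lock();
      bool after = shared_lock();          // rejected once exclusive access is taken
      return (before && !after) ? 0 : 1;
    }
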
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/src/share/vm/services/mallocSiteTable.hpp	Wed Aug 27 08:19:12 2014 -0400
    43.3 @@ -0,0 +1,268 @@
    43.4 +/*
    43.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    43.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    43.7 + *
    43.8 + * This code is free software; you can redistribute it and/or modify it
    43.9 + * under the terms of the GNU General Public License version 2 only, as
   43.10 + * published by the Free Software Foundation.
   43.11 + *
   43.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   43.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   43.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   43.15 + * version 2 for more details (a copy is included in the LICENSE file that
   43.16 + * accompanied this code).
   43.17 + *
   43.18 + * You should have received a copy of the GNU General Public License version
   43.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   43.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   43.21 + *
   43.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   43.23 + * or visit www.oracle.com if you need additional information or have any
   43.24 + * questions.
   43.25 + *
   43.26 + */
   43.27 +
   43.28 +#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
   43.29 +#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
   43.30 +
   43.31 +#if INCLUDE_NMT
   43.32 +
   43.33 +#include "memory/allocation.hpp"
   43.34 +#include "runtime/atomic.hpp"
   43.35 +#include "services/allocationSite.hpp"
   43.36 +#include "services/mallocTracker.hpp"
   43.37 +#include "services/nmtCommon.hpp"
   43.38 +
   43.39 +// MallocSite represents a code path that eventually calls
   43.40 +// os::malloc() to allocate memory
   43.41 +class MallocSite : public AllocationSite<MemoryCounter> {
   43.42 + public:
   43.43 +  MallocSite() :
   43.44 +    AllocationSite<MemoryCounter>(emptyStack) { }
   43.45 +
   43.46 +  MallocSite(const NativeCallStack& stack) :
   43.47 +    AllocationSite<MemoryCounter>(stack) { }
   43.48 +
   43.49 +  void allocate(size_t size)      { data()->allocate(size);   }
   43.50 +  void deallocate(size_t size)    { data()->deallocate(size); }
   43.51 +
   43.52 +  // Memory allocated from this code path
   43.53 +  size_t size()  const { return peek()->size(); }
    43.54 +  // The number of calls that were made
   43.55 +  size_t count() const { return peek()->count(); }
   43.56 +};
   43.57 +
   43.58 +// Malloc site hashtable entry
   43.59 +class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
   43.60 + private:
   43.61 +  MallocSite                _malloc_site;
   43.62 +  MallocSiteHashtableEntry* _next;
   43.63 +
   43.64 + public:
   43.65 +  MallocSiteHashtableEntry() : _next(NULL) { }
   43.66 +
   43.67 +  MallocSiteHashtableEntry(NativeCallStack stack):
   43.68 +    _malloc_site(stack), _next(NULL) { }
   43.69 +
   43.70 +  inline const MallocSiteHashtableEntry* next() const {
   43.71 +    return _next;
   43.72 +  }
   43.73 +
   43.74 +  // Insert an entry atomically.
   43.75 +  // Return true if the entry is inserted successfully.
    43.76 +  // The operation can fail due to contention from another thread.
   43.77 +  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
   43.78 +    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
   43.79 +      NULL) == NULL);
   43.80 +  }
   43.81 +
   43.82 +  void set_callsite(const MallocSite& site) {
   43.83 +    _malloc_site = site;
   43.84 +  }
   43.85 +
   43.86 +  inline const MallocSite* peek() const { return &_malloc_site; }
   43.87 +  inline MallocSite* data()             { return &_malloc_site; }
   43.88 +
   43.89 +  inline long hash() const { return _malloc_site.hash(); }
   43.90 +  inline bool equals(const NativeCallStack& stack) const {
   43.91 +    return _malloc_site.equals(stack);
   43.92 +  }
   43.93 +  // Allocation/deallocation on this allocation site
   43.94 +  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
   43.95 +  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
   43.96 +  // Memory counters
   43.97 +  inline size_t size() const  { return _malloc_site.size();  }
   43.98 +  inline size_t count() const { return _malloc_site.count(); }
   43.99 +};
  43.100 +
   43.101 +// The walker walks every entry in the MallocSiteTable
  43.102 +class MallocSiteWalker : public StackObj {
  43.103 + public:
  43.104 +   virtual bool do_malloc_site(const MallocSite* e) { return false; }
  43.105 +};
  43.106 +
  43.107 +/*
  43.108 + * Native memory tracking call site table.
  43.109 + * The table is only needed when detail tracking is enabled.
  43.110 + */
  43.111 +class MallocSiteTable : AllStatic {
  43.112 + private:
   43.113 +  // The number of hash buckets in this hashtable. The number should
   43.114 +  // be tuned if malloc activity changes significantly.
   43.115 +  // The statistics can be obtained via jcmd:
   43.116 +  // jcmd <pid> VM.native_memory statistics.
   43.117 +
   43.118 +  // Currently, the (number of buckets / number of entries) ratio is
   43.119 +  // about 1 / 6.
  43.120 +  enum {
  43.121 +    table_base_size = 128,   // The base size is calculated from statistics to give
  43.122 +                             // table ratio around 1:6
  43.123 +    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  43.124 +  };
  43.125 +
  43.126 +
   43.127 +  // This is a very special lock that allows multiple shared accesses (sharedLock), but
  43.128 +  // once exclusive access (exclusiveLock) is requested, all shared accesses are
  43.129 +  // rejected forever.
  43.130 +  class AccessLock : public StackObj {
  43.131 +    enum LockState {
  43.132 +      NoLock,
  43.133 +      SharedLock,
  43.134 +      ExclusiveLock
  43.135 +    };
  43.136 +
  43.137 +   private:
  43.138 +    // A very large negative number. The only possibility to "overflow"
  43.139 +    // this number is when there are more than -min_jint threads in
   43.140 +    // this process, which is not going to happen in the foreseeable future.
  43.141 +    const static int _MAGIC_ = min_jint;
  43.142 +
  43.143 +    LockState      _lock_state;
  43.144 +    volatile int*  _lock;
  43.145 +   public:
  43.146 +    AccessLock(volatile int* lock) :
  43.147 +      _lock(lock), _lock_state(NoLock) {
  43.148 +    }
  43.149 +
  43.150 +    ~AccessLock() {
  43.151 +      if (_lock_state == SharedLock) {
  43.152 +        Atomic::dec((volatile jint*)_lock);
  43.153 +      }
  43.154 +    }
  43.155 +    // Acquire shared lock.
  43.156 +    // Return true if shared access is granted.
  43.157 +    inline bool sharedLock() {
  43.158 +      jint res = Atomic::add(1, _lock);
  43.159 +      if (res < 0) {
  43.160 +        Atomic::add(-1, _lock);
  43.161 +        return false;
  43.162 +      }
  43.163 +      _lock_state = SharedLock;
  43.164 +      return true;
  43.165 +    }
  43.166 +    // Acquire exclusive lock
  43.167 +    void exclusiveLock();
  43.168 + };
  43.169 +
  43.170 + public:
  43.171 +  static bool initialize();
  43.172 +  static void shutdown();
  43.173 +
  43.174 +  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
  43.175 +
  43.176 +  // Number of hash buckets
  43.177 +  static inline int hash_buckets()      { return (int)table_size; }
  43.178 +
  43.179 +  // Access and copy a call stack from this table. The shared lock should be
  43.180 +  // acquired before accessing the entry.
  43.181 +  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
  43.182 +    size_t pos_idx) {
  43.183 +    AccessLock locker(&_access_count);
  43.184 +    if (locker.sharedLock()) {
  43.185 +      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
  43.186 +      MallocSite* site = malloc_site(bucket_idx, pos_idx);
  43.187 +      if (site != NULL) {
  43.188 +        stack = *site->call_stack();
  43.189 +        return true;
  43.190 +      }
  43.191 +    }
  43.192 +    return false;
  43.193 +  }
  43.194 +
  43.195 +  // Record a new allocation from the specified call path.
  43.196 +  // Return true if the allocation is recorded successfully; bucket_idx
  43.197 +  // and pos_idx are also updated to indicate the entry where the allocation
  43.198 +  // information was recorded.
  43.199 +  // Returning false only occurs under rare scenarios:
  43.200 +  //  1. out of memory
  43.201 +  //  2. hash bucket overflow
  43.202 +  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
  43.203 +    size_t* bucket_idx, size_t* pos_idx) {
  43.204 +    AccessLock locker(&_access_count);
  43.205 +    if (locker.sharedLock()) {
  43.206 +      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
  43.207 +      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
  43.208 +      if (site != NULL) site->allocate(size);
  43.209 +      return site != NULL;
  43.210 +    }
  43.211 +    return false;
  43.212 +  }
  43.213 +
  43.214 +  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
  43.215 +  // information was recorded.
  43.216 +  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
  43.217 +    AccessLock locker(&_access_count);
  43.218 +    if (locker.sharedLock()) {
  43.219 +      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
  43.220 +      MallocSite* site = malloc_site(bucket_idx, pos_idx);
  43.221 +      if (site != NULL) {
  43.222 +        site->deallocate(size);
  43.223 +        return true;
  43.224 +      }
  43.225 +    }
  43.226 +    return false;
  43.227 +  }
  43.228 +
  43.229 +  // Walk this table.
  43.230 +  static bool walk_malloc_site(MallocSiteWalker* walker);
  43.231 +
  43.232 + private:
  43.233 +  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
  43.234 +  static void reset();
  43.235 +
  43.236 +  // Delete a bucket linked list
  43.237 +  static void delete_linked_list(MallocSiteHashtableEntry* head);
  43.238 +
  43.239 +  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
  43.240 +  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  43.241 +  static bool walk(MallocSiteWalker* walker);
  43.242 +
  43.243 +  static inline int hash_to_index(int  hash) {
  43.244 +    hash = (hash > 0) ? hash : (-hash);
  43.245 +    return (hash % table_size);
  43.246 +  }
  43.247 +
  43.248 +  static inline const NativeCallStack* hash_entry_allocation_stack() {
  43.249 +    return (NativeCallStack*)_hash_entry_allocation_stack;
  43.250 +  }
  43.251 +
  43.252 + private:
  43.253 +  // Counter for counting concurrent access
  43.254 +  static volatile int                _access_count;
  43.255 +
  43.256 +  // The callsite hashtable. It has to be a static table,
  43.257 +  // since malloc calls can come from the C runtime linker.
  43.258 +  static MallocSiteHashtableEntry*   _table[table_size];
  43.259 +
  43.260 +
  43.261 +  // Reserve enough memory for placing the objects
  43.262 +
  43.263 +  // The memory for hashtable entry allocation stack object
  43.264 +  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  43.265 +  // The memory for hashtable entry allocation callsite object
  43.266 +  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
  43.267 +  NOT_PRODUCT(static int     _peak_count;)
  43.268 +};
  43.269 +
  43.270 +#endif // INCLUDE_NMT
  43.271 +#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
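
The AccessLock above lets any number of readers pass through sharedLock() by incrementing the counter, while an exclusive requester poisons the counter so that every later shared attempt sees a negative value and backs off. A minimal sketch of how exclusiveLock() could be written against the _MAGIC_ sentinel (an illustration under that assumption; the actual definition lives in mallocSiteTable.cpp, which is not shown here):

    // Sketch only. Stage 1: add _MAGIC_ to the counter so sharedLock()'s
    // Atomic::add(1, _lock) always observes a negative result and is rejected.
    // Stage 2: spin until the shared holders that were already in have
    // decremented out, i.e. the counter has dropped back to exactly _MAGIC_.
    void MallocSiteTable::AccessLock::exclusiveLock() {
      jint val;
      jint target;
      do {
        val = *_lock;
        target = _MAGIC_ + val;
      } while (Atomic::cmpxchg(target, (volatile jint*)_lock, val) != val);

      while (*_lock != _MAGIC_) {
        // Busy-wait; a production version would yield the CPU here.
      }
      _lock_state = ExclusiveLock;
    }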
    44.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    44.2 +++ b/src/share/vm/services/mallocTracker.cpp	Wed Aug 27 08:19:12 2014 -0400
    44.3 @@ -0,0 +1,200 @@
    44.4 +/*
    44.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    44.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44.7 + *
    44.8 + * This code is free software; you can redistribute it and/or modify it
    44.9 + * under the terms of the GNU General Public License version 2 only, as
   44.10 + * published by the Free Software Foundation.
   44.11 + *
   44.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   44.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   44.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   44.15 + * version 2 for more details (a copy is included in the LICENSE file that
   44.16 + * accompanied this code).
   44.17 + *
   44.18 + * You should have received a copy of the GNU General Public License version
   44.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   44.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   44.21 + *
   44.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   44.23 + * or visit www.oracle.com if you need additional information or have any
   44.24 + * questions.
   44.25 + *
   44.26 + */
   44.27 +#include "precompiled.hpp"
   44.28 +
   44.29 +#include "runtime/atomic.hpp"
   44.30 +#include "runtime/atomic.inline.hpp"
   44.31 +#include "services/mallocSiteTable.hpp"
   44.32 +#include "services/mallocTracker.hpp"
   44.33 +#include "services/mallocTracker.inline.hpp"
   44.34 +#include "services/memTracker.hpp"
   44.35 +
   44.36 +size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
   44.37 +
   44.38 +// Total malloc'd memory amount
   44.39 +size_t MallocMemorySnapshot::total() const {
   44.40 +  size_t amount = 0;
   44.41 +  for (int index = 0; index < mt_number_of_types; index ++) {
   44.42 +    amount += _malloc[index].malloc_size();
   44.43 +  }
   44.44 +  amount += _tracking_header.size() + total_arena();
   44.45 +  return amount;
   44.46 +}
   44.47 +
   44.48 +// Total malloc'd memory used by arenas
   44.49 +size_t MallocMemorySnapshot::total_arena() const {
   44.50 +  size_t amount = 0;
   44.51 +  for (int index = 0; index < mt_number_of_types; index ++) {
   44.52 +    amount += _malloc[index].arena_size();
   44.53 +  }
   44.54 +  return amount;
   44.55 +}
   44.56 +
   44.57 +
   44.58 +void MallocMemorySnapshot::reset() {
   44.59 +  _tracking_header.reset();
   44.60 +  for (int index = 0; index < mt_number_of_types; index ++) {
   44.61 +    _malloc[index].reset();
   44.62 +  }
   44.63 +}
   44.64 +
   44.65 +// Make adjustment by subtracting chunks used by arenas
   44.66 +// from total chunks to get total free chunk size
   44.67 +void MallocMemorySnapshot::make_adjustment() {
   44.68 +  size_t arena_size = total_arena();
   44.69 +  int chunk_idx = NMTUtil::flag_to_index(mtChunk);
   44.70 +  _malloc[chunk_idx].record_free(arena_size);
   44.71 +}
   44.72 +
   44.73 +
   44.74 +void MallocMemorySummary::initialize() {
   44.75 +  assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
   44.76 +  // Uses placement new operator to initialize static area.
   44.77 +  ::new ((void*)_snapshot)MallocMemorySnapshot();
   44.78 +}
   44.79 +
   44.80 +void MallocHeader::release() const {
   44.81 +  // Tracking already shut down; no housekeeping is needed anymore
   44.82 +  if (MemTracker::tracking_level() <= NMT_minimal) return;
   44.83 +
   44.84 +  MallocMemorySummary::record_free(size(), flags());
   44.85 +  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
   44.86 +  if (tracking_level() == NMT_detail) {
   44.87 +    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
   44.88 +  }
   44.89 +}
   44.90 +
   44.91 +bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
   44.92 +  size_t* bucket_idx, size_t* pos_idx) const {
   44.93 +  bool ret =  MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
   44.94 +
   44.95 +  // Something went wrong; it could be OOM or a malloc site table overflow.
   44.96 +  // We want to keep tracking data under OOM circumstances, so transition to
   44.97 +  // summary tracking.
   44.98 +  if (!ret) {
   44.99 +    MemTracker::transition_to(NMT_summary);
  44.100 +  }
  44.101 +  return ret;
  44.102 +}
  44.103 +
  44.104 +bool MallocHeader::get_stack(NativeCallStack& stack) const {
  44.105 +  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
  44.106 +}
  44.107 +
  44.108 +bool MallocTracker::initialize(NMT_TrackingLevel level) {
  44.109 +  if (level >= NMT_summary) {
  44.110 +    MallocMemorySummary::initialize();
  44.111 +  }
  44.112 +
  44.113 +  if (level == NMT_detail) {
  44.114 +    return MallocSiteTable::initialize();
  44.115 +  }
  44.116 +  return true;
  44.117 +}
  44.118 +
  44.119 +bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  44.120 +  assert(from != NMT_off, "Can not transition from off state");
  44.121 +  assert(to != NMT_off, "Can not transition to off state");
  44.122 +  if (from == NMT_minimal) {
  44.123 +    MallocMemorySummary::reset();
  44.124 +  }
  44.125 +
  44.126 +  if (to == NMT_detail) {
  44.127 +    assert(from == NMT_minimal || from == NMT_summary, "Just check");
  44.128 +    return MallocSiteTable::initialize();
  44.129 +  } else if (from == NMT_detail) {
  44.130 +    assert(to == NMT_minimal || to == NMT_summary, "Just check");
  44.131 +    MallocSiteTable::shutdown();
  44.132 +  }
  44.133 +  return true;
  44.134 +}
  44.135 +
  44.136 +// Record a malloc memory allocation
  44.137 +void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
  44.138 +  const NativeCallStack& stack, NMT_TrackingLevel level) {
  44.139 +  void*         memblock;      // the address for user data
  44.140 +  MallocHeader* header = NULL;
  44.141 +
  44.142 +  if (malloc_base == NULL) {
  44.143 +    return NULL;
  44.144 +  }
  44.145 +
  44.146 +  // Check malloc size; size has to be <= MAX_MALLOC_SIZE. Exceeding it is only possible on 32-bit
  44.147 +  // systems, when a malloc size >= 1GB is requested, but it is safe to assume that won't happen.
  44.148 +  if (size > MAX_MALLOC_SIZE) {
  44.149 +    fatal("Should not use malloc for big memory block, use virtual memory instead");
  44.150 +  }
  44.151 +  // Uses the global placement new operator to initialize the malloc header
  44.152 +  switch(level) {
  44.153 +    case NMT_off:
  44.154 +      return malloc_base;
  44.155 +    case NMT_minimal: {
  44.156 +      MallocHeader* hdr = ::new (malloc_base) MallocHeader();
  44.157 +      break;
  44.158 +    }
  44.159 +    case NMT_summary: {
  44.160 +      header = ::new (malloc_base) MallocHeader(size, flags);
  44.161 +      break;
  44.162 +    }
  44.163 +    case NMT_detail: {
  44.164 +      header = ::new (malloc_base) MallocHeader(size, flags, stack);
  44.165 +      break;
  44.166 +    }
  44.167 +    default:
  44.168 +      ShouldNotReachHere();
  44.169 +  }
  44.170 +  memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
  44.171 +
  44.172 +  // The alignment check: 8-byte alignment for 32-bit systems.
  44.173 +  //                      16-byte alignment for 64-bit systems.
  44.174 +  assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
  44.175 +
  44.176 +  // Sanity check
  44.177 +  assert(get_memory_tracking_level(memblock) == level,
  44.178 +    "Wrong tracking level");
  44.179 +
  44.180 +#ifdef ASSERT
  44.181 +  if (level > NMT_minimal) {
  44.182 +    // Read back
  44.183 +    assert(get_size(memblock) == size,   "Wrong size");
  44.184 +    assert(get_flags(memblock) == flags, "Wrong flags");
  44.185 +  }
  44.186 +#endif
  44.187 +
  44.188 +  return memblock;
  44.189 +}
  44.190 +
  44.191 +void* MallocTracker::record_free(void* memblock) {
  44.192 +  // Never turned on
  44.193 +  if (MemTracker::tracking_level() == NMT_off ||
  44.194 +      memblock == NULL) {
  44.195 +    return memblock;
  44.196 +  }
  44.197 +  MallocHeader* header = malloc_header(memblock);
  44.198 +  header->release();
  44.199 +
  44.200 +  return (void*)header;
  44.201 +}
  44.202 +
  44.203 +
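
To see how record_malloc() and record_free() wrap a raw allocation, here is a hedged sketch of a caller written in the style of os::malloc()/os::free(); the helper names example_nmt_malloc/example_nmt_free and the exact control flow are assumptions for illustration, not this changeset's os.cpp code:

    // Sketch only: the caller reserves extra space for the tracking header, then
    // lets MallocTracker place the header and hand back the user pointer.
    static void* example_nmt_malloc(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
      NMT_TrackingLevel level = MemTracker::tracking_level();
      size_t raw_size = size + MallocTracker::malloc_header_size(level);
      void* malloc_base = ::malloc(raw_size);   // raw block: header + user data
      if (malloc_base == NULL) return NULL;
      // Constructs the MallocHeader at malloc_base and returns the address just past it.
      return MallocTracker::record_malloc(malloc_base, size, flags, stack, level);
    }

    static void example_nmt_free(void* memblock) {
      if (memblock == NULL) return;
      // Releases the tracking data and returns the original malloc_base to free.
      void* malloc_base = MallocTracker::record_free(memblock);
      ::free(malloc_base);
    }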
    45.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.2 +++ b/src/share/vm/services/mallocTracker.hpp	Wed Aug 27 08:19:12 2014 -0400
    45.3 @@ -0,0 +1,424 @@
    45.4 +/*
    45.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    45.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    45.7 + *
    45.8 + * This code is free software; you can redistribute it and/or modify it
    45.9 + * under the terms of the GNU General Public License version 2 only, as
   45.10 + * published by the Free Software Foundation.
   45.11 + *
   45.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   45.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   45.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   45.15 + * version 2 for more details (a copy is included in the LICENSE file that
   45.16 + * accompanied this code).
   45.17 + *
   45.18 + * You should have received a copy of the GNU General Public License version
   45.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   45.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   45.21 + *
   45.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   45.23 + * or visit www.oracle.com if you need additional information or have any
   45.24 + * questions.
   45.25 + *
   45.26 + */
   45.27 +
   45.28 +#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
   45.29 +#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
   45.30 +
   45.31 +#if INCLUDE_NMT
   45.32 +
   45.33 +#include "memory/allocation.hpp"
   45.34 +#include "runtime/atomic.hpp"
   45.35 +#include "services/nmtCommon.hpp"
   45.36 +#include "utilities/nativeCallStack.hpp"
   45.37 +
   45.38 +/*
   45.39 + * This counter class counts memory allocation and deallocation,
   45.40 + * records total memory allocation size and number of allocations.
   45.41 + * The counters are updated atomically.
   45.42 + */
   45.43 +class MemoryCounter VALUE_OBJ_CLASS_SPEC {
   45.44 + private:
   45.45 +  size_t   _count;
   45.46 +  size_t   _size;
   45.47 +
   45.48 +  DEBUG_ONLY(size_t   _peak_count;)
   45.49 +  DEBUG_ONLY(size_t   _peak_size; )
   45.50 +
   45.51 + public:
   45.52 +  MemoryCounter() : _count(0), _size(0) {
   45.53 +    DEBUG_ONLY(_peak_count = 0;)
   45.54 +    DEBUG_ONLY(_peak_size  = 0;)
   45.55 +  }
   45.56 +
   45.57 +  // Reset counters
   45.58 +  void reset() {
   45.59 +    _size  = 0;
   45.60 +    _count = 0;
   45.61 +    DEBUG_ONLY(_peak_size = 0;)
   45.62 +    DEBUG_ONLY(_peak_count = 0;)
   45.63 +  }
   45.64 +
   45.65 +  inline void allocate(size_t sz) {
   45.66 +    Atomic::add(1, (volatile MemoryCounterType*)&_count);
   45.67 +    if (sz > 0) {
   45.68 +      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
   45.69 +      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
   45.70 +    }
   45.71 +    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
   45.72 +  }
   45.73 +
   45.74 +  inline void deallocate(size_t sz) {
   45.75 +    assert(_count > 0, "Negative counter");
   45.76 +    assert(_size >= sz, "Negative size");
   45.77 +    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
   45.78 +    if (sz > 0) {
   45.79 +      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
   45.80 +    }
   45.81 +  }
   45.82 +
   45.83 +  inline void resize(long sz) {
   45.84 +    if (sz != 0) {
   45.85 +      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
   45.86 +      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
   45.87 +    }
   45.88 +  }
   45.89 +
   45.90 +  inline size_t count() const { return _count; }
   45.91 +  inline size_t size()  const { return _size;  }
   45.92 +  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
   45.93 +  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })
   45.94 +
   45.95 +};
   45.96 +
   45.97 +/*
   45.98 + * Malloc memory used by a particular subsystem.
   45.99 + * It includes the memory acquired through os::malloc()
  45.100 + * call and arena's backing memory.
  45.101 + */
  45.102 +class MallocMemory VALUE_OBJ_CLASS_SPEC {
  45.103 + private:
  45.104 +  MemoryCounter _malloc;
  45.105 +  MemoryCounter _arena;
  45.106 +
  45.107 + public:
  45.108 +  MallocMemory() { }
  45.109 +
  45.110 +  inline void record_malloc(size_t sz) {
  45.111 +    _malloc.allocate(sz);
  45.112 +  }
  45.113 +
  45.114 +  inline void record_free(size_t sz) {
  45.115 +    _malloc.deallocate(sz);
  45.116 +  }
  45.117 +
  45.118 +  inline void record_new_arena() {
  45.119 +    _arena.allocate(0);
  45.120 +  }
  45.121 +
  45.122 +  inline void record_arena_free() {
  45.123 +    _arena.deallocate(0);
  45.124 +  }
  45.125 +
  45.126 +  inline void record_arena_size_change(long sz) {
  45.127 +    _arena.resize(sz);
  45.128 +  }
  45.129 +
  45.130 +  void reset() {
  45.131 +    _malloc.reset();
  45.132 +    _arena.reset();
  45.133 +  }
  45.134 +
  45.135 +  inline size_t malloc_size()  const { return _malloc.size(); }
  45.136 +  inline size_t malloc_count() const { return _malloc.count();}
  45.137 +  inline size_t arena_size()   const { return _arena.size();  }
  45.138 +  inline size_t arena_count()  const { return _arena.count(); }
  45.139 +
  45.140 +  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  45.141 +  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
  45.142 +};
  45.143 +
  45.144 +class MallocMemorySummary;
  45.145 +
  45.146 +// A snapshot of malloc'd memory, includes malloc memory
  45.147 +// usage by types and memory used by tracking itself.
  45.148 +class MallocMemorySnapshot : public ResourceObj {
  45.149 +  friend class MallocMemorySummary;
  45.150 +
  45.151 + private:
  45.152 +  MallocMemory      _malloc[mt_number_of_types];
  45.153 +  MemoryCounter     _tracking_header;
  45.154 +
  45.155 +
  45.156 + public:
  45.157 +  inline MallocMemory*  by_type(MEMFLAGS flags) {
  45.158 +    int index = NMTUtil::flag_to_index(flags);
  45.159 +    return &_malloc[index];
  45.160 +  }
  45.161 +
  45.162 +  inline MallocMemory* by_index(int index) {
  45.163 +    assert(index >= 0, "Index out of bound");
  45.164 +    assert(index < mt_number_of_types, "Index out of bound");
  45.165 +    return &_malloc[index];
  45.166 +  }
  45.167 +
  45.168 +  inline MemoryCounter* malloc_overhead() {
  45.169 +    return &_tracking_header;
  45.170 +  }
  45.171 +
  45.172 +  // Total malloc'd memory amount
  45.173 +  size_t total() const;
  45.174 +  // Total malloc'd memory used by arenas
  45.175 +  size_t total_arena() const;
  45.176 +
  45.177 +  inline size_t thread_count() {
  45.178 +    return by_type(mtThreadStack)->malloc_count();
  45.179 +  }
  45.180 +
  45.181 +  void reset();
  45.182 +
  45.183 +  void copy_to(MallocMemorySnapshot* s) {
  45.184 +    s->_tracking_header = _tracking_header;
  45.185 +    for (int index = 0; index < mt_number_of_types; index ++) {
  45.186 +      s->_malloc[index] = _malloc[index];
  45.187 +    }
  45.188 +  }
  45.189 +
  45.190 +  // Make adjustment by subtracting chunks used by arenas
  45.191 +  // from total chunks to get total free chunk size
  45.192 +  void make_adjustment();
  45.193 +};
  45.194 +
  45.195 +/*
  45.196 + * This class is for collecting malloc statistics at summary level
  45.197 + */
  45.198 +class MallocMemorySummary : AllStatic {
  45.199 + private:
  45.200 +  // Reserve memory for placement of MallocMemorySnapshot object
  45.201 +  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
  45.202 +
  45.203 + public:
  45.204 +   static void initialize();
  45.205 +
  45.206 +   static inline void record_malloc(size_t size, MEMFLAGS flag) {
  45.207 +     as_snapshot()->by_type(flag)->record_malloc(size);
  45.208 +   }
  45.209 +
  45.210 +   static inline void record_free(size_t size, MEMFLAGS flag) {
  45.211 +     as_snapshot()->by_type(flag)->record_free(size);
  45.212 +   }
  45.213 +
  45.214 +   static inline void record_new_arena(MEMFLAGS flag) {
  45.215 +     as_snapshot()->by_type(flag)->record_new_arena();
  45.216 +   }
  45.217 +
  45.218 +   static inline void record_arena_free(MEMFLAGS flag) {
  45.219 +     as_snapshot()->by_type(flag)->record_arena_free();
  45.220 +   }
  45.221 +
  45.222 +   static inline void record_arena_size_change(long size, MEMFLAGS flag) {
  45.223 +     as_snapshot()->by_type(flag)->record_arena_size_change(size);
  45.224 +   }
  45.225 +
  45.226 +   static void snapshot(MallocMemorySnapshot* s) {
  45.227 +     as_snapshot()->copy_to(s);
  45.228 +     s->make_adjustment();
  45.229 +   }
  45.230 +
  45.231 +   // Record memory used by malloc tracking header
  45.232 +   static inline void record_new_malloc_header(size_t sz) {
  45.233 +     as_snapshot()->malloc_overhead()->allocate(sz);
  45.234 +   }
  45.235 +
  45.236 +   static inline void record_free_malloc_header(size_t sz) {
  45.237 +     as_snapshot()->malloc_overhead()->deallocate(sz);
  45.238 +   }
  45.239 +
  45.240 +   // The memory used by malloc tracking headers
  45.241 +   static inline size_t tracking_overhead() {
  45.242 +     return as_snapshot()->malloc_overhead()->size();
  45.243 +   }
  45.244 +
  45.245 +   // Reset all counters to zero
  45.246 +   static void reset() {
  45.247 +     as_snapshot()->reset();
  45.248 +   }
  45.249 +
  45.250 +  static MallocMemorySnapshot* as_snapshot() {
  45.251 +    return (MallocMemorySnapshot*)_snapshot;
  45.252 +  }
  45.253 +};
  45.254 +
  45.255 +
  45.256 +/*
  45.257 + * Malloc tracking header.
  45.258 + * To satisfy the malloc alignment requirement, NMT uses 2 machine words for tracking purposes,
  45.259 + * which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on 64-bit systems (Product build).
  45.260 + */
  45.261 +
  45.262 +class MallocHeader VALUE_OBJ_CLASS_SPEC {
  45.263 +#ifdef _LP64
  45.264 +  size_t           _size      : 62;
  45.265 +  size_t           _level     : 2;
  45.266 +  size_t           _flags     : 8;
  45.267 +  size_t           _pos_idx   : 16;
  45.268 +  size_t           _bucket_idx: 40;
  45.269 +#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
  45.270 +#define MAX_BUCKET_LENGTH         ((size_t)(1 << 16))
  45.271 +#define MAX_MALLOC_SIZE           (((size_t)1 << 62) - 1)
  45.272 +#else
  45.273 +  size_t           _size      : 30;
  45.274 +  size_t           _level     : 2;
  45.275 +  size_t           _flags     : 8;
  45.276 +  size_t           _pos_idx   : 8;
  45.277 +  size_t           _bucket_idx: 16;
  45.278 +#define MAX_MALLOCSITE_TABLE_SIZE  ((size_t)(1 << 16))
  45.279 +#define MAX_BUCKET_LENGTH          ((size_t)(1 << 8))
  45.280 +// Max malloc size = 1GB - 1 on a 32-bit system, which has 4GB total memory
  45.281 +#define MAX_MALLOC_SIZE            ((size_t)(1 << 30) - 1)
  45.282 +#endif  // _LP64
  45.283 +
  45.284 + public:
  45.285 +  // Summary tracking header
  45.286 +  MallocHeader(size_t size, MEMFLAGS flags) {
  45.287 +    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
  45.288 +      "Wrong header size");
  45.289 +
  45.290 +    _level = NMT_summary;
  45.291 +    _flags = flags;
  45.292 +    set_size(size);
  45.293 +    MallocMemorySummary::record_malloc(size, flags);
  45.294 +    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  45.295 +  }
  45.296 +  // Detail tracking header
  45.297 +  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
  45.298 +    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
  45.299 +      "Wrong header size");
  45.300 +
  45.301 +    _level = NMT_detail;
  45.302 +    _flags = flags;
  45.303 +    set_size(size);
  45.304 +    size_t bucket_idx;
  45.305 +    size_t pos_idx;
  45.306 +    if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
  45.307 +      assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
  45.308 +      assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
  45.309 +      _bucket_idx = bucket_idx;
  45.310 +      _pos_idx = pos_idx;
  45.311 +    }
  45.312 +    MallocMemorySummary::record_malloc(size, flags);
  45.313 +    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  45.314 +  }
  45.315 +  // Minimal tracking header
  45.316 +  MallocHeader() {
  45.317 +    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
  45.318 +      "Wrong header size");
  45.319 +
  45.320 +    _level = (unsigned short)NMT_minimal;
  45.321 +  }
  45.322 +
  45.323 +  inline NMT_TrackingLevel tracking_level() const {
  45.324 +    return (NMT_TrackingLevel)_level;
  45.325 +  }
  45.326 +
  45.327 +  inline size_t   size()  const { return _size; }
  45.328 +  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  45.329 +  bool get_stack(NativeCallStack& stack) const;
  45.330 +
  45.331 +  // Cleanup tracking information before the memory is released.
  45.332 +  void release() const;
  45.333 +
  45.334 + private:
  45.335 +  inline void set_size(size_t size) {
  45.336 +    assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
  45.337 +    _size = size;
  45.338 +  }
  45.339 +  bool record_malloc_site(const NativeCallStack& stack, size_t size,
  45.340 +    size_t* bucket_idx, size_t* pos_idx) const;
  45.341 +};
  45.342 +
  45.343 +
  45.344 +// Main class called from MemTracker to track malloc activities
  45.345 +class MallocTracker : AllStatic {
  45.346 + public:
  45.347 +  // Initialize malloc tracker for specific tracking level
  45.348 +  static bool initialize(NMT_TrackingLevel level);
  45.349 +
  45.350 +  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
  45.351 +
  45.352 +  // malloc tracking header size for specific tracking level
  45.353 +  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
  45.354 +    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  45.355 +  }
  45.356 +
  45.357 +  // Parameter name convention:
  45.358 +  // memblock :   the beginning address for user data
  45.359 +  // malloc_base: the beginning address that includes malloc tracking header
  45.360 +  //
  45.361 +  // The relationship:
  45.362 +  // memblock = (char*)malloc_base + sizeof(nmt header)
  45.363 +  //
  45.364 +
  45.365 +  // Record malloc on the specified memory block
  45.366 +  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
  45.367 +    const NativeCallStack& stack, NMT_TrackingLevel level);
  45.368 +
  45.369 +  // Record free on specified memory block
  45.370 +  static void* record_free(void* memblock);
  45.371 +
  45.372 +  // Get tracking level of specified memory block
  45.373 +  static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
  45.374 +
  45.375 +
  45.376 +  // Offset memory address to header address
  45.377 +  static inline void* get_base(void* memblock);
  45.378 +  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
  45.379 +    if (memblock == NULL || level == NMT_off) return memblock;
  45.380 +    return (char*)memblock - malloc_header_size(level);
  45.381 +  }
  45.382 +
  45.383 +  // Get memory size
  45.384 +  static inline size_t get_size(void* memblock) {
  45.385 +    MallocHeader* header = malloc_header(memblock);
  45.386 +    assert(header->tracking_level() >= NMT_summary,
  45.387 +      "Wrong tracking level");
  45.388 +    return header->size();
  45.389 +  }
  45.390 +
  45.391 +  // Get memory type
  45.392 +  static inline MEMFLAGS get_flags(void* memblock) {
  45.393 +    MallocHeader* header = malloc_header(memblock);
  45.394 +    assert(header->tracking_level() >= NMT_summary,
  45.395 +      "Wrong tracking level");
  45.396 +    return header->flags();
  45.397 +  }
  45.398 +
  45.399 +  // Get header size
  45.400 +  static inline size_t get_header_size(void* memblock) {
  45.401 +    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  45.402 +  }
  45.403 +
  45.404 +  static inline void record_new_arena(MEMFLAGS flags) {
  45.405 +    MallocMemorySummary::record_new_arena(flags);
  45.406 +  }
  45.407 +
  45.408 +  static inline void record_arena_free(MEMFLAGS flags) {
  45.409 +    MallocMemorySummary::record_arena_free(flags);
  45.410 +  }
  45.411 +
  45.412 +  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
  45.413 +    MallocMemorySummary::record_arena_size_change(size, flags);
  45.414 +  }
  45.415 + private:
  45.416 +  static inline MallocHeader* malloc_header(void *memblock) {
  45.417 +    assert(memblock != NULL, "NULL pointer");
  45.418 +    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
  45.419 +    assert(header->tracking_level() >= NMT_minimal, "Bad header");
  45.420 +    return header;
  45.421 +  }
  45.422 +};
  45.423 +
  45.424 +#endif // INCLUDE_NMT
  45.425 +
  45.426 +
  45.427 +#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
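
The MallocHeader bit-field widths above are chosen so that the header always occupies exactly two machine words, matching the sizeof(void*) * 2 assertion in each constructor:

    _LP64:  62 (_size) + 2 (_level) + 8 (_flags) + 16 (_pos_idx) + 40 (_bucket_idx) = 128 bits = 2 x 64-bit words
    32-bit: 30 (_size) + 2 (_level) + 8 (_flags) +  8 (_pos_idx) + 16 (_bucket_idx) =  64 bits = 2 x 32-bit words

The MAX_* limits simply mirror those widths, e.g. MAX_MALLOC_SIZE is 2^62 - 1 on 64-bit and 2^30 - 1 (1GB - 1) on 32-bit.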
    46.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    46.2 +++ b/src/share/vm/services/mallocTracker.inline.hpp	Wed Aug 27 08:19:12 2014 -0400
    46.3 @@ -0,0 +1,43 @@
    46.4 +/*
    46.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    46.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.7 + *
    46.8 + * This code is free software; you can redistribute it and/or modify it
    46.9 + * under the terms of the GNU General Public License version 2 only, as
   46.10 + * published by the Free Software Foundation.
   46.11 + *
   46.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   46.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   46.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   46.15 + * version 2 for more details (a copy is included in the LICENSE file that
   46.16 + * accompanied this code).
   46.17 + *
   46.18 + * You should have received a copy of the GNU General Public License version
   46.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   46.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   46.21 + *
   46.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   46.23 + * or visit www.oracle.com if you need additional information or have any
   46.24 + * questions.
   46.25 + *
   46.26 + */
   46.27 +
   46.28 +#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
   46.29 +#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
   46.30 +
   46.31 +#include "services/mallocTracker.hpp"
   46.32 +#include "services/memTracker.hpp"
   46.33 +
   46.34 +inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
   46.35 +  assert(memblock != NULL, "Sanity check");
   46.36 +  if (MemTracker::tracking_level() == NMT_off) return NMT_off;
   46.37 +  MallocHeader* header = malloc_header(memblock);
   46.38 +  return header->tracking_level();
   46.39 +}
   46.40 +
   46.41 +inline void* MallocTracker::get_base(void* memblock){
   46.42 +  return get_base(memblock, MemTracker::tracking_level());
   46.43 +}
   46.44 +
   46.45 +#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
   46.46 +
    47.1 --- a/src/share/vm/services/memBaseline.cpp	Wed Aug 27 09:36:55 2014 +0200
    47.2 +++ b/src/share/vm/services/memBaseline.cpp	Wed Aug 27 08:19:12 2014 -0400
    47.3 @@ -1,5 +1,5 @@
    47.4  /*
    47.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    47.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    47.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.8   *
    47.9   * This code is free software; you can redistribute it and/or modify it
   47.10 @@ -22,471 +22,301 @@
   47.11   *
   47.12   */
   47.13  #include "precompiled.hpp"
   47.14 +
   47.15  #include "memory/allocation.hpp"
   47.16  #include "runtime/safepoint.hpp"
   47.17  #include "runtime/thread.inline.hpp"
   47.18  #include "services/memBaseline.hpp"
   47.19  #include "services/memTracker.hpp"
   47.20  
   47.21 -
   47.22 -MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
   47.23 -  {mtJavaHeap,   "Java Heap"},
   47.24 -  {mtClass,      "Class"},
   47.25 -  {mtThreadStack,"Thread Stack"},
   47.26 -  {mtThread,     "Thread"},
   47.27 -  {mtCode,       "Code"},
   47.28 -  {mtGC,         "GC"},
   47.29 -  {mtCompiler,   "Compiler"},
   47.30 -  {mtInternal,   "Internal"},
   47.31 -  {mtOther,      "Other"},
   47.32 -  {mtSymbol,     "Symbol"},
   47.33 -  {mtNMT,        "Memory Tracking"},
   47.34 -  {mtTracing,    "Tracing"},
   47.35 -  {mtChunk,      "Pooled Free Chunks"},
   47.36 -  {mtClassShared,"Shared spaces for classes"},
   47.37 -  {mtTest,       "Test"},
   47.38 -  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
   47.39 -                             // behind
   47.40 -};
   47.41 -
   47.42 -MemBaseline::MemBaseline() {
   47.43 -  _baselined = false;
   47.44 -
   47.45 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   47.46 -    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
   47.47 -    _vm_data[index].set_type(MemType2NameMap[index]._flag);
   47.48 -    _arena_data[index].set_type(MemType2NameMap[index]._flag);
   47.49 -  }
   47.50 -
   47.51 -  _malloc_cs = NULL;
   47.52 -  _vm_cs = NULL;
   47.53 -  _vm_map = NULL;
   47.54 -
   47.55 -  _number_of_classes = 0;
   47.56 -  _number_of_threads = 0;
   47.57 -}
   47.58 -
   47.59 -
   47.60 -void MemBaseline::clear() {
   47.61 -  if (_malloc_cs != NULL) {
   47.62 -    delete _malloc_cs;
   47.63 -    _malloc_cs = NULL;
   47.64 -  }
   47.65 -
   47.66 -  if (_vm_cs != NULL) {
   47.67 -    delete _vm_cs;
   47.68 -    _vm_cs = NULL;
   47.69 -  }
   47.70 -
   47.71 -  if (_vm_map != NULL) {
   47.72 -    delete _vm_map;
   47.73 -    _vm_map = NULL;
   47.74 -  }
   47.75 -
   47.76 -  reset();
   47.77 -}
   47.78 -
   47.79 -
   47.80 -void MemBaseline::reset() {
   47.81 -  _baselined = false;
   47.82 -  _total_vm_reserved = 0;
   47.83 -  _total_vm_committed = 0;
   47.84 -  _total_malloced = 0;
   47.85 -  _number_of_classes = 0;
   47.86 -
   47.87 -  if (_malloc_cs != NULL) _malloc_cs->clear();
   47.88 -  if (_vm_cs != NULL) _vm_cs->clear();
   47.89 -  if (_vm_map != NULL) _vm_map->clear();
   47.90 -
   47.91 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   47.92 -    _malloc_data[index].clear();
   47.93 -    _vm_data[index].clear();
   47.94 -    _arena_data[index].clear();
   47.95 +/*
   47.96 + * Sizes are sorted in descending order for reporting
   47.97 + */
   47.98 +int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
   47.99 +  if (s1.size() == s2.size()) {
  47.100 +    return 0;
  47.101 +  } else if (s1.size() > s2.size()) {
  47.102 +    return -1;
  47.103 +  } else {
  47.104 +    return 1;
  47.105    }
  47.106  }
  47.107  
  47.108 -MemBaseline::~MemBaseline() {
  47.109 -  clear();
  47.110 +
  47.111 +int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
  47.112 +  const VirtualMemoryAllocationSite& s2) {
  47.113 +  if (s1.reserved() == s2.reserved()) {
  47.114 +    return 0;
  47.115 +  } else if (s1.reserved() > s2.reserved()) {
  47.116 +    return -1;
  47.117 +  } else {
  47.118 +    return 1;
  47.119 +  }
  47.120  }
  47.121  
  47.122 -// baseline malloc'd memory records, generate overall summary and summaries by
  47.123 -// memory types
  47.124 -bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  47.125 -  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  47.126 -  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  47.127 -  size_t used_arena_size = 0;
  47.128 -  int index;
  47.129 -  while (malloc_ptr != NULL) {
  47.130 -    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
  47.131 -    size_t size = malloc_ptr->size();
  47.132 -    if (malloc_ptr->is_arena_memory_record()) {
  47.133 -      // We do have anonymous arenas, they are either used as value objects,
  47.134 -      // which are embedded inside other objects, or used as stack objects.
  47.135 -      _arena_data[index].inc(size);
  47.136 -      used_arena_size += size;
  47.137 +// Sort into allocation site addresses order for baseline comparison
  47.138 +int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
  47.139 +  return s1.call_stack()->compare(*s2.call_stack());
  47.140 +}
  47.141 +
  47.142 +
  47.143 +int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
  47.144 +  const VirtualMemoryAllocationSite& s2) {
  47.145 +  return s1.call_stack()->compare(*s2.call_stack());
  47.146 +}
  47.147 +
  47.148 +/*
  47.149 + * Walker to walk malloc allocation site table
  47.150 + */
  47.151 +class MallocAllocationSiteWalker : public MallocSiteWalker {
  47.152 + private:
  47.153 +  SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
  47.154 +                 _malloc_sites;
  47.155 +  size_t         _count;
  47.156 +
  47.157 +  // Entries in MallocSiteTable with size = 0 and count = 0 indicate
  47.158 +  // that the malloc site is no longer there.
  47.159 + public:
  47.160 +  MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
  47.161 +  }
  47.162 +
  47.163 +  inline size_t count() const { return _count; }
  47.164 +
  47.165 +  LinkedList<MallocSite>* malloc_sites() {
  47.166 +    return &_malloc_sites;
  47.167 +  }
  47.168 +
  47.169 +  bool do_malloc_site(const MallocSite* site) {
  47.170 +    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
  47.171 +      if (_malloc_sites.add(*site) != NULL) {
  47.172 +        _count++;
  47.173 +        return true;
  47.174 +      } else {
  47.175 +        return false;  // OOM
  47.176 +      }
  47.177      } else {
  47.178 -      _total_malloced += size;
  47.179 -      _malloc_data[index].inc(size);
  47.180 -      if (malloc_ptr->is_arena_record()) {
  47.181 -        // see if arena memory record present
  47.182 -        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
  47.183 -        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
  47.184 -          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
  47.185 -             "Arena records do not match");
  47.186 -          size = next_malloc_ptr->size();
  47.187 -          _arena_data[index].inc(size);
  47.188 -          used_arena_size += size;
  47.189 -          malloc_itr.next();
  47.190 -        }
  47.191 +      // malloc site does not meet threshold, ignore and continue
  47.192 +      return true;
  47.193 +    }
  47.194 +  }
  47.195 +};
  47.196 +
  47.197 +// Compare virtual memory region's base address
  47.198 +int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  47.199 +  return r1.compare(r2);
  47.200 +}
  47.201 +
  47.202 +// Walk all virtual memory regions for baselining
  47.203 +class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
  47.204 + private:
  47.205 +  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
  47.206 +                _virtual_memory_regions;
  47.207 +  size_t        _count;
  47.208 +
  47.209 + public:
  47.210 +  VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
  47.211 +  }
  47.212 +
  47.213 +  bool do_allocation_site(const ReservedMemoryRegion* rgn)  {
  47.214 +    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
  47.215 +      if (_virtual_memory_regions.add(*rgn) != NULL) {
  47.216 +        _count ++;
  47.217 +        return true;
  47.218 +      } else {
  47.219 +        return false;
  47.220        }
  47.221      }
  47.222 -    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  47.223 +    return true;
  47.224    }
  47.225  
  47.226 -  // substract used arena size to get size of arena chunk in free list
  47.227 -  index = flag2index(mtChunk);
  47.228 -  _malloc_data[index].reduce(used_arena_size);
  47.229 -  // we really don't know how many chunks in free list, so just set to
  47.230 -  // 0
  47.231 -  _malloc_data[index].overwrite_counter(0);
  47.232 +  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
  47.233 +    return &_virtual_memory_regions;
  47.234 +  }
  47.235 +};
  47.236 +
  47.237 +
  47.238 +bool MemBaseline::baseline_summary() {
  47.239 +  assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
  47.240 +  assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
  47.241 +
  47.242 +  _malloc_memory_snapshot =  new (arena()) MallocMemorySnapshot();
  47.243 +  _virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
  47.244 +  if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
  47.245 +    return false;
  47.246 +  }
  47.247 +  MallocMemorySummary::snapshot(_malloc_memory_snapshot);
  47.248 +  VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
  47.249 +  return true;
  47.250 +}
  47.251 +
  47.252 +bool MemBaseline::baseline_allocation_sites() {
  47.253 +  assert(arena() != NULL, "Just check");
  47.254 +  // Malloc allocation sites
  47.255 +  MallocAllocationSiteWalker malloc_walker(arena());
  47.256 +  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
  47.257 +    return false;
  47.258 +  }
  47.259 +
  47.260 +  _malloc_sites.set_head(malloc_walker.malloc_sites()->head());
  47.261 +  // The malloc sites are collected in size order
  47.262 +  _malloc_sites_order = by_size;
  47.263 +
  47.264 +  // Virtual memory allocation sites
  47.265 +  VirtualMemoryAllocationWalker virtual_memory_walker(arena());
  47.266 +  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
  47.267 +    return false;
  47.268 +  }
  47.269 +
  47.270 +  // Virtual memory allocations are collected in call stack order
  47.271 +  _virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
  47.272 +
  47.273 +  if (!aggregate_virtual_memory_allocation_sites()) {
  47.274 +    return false;
  47.275 +  }
  47.276 +  // Virtual memory allocation sites are aggregated in call stack order
  47.277 +  _virtual_memory_sites_order = by_address;
  47.278  
  47.279    return true;
  47.280  }
  47.281  
  47.282 -// check if there is a safepoint in progress, if so, block the thread
  47.283 -// for the safepoint
  47.284 -void MemBaseline::check_safepoint(JavaThread* thr) {
  47.285 -  if (SafepointSynchronize::is_synchronizing()) {
  47.286 -    // grab and drop the SR_lock to honor the safepoint protocol
  47.287 -    MutexLocker ml(thr->SR_lock());
  47.288 -  }
  47.289 -}
  47.290 -
  47.291 -// baseline mmap'd memory records, generate overall summary and summaries by
  47.292 -// memory types
  47.293 -bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  47.294 -  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  47.295 -  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  47.296 -  int index;
  47.297 -  while (vm_ptr != NULL) {
  47.298 -    if (vm_ptr->is_reserved_region()) {
  47.299 -      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
  47.300 -    // we use the number of thread stack to count threads
  47.301 -      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
  47.302 -      _number_of_threads ++;
  47.303 -    }
  47.304 -      _total_vm_reserved += vm_ptr->size();
  47.305 -      _vm_data[index].inc(vm_ptr->size(), 0);
  47.306 -    } else {
  47.307 -      _total_vm_committed += vm_ptr->size();
  47.308 -      _vm_data[index].inc(0, vm_ptr->size());
  47.309 -    }
  47.310 -    vm_ptr = (VMMemRegion*)vm_itr.next();
  47.311 -  }
  47.312 -  return true;
  47.313 -}
  47.314 -
  47.315 -// baseline malloc'd memory by callsites, but only the callsites with memory allocation
  47.316 -// over 1KB are stored.
  47.317 -bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  47.318 -  assert(MemTracker::track_callsite(), "detail tracking is off");
  47.319 -
  47.320 -  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  47.321 -  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  47.322 -  MallocCallsitePointer malloc_callsite;
  47.323 -
  47.324 -  // initailize malloc callsite array
  47.325 -  if (_malloc_cs == NULL) {
  47.326 -    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
  47.327 -    // out of native memory
  47.328 -    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
  47.329 -      return false;
  47.330 -    }
  47.331 -  } else {
  47.332 -    _malloc_cs->clear();
  47.333 +bool MemBaseline::baseline(bool summaryOnly) {
  47.334 +  if (arena() == NULL) {
  47.335 +    _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
  47.336 +    if (arena() == NULL) return false;
  47.337    }
  47.338  
  47.339 -  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
  47.340 +  reset();
  47.341  
  47.342 -  // sort into callsite pc order. Details are aggregated by callsites
  47.343 -  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  47.344 -  bool ret = true;
  47.345 +  _class_count = InstanceKlass::number_of_instance_classes();
  47.346  
  47.347 -  // baseline memory that is totaled over 1 KB
  47.348 -  while (malloc_ptr != NULL) {
  47.349 -    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
  47.350 -      // skip thread stacks
  47.351 -      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
  47.352 -        if (malloc_callsite.addr() != malloc_ptr->pc()) {
  47.353 -          if ((malloc_callsite.amount()/K) > 0) {
  47.354 -            if (!_malloc_cs->append(&malloc_callsite)) {
  47.355 -              ret = false;
  47.356 -              break;
  47.357 -            }
  47.358 -          }
  47.359 -          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
  47.360 -        }
  47.361 -        malloc_callsite.inc(malloc_ptr->size());
  47.362 -      }
  47.363 -    }
  47.364 -    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  47.365 -  }
  47.366 -
  47.367 -  // restore to address order. Snapshot malloc data is maintained in memory
  47.368 -  // address order.
  47.369 -  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
  47.370 -
  47.371 -  if (!ret) {
  47.372 -              return false;
  47.373 -            }
  47.374 -  // deal with last record
  47.375 -  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
  47.376 -    if (!_malloc_cs->append(&malloc_callsite)) {
  47.377 -      return false;
  47.378 -    }
  47.379 -  }
  47.380 -  return true;
  47.381 -}
  47.382 -
  47.383 -// baseline mmap'd memory by callsites
  47.384 -bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  47.385 -  assert(MemTracker::track_callsite(), "detail tracking is off");
  47.386 -
  47.387 -  VMCallsitePointer  vm_callsite;
  47.388 -  VMCallsitePointer* cur_callsite = NULL;
  47.389 -  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  47.390 -  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
  47.391 -
  47.392 -  // initialize virtual memory map array
  47.393 -  if (_vm_map == NULL) {
  47.394 -    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
  47.395 -   if (_vm_map == NULL || _vm_map->out_of_memory()) {
  47.396 -     return false;
  47.397 -   }
  47.398 -  } else {
  47.399 -    _vm_map->clear();
  47.400 -  }
  47.401 -
  47.402 -  // initialize virtual memory callsite array
  47.403 -  if (_vm_cs == NULL) {
  47.404 -    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
  47.405 -    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
  47.406 -      return false;
  47.407 -    }
  47.408 -  } else {
  47.409 -    _vm_cs->clear();
  47.410 -  }
  47.411 -
  47.412 -  // consolidate virtual memory data
  47.413 -  VMMemRegionEx*     reserved_rec = NULL;
  47.414 -  VMMemRegionEx*     committed_rec = NULL;
  47.415 -
  47.416 -  // vm_ptr is coming in increasing base address order
  47.417 -  while (vm_ptr != NULL) {
  47.418 -    if (vm_ptr->is_reserved_region()) {
  47.419 -      // consolidate reserved memory regions for virtual memory map.
  47.420 -      // The criteria for consolidation is:
  47.421 -      // 1. two adjacent reserved memory regions
  47.422 -      // 2. belong to the same memory type
  47.423 -      // 3. reserved from the same callsite
  47.424 -      if (reserved_rec == NULL ||
  47.425 -        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
  47.426 -        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
  47.427 -        reserved_rec->pc() != vm_ptr->pc()) {
  47.428 -        if (!_vm_map->append(vm_ptr)) {
  47.429 -        return false;
  47.430 -      }
  47.431 -        // inserted reserved region, we need the pointer to the element in virtual
  47.432 -        // memory map array.
  47.433 -        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
  47.434 -      } else {
  47.435 -        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
  47.436 -    }
  47.437 -
  47.438 -      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
  47.439 -      return false;
  47.440 -    }
  47.441 -      vm_callsite = VMCallsitePointer(vm_ptr->pc());
  47.442 -      cur_callsite = &vm_callsite;
  47.443 -      vm_callsite.inc(vm_ptr->size(), 0);
  47.444 -    } else {
  47.445 -      // consolidate committed memory regions for virtual memory map
  47.446 -      // The criterial is:
  47.447 -      // 1. two adjacent committed memory regions
  47.448 -      // 2. committed from the same callsite
  47.449 -      if (committed_rec == NULL ||
  47.450 -        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
  47.451 -        committed_rec->pc() != vm_ptr->pc()) {
  47.452 -        if (!_vm_map->append(vm_ptr)) {
  47.453 -          return false;
  47.454 -        }
  47.455 -        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
  47.456 -    } else {
  47.457 -        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
  47.458 -      }
  47.459 -      vm_callsite.inc(0, vm_ptr->size());
  47.460 -    }
  47.461 -    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  47.462 -  }
  47.463 -  // deal with last record
  47.464 -  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
  47.465 +  if (!baseline_summary()) {
  47.466      return false;
  47.467    }
  47.468  
  47.469 -  // sort it into callsite pc order. Details are aggregated by callsites
  47.470 -  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
  47.471 +  _baseline_type = Summary_baselined;
  47.472  
  47.473 -  // walk the array to consolidate record by pc
  47.474 -  MemPointerArrayIteratorImpl itr(_vm_cs);
  47.475 -  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  47.476 -  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  47.477 -  while (next_rec != NULL) {
  47.478 -    assert(callsite_rec != NULL, "Sanity check");
  47.479 -    if (next_rec->addr() == callsite_rec->addr()) {
  47.480 -      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
  47.481 -      itr.remove();
  47.482 -      next_rec = (VMCallsitePointer*)itr.current();
  47.483 -    } else {
  47.484 -      callsite_rec = next_rec;
  47.485 -      next_rec = (VMCallsitePointer*)itr.next();
  47.486 -    }
  47.487 +  // baseline details
  47.488 +  if (!summaryOnly &&
  47.489 +      MemTracker::tracking_level() == NMT_detail) {
  47.490 +    baseline_allocation_sites();
  47.491 +    _baseline_type = Detail_baselined;
  47.492    }
  47.493  
  47.494    return true;
  47.495  }
  47.496  
  47.497 -// baseline a snapshot. If summary_only = false, memory usages aggregated by
  47.498 -// callsites are also baselined.
  47.499 -// The method call can be lengthy, especially when detail tracking info is
  47.500 -// requested. So the method checks for safepoint explicitly.
  47.501 -bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  47.502 -  Thread* THREAD = Thread::current();
  47.503 -  assert(THREAD->is_Java_thread(), "must be a JavaThread");
  47.504 -  MutexLocker snapshot_locker(snapshot._lock);
  47.505 -  reset();
  47.506 -  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  47.507 -  if (_baselined) {
  47.508 -    check_safepoint((JavaThread*)THREAD);
  47.509 -    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  47.510 -  }
  47.511 -  _number_of_classes = snapshot.number_of_classes();
  47.512 -
  47.513 -  if (!summary_only && MemTracker::track_callsite() && _baselined) {
  47.514 -    check_safepoint((JavaThread*)THREAD);
  47.515 -    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
  47.516 -    if (_baselined) {
  47.517 -      check_safepoint((JavaThread*)THREAD);
  47.518 -      _baselined =  baseline_vm_details(snapshot._vm_ptrs);
  47.519 -    }
  47.520 -  }
  47.521 -  return _baselined;
  47.522 +int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
  47.523 +  const VirtualMemoryAllocationSite& s2) {
  47.524 +  return s1.call_stack()->compare(*s2.call_stack());
  47.525  }
  47.526  
  47.527 +bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
  47.528 +  SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
  47.529 +    allocation_sites(arena());
  47.530  
  47.531 -int MemBaseline::flag2index(MEMFLAGS flag) const {
  47.532 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
  47.533 -    if (MemType2NameMap[index]._flag == flag) {
  47.534 -      return index;
  47.535 +  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
  47.536 +  const ReservedMemoryRegion* rgn;
  47.537 +  VirtualMemoryAllocationSite* site;
  47.538 +  while ((rgn = itr.next()) != NULL) {
  47.539 +    VirtualMemoryAllocationSite tmp(*rgn->call_stack());
  47.540 +    site = allocation_sites.find(tmp);
  47.541 +    if (site == NULL) {
  47.542 +      LinkedListNode<VirtualMemoryAllocationSite>* node =
  47.543 +        allocation_sites.add(tmp);
  47.544 +      if (node == NULL) return false;
  47.545 +      site = node->data();
  47.546      }
  47.547 +    site->reserve_memory(rgn->size());
  47.548 +    site->commit_memory(rgn->committed_size());
  47.549    }
  47.550 -  assert(false, "no type");
  47.551 -  return -1;
  47.552 +
  47.553 +  _virtual_memory_sites.set_head(allocation_sites.head());
  47.554 +  return true;
  47.555  }
  47.556  
  47.557 -const char* MemBaseline::type2name(MEMFLAGS type) {
  47.558 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
  47.559 -    if (MemType2NameMap[index]._flag == type) {
  47.560 -      return MemType2NameMap[index]._name;
  47.561 -    }
  47.562 +MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
  47.563 +  assert(!_malloc_sites.is_empty(), "Detail baseline?");
  47.564 +  switch(order) {
  47.565 +    case by_size:
  47.566 +      malloc_sites_to_size_order();
  47.567 +      break;
  47.568 +    case by_site:
  47.569 +      malloc_sites_to_allocation_site_order();
  47.570 +      break;
  47.571 +    case by_address:
  47.572 +    default:
  47.573 +      ShouldNotReachHere();
  47.574    }
  47.575 -  assert(false, err_msg("bad type %x", type));
  47.576 -  return NULL;
  47.577 +  return MallocSiteIterator(_malloc_sites.head());
  47.578  }
  47.579  
  47.580 -
  47.581 -MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  47.582 -  _total_malloced = other._total_malloced;
  47.583 -  _total_vm_reserved = other._total_vm_reserved;
  47.584 -  _total_vm_committed = other._total_vm_committed;
  47.585 -
  47.586 -  _baselined = other._baselined;
  47.587 -  _number_of_classes = other._number_of_classes;
  47.588 -
  47.589 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
  47.590 -    _malloc_data[index] = other._malloc_data[index];
  47.591 -    _vm_data[index] = other._vm_data[index];
  47.592 -    _arena_data[index] = other._arena_data[index];
  47.593 +VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
  47.594 +  assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
  47.595 +  switch(order) {
  47.596 +    case by_size:
  47.597 +      virtual_memory_sites_to_size_order();
  47.598 +      break;
  47.599 +    case by_site:
  47.600 +      virtual_memory_sites_to_reservation_site_order();
  47.601 +      break;
  47.602 +    case by_address:
  47.603 +    default:
  47.604 +      ShouldNotReachHere();
  47.605    }
  47.606 -
  47.607 -  if (MemTracker::track_callsite()) {
  47.608 -    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
  47.609 -    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
  47.610 -           "not properly baselined");
  47.611 -    _malloc_cs->clear();
  47.612 -    _vm_cs->clear();
  47.613 -    int index;
  47.614 -    for (index = 0; index < other._malloc_cs->length(); index ++) {
  47.615 -      _malloc_cs->append(other._malloc_cs->at(index));
  47.616 -    }
  47.617 -
  47.618 -    for (index = 0; index < other._vm_cs->length(); index ++) {
  47.619 -      _vm_cs->append(other._vm_cs->at(index));
  47.620 -    }
  47.621 -  }
  47.622 -  return *this;
  47.623 +  return VirtualMemorySiteIterator(_virtual_memory_sites.head());
  47.624  }
  47.625  
  47.626 -/* compare functions for sorting */
  47.627  
  47.628 -// sort snapshot malloc'd records in callsite pc order
  47.629 -int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  47.630 -  assert(MemTracker::track_callsite(),"Just check");
  47.631 -  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  47.632 -  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  47.633 -  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
  47.634 +// Sorting allocation sites in different orders
  47.635 +void MemBaseline::malloc_sites_to_size_order() {
  47.636 +  if (_malloc_sites_order != by_size) {
  47.637 +    SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
  47.638 +      tmp(arena());
  47.639 +
  47.640 +    // Add malloc sites to sorted linked list to sort into size order
  47.641 +    tmp.move(&_malloc_sites);
  47.642 +    _malloc_sites.set_head(tmp.head());
  47.643 +    tmp.set_head(NULL);
  47.644 +    _malloc_sites_order = by_size;
  47.645 +  }
  47.646  }
  47.647  
  47.648 -// sort baselined malloc'd records in size order
  47.649 -int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  47.650 -  assert(MemTracker::is_on(), "Just check");
  47.651 -  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  47.652 -  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  47.653 -  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
  47.654 +void MemBaseline::malloc_sites_to_allocation_site_order() {
  47.655 +  if (_malloc_sites_order != by_site) {
  47.656 +    SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
  47.657 +      tmp(arena());
  47.658 +    // Add malloc sites to sorted linked list to sort into site (address) order
  47.659 +    tmp.move(&_malloc_sites);
  47.660 +    _malloc_sites.set_head(tmp.head());
  47.661 +    tmp.set_head(NULL);
  47.662 +    _malloc_sites_order = by_site;
  47.663 +  }
  47.664  }
  47.665  
  47.666 -// sort baselined malloc'd records in callsite pc order
  47.667 -int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  47.668 -  assert(MemTracker::is_on(), "Just check");
  47.669 -  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  47.670 -  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  47.671 -  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  47.672 +void MemBaseline::virtual_memory_sites_to_size_order() {
  47.673 +  if (_virtual_memory_sites_order != by_size) {
  47.674 +    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
  47.675 +      tmp(arena());
  47.676 +
  47.677 +    tmp.move(&_virtual_memory_sites);
  47.678 +
  47.679 +    _virtual_memory_sites.set_head(tmp.head());
  47.680 +    tmp.set_head(NULL);
  47.681 +    _virtual_memory_sites_order = by_size;
  47.682 +  }
  47.683  }
  47.684  
  47.685 +void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
  47.686 +  if (_virtual_memory_sites_order != by_site) {
  47.687 +    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
  47.688 +      tmp(arena());
  47.689  
  47.690 -// sort baselined mmap'd records in size (reserved size) order
  47.691 -int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  47.692 -  assert(MemTracker::is_on(), "Just check");
  47.693 -  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  47.694 -  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  47.695 -  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
  47.696 +    tmp.add(&_virtual_memory_sites);
  47.697 +
  47.698 +    _virtual_memory_sites.set_head(tmp.head());
  47.699 +    tmp.set_head(NULL);
  47.700 +
  47.701 +    _virtual_memory_sites_order = by_site;
  47.702 +  }
  47.703  }
  47.704  
  47.705 -// sort baselined mmap'd records in callsite pc order
  47.706 -int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  47.707 -  assert(MemTracker::is_on(), "Just check");
  47.708 -  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  47.709 -  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  47.710 -  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  47.711 -}
  47.712 -
  47.713 -
  47.714 -// sort snapshot malloc'd records in memory block address order
  47.715 -int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  47.716 -  assert(MemTracker::is_on(), "Just check");
  47.717 -  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  47.718 -  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  47.719 -  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  47.720 -  assert(p1 == p2 || delta != 0, "dup pointer");
  47.721 -  return delta;
  47.722 -}
  47.723 -
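
For context, here is a minimal, hypothetical sketch of how a caller might drive the reworked MemBaseline shown above. The function name report_detail_baseline and the output formatting are invented for illustration; only the MemBaseline, MallocSiteIterator and MallocSite API introduced in this change is assumed, and the sketch is not part of the changeset.

    // Hypothetical usage sketch (illustration only, not part of this change).
    void report_detail_baseline(outputStream* out) {
      MemBaseline baseline;
      // Request call-site detail as well as the summary (summaryOnly = false).
      if (!baseline.baseline(false)) {
        out->print_cr("NMT baseline failed");
        return;
      }
      if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
        // Walk malloc call sites, largest allocations first.
        MallocSiteIterator itr = baseline.malloc_sites(MemBaseline::by_size);
        const MallocSite* site;
        while ((site = itr.next()) != NULL) {
          out->print_cr(SIZE_FORMAT " bytes from " SIZE_FORMAT " allocations",
                        site->size(), site->count());
        }
      }
    }
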
    48.1 --- a/src/share/vm/services/memBaseline.hpp	Wed Aug 27 09:36:55 2014 +0200
    48.2 +++ b/src/share/vm/services/memBaseline.hpp	Wed Aug 27 08:19:12 2014 -0400
    48.3 @@ -1,5 +1,5 @@
    48.4  /*
    48.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    48.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    48.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    48.8   *
    48.9   * This code is free software; you can redistribute it and/or modify it
   48.10 @@ -25,425 +25,205 @@
   48.11  #ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
   48.12  #define SHARE_VM_SERVICES_MEM_BASELINE_HPP
   48.13  
   48.14 +#if INCLUDE_NMT
   48.15 +
   48.16  #include "memory/allocation.hpp"
   48.17  #include "runtime/mutex.hpp"
   48.18 -#include "services/memPtr.hpp"
   48.19 -#include "services/memSnapshot.hpp"
   48.20 +#include "services/mallocSiteTable.hpp"
   48.21 +#include "services/mallocTracker.hpp"
   48.22 +#include "services/nmtCommon.hpp"
   48.23 +#include "services/virtualMemoryTracker.hpp"
   48.24 +#include "utilities/linkedlist.hpp"
   48.25  
   48.26 -// compare unsigned number
   48.27 -#define UNSIGNED_COMPARE(a, b)  ((a > b) ? 1 : ((a == b) ? 0 : -1))
   48.28 +typedef LinkedListIterator<MallocSite>                   MallocSiteIterator;
   48.29 +typedef LinkedListIterator<VirtualMemoryAllocationSite>  VirtualMemorySiteIterator;
   48.30 +typedef LinkedListIterator<ReservedMemoryRegion>         VirtualMemoryAllocationIterator;
   48.31  
   48.32  /*
   48.33 - * MallocCallsitePointer and VMCallsitePointer are used
   48.34 - * to baseline memory blocks with their callsite information.
   48.35 - * They are only available when detail tracking is turned
   48.36 - * on.
   48.37 + * Baseline a memory snapshot
   48.38   */
   48.39 -
   48.40 -/* baselined malloc record aggregated by callsite */
   48.41 -class MallocCallsitePointer : public MemPointer {
   48.42 - private:
   48.43 -  size_t    _count;   // number of malloc invocation from this callsite
   48.44 -  size_t    _amount;  // total amount of memory malloc-ed from this callsite
   48.45 -
   48.46 +class MemBaseline VALUE_OBJ_CLASS_SPEC {
   48.47   public:
   48.48 -  MallocCallsitePointer() {
   48.49 -    _count = 0;
   48.50 -    _amount = 0;
   48.51 -  }
   48.52 -
   48.53 -  MallocCallsitePointer(address pc) : MemPointer(pc) {
   48.54 -    _count = 0;
   48.55 -    _amount = 0;
   48.56 -  }
   48.57 -
   48.58 -  MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
   48.59 -    MemPointer::operator=(p);
   48.60 -    _count = p.count();
   48.61 -    _amount = p.amount();
   48.62 -    return *this;
   48.63 -  }
   48.64 -
   48.65 -  inline void inc(size_t size) {
   48.66 -    _count ++;
   48.67 -    _amount += size;
   48.68 +  enum BaselineThreshold {
   48.69 +    SIZE_THRESHOLD = K        // Only allocation size over this threshold will be baselined.
   48.70    };
   48.71  
   48.72 -  inline size_t count() const {
   48.73 -    return _count;
   48.74 -  }
   48.75 +  enum BaselineType {
   48.76 +    Not_baselined,
   48.77 +    Summary_baselined,
   48.78 +    Detail_baselined
   48.79 +  };
   48.80  
   48.81 -  inline size_t amount() const {
   48.82 -    return _amount;
   48.83 -  }
   48.84 -};
   48.85 -
   48.86 -// baselined virtual memory record aggregated by callsite
   48.87 -class VMCallsitePointer : public MemPointer {
   48.88 - private:
   48.89 -  size_t     _count;              // number of invocation from this callsite
   48.90 -  size_t     _reserved_amount;    // total reserved amount
   48.91 -  size_t     _committed_amount;   // total committed amount
   48.92 -
   48.93 - public:
   48.94 -  VMCallsitePointer() {
   48.95 -    _count = 0;
   48.96 -    _reserved_amount = 0;
   48.97 -    _committed_amount = 0;
   48.98 -  }
   48.99 -
  48.100 -  VMCallsitePointer(address pc) : MemPointer(pc) {
  48.101 -    _count = 0;
  48.102 -    _reserved_amount = 0;
  48.103 -    _committed_amount = 0;
  48.104 -  }
  48.105 -
  48.106 -  VMCallsitePointer& operator=(const VMCallsitePointer& p) {
  48.107 -    MemPointer::operator=(p);
  48.108 -    _count = p.count();
  48.109 -    _reserved_amount = p.reserved_amount();
  48.110 -    _committed_amount = p.committed_amount();
  48.111 -    return *this;
  48.112 -  }
  48.113 -
  48.114 -  inline void inc(size_t reserved, size_t committed) {
  48.115 -    _count ++;
  48.116 -    _reserved_amount += reserved;
  48.117 -    _committed_amount += committed;
  48.118 -  }
  48.119 -
  48.120 -  inline size_t count() const {
  48.121 -    return _count;
  48.122 -  }
  48.123 -
  48.124 -  inline size_t reserved_amount() const {
  48.125 -    return _reserved_amount;
  48.126 -  }
  48.127 -
  48.128 -  inline size_t committed_amount() const {
  48.129 -    return _committed_amount;
  48.130 -  }
  48.131 -};
  48.132 -
  48.133 -// maps a memory type flag to readable name
  48.134 -typedef struct _memType2Name {
  48.135 -  MEMFLAGS     _flag;
  48.136 -  const char*  _name;
  48.137 -} MemType2Name;
  48.138 -
  48.139 -
  48.140 -// This class aggregates malloc'd records by memory type
  48.141 -class MallocMem VALUE_OBJ_CLASS_SPEC {
  48.142 - private:
  48.143 -  MEMFLAGS       _type;
  48.144 -
  48.145 -  size_t         _count;
  48.146 -  size_t         _amount;
  48.147 -
  48.148 - public:
  48.149 -  MallocMem() {
  48.150 -    _type = mtNone;
  48.151 -    _count = 0;
  48.152 -    _amount = 0;
  48.153 -  }
  48.154 -
  48.155 -  MallocMem(MEMFLAGS flags) {
  48.156 -    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
  48.157 -    _type = FLAGS_TO_MEMORY_TYPE(flags);
  48.158 -    _count = 0;
  48.159 -    _amount = 0;
  48.160 -  }
  48.161 -
  48.162 -  inline void set_type(MEMFLAGS flag) {
  48.163 -    _type = flag;
  48.164 -  }
  48.165 -
  48.166 -  inline void clear() {
  48.167 -    _count = 0;
  48.168 -    _amount = 0;
  48.169 -    _type = mtNone;
  48.170 -  }
  48.171 -
  48.172 -  MallocMem& operator=(const MallocMem& m) {
  48.173 -    assert(_type == m.type(), "different type");
  48.174 -    _count = m.count();
  48.175 -    _amount = m.amount();
  48.176 -    return *this;
  48.177 -  }
  48.178 -
  48.179 -  inline void inc(size_t amt) {
  48.180 -    _amount += amt;
  48.181 -    _count ++;
  48.182 -  }
  48.183 -
  48.184 -  inline void reduce(size_t amt) {
  48.185 -    assert(_amount >= amt, "Just check");
  48.186 -    _amount -= amt;
  48.187 -  }
  48.188 -
  48.189 -  inline void overwrite_counter(size_t count) {
  48.190 -    _count = count;
  48.191 -  }
  48.192 -
  48.193 -  inline MEMFLAGS type() const {
  48.194 -    return _type;
  48.195 -  }
  48.196 -
  48.197 -  inline bool is_type(MEMFLAGS flags) const {
  48.198 -    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
  48.199 -  }
  48.200 -
  48.201 -  inline size_t count() const {
  48.202 -    return _count;
  48.203 -  }
  48.204 -
  48.205 -  inline size_t amount() const {
  48.206 -    return _amount;
  48.207 -  }
  48.208 -};
  48.209 -
  48.210 -// This class records live arena's memory usage
  48.211 -class ArenaMem : public MallocMem {
  48.212 - public:
  48.213 -  ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
  48.214 -  }
  48.215 -  ArenaMem() { }
  48.216 -};
  48.217 -
  48.218 -// This class aggregates virtual memory by its memory type
  48.219 -class VMMem VALUE_OBJ_CLASS_SPEC {
  48.220 - private:
  48.221 -  MEMFLAGS       _type;
  48.222 -
  48.223 -  size_t         _count;
  48.224 -  size_t         _reserved_amount;
  48.225 -  size_t         _committed_amount;
  48.226 -
  48.227 - public:
  48.228 -  VMMem() {
  48.229 -    _type = mtNone;
  48.230 -    _count = 0;
  48.231 -    _reserved_amount = 0;
  48.232 -    _committed_amount = 0;
  48.233 -  }
  48.234 -
  48.235 -  VMMem(MEMFLAGS flags) {
  48.236 -    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
  48.237 -    _type = FLAGS_TO_MEMORY_TYPE(flags);
  48.238 -    _count = 0;
  48.239 -    _reserved_amount = 0;
  48.240 -    _committed_amount = 0;
  48.241 -  }
  48.242 -
  48.243 -  inline void clear() {
  48.244 -    _type = mtNone;
  48.245 -    _count = 0;
  48.246 -    _reserved_amount = 0;
  48.247 -    _committed_amount = 0;
  48.248 -  }
  48.249 -
  48.250 -  inline void set_type(MEMFLAGS flags) {
  48.251 -    _type = FLAGS_TO_MEMORY_TYPE(flags);
  48.252 -  }
  48.253 -
  48.254 -  VMMem& operator=(const VMMem& m) {
  48.255 -    assert(_type == m.type(), "different type");
  48.256 -
  48.257 -    _count = m.count();
  48.258 -    _reserved_amount = m.reserved_amount();
  48.259 -    _committed_amount = m.committed_amount();
  48.260 -    return *this;
  48.261 -  }
  48.262 -
  48.263 -
  48.264 -  inline MEMFLAGS type() const {
  48.265 -    return _type;
  48.266 -  }
  48.267 -
  48.268 -  inline bool is_type(MEMFLAGS flags) const {
  48.269 -    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
  48.270 -  }
  48.271 -
  48.272 -  inline void inc(size_t reserved_amt, size_t committed_amt) {
  48.273 -    _reserved_amount += reserved_amt;
  48.274 -    _committed_amount += committed_amt;
  48.275 -    _count ++;
  48.276 -  }
  48.277 -
  48.278 -  inline size_t count() const {
  48.279 -    return _count;
  48.280 -  }
  48.281 -
  48.282 -  inline size_t reserved_amount() const {
  48.283 -    return _reserved_amount;
  48.284 -  }
  48.285 -
  48.286 -  inline size_t committed_amount() const {
  48.287 -    return _committed_amount;
  48.288 -  }
  48.289 -};
  48.290 -
  48.291 -
  48.292 -
  48.293 -#define NUMBER_OF_MEMORY_TYPE    (mt_number_of_types + 1)
  48.294 -
  48.295 -class BaselineReporter;
  48.296 -class BaselineComparisonReporter;
  48.297 -
  48.298 -/*
  48.299 - * This class baselines current memory snapshot.
  48.300 - * A memory baseline summarizes memory usage by memory type,
  48.301 - * aggregates memory usage by callsites when detail tracking
  48.302 - * is on.
  48.303 - */
  48.304 -class MemBaseline VALUE_OBJ_CLASS_SPEC {
  48.305 -  friend class BaselineReporter;
  48.306 -  friend class BaselineComparisonReporter;
  48.307 +  enum SortingOrder {
  48.308 +    by_address,   // by memory address
  48.309 +    by_size,      // by memory size
  48.310 +    by_site       // by call site where the memory is allocated from
  48.311 +  };
  48.312  
  48.313   private:
  48.314 -  // overall summaries
  48.315 -  size_t        _total_malloced;
  48.316 -  size_t        _total_vm_reserved;
  48.317 -  size_t        _total_vm_committed;
  48.318 -  size_t        _number_of_classes;
  48.319 -  size_t        _number_of_threads;
  48.320 +  // All baseline data is stored in this arena
  48.321 +  Arena*                  _arena;
  48.322  
  48.323 -  // if it has properly baselined
  48.324 -  bool          _baselined;
  48.325 +  // Summary information
  48.326 +  MallocMemorySnapshot*   _malloc_memory_snapshot;
  48.327 +  VirtualMemorySnapshot*  _virtual_memory_snapshot;
  48.328  
  48.329 -  // we categorize memory into three categories within the memory type
  48.330 -  MallocMem     _malloc_data[NUMBER_OF_MEMORY_TYPE];
  48.331 -  VMMem         _vm_data[NUMBER_OF_MEMORY_TYPE];
  48.332 -  ArenaMem      _arena_data[NUMBER_OF_MEMORY_TYPE];
  48.333 +  size_t               _class_count;
  48.334  
  48.335 -  // memory records that aggregate memory usage by callsites.
  48.336 -  // only available when detail tracking is on.
  48.337 -  MemPointerArray*  _malloc_cs;
  48.338 -  MemPointerArray*  _vm_cs;
  48.339 -  // virtual memory map
  48.340 -  MemPointerArray*  _vm_map;
  48.341 +  // Allocation sites information
  48.342 +  // Malloc allocation sites
  48.343 +  LinkedListImpl<MallocSite, ResourceObj::ARENA>
  48.344 +                       _malloc_sites;
  48.345  
  48.346 - private:
  48.347 -  static MemType2Name  MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
  48.348 +  // All virtual memory allocations
  48.349 +  LinkedListImpl<ReservedMemoryRegion, ResourceObj::ARENA>
  48.350 +                       _virtual_memory_allocations;
  48.351  
  48.352 - private:
  48.353 -  // should not use copy constructor
  48.354 -  MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
  48.355 +  // Virtual memory allocations by allocation sites, always in by_address
  48.356 +  // order
  48.357 +  LinkedListImpl<VirtualMemoryAllocationSite, ResourceObj::ARENA>
  48.358 +                       _virtual_memory_sites;
  48.359  
  48.360 -  // check and block at a safepoint
  48.361 -  static inline void check_safepoint(JavaThread* thr);
  48.362 +  SortingOrder         _malloc_sites_order;
  48.363 +  SortingOrder         _virtual_memory_sites_order;
  48.364 +
  48.365 +  BaselineType         _baseline_type;
  48.366  
  48.367   public:
  48.368    // create a memory baseline
  48.369 -  MemBaseline();
  48.370 -
  48.371 -  ~MemBaseline();
  48.372 -
  48.373 -  inline bool baselined() const {
  48.374 -    return _baselined;
  48.375 +  MemBaseline():
  48.376 +    _baseline_type(Not_baselined),
  48.377 +    _class_count(0),
  48.378 +    _arena(NULL),
  48.379 +    _malloc_memory_snapshot(NULL),
  48.380 +    _virtual_memory_snapshot(NULL),
  48.381 +    _malloc_sites(NULL) {
  48.382    }
  48.383  
  48.384 -  MemBaseline& operator=(const MemBaseline& other);
  48.385 +  ~MemBaseline() {
  48.386 +    reset();
  48.387 +    if (_arena != NULL) {
  48.388 +      delete _arena;
  48.389 +    }
  48.390 +  }
  48.391 +
  48.392 +  bool baseline(bool summaryOnly = true);
  48.393 +
  48.394 +  BaselineType baseline_type() const { return _baseline_type; }
  48.395 +
  48.396 +  MallocMemorySnapshot* malloc_memory_snapshot() const {
  48.397 +    return _malloc_memory_snapshot;
  48.398 +  }
  48.399 +
  48.400 +  VirtualMemorySnapshot* virtual_memory_snapshot() const {
  48.401 +    return _virtual_memory_snapshot;
  48.402 +  }
  48.403 +
  48.404 +  MallocSiteIterator malloc_sites(SortingOrder order);
  48.405 +  VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
  48.406 +
  48.407 +  // Virtual memory allocation iterator always returns in virtual memory
  48.408 +  // base address order.
  48.409 +  VirtualMemoryAllocationIterator virtual_memory_allocations() {
  48.410 +    assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
  48.411 +    return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
  48.412 +  }
  48.413 +
  48.414 +  // Total reserved memory = total malloc'd memory + total reserved virtual
  48.415 +  // memory
  48.416 +  size_t total_reserved_memory() const {
  48.417 +    assert(baseline_type() != Not_baselined, "Not yet baselined");
  48.418 +    assert(_virtual_memory_snapshot != NULL, "No virtual memory snapshot");
  48.419 +    assert(_malloc_memory_snapshot != NULL,  "No malloc memory snapshot");
  48.420 +    size_t amount = _malloc_memory_snapshot->total() +
  48.421 +           _virtual_memory_snapshot->total_reserved();
  48.422 +    return amount;
  48.423 +  }
  48.424 +
  48.425 +  // Total committed memory = total malloc'd memory + total committed
  48.426 +  // virtual memory
  48.427 +  size_t total_committed_memory() const {
  48.428 +    assert(baseline_type() != Not_baselined, "Not yet baselined");
  48.429 +    assert(_virtual_memory_snapshot != NULL,
  48.430 +      "Not a snapshot");
  48.431 +    size_t amount = _malloc_memory_snapshot->total() +
  48.432 +           _virtual_memory_snapshot->total_committed();
  48.433 +    return amount;
  48.434 +  }
  48.435 +
  48.436 +  size_t total_arena_memory() const {
  48.437 +    assert(baseline_type() != Not_baselined, "Not yet baselined");
  48.438 +    assert(_malloc_memory_snapshot != NULL, "Not yet baselined");
  48.439 +    return _malloc_memory_snapshot->total_arena();
  48.440 +  }
  48.441 +
  48.442 +  size_t malloc_tracking_overhead() const {
  48.443 +    assert(baseline_type() != Not_baselined, "Not yet baselined");
  48.444 +    return _malloc_memory_snapshot->malloc_overhead()->size();
  48.445 +  }
  48.446 +
  48.447 +  const MallocMemory* malloc_memory(MEMFLAGS flag) const {
  48.448 +    assert(_malloc_memory_snapshot != NULL, "Not a snapshot");
  48.449 +    return _malloc_memory_snapshot->by_type(flag);
  48.450 +  }
  48.451 +
  48.452 +  const VirtualMemory* virtual_memory(MEMFLAGS flag) const {
  48.453 +    assert(_virtual_memory_snapshot != NULL, "Not a snapshot");
  48.454 +    return _virtual_memory_snapshot->by_type(flag);
  48.455 +  }
  48.456 +
  48.457 +
  48.458 +  size_t class_count() const {
  48.459 +    assert(baseline_type() != Not_baselined, "Not yet baselined");
  48.460 +    return _class_count;
  48.461 +  }
  48.462 +
  48.463 +  size_t thread_count() const {
  48.464 +    assert(baseline_type() != Not_baselined, "Not yet baselined");
  48.465 +    assert(_malloc_memory_snapshot != NULL, "Baselined?");
  48.466 +    return _malloc_memory_snapshot->thread_count();
  48.467 +  }
  48.468  
  48.469    // reset the baseline for reuse
  48.470 -  void clear();
  48.471 +  void reset() {
  48.472 +    _baseline_type = Not_baselined;
  48.473 +    _malloc_memory_snapshot = NULL;
  48.474 +    _virtual_memory_snapshot = NULL;
  48.475 +    _class_count  = 0;
  48.476  
  48.477 -  // baseline the snapshot
  48.478 -  bool baseline(MemSnapshot& snapshot, bool summary_only = true);
  48.479 +    _malloc_sites = NULL;
  48.480 +    _virtual_memory_sites = NULL;
  48.481 +    _virtual_memory_allocations = NULL;
  48.482  
  48.483 -  bool baseline(const MemPointerArray* malloc_records,
  48.484 -                const MemPointerArray* vm_records,
  48.485 -                bool summary_only = true);
  48.486 -
  48.487 -  // total malloc'd memory of specified memory type
  48.488 -  inline size_t malloc_amount(MEMFLAGS flag) const {
  48.489 -    return _malloc_data[flag2index(flag)].amount();
  48.490 -  }
  48.491 -  // number of malloc'd memory blocks of specified memory type
  48.492 -  inline size_t malloc_count(MEMFLAGS flag) const {
  48.493 -    return _malloc_data[flag2index(flag)].count();
  48.494 -  }
  48.495 -  // total memory used by arenas of specified memory type
  48.496 -  inline size_t arena_amount(MEMFLAGS flag) const {
  48.497 -    return _arena_data[flag2index(flag)].amount();
  48.498 -  }
  48.499 -  // number of arenas of specified memory type
  48.500 -  inline size_t arena_count(MEMFLAGS flag) const {
  48.501 -    return _arena_data[flag2index(flag)].count();
  48.502 -  }
  48.503 -  // total reserved memory of specified memory type
  48.504 -  inline size_t reserved_amount(MEMFLAGS flag) const {
  48.505 -    return _vm_data[flag2index(flag)].reserved_amount();
  48.506 -  }
  48.507 -  // total committed memory of specified memory type
  48.508 -  inline size_t committed_amount(MEMFLAGS flag) const {
  48.509 -    return _vm_data[flag2index(flag)].committed_amount();
  48.510 -  }
  48.511 -  // total memory (malloc'd + mmap'd + arena) of specified
  48.512 -  // memory type
  48.513 -  inline size_t total_amount(MEMFLAGS flag) const {
  48.514 -    int index = flag2index(flag);
  48.515 -    return _malloc_data[index].amount() +
  48.516 -           _vm_data[index].reserved_amount() +
  48.517 -           _arena_data[index].amount();
  48.518 +    if (_arena != NULL) {
  48.519 +      _arena->destruct_contents();
  48.520 +    }
  48.521    }
  48.522  
  48.523 -  /* overall summaries */
  48.524 + private:
  48.525 +  // Baseline summary information
  48.526 +  bool baseline_summary();
  48.527  
  48.528 -  // total malloc'd memory in snapshot
  48.529 -  inline size_t total_malloc_amount() const {
  48.530 -    return _total_malloced;
  48.531 -  }
  48.532 -  // total mmap'd memory in snapshot
  48.533 -  inline size_t total_reserved_amount() const {
  48.534 -    return _total_vm_reserved;
  48.535 -  }
  48.536 -  // total committed memory in snapshot
  48.537 -  inline size_t total_committed_amount() const {
  48.538 -    return _total_vm_committed;
  48.539 -  }
  48.540 -  // number of loaded classes
  48.541 -  inline size_t number_of_classes() const {
  48.542 -    return _number_of_classes;
  48.543 -  }
  48.544 -  // number of running threads
  48.545 -  inline size_t number_of_threads() const {
  48.546 -    return _number_of_threads;
  48.547 -  }
  48.548 -  // lookup human readable name of a memory type
  48.549 -  static const char* type2name(MEMFLAGS type);
  48.550 +  // Baseline allocation sites (detail tracking only)
  48.551 +  bool baseline_allocation_sites();
  48.552  
  48.553 - private:
  48.554 -  // convert memory flag to the index to mapping table
  48.555 -  int         flag2index(MEMFLAGS flag) const;
  48.556 +  // Aggregate virtual memory allocation by allocation sites
  48.557 +  bool aggregate_virtual_memory_allocation_sites();
  48.558  
  48.559 -  // reset baseline values
  48.560 -  void reset();
  48.561 +  Arena* arena() { return _arena; }
  48.562  
  48.563 -  // summarize the records in global snapshot
  48.564 -  bool baseline_malloc_summary(const MemPointerArray* malloc_records);
  48.565 -  bool baseline_vm_summary(const MemPointerArray* vm_records);
  48.566 -  bool baseline_malloc_details(const MemPointerArray* malloc_records);
  48.567 -  bool baseline_vm_details(const MemPointerArray* vm_records);
  48.568 +  // Sorting allocation sites in different orders
  48.569 +  // Sort allocation sites in size order
  48.570 +  void malloc_sites_to_size_order();
  48.571 +  // Sort allocation sites in call site address order
  48.572 +  void malloc_sites_to_allocation_site_order();
  48.573  
  48.574 -  // print a line of malloc'd memory aggregated by callsite
  48.575 -  void print_malloc_callsite(outputStream* st, address pc, size_t size,
  48.576 -    size_t count, int diff_amt, int diff_count) const;
  48.577 -  // print a line of mmap'd memory aggregated by callsite
  48.578 -  void print_vm_callsite(outputStream* st, address pc, size_t rsz,
  48.579 -    size_t csz, int diff_rsz, int diff_csz) const;
  48.580 -
  48.581 -  // sorting functions for raw records
  48.582 -  static int malloc_sort_by_pc(const void* p1, const void* p2);
  48.583 -  static int malloc_sort_by_addr(const void* p1, const void* p2);
  48.584 -
  48.585 - private:
  48.586 -  // sorting functions for baselined records
  48.587 -  static int bl_malloc_sort_by_size(const void* p1, const void* p2);
  48.588 -  static int bl_vm_sort_by_size(const void* p1, const void* p2);
  48.589 -  static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
  48.590 -  static int bl_vm_sort_by_pc(const void* p1, const void* p2);
  48.591 +  // Sort allocation sites in reserved size order
  48.592 +  void virtual_memory_sites_to_size_order();
  48.593 +  // Sort allocation sites in call site address order
  48.594 +  void virtual_memory_sites_to_reservation_site_order();
  48.595  };
  48.596  
  48.597 +#endif // INCLUDE_NMT
  48.598  
  48.599  #endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
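
The *_to_size_order() and *_to_*_site_order() helpers in memBaseline.cpp above all share one idiom: move the current list into a SortedLinkedList built on the requested comparator, then take that temporary list's head back with set_head(). Below is a self-contained sketch of the same insert-into-sorted-position idea in plain C++ (ordinary pointers instead of the HotSpot LinkedListImpl/Arena types), included only to illustrate the pattern.

    #include <cstddef>

    struct Node { size_t size; Node* next; };

    // Re-link the nodes of 'src' into descending-size order and return the
    // new head; mirrors "tmp.move(&list); list.set_head(tmp.head());".
    static Node* sort_by_size(Node* src) {
      Node* sorted = NULL;
      while (src != NULL) {
        Node* n = src;          // detach the next source node
        src = src->next;
        Node** p = &sorted;     // find its position in the sorted chain
        while (*p != NULL && (*p)->size >= n->size) {
          p = &(*p)->next;
        }
        n->next = *p;           // splice it into place
        *p = n;
      }
      return sorted;            // caller replaces its head with this
    }
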
    49.1 --- a/src/share/vm/services/memPtr.cpp	Wed Aug 27 09:36:55 2014 +0200
    49.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.3 @@ -1,42 +0,0 @@
    49.4 -/*
    49.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    49.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.7 - *
    49.8 - * This code is free software; you can redistribute it and/or modify it
    49.9 - * under the terms of the GNU General Public License version 2 only, as
   49.10 - * published by the Free Software Foundation.
   49.11 - *
   49.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   49.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   49.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   49.15 - * version 2 for more details (a copy is included in the LICENSE file that
   49.16 - * accompanied this code).
   49.17 - *
   49.18 - * You should have received a copy of the GNU General Public License version
   49.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   49.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   49.21 - *
   49.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   49.23 - * or visit www.oracle.com if you need additional information or have any
   49.24 - * questions.
   49.25 - *
   49.26 - */
   49.27 -
   49.28 -#include "precompiled.hpp"
   49.29 -#include "services/memPtr.hpp"
   49.30 -#include "services/memTracker.hpp"
   49.31 -
   49.32 -volatile jint SequenceGenerator::_seq_number = 1;
   49.33 -volatile unsigned long SequenceGenerator::_generation = 1;
   49.34 -NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
   49.35 -
   49.36 -jint SequenceGenerator::next() {
   49.37 -  jint seq = Atomic::add(1, &_seq_number);
   49.38 -  if (seq < 0) {
   49.39 -    MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
   49.40 -  } else {
   49.41 -    NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
   49.42 -  }
   49.43 -  return seq;
   49.44 -}
   49.45 -
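
One way to read the removal of SequenceGenerator (and the recorders that consumed its numbers) is that every tracked malloc/free/mmap event had to take a ticket from a single process-wide atomic counter. A rough, illustrative sketch of that contention pattern in portable C++ (not HotSpot code):

    #include <atomic>

    // Shared sequence counter: every recording thread bumps the same word,
    // so all tracked allocation events serialize on one cache line.
    static std::atomic<int> g_seq{1};

    inline int next_seq() {
      return g_seq.fetch_add(1, std::memory_order_relaxed);
    }
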
    50.1 --- a/src/share/vm/services/memPtr.hpp	Wed Aug 27 09:36:55 2014 +0200
    50.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    50.3 @@ -1,510 +0,0 @@
    50.4 -/*
    50.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    50.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.7 - *
    50.8 - * This code is free software; you can redistribute it and/or modify it
    50.9 - * under the terms of the GNU General Public License version 2 only, as
   50.10 - * published by the Free Software Foundation.
   50.11 - *
   50.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   50.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   50.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   50.15 - * version 2 for more details (a copy is included in the LICENSE file that
   50.16 - * accompanied this code).
   50.17 - *
   50.18 - * You should have received a copy of the GNU General Public License version
   50.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   50.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   50.21 - *
   50.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   50.23 - * or visit www.oracle.com if you need additional information or have any
   50.24 - * questions.
   50.25 - *
   50.26 - */
   50.27 -
   50.28 -#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
   50.29 -#define SHARE_VM_SERVICES_MEM_PTR_HPP
   50.30 -
   50.31 -#include "memory/allocation.hpp"
   50.32 -#include "runtime/atomic.hpp"
   50.33 -#include "runtime/os.hpp"
   50.34 -#include "runtime/safepoint.hpp"
   50.35 -
   50.36 -/*
   50.37 - * global sequence generator that generates sequence numbers to serialize
   50.38 - * memory records.
   50.39 - */
   50.40 -class SequenceGenerator : AllStatic {
   50.41 - public:
   50.42 -  static jint next();
   50.43 -
   50.44 -  // peek last sequence number
   50.45 -  static jint peek() {
   50.46 -    return _seq_number;
   50.47 -  }
   50.48 -
   50.49 -  // reset sequence number
   50.50 -  static void reset() {
   50.51 -    assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
   50.52 -    _seq_number = 1;
   50.53 -    _generation ++;
   50.54 -  };
   50.55 -
   50.56 -  static unsigned long current_generation() { return _generation; }
   50.57 -  NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
   50.58 -
   50.59 - private:
   50.60 -  static volatile jint             _seq_number;
   50.61 -  static volatile unsigned long    _generation;
   50.62 -  NOT_PRODUCT(static jint          _max_seq_number; )
   50.63 -};
   50.64 -
   50.65 -/*
   50.66 - * The following classes are used to hold memory activity records in different stages.
   50.67 - *   MemPointer
   50.68 - *     |--------MemPointerRecord
   50.69 - *                     |
   50.70 - *                     |----MemPointerRecordEx
   50.71 - *                     |           |
   50.72 - *                     |           |-------SeqMemPointerRecordEx
   50.73 - *                     |
   50.74 - *                     |----SeqMemPointerRecord
   50.75 - *                     |
   50.76 - *                     |----VMMemRegion
   50.77 - *                               |
   50.78 - *                               |-----VMMemRegionEx
   50.79 - *
   50.80 - *
   50.81 - *  prefix 'Seq' - sequenced, the record contains a sequence number
   50.82 - *  suffix 'Ex'  - extension, the record contains a caller's pc
   50.83 - *
   50.84 - *  per-thread recorder : SeqMemPointerRecord(Ex)
   50.85 - *  snapshot staging    : SeqMemPointerRecord(Ex)
   50.86 - *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
   50.87 - *
   50.88 - */
   50.89 -
   50.90 -/*
   50.91 - * class that wraps an address to a memory block,
   50.92 - * the memory pointer either points to a malloc'd
   50.93 - * memory block, or a mmap'd memory block
   50.94 - */
   50.95 -class MemPointer VALUE_OBJ_CLASS_SPEC {
   50.96 - public:
   50.97 -  MemPointer(): _addr(0) { }
   50.98 -  MemPointer(address addr): _addr(addr) { }
   50.99 -
  50.100 -  MemPointer(const MemPointer& copy_from) {
  50.101 -    _addr = copy_from.addr();
  50.102 -  }
  50.103 -
  50.104 -  inline address addr() const {
  50.105 -    return _addr;
  50.106 -  }
  50.107 -
  50.108 -  inline operator address() const {
  50.109 -    return addr();
  50.110 -  }
  50.111 -
  50.112 -  inline bool operator == (const MemPointer& other) const {
  50.113 -    return addr() == other.addr();
  50.114 -  }
  50.115 -
  50.116 -  inline MemPointer& operator = (const MemPointer& other) {
  50.117 -    _addr = other.addr();
  50.118 -    return *this;
  50.119 -  }
  50.120 -
  50.121 - protected:
  50.122 -  inline void set_addr(address addr) { _addr = addr; }
  50.123 -
  50.124 - protected:
  50.125 -  // memory address
  50.126 -  address    _addr;
  50.127 -};
  50.128 -
  50.129 -/* MemPointerRecord records an activity and associated
  50.130 - * attributes on a memory block.
  50.131 - */
  50.132 -class MemPointerRecord : public MemPointer {
  50.133 - private:
  50.134 -  MEMFLAGS       _flags;
  50.135 -  size_t         _size;
  50.136 -
  50.137 -public:
  50.138 -  /* extension of MemoryType enum
  50.139 -   * see share/vm/memory/allocation.hpp for details.
  50.140 -   *
  50.141 -   * The tag values are associated to sorting orders, so be
  50.142 -   * careful if changes are needed.
  50.143 -   * The allocation records should be sorted ahead of tagging
  50.144 -   * records, which in turn ahead of deallocation records
  50.145 -   */
  50.146 -  enum MemPointerTags {
  50.147 -    tag_alloc            = 0x0001, // malloc or reserve record
  50.148 -    tag_commit           = 0x0002, // commit record
  50.149 -    tag_type             = 0x0003, // tag virtual memory to a memory type
  50.150 -    tag_uncommit         = 0x0004, // uncommit record
  50.151 -    tag_release          = 0x0005, // free or release record
  50.152 -    tag_size             = 0x0006, // arena size
  50.153 -    tag_masks            = 0x0007, // all tag bits
  50.154 -    vmBit                = 0x0008
  50.155 -  };
  50.156 -
  50.157 -  /* helper functions to interpret the tagging flags */
  50.158 -
  50.159 -  inline static bool is_allocation_record(MEMFLAGS flags) {
  50.160 -    return (flags & tag_masks) == tag_alloc;
  50.161 -  }
  50.162 -
  50.163 -  inline static bool is_deallocation_record(MEMFLAGS flags) {
  50.164 -    return (flags & tag_masks) == tag_release;
  50.165 -  }
  50.166 -
  50.167 -  inline static bool is_arena_record(MEMFLAGS flags) {
  50.168 -    return (flags & (otArena | tag_size)) == otArena;
  50.169 -  }
  50.170 -
  50.171 -  inline static bool is_arena_memory_record(MEMFLAGS flags) {
  50.172 -    return (flags & (otArena | tag_size)) == (otArena | tag_size);
  50.173 -  }
  50.174 -
  50.175 -  inline static bool is_virtual_memory_record(MEMFLAGS flags) {
  50.176 -    return (flags & vmBit) != 0;
  50.177 -  }
  50.178 -
  50.179 -  inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
  50.180 -    return (flags & 0x0F) == (tag_alloc | vmBit);
  50.181 -  }
  50.182 -
  50.183 -  inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
  50.184 -    return (flags & 0x0F) == (tag_commit | vmBit);
  50.185 -  }
  50.186 -
  50.187 -  inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
  50.188 -    return (flags & 0x0F) == (tag_uncommit | vmBit);
  50.189 -  }
  50.190 -
  50.191 -  inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
  50.192 -    return (flags & 0x0F) == (tag_release | vmBit);
  50.193 -  }
  50.194 -
  50.195 -  inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
  50.196 -    return (flags & 0x0F) == (tag_type | vmBit);
  50.197 -  }
  50.198 -
  50.199 -  /* tagging flags */
  50.200 -  inline static MEMFLAGS malloc_tag()                 { return tag_alloc;   }
  50.201 -  inline static MEMFLAGS free_tag()                   { return tag_release; }
  50.202 -  inline static MEMFLAGS arena_size_tag()             { return tag_size | otArena; }
  50.203 -  inline static MEMFLAGS virtual_memory_tag()         { return vmBit; }
  50.204 -  inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
  50.205 -  inline static MEMFLAGS virtual_memory_commit_tag()  { return (tag_commit | vmBit); }
  50.206 -  inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
  50.207 -  inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
  50.208 -  inline static MEMFLAGS virtual_memory_type_tag()    { return (tag_type | vmBit); }
  50.209 -
  50.210 - public:
  50.211 -  MemPointerRecord(): _size(0), _flags(mtNone) { }
  50.212 -
  50.213 -  MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
  50.214 -      MemPointer(addr), _flags(memflags), _size(size) { }
  50.215 -
  50.216 -  MemPointerRecord(const MemPointerRecord& copy_from):
  50.217 -    MemPointer(copy_from), _flags(copy_from.flags()),
  50.218 -    _size(copy_from.size()) {
  50.219 -  }
  50.220 -
  50.221 -  /* MemPointerRecord is not sequenced, it always return
  50.222 -   * 0 to indicate non-sequenced
  50.223 -   */
  50.224 -  virtual jint seq() const               { return 0; }
  50.225 -
  50.226 -  inline size_t   size()  const          { return _size; }
  50.227 -  inline void set_size(size_t size)      { _size = size; }
  50.228 -
  50.229 -  inline MEMFLAGS flags() const          { return _flags; }
  50.230 -  inline void set_flags(MEMFLAGS flags)  { _flags = flags; }
  50.231 -
  50.232 -  MemPointerRecord& operator= (const MemPointerRecord& ptr) {
  50.233 -    MemPointer::operator=(ptr);
  50.234 -    _flags = ptr.flags();
  50.235 -#ifdef ASSERT
  50.236 -    if (IS_ARENA_OBJ(_flags)) {
  50.237 -      assert(!is_vm_pointer(), "wrong flags");
  50.238 -      assert((_flags & ot_masks) == otArena, "wrong flags");
  50.239 -    }
  50.240 -#endif
  50.241 -    _size = ptr.size();
  50.242 -    return *this;
  50.243 -  }
  50.244 -
  50.245 -  // if the pointer represents a malloc-ed memory address
  50.246 -  inline bool is_malloced_pointer() const {
  50.247 -    return !is_vm_pointer();
  50.248 -  }
  50.249 -
  50.250 -  // if the pointer represents a virtual memory address
  50.251 -  inline bool is_vm_pointer() const {
  50.252 -    return is_virtual_memory_record(_flags);
  50.253 -  }
  50.254 -
  50.255 -  // if this record records a 'malloc' or virtual memory
  50.256 -  // 'reserve' call
  50.257 -  inline bool is_allocation_record() const {
  50.258 -    return is_allocation_record(_flags);
  50.259 -  }
  50.260 -
  50.261 -  // if this record records a size information of an arena
  50.262 -  inline bool is_arena_memory_record() const {
  50.263 -    return is_arena_memory_record(_flags);
  50.264 -  }
  50.265 -
  50.266 -  // if this pointer represents an address to an arena object
  50.267 -  inline bool is_arena_record() const {
  50.268 -    return is_arena_record(_flags);
  50.269 -  }
  50.270 -
  50.271 -  // if this record represents a size information of specific arena
  50.272 -  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
  50.273 -    assert(is_arena_memory_record(), "not size record");
  50.274 -    assert(arena_rc->is_arena_record(), "not arena record");
  50.275 -    return (arena_rc->addr() + sizeof(void*)) == addr();
  50.276 -  }
  50.277 -
  50.278 -  // if this record records a 'free' or virtual memory 'free' call
  50.279 -  inline bool is_deallocation_record() const {
  50.280 -    return is_deallocation_record(_flags);
  50.281 -  }
  50.282 -
  50.283 -  // if this record records a virtual memory 'commit' call
  50.284 -  inline bool is_commit_record() const {
  50.285 -    return is_virtual_memory_commit_record(_flags);
  50.286 -  }
  50.287 -
  50.288 -  // if this record records a virtual memory 'uncommit' call
  50.289 -  inline bool is_uncommit_record() const {
  50.290 -    return is_virtual_memory_uncommit_record(_flags);
  50.291 -  }
  50.292 -
  50.293 -  // if this record is a tagging record of a virtual memory block
  50.294 -  inline bool is_type_tagging_record() const {
  50.295 -    return is_virtual_memory_type_record(_flags);
  50.296 -  }
  50.297 -
  50.298 -  // if the two memory pointer records actually represent the same
  50.299 -  // memory block
  50.300 -  inline bool is_same_region(const MemPointerRecord* other) const {
  50.301 -    return (addr() == other->addr() && size() == other->size());
  50.302 -  }
  50.303 -
  50.304 -  // if this memory region fully contains another one
  50.305 -  inline bool contains_region(const MemPointerRecord* other) const {
  50.306 -    return contains_region(other->addr(), other->size());
  50.307 -  }
  50.308 -
  50.309 -  // if this memory region fully contains specified memory range
  50.310 -  inline bool contains_region(address add, size_t sz) const {
  50.311 -    return (addr() <= add && addr() + size() >= add + sz);
  50.312 -  }
  50.313 -
  50.314 -  inline bool contains_address(address add) const {
  50.315 -    return (addr() <= add && addr() + size() > add);
  50.316 -  }
  50.317 -
  50.318 -  // if this memory region overlaps another region
  50.319 -  inline bool overlaps_region(const MemPointerRecord* other) const {
  50.320 -    assert(other != NULL, "Just check");
  50.321 -    assert(size() > 0 && other->size() > 0, "empty range");
  50.322 -    return contains_address(other->addr()) ||
  50.323 -           contains_address(other->addr() + other->size() - 1) || // exclude end address
  50.324 -           other->contains_address(addr()) ||
  50.325 -           other->contains_address(addr() + size() - 1); // exclude end address
  50.326 -  }
  50.327 -
  50.328 -};
  50.329 -
  50.330 -// MemPointerRecordEx also records callsite pc, from where
  50.331 -// the memory block is allocated
  50.332 -class MemPointerRecordEx : public MemPointerRecord {
  50.333 - private:
  50.334 -  address      _pc;  // callsite pc
  50.335 -
  50.336 - public:
  50.337 -  MemPointerRecordEx(): _pc(0) { }
  50.338 -
  50.339 -  MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
  50.340 -    MemPointerRecord(addr, memflags, size), _pc(pc) {}
  50.341 -
  50.342 -  MemPointerRecordEx(const MemPointerRecordEx& copy_from):
  50.343 -    MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
  50.344 -
  50.345 -  inline address pc() const { return _pc; }
  50.346 -
  50.347 -  void init(const MemPointerRecordEx* mpe) {
  50.348 -    MemPointerRecord::operator=(*mpe);
  50.349 -    _pc = mpe->pc();
  50.350 -  }
  50.351 -
  50.352 -  void init(const MemPointerRecord* mp) {
  50.353 -    MemPointerRecord::operator=(*mp);
  50.354 -    _pc = 0;
  50.355 -  }
  50.356 -};
  50.357 -
  50.358 -// a virtual memory region. The region can represent a reserved
  50.359 -// virtual memory region or a committed memory region
  50.360 -class VMMemRegion : public MemPointerRecord {
  50.361 -public:
  50.362 -  VMMemRegion() { }
  50.363 -
  50.364 -  void init(const MemPointerRecord* mp) {
  50.365 -    assert(mp->is_vm_pointer(), "Sanity check");
  50.366 -    _addr = mp->addr();
  50.367 -      set_size(mp->size());
  50.368 -    set_flags(mp->flags());
  50.369 -  }
  50.370 -
  50.371 -  VMMemRegion& operator=(const VMMemRegion& other) {
  50.372 -    MemPointerRecord::operator=(other);
  50.373 -    return *this;
  50.374 -  }
  50.375 -
  50.376 -  inline bool is_reserved_region() const {
  50.377 -    return is_allocation_record();
  50.378 -  }
  50.379 -
  50.380 -  inline bool is_committed_region() const {
  50.381 -    return is_commit_record();
  50.382 -  }
  50.383 -
  50.384 -  /* base address of this virtual memory range */
  50.385 -  inline address base() const {
  50.386 -    return addr();
  50.387 -  }
  50.388 -
  50.389 -  /* tag this virtual memory range to the specified memory type */
  50.390 -  inline void tag(MEMFLAGS f) {
  50.391 -    set_flags(flags() | (f & mt_masks));
  50.392 -  }
  50.393 -
  50.394 -  // expand this region to also cover specified range.
  50.395 -  // The range has to be on either end of the memory region.
  50.396 -  void expand_region(address addr, size_t sz) {
  50.397 -    if (addr < base()) {
  50.398 -      assert(addr + sz == base(), "Sanity check");
  50.399 -      _addr = addr;
  50.400 -      set_size(size() + sz);
  50.401 -    } else {
  50.402 -      assert(base() + size() == addr, "Sanity check");
  50.403 -      set_size(size() + sz);
  50.404 -    }
  50.405 -  }
  50.406 -
  50.407 -  // exclude the specified address range from this region.
  50.408 -  // The excluded memory range has to be on either end of this memory
  50.409 -  // region.
  50.410 -  inline void exclude_region(address add, size_t sz) {
  50.411 -    assert(is_reserved_region() || is_committed_region(), "Sanity check");
  50.412 -    assert(addr() != NULL && size() != 0, "Sanity check");
  50.413 -    assert(add >= addr() && add < addr() + size(), "Sanity check");
  50.414 -    assert(add == addr() || (add + sz) == (addr() + size()),
  50.415 -      "exclude in the middle");
  50.416 -    if (add == addr()) {
  50.417 -      set_addr(add + sz);
  50.418 -      set_size(size() - sz);
  50.419 -    } else {
  50.420 -      set_size(size() - sz);
  50.421 -    }
  50.422 -  }
  50.423 -};
  50.424 -
  50.425 -class VMMemRegionEx : public VMMemRegion {
  50.426 - private:
  50.427 -  jint   _seq;  // sequence number
  50.428 -
  50.429 - public:
  50.430 -  VMMemRegionEx(): _pc(0) { }
  50.431 -
  50.432 -  void init(const MemPointerRecordEx* mpe) {
  50.433 -    VMMemRegion::init(mpe);
  50.434 -    _pc = mpe->pc();
  50.435 -  }
  50.436 -
  50.437 -  void init(const MemPointerRecord* mpe) {
  50.438 -    VMMemRegion::init(mpe);
  50.439 -    _pc = 0;
  50.440 -  }
  50.441 -
  50.442 -  VMMemRegionEx& operator=(const VMMemRegionEx& other) {
  50.443 -    VMMemRegion::operator=(other);
  50.444 -    _pc = other.pc();
  50.445 -    return *this;
  50.446 -  }
  50.447 -
  50.448 -  inline address pc() const { return _pc; }
  50.449 - private:
  50.450 -  address   _pc;
  50.451 -};
  50.452 -
  50.453 -/*
  50.454 - * Sequenced memory record
  50.455 - */
  50.456 -class SeqMemPointerRecord : public MemPointerRecord {
  50.457 - private:
  50.458 -   jint _seq;  // sequence number
  50.459 -
  50.460 - public:
  50.461 -  SeqMemPointerRecord(): _seq(0){ }
  50.462 -
  50.463 -  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
  50.464 -    : MemPointerRecord(addr, flags, size), _seq(seq)  {
  50.465 -  }
  50.466 -
  50.467 -  SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
  50.468 -    : MemPointerRecord(copy_from) {
  50.469 -    _seq = copy_from.seq();
  50.470 -  }
  50.471 -
  50.472 -  SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
  50.473 -    MemPointerRecord::operator=(ptr);
  50.474 -    _seq = ptr.seq();
  50.475 -    return *this;
  50.476 -  }
  50.477 -
  50.478 -  inline jint seq() const {
  50.479 -    return _seq;
  50.480 -  }
  50.481 -};
  50.482 -
  50.483 -
  50.484 -
  50.485 -class SeqMemPointerRecordEx : public MemPointerRecordEx {
  50.486 - private:
  50.487 -  jint    _seq;  // sequence number
  50.488 -
  50.489 - public:
  50.490 -  SeqMemPointerRecordEx(): _seq(0) { }
  50.491 -
  50.492 -  SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
  50.493 -    jint seq, address pc):
  50.494 -    MemPointerRecordEx(addr, flags, size, pc), _seq(seq)  {
  50.495 -  }
  50.496 -
  50.497 -  SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
  50.498 -    : MemPointerRecordEx(copy_from) {
  50.499 -    _seq = copy_from.seq();
  50.500 -  }
  50.501 -
  50.502 -  SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
  50.503 -    MemPointerRecordEx::operator=(ptr);
  50.504 -    _seq = ptr.seq();
  50.505 -    return *this;
  50.506 -  }
  50.507 -
  50.508 -  inline jint seq() const {
  50.509 -    return _seq;
  50.510 -  }
  50.511 -};
  50.512 -
  50.513 -#endif // SHARE_VM_SERVICES_MEM_PTR_HPP
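
For reference, the tagging scheme deleted above packed the operation kind and the malloc/virtual-memory distinction into the low bits of MEMFLAGS; a virtual memory reserve record, for example, carried tag_alloc | vmBit (0x0009) and was recognized by masking with 0x0F. A small standalone sketch of that flag arithmetic (constants copied from the removed header, predicates written for illustration only):

    // Flag arithmetic of the removed MemPointerRecord tags.
    enum {
      tag_alloc  = 0x0001,   // malloc or reserve
      tag_commit = 0x0002,   // commit
      tag_masks  = 0x0007,   // all tag bits
      vmBit      = 0x0008    // set for virtual memory records
    };

    inline bool is_vm_reserve_record(unsigned flags) {
      return (flags & 0x0F) == (tag_alloc | vmBit);    // == 0x09
    }
    inline bool is_malloc_record(unsigned flags) {
      return (flags & vmBit) == 0 && (flags & tag_masks) == tag_alloc;
    }
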
    51.1 --- a/src/share/vm/services/memPtrArray.hpp	Wed Aug 27 09:36:55 2014 +0200
    51.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    51.3 @@ -1,306 +0,0 @@
    51.4 -/*
    51.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    51.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    51.7 - *
    51.8 - * This code is free software; you can redistribute it and/or modify it
    51.9 - * under the terms of the GNU General Public License version 2 only, as
   51.10 - * published by the Free Software Foundation.
   51.11 - *
   51.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   51.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   51.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   51.15 - * version 2 for more details (a copy is included in the LICENSE file that
   51.16 - * accompanied this code).
   51.17 - *
   51.18 - * You should have received a copy of the GNU General Public License version
   51.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   51.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   51.21 - *
   51.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   51.23 - * or visit www.oracle.com if you need additional information or have any
   51.24 - * questions.
   51.25 - *
   51.26 - */
   51.27 -#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
   51.28 -#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
   51.29 -
   51.30 -#include "memory/allocation.hpp"
   51.31 -#include "services/memPtr.hpp"
   51.32 -
   51.33 -class MemPtr;
   51.34 -class MemRecorder;
   51.35 -class ArenaInfo;
   51.36 -class MemSnapshot;
   51.37 -
   51.38 -extern "C" {
   51.39 -  typedef int (*FN_SORT)(const void *, const void *);
   51.40 -}
   51.41 -
   51.42 -
   51.43 -// Memory pointer array interface. This array is used by NMT to hold
   51.44 -// various memory block information.
   51.45 -// The memory pointer arrays are usually walked with their iterators.
   51.46 -
   51.47 -class MemPointerArray : public CHeapObj<mtNMT> {
   51.48 - public:
   51.49 -  virtual ~MemPointerArray() { }
   51.50 -
   51.51 -  // return true if it can not allocate storage for the data
   51.52 -  virtual bool out_of_memory() const = 0;
   51.53 -  virtual bool is_empty() const = 0;
   51.54 -  virtual bool is_full() = 0;
   51.55 -  virtual int  length() const = 0;
   51.56 -  virtual void clear() = 0;
   51.57 -  virtual bool append(MemPointer* ptr) = 0;
   51.58 -  virtual bool insert_at(MemPointer* ptr, int pos) = 0;
   51.59 -  virtual bool remove_at(int pos) = 0;
   51.60 -  virtual MemPointer* at(int index) const = 0;
   51.61 -  virtual void sort(FN_SORT fn) = 0;
   51.62 -  virtual size_t instance_size() const = 0;
   51.63 -  virtual bool shrink() = 0;
   51.64 -
   51.65 -  NOT_PRODUCT(virtual int capacity() const = 0;)
   51.66 -};
   51.67 -
   51.68 -// Iterator interface
   51.69 -class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
   51.70 - public:
   51.71 -  // return the pointer at current position
   51.72 -  virtual MemPointer* current() const = 0;
   51.73 -  // return the next pointer and advance current position
   51.74 -  virtual MemPointer* next() = 0;
   51.75 -  // return next pointer without advancing current position
   51.76 -  virtual MemPointer* peek_next() const = 0;
   51.77 -  // return previous pointer without changing current position
   51.78 -  virtual MemPointer* peek_prev() const = 0;
   51.79 -  // remove the pointer at current position
   51.80 -  virtual void        remove() = 0;
   51.81 -  // insert the pointer at current position
   51.82 -  virtual bool        insert(MemPointer* ptr) = 0;
   51.83 -  // insert specified element after current position and
   51.84 -  // move current position to newly inserted position
   51.85 -  virtual bool        insert_after(MemPointer* ptr) = 0;
   51.86 -};
   51.87 -
   51.88 -// implementation class
   51.89 -class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
   51.90 - protected:
   51.91 -  MemPointerArray*  _array;
   51.92 -  int               _pos;
   51.93 -
   51.94 - public:
   51.95 -  MemPointerArrayIteratorImpl(MemPointerArray* arr) {
   51.96 -    assert(arr != NULL, "Parameter check");
   51.97 -    _array = arr;
   51.98 -    _pos = 0;
   51.99 -  }
  51.100 -
  51.101 -  virtual MemPointer* current() const {
  51.102 -    if (_pos < _array->length()) {
  51.103 -      return _array->at(_pos);
  51.104 -    }
  51.105 -    return NULL;
  51.106 -  }
  51.107 -
  51.108 -  virtual MemPointer* next() {
  51.109 -    if (_pos + 1 < _array->length()) {
  51.110 -      return _array->at(++_pos);
  51.111 -    }
  51.112 -    _pos = _array->length();
  51.113 -    return NULL;
  51.114 -  }
  51.115 -
  51.116 -  virtual MemPointer* peek_next() const {
  51.117 -    if (_pos + 1 < _array->length()) {
  51.118 -      return _array->at(_pos + 1);
  51.119 -    }
  51.120 -    return NULL;
  51.121 -  }
  51.122 -
  51.123 -  virtual MemPointer* peek_prev() const {
  51.124 -    if (_pos > 0) {
  51.125 -      return _array->at(_pos - 1);
  51.126 -    }
  51.127 -    return NULL;
  51.128 -  }
  51.129 -
  51.130 -  virtual void remove() {
  51.131 -    if (_pos < _array->length()) {
  51.132 -      _array->remove_at(_pos);
  51.133 -    }
  51.134 -  }
  51.135 -
  51.136 -  virtual bool insert(MemPointer* ptr) {
  51.137 -    return _array->insert_at(ptr, _pos);
  51.138 -  }
  51.139 -
  51.140 -  virtual bool insert_after(MemPointer* ptr) {
  51.141 -    if (_array->insert_at(ptr, _pos + 1)) {
  51.142 -      _pos ++;
  51.143 -      return true;
  51.144 -    }
  51.145 -    return false;
  51.146 -  }
  51.147 -};
  51.148 -
  51.149 -
  51.150 -
  51.151 -// Memory pointer array implementation.
   51.152 -// This implementation provides an expandable array
  51.153 -#define DEFAULT_PTR_ARRAY_SIZE 1024
  51.154 -
  51.155 -template <class E> class MemPointerArrayImpl : public MemPointerArray {
  51.156 - private:
  51.157 -  int                   _max_size;
  51.158 -  int                   _size;
  51.159 -  bool                  _init_elements;
  51.160 -  E*                    _data;
  51.161 -
  51.162 - public:
  51.163 -  MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true):
  51.164 -   _max_size(initial_size), _size(0), _init_elements(init_elements) {
  51.165 -    _data = (E*)raw_allocate(sizeof(E), initial_size);
  51.166 -    if (_init_elements) {
  51.167 -      for (int index = 0; index < _max_size; index ++) {
  51.168 -        ::new ((void*)&_data[index]) E();
  51.169 -      }
  51.170 -    }
  51.171 -  }
  51.172 -
  51.173 -  virtual ~MemPointerArrayImpl() {
  51.174 -    if (_data != NULL) {
  51.175 -      raw_free(_data);
  51.176 -    }
  51.177 -  }
  51.178 -
  51.179 - public:
  51.180 -  bool out_of_memory() const {
  51.181 -    return (_data == NULL);
  51.182 -  }
  51.183 -
  51.184 -  size_t instance_size() const {
  51.185 -    return sizeof(MemPointerArrayImpl<E>) + _max_size * sizeof(E);
  51.186 -  }
  51.187 -
  51.188 -  bool is_empty() const {
  51.189 -    assert(_data != NULL, "Just check");
  51.190 -    return _size == 0;
  51.191 -  }
  51.192 -
  51.193 -  bool is_full() {
  51.194 -    assert(_data != NULL, "Just check");
  51.195 -    if (_size < _max_size) {
  51.196 -      return false;
  51.197 -    } else {
  51.198 -      return !expand_array();
  51.199 -    }
  51.200 -  }
  51.201 -
  51.202 -  int length() const {
  51.203 -    assert(_data != NULL, "Just check");
  51.204 -    return _size;
  51.205 -  }
  51.206 -
  51.207 -  NOT_PRODUCT(int capacity() const { return _max_size; })
  51.208 -
  51.209 -  void clear() {
  51.210 -    assert(_data != NULL, "Just check");
  51.211 -    _size = 0;
  51.212 -  }
  51.213 -
  51.214 -  bool append(MemPointer* ptr) {
  51.215 -    assert(_data != NULL, "Just check");
  51.216 -    if (is_full()) {
  51.217 -      return false;
  51.218 -    }
  51.219 -    _data[_size ++] = *(E*)ptr;
  51.220 -    return true;
  51.221 -  }
  51.222 -
  51.223 -  bool insert_at(MemPointer* ptr, int pos) {
  51.224 -    assert(_data != NULL, "Just check");
  51.225 -    if (is_full()) {
  51.226 -      return false;
  51.227 -    }
  51.228 -    for (int index = _size; index > pos; index --) {
  51.229 -      _data[index] = _data[index - 1];
  51.230 -    }
  51.231 -    _data[pos] = *(E*)ptr;
  51.232 -    _size ++;
  51.233 -    return true;
  51.234 -  }
  51.235 -
  51.236 -  bool remove_at(int pos) {
  51.237 -    assert(_data != NULL, "Just check");
  51.238 -    if (_size <= pos && pos >= 0) {
  51.239 -      return false;
  51.240 -    }
  51.241 -    -- _size;
  51.242 -
  51.243 -    for (int index = pos; index < _size; index ++) {
  51.244 -      _data[index] = _data[index + 1];
  51.245 -    }
  51.246 -    return true;
  51.247 -  }
  51.248 -
  51.249 -  MemPointer* at(int index) const {
  51.250 -    assert(_data != NULL, "Just check");
  51.251 -    assert(index >= 0 && index < _size, "illegal index");
  51.252 -    return &_data[index];
  51.253 -  }
  51.254 -
  51.255 -  bool shrink() {
  51.256 -    float used = ((float)_size) / ((float)_max_size);
  51.257 -    if (used < 0.40) {
  51.258 -      E* old_ptr = _data;
  51.259 -      int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE;
  51.260 -      _data = (E*)raw_reallocate(_data, sizeof(E), new_size);
  51.261 -      if (_data == NULL) {
  51.262 -        _data = old_ptr;
  51.263 -        return false;
  51.264 -      } else {
  51.265 -        _max_size = new_size;
  51.266 -        return true;
  51.267 -      }
  51.268 -    }
  51.269 -    return false;
  51.270 -  }
  51.271 -
  51.272 -  void sort(FN_SORT fn) {
  51.273 -    assert(_data != NULL, "Just check");
  51.274 -    qsort((void*)_data, _size, sizeof(E), fn);
  51.275 -  }
  51.276 -
  51.277 - private:
  51.278 -  bool  expand_array() {
  51.279 -    assert(_data != NULL, "Not yet allocated");
  51.280 -    E* old_ptr = _data;
  51.281 -    if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E),
  51.282 -      _max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) {
  51.283 -      _data = old_ptr;
  51.284 -      return false;
  51.285 -    } else {
  51.286 -      _max_size += DEFAULT_PTR_ARRAY_SIZE;
  51.287 -      if (_init_elements) {
  51.288 -        for (int index = _size; index < _max_size; index ++) {
  51.289 -          ::new ((void*)&_data[index]) E();
  51.290 -        }
  51.291 -      }
  51.292 -      return true;
  51.293 -    }
  51.294 -  }
  51.295 -
  51.296 -  void* raw_allocate(size_t elementSize, int items) {
  51.297 -    return os::malloc(elementSize * items, mtNMT);
  51.298 -  }
  51.299 -
  51.300 -  void* raw_reallocate(void* ptr, size_t elementSize, int items) {
  51.301 -    return os::realloc(ptr, elementSize * items, mtNMT);
  51.302 -  }
  51.303 -
  51.304 -  void  raw_free(void* ptr) {
  51.305 -    os::free(ptr, mtNMT);
  51.306 -  }
  51.307 -};
  51.308 -
  51.309 -#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
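For orientation: the MemPointerArrayImpl removed above grows in DEFAULT_PTR_ARRAY_SIZE chunks and gives capacity back once usage drops below 40%. The following is a minimal standalone C++ sketch of just that resize policy; the name GrowablePtrArraySketch and the use of plain malloc/realloc in place of os::malloc/os::realloc are illustrative and not part of this changeset.

// Sketch of the resize policy of the removed MemPointerArrayImpl.
// Illustrative only; element construction and callers are omitted.
#include <cstdlib>

static const int kChunk = 1024;   // mirrors DEFAULT_PTR_ARRAY_SIZE

template <class E> class GrowablePtrArraySketch {
  int _max_size;
  int _size;
  E*  _data;
 public:
  GrowablePtrArraySketch() : _max_size(kChunk), _size(0),
    _data((E*)std::malloc(sizeof(E) * kChunk)) { }
  ~GrowablePtrArraySketch() { std::free(_data); }

  // Grow by one chunk when full, as expand_array() did.
  bool expand() {
    E* p = (E*)std::realloc(_data, sizeof(E) * (_max_size + kChunk));
    if (p == NULL) return false;            // original buffer stays valid
    _data = p;
    _max_size += kChunk;
    return true;
  }

  // Release capacity once usage falls below 40%, as shrink() did.
  bool shrink() {
    if ((float)_size / (float)_max_size >= 0.40f) return false;
    int new_size = (_max_size / (2 * kChunk) + 1) * kChunk;
    E* p = (E*)std::realloc(_data, sizeof(E) * new_size);
    if (p == NULL) return false;
    _data = p;
    _max_size = new_size;
    return true;
  }
};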
    52.1 --- a/src/share/vm/services/memRecorder.cpp	Wed Aug 27 09:36:55 2014 +0200
    52.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    52.3 @@ -1,171 +0,0 @@
    52.4 -/*
    52.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    52.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    52.7 - *
    52.8 - * This code is free software; you can redistribute it and/or modify it
    52.9 - * under the terms of the GNU General Public License version 2 only, as
   52.10 - * published by the Free Software Foundation.
   52.11 - *
   52.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   52.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   52.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   52.15 - * version 2 for more details (a copy is included in the LICENSE file that
   52.16 - * accompanied this code).
   52.17 - *
   52.18 - * You should have received a copy of the GNU General Public License version
   52.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   52.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   52.21 - *
   52.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   52.23 - * or visit www.oracle.com if you need additional information or have any
   52.24 - * questions.
   52.25 - *
   52.26 - */
   52.27 -
   52.28 -#include "precompiled.hpp"
   52.29 -
   52.30 -#include "runtime/atomic.hpp"
   52.31 -#include "services/memBaseline.hpp"
   52.32 -#include "services/memRecorder.hpp"
   52.33 -#include "services/memPtr.hpp"
   52.34 -#include "services/memTracker.hpp"
   52.35 -
   52.36 -MemPointer* SequencedRecordIterator::next_record() {
   52.37 -  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
   52.38 -  if (itr_cur == NULL)  {
   52.39 -    return itr_cur;
   52.40 -  }
   52.41 -
   52.42 -  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();
   52.43 -
   52.44 -  // don't collapse virtual memory records
   52.45 -  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
   52.46 -    !itr_next->is_vm_pointer() &&
   52.47 -    same_kind(itr_cur, itr_next)) {
   52.48 -    itr_cur = itr_next;
   52.49 -    itr_next = (MemPointerRecord*)_itr.next();
   52.50 -  }
   52.51 -
   52.52 -  return itr_cur;
   52.53 -}
   52.54 -
   52.55 -
   52.56 -volatile jint MemRecorder::_instance_count = 0;
   52.57 -
   52.58 -MemRecorder::MemRecorder() {
   52.59 -  assert(MemTracker::is_on(), "Native memory tracking is off");
   52.60 -  Atomic::inc(&_instance_count);
   52.61 -  set_generation();
   52.62 -
   52.63 -  if (MemTracker::track_callsite()) {
   52.64 -    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
   52.65 -        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
   52.66 -  } else {
   52.67 -    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
   52.68 -        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
   52.69 -  }
   52.70 -  _next = NULL;
   52.71 -
   52.72 -
   52.73 -  if (_pointer_records != NULL) {
    52.74 -    // record itself
   52.75 -    address pc = CURRENT_PC;
   52.76 -    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
   52.77 -        sizeof(MemRecorder), SequenceGenerator::next(), pc);
   52.78 -    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
   52.79 -        _pointer_records->instance_size(), SequenceGenerator::next(), pc);
   52.80 -  }
   52.81 -}
   52.82 -
   52.83 -MemRecorder::~MemRecorder() {
   52.84 -  if (_pointer_records != NULL) {
   52.85 -    if (MemTracker::is_on()) {
   52.86 -      MemTracker::record_free((address)_pointer_records, mtNMT);
   52.87 -      MemTracker::record_free((address)this, mtNMT);
   52.88 -    }
   52.89 -    delete _pointer_records;
   52.90 -  }
   52.91 -  // delete all linked recorders
   52.92 -  while (_next != NULL) {
   52.93 -    MemRecorder* tmp = _next;
   52.94 -    _next = _next->next();
   52.95 -    tmp->set_next(NULL);
   52.96 -    delete tmp;
   52.97 -  }
   52.98 -  Atomic::dec(&_instance_count);
   52.99 -}
  52.100 -
  52.101 -// Sorting order:
  52.102 -//   1. memory block address
  52.103 -//   2. mem pointer record tags
  52.104 -//   3. sequence number
  52.105 -int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
  52.106 -  const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
  52.107 -  const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
  52.108 -  int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
  52.109 -  if (delta == 0) {
  52.110 -    int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
  52.111 -                              (p2->flags() & MemPointerRecord::tag_masks));
  52.112 -    if (df == 0) {
  52.113 -      assert(p1->seq() != p2->seq(), "dup seq");
  52.114 -      return p1->seq() - p2->seq();
  52.115 -    } else {
  52.116 -      return df;
  52.117 -    }
  52.118 -  } else {
  52.119 -    return delta;
  52.120 -  }
  52.121 -}
  52.122 -
  52.123 -bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
  52.124 -  assert(seq > 0, "No sequence number");
  52.125 -#ifdef ASSERT
  52.126 -  if (MemPointerRecord::is_virtual_memory_record(flags)) {
  52.127 -    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
  52.128 -  } else {
  52.129 -    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
  52.130 -           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
  52.131 -           IS_ARENA_OBJ(flags),
  52.132 -           "bad malloc record");
  52.133 -  }
  52.134 -  // a recorder should only hold records within the same generation
  52.135 -  unsigned long cur_generation = SequenceGenerator::current_generation();
  52.136 -  assert(cur_generation == _generation,
  52.137 -         "this thread did not enter sync point");
  52.138 -#endif
  52.139 -
  52.140 -  if (MemTracker::track_callsite()) {
  52.141 -    SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
  52.142 -    debug_only(check_dup_seq(ap.seq());)
  52.143 -    return _pointer_records->append(&ap);
  52.144 -  } else {
  52.145 -    SeqMemPointerRecord ap(p, flags, size, seq);
  52.146 -    debug_only(check_dup_seq(ap.seq());)
  52.147 -    return _pointer_records->append(&ap);
  52.148 -  }
  52.149 -}
  52.150 -
  52.151 -  // iterator for alloc pointers
  52.152 -SequencedRecordIterator MemRecorder::pointer_itr() {
  52.153 -  assert(_pointer_records != NULL, "just check");
  52.154 -  _pointer_records->sort((FN_SORT)sort_record_fn);
  52.155 -  return SequencedRecordIterator(_pointer_records);
  52.156 -}
  52.157 -
  52.158 -
  52.159 -void MemRecorder::set_generation() {
  52.160 -  _generation = SequenceGenerator::current_generation();
  52.161 -}
  52.162 -
  52.163 -#ifdef ASSERT
  52.164 -
  52.165 -void MemRecorder::check_dup_seq(jint seq) const {
  52.166 -  MemPointerArrayIteratorImpl itr(_pointer_records);
  52.167 -  MemPointerRecord* rc = (MemPointerRecord*)itr.current();
  52.168 -  while (rc != NULL) {
  52.169 -    assert(rc->seq() != seq, "dup seq");
  52.170 -    rc = (MemPointerRecord*)itr.next();
  52.171 -  }
  52.172 -}
  52.173 -
  52.174 -#endif
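MemRecorder::sort_record_fn above orders records by block address, then by tag bits, then by sequence number. Below is a minimal sketch of that three-level comparator against a simplified record type; RecordSketch and its fields are illustrative stand-ins for MemPointerRecord, not part of this changeset.

// Three-level ordering applied before records are handed to the iterator:
// 1. memory block address  2. record tag bits  3. sequence number
#include <cstdint>

struct RecordSketch {      // simplified stand-in for MemPointerRecord
  uintptr_t addr;          // memory block address
  unsigned  tag;           // allocation/free tag bits
  int       seq;           // sequence number within a generation
};

extern "C" int sort_record_sketch(const void* e1, const void* e2) {
  const RecordSketch* p1 = (const RecordSketch*)e1;
  const RecordSketch* p2 = (const RecordSketch*)e2;
  if (p1->addr != p2->addr) return (p1->addr < p2->addr) ? -1 : 1;
  if (p1->tag  != p2->tag)  return (p1->tag  < p2->tag)  ? -1 : 1;
  return p1->seq - p2->seq;
}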
    53.1 --- a/src/share/vm/services/memRecorder.hpp	Wed Aug 27 09:36:55 2014 +0200
    53.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    53.3 @@ -1,271 +0,0 @@
    53.4 -/*
    53.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    53.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    53.7 - *
    53.8 - * This code is free software; you can redistribute it and/or modify it
    53.9 - * under the terms of the GNU General Public License version 2 only, as
   53.10 - * published by the Free Software Foundation.
   53.11 - *
   53.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   53.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   53.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   53.15 - * version 2 for more details (a copy is included in the LICENSE file that
   53.16 - * accompanied this code).
   53.17 - *
   53.18 - * You should have received a copy of the GNU General Public License version
   53.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   53.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   53.21 - *
   53.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   53.23 - * or visit www.oracle.com if you need additional information or have any
   53.24 - * questions.
   53.25 - *
   53.26 - */
   53.27 -
   53.28 -#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP
   53.29 -#define SHARE_VM_SERVICES_MEM_RECORDER_HPP
   53.30 -
   53.31 -#include "memory/allocation.hpp"
   53.32 -#include "runtime/os.hpp"
   53.33 -#include "services/memPtrArray.hpp"
   53.34 -
   53.35 -class MemSnapshot;
   53.36 -class MemTracker;
   53.37 -class MemTrackWorker;
   53.38 -
   53.39 -// Fixed size memory pointer array implementation
   53.40 -template <class E, int SIZE> class FixedSizeMemPointerArray :
   53.41 -  public MemPointerArray {
   53.42 -  // This implementation is for memory recorder only
   53.43 -  friend class MemRecorder;
   53.44 -
   53.45 - private:
   53.46 -  E      _data[SIZE];
   53.47 -  int    _size;
   53.48 -
   53.49 - protected:
   53.50 -  FixedSizeMemPointerArray(bool init_elements = false):
   53.51 -   _size(0){
   53.52 -    if (init_elements) {
   53.53 -      for (int index = 0; index < SIZE; index ++) {
   53.54 -        ::new ((void*)&_data[index]) E();
   53.55 -      }
   53.56 -    }
   53.57 -  }
   53.58 -
   53.59 -  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
   53.60 -    // the instance is part of memRecorder, needs to be tagged with 'otNMTRecorder'
   53.61 -    // to avoid recursion
   53.62 -    return os::malloc(size, (mtNMT | otNMTRecorder));
   53.63 -  }
   53.64 -
   53.65 -  void* operator new(size_t size) throw() {
   53.66 -    assert(false, "use nothrow version");
   53.67 -    return NULL;
   53.68 -  }
   53.69 -
   53.70 -  void operator delete(void* p) {
   53.71 -    os::free(p, (mtNMT | otNMTRecorder));
   53.72 -  }
   53.73 -
   53.74 -  // instance size
   53.75 -  inline size_t instance_size() const {
   53.76 -    return sizeof(FixedSizeMemPointerArray<E, SIZE>);
   53.77 -  }
   53.78 -
   53.79 -  NOT_PRODUCT(int capacity() const { return SIZE; })
   53.80 -
   53.81 - public:
   53.82 -  // implementation of public interface
   53.83 -  bool out_of_memory() const { return false; }
   53.84 -  bool is_empty()      const { return _size == 0; }
   53.85 -  bool is_full()             { return length() >= SIZE; }
   53.86 -  int  length()        const { return _size; }
   53.87 -
   53.88 -  void clear() {
   53.89 -    _size = 0;
   53.90 -  }
   53.91 -
   53.92 -  bool append(MemPointer* ptr) {
   53.93 -    if (is_full()) return false;
   53.94 -    _data[_size ++] = *(E*)ptr;
   53.95 -    return true;
   53.96 -  }
   53.97 -
   53.98 -  virtual bool insert_at(MemPointer* p, int pos) {
   53.99 -    assert(false, "append only");
  53.100 -    return false;
  53.101 -  }
  53.102 -
  53.103 -  virtual bool remove_at(int pos) {
  53.104 -    assert(false, "not supported");
  53.105 -    return false;
  53.106 -  }
  53.107 -
  53.108 -  MemPointer* at(int index) const {
  53.109 -    assert(index >= 0 && index < length(),
  53.110 -      "parameter check");
  53.111 -    return ((E*)&_data[index]);
  53.112 -  }
  53.113 -
  53.114 -  void sort(FN_SORT fn) {
  53.115 -    qsort((void*)_data, _size, sizeof(E), fn);
  53.116 -  }
  53.117 -
  53.118 -  bool shrink() {
  53.119 -    return false;
  53.120 -  }
  53.121 -};
  53.122 -
  53.123 -
  53.124 -// This iterator requires pre-sorted MemPointerArray, which is sorted by:
  53.125 -//  1. address
  53.126 -//  2. allocation type
  53.127 -//  3. sequence number
   53.128 -// While walking the array, the iterator collapses pointers with the same
   53.129 -// address and allocation type, and only returns the one with the highest
   53.130 -// sequence number.
   53.131 -//
   53.132 -// This is a read-only iterator; update methods are asserted.
  53.133 -class SequencedRecordIterator : public MemPointerArrayIterator {
  53.134 - private:
  53.135 -   MemPointerArrayIteratorImpl _itr;
  53.136 -   MemPointer*                 _cur;
  53.137 -
  53.138 - public:
  53.139 -  SequencedRecordIterator(const MemPointerArray* arr):
  53.140 -    _itr(const_cast<MemPointerArray*>(arr)) {
  53.141 -    _cur = next_record();
  53.142 -  }
  53.143 -
  53.144 -  SequencedRecordIterator(const SequencedRecordIterator& itr):
  53.145 -    _itr(itr._itr) {
  53.146 -    _cur = next_record();
  53.147 -  }
  53.148 -
  53.149 -  // return the pointer at current position
  53.150 -  virtual MemPointer* current() const {
  53.151 -    return _cur;
  53.152 -  };
  53.153 -
  53.154 -  // return the next pointer and advance current position
  53.155 -  virtual MemPointer* next() {
  53.156 -    _cur = next_record();
  53.157 -    return _cur;
  53.158 -  }
  53.159 -
  53.160 -  // return the next pointer without advancing current position
  53.161 -  virtual MemPointer* peek_next() const {
  53.162 -    assert(false, "not implemented");
  53.163 -    return NULL;
  53.164 -
  53.165 -  }
  53.166 -  // return the previous pointer without changing current position
  53.167 -  virtual MemPointer* peek_prev() const {
  53.168 -    assert(false, "not implemented");
  53.169 -    return NULL;
  53.170 -  }
  53.171 -
  53.172 -  // remove the pointer at current position
  53.173 -  virtual void remove() {
  53.174 -    assert(false, "read-only iterator");
  53.175 -  };
  53.176 -  // insert the pointer at current position
  53.177 -  virtual bool insert(MemPointer* ptr) {
  53.178 -    assert(false, "read-only iterator");
  53.179 -    return false;
  53.180 -  }
  53.181 -
  53.182 -  virtual bool insert_after(MemPointer* ptr) {
  53.183 -    assert(false, "read-only iterator");
  53.184 -    return false;
  53.185 -  }
  53.186 - private:
  53.187 -  // collapse the 'same kind' of records, and return this 'kind' of
  53.188 -  // record with highest sequence number
  53.189 -  MemPointer* next_record();
  53.190 -
  53.191 -  // Test if the two records are the same kind: the same memory block and allocation
  53.192 -  // type.
  53.193 -  inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
  53.194 -    assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
  53.195 -    return (p1->addr() == p2->addr() &&
   53.196 -      (p1->flags() & MemPointerRecord::tag_masks) ==
  53.197 -      (p2->flags() & MemPointerRecord::tag_masks));
  53.198 -  }
  53.199 -};
  53.200 -
  53.201 -
  53.202 -
  53.203 -#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512
  53.204 -
  53.205 -class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
  53.206 -  friend class MemSnapshot;
  53.207 -  friend class MemTracker;
  53.208 -  friend class MemTrackWorker;
  53.209 -  friend class GenerationData;
  53.210 -
  53.211 - protected:
  53.212 -  // the array that holds memory records
  53.213 -  MemPointerArray*         _pointer_records;
  53.214 -
  53.215 - private:
  53.216 -  // used for linked list
  53.217 -  MemRecorder*             _next;
  53.218 -  // active recorder can only record a certain generation data
  53.219 -  unsigned long            _generation;
  53.220 -
  53.221 - protected:
  53.222 -  _NOINLINE_ MemRecorder();
  53.223 -  ~MemRecorder();
  53.224 -
  53.225 -  // record a memory operation
  53.226 -  bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);
  53.227 -
  53.228 -  // linked list support
  53.229 -  inline void set_next(MemRecorder* rec) {
  53.230 -    _next = rec;
  53.231 -  }
  53.232 -
  53.233 -  inline MemRecorder* next() const {
  53.234 -    return _next;
  53.235 -  }
  53.236 -
  53.237 -  // if the recorder is full
  53.238 -  inline bool is_full() const {
  53.239 -    assert(_pointer_records != NULL, "just check");
  53.240 -    return _pointer_records->is_full();
  53.241 -  }
  53.242 -
  53.243 -  // if running out of memory when initializing recorder's internal
  53.244 -  // data
  53.245 -  inline bool out_of_memory() const {
  53.246 -    return (_pointer_records == NULL ||
  53.247 -      _pointer_records->out_of_memory());
  53.248 -  }
  53.249 -
  53.250 -  inline void clear() {
  53.251 -    assert(_pointer_records != NULL, "Just check");
  53.252 -    _pointer_records->clear();
  53.253 -  }
  53.254 -
  53.255 -  SequencedRecordIterator pointer_itr();
  53.256 -
   53.257 -  // return the generation to which this recorder belongs
  53.258 -  unsigned long get_generation() const { return _generation; }
  53.259 - protected:
   53.260 -  // number of MemRecorder instances
  53.261 -  static volatile jint _instance_count;
  53.262 -
  53.263 - private:
  53.264 -  // sorting function, sort records into following order
  53.265 -  // 1. memory address
  53.266 -  // 2. allocation type
  53.267 -  // 3. sequence number
  53.268 -  static int sort_record_fn(const void* e1, const void* e2);
  53.269 -
  53.270 -  debug_only(void check_dup_seq(jint seq) const;)
  53.271 -  void set_generation();
  53.272 -};
  53.273 -
  53.274 -#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
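SequencedRecordIterator walks the pre-sorted array and collapses malloc records that share an address and tag, keeping only the one with the highest sequence number (virtual memory records are never collapsed). A minimal sketch of that collapse step; Rec and collapse are illustrative names, not part of this changeset.

// Collapse consecutive records with the same (addr, tag); because the input
// is sorted by (addr, tag, seq), the last record of each group carries the
// highest sequence number and is the one that survives.
#include <vector>

struct Rec { long addr; int tag; int seq; };

std::vector<Rec> collapse(const std::vector<Rec>& sorted) {
  std::vector<Rec> out;
  for (size_t i = 0; i < sorted.size(); i++) {
    if (!out.empty() &&
        out.back().addr == sorted[i].addr &&
        out.back().tag  == sorted[i].tag) {
      out.back() = sorted[i];   // later record wins (higher seq)
    } else {
      out.push_back(sorted[i]);
    }
  }
  return out;
}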
    54.1 --- a/src/share/vm/services/memReporter.cpp	Wed Aug 27 09:36:55 2014 +0200
    54.2 +++ b/src/share/vm/services/memReporter.cpp	Wed Aug 27 08:19:12 2014 -0400
    54.3 @@ -22,618 +22,595 @@
    54.4   *
    54.5   */
    54.6  #include "precompiled.hpp"
    54.7 -#include "classfile/systemDictionary.hpp"
    54.8 -#include "runtime/os.hpp"
    54.9 +
   54.10 +#include "memory/allocation.hpp"
   54.11 +#include "services/mallocTracker.hpp"
   54.12  #include "services/memReporter.hpp"
   54.13 -#include "services/memPtrArray.hpp"
   54.14 -#include "services/memTracker.hpp"
   54.15 +#include "services/virtualMemoryTracker.hpp"
   54.16 +#include "utilities/globalDefinitions.hpp"
   54.17  
   54.18 -PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
   54.19 -
   54.20 -const char* BaselineOutputer::memory_unit(size_t scale) {
   54.21 -  switch(scale) {
   54.22 -    case K: return "KB";
   54.23 -    case M: return "MB";
   54.24 -    case G: return "GB";
   54.25 -  }
   54.26 -  ShouldNotReachHere();
   54.27 -  return NULL;
   54.28 +size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
   54.29 +  return malloc->malloc_size() + malloc->arena_size() + vm->reserved();
   54.30  }
   54.31  
   54.32 -
   54.33 -void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary_only) {
   54.34 -  assert(MemTracker::is_on(), "Native memory tracking is off");
   54.35 -  _outputer.start(scale());
   54.36 -  _outputer.total_usage(
   54.37 -    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_reserved_amount()),
   54.38 -    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_committed_amount()));
   54.39 -
   54.40 -  _outputer.num_of_classes(baseline.number_of_classes());
   54.41 -  _outputer.num_of_threads(baseline.number_of_threads());
   54.42 -
   54.43 -  report_summaries(baseline);
   54.44 -  if (!summary_only && MemTracker::track_callsite()) {
   54.45 -    report_virtual_memory_map(baseline);
   54.46 -    report_callsites(baseline);
   54.47 -  }
   54.48 -  _outputer.done();
   54.49 +size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
   54.50 +  return malloc->malloc_size() + malloc->arena_size() + vm->committed();
   54.51  }
   54.52  
   54.53 -void BaselineReporter::report_summaries(const MemBaseline& baseline) {
   54.54 -  _outputer.start_category_summary();
   54.55 -  MEMFLAGS type;
   54.56 +void MemReporterBase::print_total(size_t reserved, size_t committed) const {
   54.57 +  const char* scale = current_scale();
   54.58 +  output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
   54.59 +    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
   54.60 +}
   54.61  
   54.62 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   54.63 -    type = MemBaseline::MemType2NameMap[index]._flag;
   54.64 -    _outputer.category_summary(type,
   54.65 -      amount_in_current_scale(baseline.reserved_amount(type)),
   54.66 -      amount_in_current_scale(baseline.committed_amount(type)),
   54.67 -      amount_in_current_scale(baseline.malloc_amount(type)),
   54.68 -      baseline.malloc_count(type),
   54.69 -      amount_in_current_scale(baseline.arena_amount(type)),
   54.70 -      baseline.arena_count(type));
   54.71 +void MemReporterBase::print_malloc(size_t amount, size_t count) const {
   54.72 +  const char* scale = current_scale();
   54.73 +  outputStream* out = output();
   54.74 +  out->print("(malloc=" SIZE_FORMAT "%s",
   54.75 +    amount_in_current_scale(amount), scale);
   54.76 +
   54.77 +  if (count > 0) {
   54.78 +    out->print(" #" SIZE_FORMAT "", count);
   54.79    }
   54.80  
   54.81 -  _outputer.done_category_summary();
   54.82 +  out->print(")");
   54.83  }
   54.84  
   54.85 -void BaselineReporter::report_virtual_memory_map(const MemBaseline& baseline) {
   54.86 -  _outputer.start_virtual_memory_map();
   54.87 -  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
   54.88 -  MemPointerArrayIteratorImpl itr = MemPointerArrayIteratorImpl(pBL->_vm_map);
   54.89 -  VMMemRegionEx* rgn = (VMMemRegionEx*)itr.current();
   54.90 -  while (rgn != NULL) {
   54.91 -    if (rgn->is_reserved_region()) {
   54.92 -      _outputer.reserved_memory_region(FLAGS_TO_MEMORY_TYPE(rgn->flags()),
   54.93 -        rgn->base(), rgn->base() + rgn->size(), amount_in_current_scale(rgn->size()), rgn->pc());
   54.94 -    } else {
   54.95 -      _outputer.committed_memory_region(rgn->base(), rgn->base() + rgn->size(),
   54.96 -        amount_in_current_scale(rgn->size()), rgn->pc());
   54.97 -    }
   54.98 -    rgn = (VMMemRegionEx*)itr.next();
   54.99 +void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const {
  54.100 +  const char* scale = current_scale();
  54.101 +  output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)",
  54.102 +    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
  54.103 +}
  54.104 +
  54.105 +void MemReporterBase::print_malloc_line(size_t amount, size_t count) const {
  54.106 +  output()->print("%28s", " ");
  54.107 +  print_malloc(amount, count);
  54.108 +  output()->print_cr(" ");
  54.109 +}
  54.110 +
  54.111 +void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const {
  54.112 +  output()->print("%28s", " ");
  54.113 +  print_virtual_memory(reserved, committed);
  54.114 +  output()->print_cr(" ");
  54.115 +}
  54.116 +
  54.117 +void MemReporterBase::print_arena_line(size_t amount, size_t count) const {
  54.118 +  const char* scale = current_scale();
  54.119 +  output()->print_cr("%27s (arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")", " ",
  54.120 +    amount_in_current_scale(amount), scale, count);
  54.121 +}
  54.122 +
  54.123 +void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
  54.124 +  const char* scale = current_scale();
  54.125 +  output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
  54.126 +    p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
  54.127 +}
  54.128 +
  54.129 +
  54.130 +void MemSummaryReporter::report() {
  54.131 +  const char* scale = current_scale();
  54.132 +  outputStream* out = output();
  54.133 +  size_t total_reserved_amount = _malloc_snapshot->total() +
  54.134 +    _vm_snapshot->total_reserved();
  54.135 +  size_t total_committed_amount = _malloc_snapshot->total() +
  54.136 +    _vm_snapshot->total_committed();
  54.137 +
  54.138 +  // Overall total
  54.139 +  out->print_cr("\nNative Memory Tracking:\n");
  54.140 +  out->print("Total: ");
  54.141 +  print_total(total_reserved_amount, total_committed_amount);
  54.142 +  out->print("\n");
  54.143 +
  54.144 +  // Summary by memory type
  54.145 +  for (int index = 0; index < mt_number_of_types; index ++) {
  54.146 +    MEMFLAGS flag = NMTUtil::index_to_flag(index);
  54.147 +    // thread stack is reported as part of thread category
  54.148 +    if (flag == mtThreadStack) continue;
  54.149 +    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
  54.150 +    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
  54.151 +
  54.152 +    report_summary_of_type(flag, malloc_memory, virtual_memory);
  54.153 +  }
  54.154 +}
  54.155 +
  54.156 +void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
  54.157 +  MallocMemory*  malloc_memory, VirtualMemory* virtual_memory) {
  54.158 +
  54.159 +  size_t reserved_amount  = reserved_total (malloc_memory, virtual_memory);
  54.160 +  size_t committed_amount = committed_total(malloc_memory, virtual_memory);
  54.161 +
  54.162 +  // Count thread's native stack in "Thread" category
  54.163 +  if (flag == mtThread) {
  54.164 +    const VirtualMemory* thread_stack_usage =
  54.165 +      (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
  54.166 +    reserved_amount  += thread_stack_usage->reserved();
  54.167 +    committed_amount += thread_stack_usage->committed();
  54.168 +  } else if (flag == mtNMT) {
  54.169 +    // Count malloc headers in "NMT" category
  54.170 +    reserved_amount  += _malloc_snapshot->malloc_overhead()->size();
  54.171 +    committed_amount += _malloc_snapshot->malloc_overhead()->size();
  54.172    }
  54.173  
  54.174 -  _outputer.done_virtual_memory_map();
  54.175 +  if (amount_in_current_scale(reserved_amount) > 0) {
  54.176 +    outputStream* out   = output();
  54.177 +    const char*   scale = current_scale();
  54.178 +    out->print("-%26s (", NMTUtil::flag_to_name(flag));
  54.179 +    print_total(reserved_amount, committed_amount);
  54.180 +    out->print_cr(")");
  54.181 +
  54.182 +    if (flag == mtClass) {
  54.183 +      // report class count
  54.184 +      out->print_cr("%27s (classes #" SIZE_FORMAT ")", " ", _class_count);
  54.185 +    } else if (flag == mtThread) {
  54.186 +      // report thread count
  54.187 +      out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
  54.188 +      const VirtualMemory* thread_stack_usage =
  54.189 +       _vm_snapshot->by_type(mtThreadStack);
  54.190 +      out->print("%27s (stack: ", " ");
  54.191 +      print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
  54.192 +      out->print_cr(")");
  54.193 +    }
  54.194 +
  54.195 +     // report malloc'd memory
  54.196 +    if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) {
   54.197 +      // We don't know how many arena chunks are in use, so don't report the count
  54.198 +      size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count();
  54.199 +      print_malloc_line(malloc_memory->malloc_size(), count);
  54.200 +    }
  54.201 +
  54.202 +    if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
  54.203 +      print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
  54.204 +    }
  54.205 +
  54.206 +    if (amount_in_current_scale(malloc_memory->arena_size()) > 0) {
  54.207 +      print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count());
  54.208 +    }
  54.209 +
  54.210 +    if (flag == mtNMT &&
  54.211 +      amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
  54.212 +      out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
  54.213 +        amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
  54.214 +    }
  54.215 +
  54.216 +    out->print_cr(" ");
  54.217 +  }
  54.218  }
  54.219  
  54.220 -void BaselineReporter::report_callsites(const MemBaseline& baseline) {
  54.221 -  _outputer.start_callsite();
  54.222 -  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
  54.223 +void MemDetailReporter::report_detail() {
  54.224 +  // Start detail report
  54.225 +  outputStream* out = output();
  54.226 +  out->print_cr("Details:\n");
  54.227  
  54.228 -  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_size);
  54.229 -  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_size);
  54.230 +  report_malloc_sites();
  54.231 +  report_virtual_memory_allocation_sites();
  54.232 +}
  54.233  
  54.234 -  // walk malloc callsites
  54.235 -  MemPointerArrayIteratorImpl malloc_itr(pBL->_malloc_cs);
  54.236 -  MallocCallsitePointer*      malloc_callsite =
  54.237 -                  (MallocCallsitePointer*)malloc_itr.current();
  54.238 -  while (malloc_callsite != NULL) {
  54.239 -    _outputer.malloc_callsite(malloc_callsite->addr(),
  54.240 -        amount_in_current_scale(malloc_callsite->amount()), malloc_callsite->count());
  54.241 -    malloc_callsite = (MallocCallsitePointer*)malloc_itr.next();
  54.242 +void MemDetailReporter::report_malloc_sites() {
  54.243 +  MallocSiteIterator         malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
  54.244 +  if (malloc_itr.is_empty()) return;
  54.245 +
  54.246 +  outputStream* out = output();
  54.247 +
  54.248 +  const MallocSite* malloc_site;
  54.249 +  while ((malloc_site = malloc_itr.next()) != NULL) {
  54.250 +    // Don't report if size is too small
  54.251 +    if (amount_in_current_scale(malloc_site->size()) == 0)
  54.252 +      continue;
  54.253 +
  54.254 +    const NativeCallStack* stack = malloc_site->call_stack();
  54.255 +    stack->print_on(out);
  54.256 +    out->print("%29s", " ");
  54.257 +    print_malloc(malloc_site->size(), malloc_site->count());
  54.258 +    out->print_cr("\n");
  54.259 +  }
  54.260 +}
  54.261 +
  54.262 +void MemDetailReporter::report_virtual_memory_allocation_sites()  {
  54.263 +  VirtualMemorySiteIterator  virtual_memory_itr =
  54.264 +    _baseline.virtual_memory_sites(MemBaseline::by_size);
  54.265 +
  54.266 +  if (virtual_memory_itr.is_empty()) return;
  54.267 +
  54.268 +  outputStream* out = output();
  54.269 +  const VirtualMemoryAllocationSite*  virtual_memory_site;
  54.270 +
  54.271 +  while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) {
  54.272 +    // Don't report if size is too small
  54.273 +    if (amount_in_current_scale(virtual_memory_site->reserved()) == 0)
  54.274 +      continue;
  54.275 +
  54.276 +    const NativeCallStack* stack = virtual_memory_site->call_stack();
  54.277 +    stack->print_on(out);
  54.278 +    out->print("%28s (", " ");
  54.279 +    print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
  54.280 +    out->print_cr(")\n");
  54.281 +  }
  54.282 +}
  54.283 +
  54.284 +
  54.285 +void MemDetailReporter::report_virtual_memory_map() {
  54.286 +  // Virtual memory map always in base address order
  54.287 +  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
  54.288 +  const ReservedMemoryRegion* rgn;
  54.289 +
  54.290 +  output()->print_cr("Virtual memory map:");
  54.291 +  while ((rgn = itr.next()) != NULL) {
  54.292 +    report_virtual_memory_region(rgn);
  54.293 +  }
  54.294 +}
  54.295 +
  54.296 +void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
  54.297 +  assert(reserved_rgn != NULL, "NULL pointer");
  54.298 +
  54.299 +  // Don't report if size is too small
  54.300 +  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
  54.301 +
  54.302 +  outputStream* out = output();
  54.303 +  const char* scale = current_scale();
  54.304 +  const NativeCallStack*  stack = reserved_rgn->call_stack();
  54.305 +  bool all_committed = reserved_rgn->all_committed();
  54.306 +  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
  54.307 +  out->print_cr(" ");
  54.308 +  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
  54.309 +  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
  54.310 +  if (stack->is_empty()) {
  54.311 +    out->print_cr(" ");
  54.312 +  } else {
  54.313 +    out->print_cr(" from");
  54.314 +    stack->print_on(out, 4);
  54.315    }
  54.316  
  54.317 -  // walk virtual memory callsite
  54.318 -  MemPointerArrayIteratorImpl vm_itr(pBL->_vm_cs);
  54.319 -  VMCallsitePointer*          vm_callsite = (VMCallsitePointer*)vm_itr.current();
  54.320 -  while (vm_callsite != NULL) {
  54.321 -    _outputer.virtual_memory_callsite(vm_callsite->addr(),
  54.322 -      amount_in_current_scale(vm_callsite->reserved_amount()),
  54.323 -      amount_in_current_scale(vm_callsite->committed_amount()));
  54.324 -    vm_callsite = (VMCallsitePointer*)vm_itr.next();
  54.325 -  }
  54.326 -  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_pc);
  54.327 -  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_pc);
  54.328 -  _outputer.done_callsite();
  54.329 -}
  54.330 +  if (all_committed) return;
  54.331  
  54.332 -void BaselineReporter::diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
  54.333 -  bool summary_only) {
  54.334 -  assert(MemTracker::is_on(), "Native memory tracking is off");
  54.335 -  _outputer.start(scale());
  54.336 -  size_t total_reserved = cur.total_malloc_amount() + cur.total_reserved_amount();
  54.337 -  size_t total_committed = cur.total_malloc_amount() + cur.total_committed_amount();
  54.338 -
  54.339 -  _outputer.diff_total_usage(
  54.340 -    amount_in_current_scale(total_reserved), amount_in_current_scale(total_committed),
  54.341 -    diff_in_current_scale(total_reserved,  (prev.total_malloc_amount() + prev.total_reserved_amount())),
  54.342 -    diff_in_current_scale(total_committed, (prev.total_committed_amount() + prev.total_malloc_amount())));
  54.343 -
  54.344 -  _outputer.diff_num_of_classes(cur.number_of_classes(),
  54.345 -       diff(cur.number_of_classes(), prev.number_of_classes()));
  54.346 -  _outputer.diff_num_of_threads(cur.number_of_threads(),
  54.347 -       diff(cur.number_of_threads(), prev.number_of_threads()));
  54.348 -
  54.349 -  diff_summaries(cur, prev);
  54.350 -  if (!summary_only && MemTracker::track_callsite()) {
  54.351 -    diff_callsites(cur, prev);
  54.352 -  }
  54.353 -  _outputer.done();
  54.354 -}
  54.355 -
  54.356 -void BaselineReporter::diff_summaries(const MemBaseline& cur, const MemBaseline& prev) {
  54.357 -  _outputer.start_category_summary();
  54.358 -  MEMFLAGS type;
  54.359 -
  54.360 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
  54.361 -    type = MemBaseline::MemType2NameMap[index]._flag;
  54.362 -    _outputer.diff_category_summary(type,
  54.363 -      amount_in_current_scale(cur.reserved_amount(type)),
  54.364 -      amount_in_current_scale(cur.committed_amount(type)),
  54.365 -      amount_in_current_scale(cur.malloc_amount(type)),
  54.366 -      cur.malloc_count(type),
  54.367 -      amount_in_current_scale(cur.arena_amount(type)),
  54.368 -      cur.arena_count(type),
  54.369 -      diff_in_current_scale(cur.reserved_amount(type), prev.reserved_amount(type)),
  54.370 -      diff_in_current_scale(cur.committed_amount(type), prev.committed_amount(type)),
  54.371 -      diff_in_current_scale(cur.malloc_amount(type), prev.malloc_amount(type)),
  54.372 -      diff(cur.malloc_count(type), prev.malloc_count(type)),
  54.373 -      diff_in_current_scale(cur.arena_amount(type), prev.arena_amount(type)),
  54.374 -      diff(cur.arena_count(type), prev.arena_count(type)));
  54.375 -  }
  54.376 -
  54.377 -  _outputer.done_category_summary();
  54.378 -}
  54.379 -
  54.380 -void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline& prev) {
  54.381 -  _outputer.start_callsite();
  54.382 -  MemBaseline* pBL_cur = const_cast<MemBaseline*>(&cur);
  54.383 -  MemBaseline* pBL_prev = const_cast<MemBaseline*>(&prev);
  54.384 -
  54.385 -  // walk malloc callsites
  54.386 -  MemPointerArrayIteratorImpl cur_malloc_itr(pBL_cur->_malloc_cs);
  54.387 -  MemPointerArrayIteratorImpl prev_malloc_itr(pBL_prev->_malloc_cs);
  54.388 -
  54.389 -  MallocCallsitePointer*      cur_malloc_callsite =
  54.390 -                  (MallocCallsitePointer*)cur_malloc_itr.current();
  54.391 -  MallocCallsitePointer*      prev_malloc_callsite =
  54.392 -                  (MallocCallsitePointer*)prev_malloc_itr.current();
  54.393 -
  54.394 -  while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
  54.395 -    if (prev_malloc_callsite == NULL) {
  54.396 -      assert(cur_malloc_callsite != NULL, "sanity check");
  54.397 -      // this is a new callsite
  54.398 -      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
  54.399 -        amount_in_current_scale(cur_malloc_callsite->amount()),
  54.400 -        cur_malloc_callsite->count(),
  54.401 -        diff_in_current_scale(cur_malloc_callsite->amount(), 0),
  54.402 -        diff(cur_malloc_callsite->count(), 0));
  54.403 -      cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
  54.404 -    } else if (cur_malloc_callsite == NULL) {
  54.405 -      assert(prev_malloc_callsite != NULL, "Sanity check");
  54.406 -      // this callsite is already gone
  54.407 -      _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
  54.408 -        0, 0,
  54.409 -        diff_in_current_scale(0, prev_malloc_callsite->amount()),
  54.410 -        diff(0, prev_malloc_callsite->count()));
  54.411 -      prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
  54.412 +  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
  54.413 +  const CommittedMemoryRegion* committed_rgn;
  54.414 +  while ((committed_rgn = itr.next()) != NULL) {
  54.415 +    // Don't report if size is too small
  54.416 +    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
  54.417 +    stack = committed_rgn->call_stack();
  54.418 +    out->print("\n\t");
  54.419 +    print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
  54.420 +    if (stack->is_empty()) {
  54.421 +      out->print_cr(" ");
  54.422      } else {
  54.423 -      assert(cur_malloc_callsite  != NULL,  "Sanity check");
  54.424 -      assert(prev_malloc_callsite != NULL,  "Sanity check");
  54.425 -      if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
  54.426 -        // this is a new callsite
  54.427 -        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
  54.428 -          amount_in_current_scale(cur_malloc_callsite->amount()),
  54.429 -          cur_malloc_callsite->count(),
  54.430 -          diff_in_current_scale(cur_malloc_callsite->amount(), 0),
  54.431 -          diff(cur_malloc_callsite->count(), 0));
  54.432 -          cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
  54.433 -      } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
  54.434 -        // this callsite is already gone
  54.435 -        _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
  54.436 -          0, 0,
  54.437 -          diff_in_current_scale(0, prev_malloc_callsite->amount()),
  54.438 -          diff(0, prev_malloc_callsite->count()));
  54.439 -        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
  54.440 -      } else {
  54.441 -        // the same callsite
  54.442 -        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
  54.443 -          amount_in_current_scale(cur_malloc_callsite->amount()),
  54.444 -          cur_malloc_callsite->count(),
  54.445 -          diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()),
  54.446 -          diff(cur_malloc_callsite->count(), prev_malloc_callsite->count()));
  54.447 -        cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
  54.448 -        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
  54.449 -      }
  54.450 -    }
  54.451 -  }
  54.452 -
  54.453 -  // walk virtual memory callsite
  54.454 -  MemPointerArrayIteratorImpl cur_vm_itr(pBL_cur->_vm_cs);
  54.455 -  MemPointerArrayIteratorImpl prev_vm_itr(pBL_prev->_vm_cs);
  54.456 -  VMCallsitePointer*          cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.current();
  54.457 -  VMCallsitePointer*          prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current();
  54.458 -  while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) {
  54.459 -    if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) {
  54.460 -      // this is a new callsite
  54.461 -      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
  54.462 -        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
  54.463 -        amount_in_current_scale(cur_vm_callsite->committed_amount()),
  54.464 -        diff_in_current_scale(cur_vm_callsite->reserved_amount(), 0),
  54.465 -        diff_in_current_scale(cur_vm_callsite->committed_amount(), 0));
  54.466 -      cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next();
  54.467 -    } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) {
  54.468 -      // this callsite is already gone
  54.469 -      _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(),
  54.470 -        amount_in_current_scale(0),
  54.471 -        amount_in_current_scale(0),
  54.472 -        diff_in_current_scale(0, prev_vm_callsite->reserved_amount()),
  54.473 -        diff_in_current_scale(0, prev_vm_callsite->committed_amount()));
  54.474 -      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
  54.475 -    } else { // the same callsite
  54.476 -      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
  54.477 -        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
  54.478 -        amount_in_current_scale(cur_vm_callsite->committed_amount()),
  54.479 -        diff_in_current_scale(cur_vm_callsite->reserved_amount(), prev_vm_callsite->reserved_amount()),
  54.480 -        diff_in_current_scale(cur_vm_callsite->committed_amount(), prev_vm_callsite->committed_amount()));
  54.481 -      cur_vm_callsite  = (VMCallsitePointer*)cur_vm_itr.next();
  54.482 -      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
  54.483 -    }
  54.484 -  }
  54.485 -
  54.486 -  _outputer.done_callsite();
  54.487 -}
  54.488 -
  54.489 -size_t BaselineReporter::amount_in_current_scale(size_t amt) const {
  54.490 -  return (size_t)(((float)amt/(float)_scale) + 0.5);
  54.491 -}
  54.492 -
  54.493 -int BaselineReporter::diff_in_current_scale(size_t value1, size_t value2) const {
  54.494 -  return (int)(((float)value1 - (float)value2)/((float)_scale) + 0.5);
  54.495 -}
  54.496 -
  54.497 -int BaselineReporter::diff(size_t value1, size_t value2) const {
  54.498 -  return ((int)value1 - (int)value2);
  54.499 -}
  54.500 -
  54.501 -void BaselineTTYOutputer::start(size_t scale, bool report_diff) {
  54.502 -  _scale = scale;
  54.503 -  _output->print_cr(" ");
  54.504 -  _output->print_cr("Native Memory Tracking:");
  54.505 -  _output->print_cr(" ");
  54.506 -}
  54.507 -
  54.508 -void BaselineTTYOutputer::done() {
  54.509 -
  54.510 -}
  54.511 -
  54.512 -void BaselineTTYOutputer::total_usage(size_t total_reserved, size_t total_committed) {
  54.513 -  const char* unit = memory_unit(_scale);
  54.514 -  _output->print_cr("Total:  reserved=%d%s,  committed=%d%s",
  54.515 -    total_reserved, unit, total_committed, unit);
  54.516 -}
  54.517 -
  54.518 -void BaselineTTYOutputer::start_category_summary() {
  54.519 -  _output->print_cr(" ");
  54.520 -}
  54.521 -
  54.522 -/**
  54.523 - * report a summary of memory type
  54.524 - */
  54.525 -void BaselineTTYOutputer::category_summary(MEMFLAGS type,
  54.526 -  size_t reserved_amt, size_t committed_amt, size_t malloc_amt,
  54.527 -  size_t malloc_count, size_t arena_amt, size_t arena_count) {
  54.528 -
  54.529 -  // we report mtThreadStack under mtThread category
  54.530 -  if (type == mtThreadStack) {
  54.531 -    assert(malloc_amt == 0 && malloc_count == 0 && arena_amt == 0,
  54.532 -      "Just check");
  54.533 -    _thread_stack_reserved = reserved_amt;
  54.534 -    _thread_stack_committed = committed_amt;
  54.535 -  } else {
  54.536 -    const char* unit = memory_unit(_scale);
  54.537 -    size_t total_reserved = (reserved_amt + malloc_amt + arena_amt);
  54.538 -    size_t total_committed = (committed_amt + malloc_amt + arena_amt);
  54.539 -    if (type == mtThread) {
  54.540 -      total_reserved += _thread_stack_reserved;
  54.541 -      total_committed += _thread_stack_committed;
  54.542 -    }
  54.543 -
  54.544 -    if (total_reserved > 0) {
  54.545 -      _output->print_cr("-%26s (reserved=%d%s, committed=%d%s)",
  54.546 -        MemBaseline::type2name(type), total_reserved, unit,
  54.547 -        total_committed, unit);
  54.548 -
  54.549 -      if (type == mtClass) {
  54.550 -        _output->print_cr("%27s (classes #%d)", " ", _num_of_classes);
  54.551 -      } else if (type == mtThread) {
  54.552 -        _output->print_cr("%27s (thread #%d)", " ", _num_of_threads);
  54.553 -        _output->print_cr("%27s (stack: reserved=%d%s, committed=%d%s)", " ",
  54.554 -          _thread_stack_reserved, unit, _thread_stack_committed, unit);
  54.555 -      }
  54.556 -
  54.557 -      if (malloc_amt > 0) {
  54.558 -        if (type != mtChunk) {
  54.559 -          _output->print_cr("%27s (malloc=%d%s, #%d)", " ", malloc_amt, unit,
  54.560 -            malloc_count);
  54.561 -        } else {
  54.562 -          _output->print_cr("%27s (malloc=%d%s)", " ", malloc_amt, unit);
  54.563 -        }
  54.564 -      }
  54.565 -
  54.566 -      if (reserved_amt > 0) {
  54.567 -        _output->print_cr("%27s (mmap: reserved=%d%s, committed=%d%s)",
  54.568 -          " ", reserved_amt, unit, committed_amt, unit);
  54.569 -      }
  54.570 -
  54.571 -      if (arena_amt > 0) {
  54.572 -        _output->print_cr("%27s (arena=%d%s, #%d)", " ", arena_amt, unit, arena_count);
  54.573 -      }
  54.574 -
  54.575 -      _output->print_cr(" ");
  54.576 +      out->print_cr(" from");
  54.577 +      stack->print_on(out, 12);
  54.578      }
  54.579    }
  54.580  }
  54.581  
  54.582 -void BaselineTTYOutputer::done_category_summary() {
  54.583 -  _output->print_cr(" ");
  54.584 -}
  54.585 +void MemSummaryDiffReporter::report_diff() {
  54.586 +  const char* scale = current_scale();
  54.587 +  outputStream* out = output();
  54.588 +  out->print_cr("\nNative Memory Tracking:\n");
  54.589  
  54.590 +  // Overall diff
  54.591 +  out->print("Total: ");
  54.592 +  print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
  54.593 +    _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
  54.594 +    _early_baseline.total_committed_memory());
  54.595  
  54.596 -void BaselineTTYOutputer::start_virtual_memory_map() {
  54.597 -  _output->print_cr("Virtual memory map:");
  54.598 -}
  54.599 +  out->print_cr("\n");
  54.600  
  54.601 -void BaselineTTYOutputer::reserved_memory_region(MEMFLAGS type, address base, address end,
  54.602 -                                                 size_t size, address pc) {
  54.603 -  const char* unit = memory_unit(_scale);
  54.604 -  char buf[128];
  54.605 -  int  offset;
  54.606 -  _output->print_cr(" ");
  54.607 -  _output->print_cr("[" PTR_FORMAT " - " PTR_FORMAT "] reserved %d%s for %s", base, end, size, unit,
  54.608 -            MemBaseline::type2name(type));
  54.609 -  if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  54.610 -      _output->print_cr("\t\tfrom [%s+0x%x]", buf, offset);
  54.611 +  // Summary diff by memory type
  54.612 +  for (int index = 0; index < mt_number_of_types; index ++) {
  54.613 +    MEMFLAGS flag = NMTUtil::index_to_flag(index);
  54.614 +    // thread stack is reported as part of thread category
  54.615 +    if (flag == mtThreadStack) continue;
  54.616 +    diff_summary_of_type(flag, _early_baseline.malloc_memory(flag),
  54.617 +      _early_baseline.virtual_memory(flag), _current_baseline.malloc_memory(flag),
  54.618 +      _current_baseline.virtual_memory(flag));
  54.619    }
  54.620  }
  54.621  
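For orientation, the header that report_diff() emits renders roughly as follows (illustrative numbers, default KB scale; the per-type blocks produced by diff_summary_of_type() follow it):

    Native Memory Tracking:

    Total: reserved=3606543KB +4KB, committed=144331KB +4KB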
  54.622 -void BaselineTTYOutputer::committed_memory_region(address base, address end, size_t size, address pc) {
  54.623 -  const char* unit = memory_unit(_scale);
  54.624 -  char buf[128];
  54.625 -  int  offset;
  54.626 -  _output->print("\t[" PTR_FORMAT " - " PTR_FORMAT "] committed %d%s", base, end, size, unit);
  54.627 -  if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  54.628 -      _output->print_cr(" from [%s+0x%x]", buf, offset);
  54.629 +void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
  54.630 +    size_t early_amount, size_t early_count) const {
  54.631 +  const char* scale = current_scale();
  54.632 +  outputStream* out = output();
  54.633 +
  54.634 +  out->print("malloc=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
  54.635 +  long amount_diff = diff_in_current_scale(current_amount, early_amount);
  54.636 +  if (amount_diff != 0) {
  54.637 +    out->print(" %+ld%s", amount_diff, scale);
  54.638 +  }
  54.639 +  if (current_count > 0) {
  54.640 +    out->print(" #" SIZE_FORMAT "", current_count);
  54.641 +    if (current_count != early_count) {
  54.642 +      out->print(" %+d", (int)(current_count - early_count));
  54.643 +    }
  54.644    }
  54.645  }
  54.646  
  54.647 -void BaselineTTYOutputer::done_virtual_memory_map() {
  54.648 -  _output->print_cr(" ");
  54.649 -}
  54.650 +void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
  54.651 +  size_t early_amount, size_t early_count) const {
  54.652 +  const char* scale = current_scale();
  54.653 +  outputStream* out = output();
  54.654 +  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
  54.655 +  if (diff_in_current_scale(current_amount, early_amount) != 0) {
   54.656 +    out->print(" %+ld%s", diff_in_current_scale(current_amount, early_amount), scale);
  54.657 +  }
  54.658  
  54.659 -
  54.660 -
  54.661 -void BaselineTTYOutputer::start_callsite() {
  54.662 -  _output->print_cr("Details:");
  54.663 -  _output->print_cr(" ");
  54.664 -}
  54.665 -
  54.666 -void BaselineTTYOutputer::done_callsite() {
  54.667 -  _output->print_cr(" ");
  54.668 -}
  54.669 -
  54.670 -void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt,
  54.671 -  size_t malloc_count) {
  54.672 -  if (malloc_amt > 0) {
  54.673 -    const char* unit = memory_unit(_scale);
  54.674 -    char buf[128];
  54.675 -    int  offset;
  54.676 -    if (pc == 0) {
  54.677 -      _output->print("[BOOTSTRAP]%18s", " ");
  54.678 -    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  54.679 -      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
  54.680 -      _output->print("%28s", " ");
  54.681 -    } else {
  54.682 -      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
  54.683 -    }
  54.684 -
  54.685 -    _output->print_cr("(malloc=%d%s #%d)", malloc_amt, unit, malloc_count);
  54.686 -    _output->print_cr(" ");
  54.687 +  out->print(" #" SIZE_FORMAT "", current_count);
  54.688 +  if (current_count != early_count) {
  54.689 +    out->print(" %+d", (int)(current_count - early_count));
  54.690    }
  54.691  }
  54.692  
  54.693 -void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_amt,
  54.694 -  size_t committed_amt) {
  54.695 -  if (reserved_amt > 0) {
  54.696 -    const char* unit = memory_unit(_scale);
  54.697 -    char buf[128];
  54.698 -    int  offset;
  54.699 -    if (pc == 0) {
  54.700 -      _output->print("[BOOTSTRAP]%18s", " ");
  54.701 -    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  54.702 -      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
  54.703 -      _output->print("%28s", " ");
  54.704 -    } else {
  54.705 -      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
  54.706 -    }
  54.707 +void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
  54.708 +    size_t early_reserved, size_t early_committed) const {
  54.709 +  const char* scale = current_scale();
  54.710 +  outputStream* out = output();
  54.711 +  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale);
  54.712 +  long reserved_diff = diff_in_current_scale(current_reserved, early_reserved);
  54.713 +  if (reserved_diff != 0) {
  54.714 +    out->print(" %+ld%s", reserved_diff, scale);
  54.715 +  }
  54.716  
  54.717 -    _output->print_cr("(mmap: reserved=%d%s, committed=%d%s)",
  54.718 -      reserved_amt, unit, committed_amt, unit);
  54.719 -    _output->print_cr(" ");
  54.720 +  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale);
  54.721 +  long committed_diff = diff_in_current_scale(current_committed, early_committed);
  54.722 +  if (committed_diff != 0) {
  54.723 +    out->print(" %+ld%s", committed_diff, scale);
  54.724    }
  54.725  }
  54.726  
  54.727 -void BaselineTTYOutputer::diff_total_usage(size_t total_reserved,
  54.728 -  size_t total_committed, int reserved_diff, int committed_diff) {
  54.729 -  const char* unit = memory_unit(_scale);
  54.730 -  _output->print_cr("Total:  reserved=%d%s  %+d%s, committed=%d%s %+d%s",
  54.731 -    total_reserved, unit, reserved_diff, unit, total_committed, unit,
  54.732 -    committed_diff, unit);
  54.733 -}
  54.734  
  54.735 -void BaselineTTYOutputer::diff_category_summary(MEMFLAGS type,
  54.736 -  size_t cur_reserved_amt, size_t cur_committed_amt,
  54.737 -  size_t cur_malloc_amt, size_t cur_malloc_count,
  54.738 -  size_t cur_arena_amt, size_t cur_arena_count,
  54.739 -  int reserved_diff, int committed_diff, int malloc_diff,
  54.740 -  int malloc_count_diff, int arena_diff, int arena_count_diff) {
  54.741 +void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, const MallocMemory* early_malloc,
  54.742 +  const VirtualMemory* early_vm, const MallocMemory* current_malloc,
  54.743 +  const VirtualMemory* current_vm) const {
  54.744  
  54.745 -  if (type == mtThreadStack) {
  54.746 -    assert(cur_malloc_amt == 0 && cur_malloc_count == 0 &&
  54.747 -      cur_arena_amt == 0, "Just check");
  54.748 -    _thread_stack_reserved = cur_reserved_amt;
  54.749 -    _thread_stack_committed = cur_committed_amt;
  54.750 -    _thread_stack_reserved_diff = reserved_diff;
  54.751 -    _thread_stack_committed_diff = committed_diff;
  54.752 -  } else {
  54.753 -    const char* unit = memory_unit(_scale);
  54.754 -    size_t total_reserved = (cur_reserved_amt + cur_malloc_amt + cur_arena_amt);
  54.755 -    // nothing to report in this category
  54.756 -    if (total_reserved == 0) {
  54.757 -      return;
  54.758 -    }
  54.759 -    int    diff_reserved = (reserved_diff + malloc_diff + arena_diff);
  54.760 +  outputStream* out = output();
  54.761 +  const char* scale = current_scale();
  54.762  
  54.763 -    // category summary
  54.764 -    _output->print("-%26s (reserved=%d%s", MemBaseline::type2name(type),
  54.765 -      total_reserved, unit);
  54.766 +  // Total reserved and committed memory in current baseline
  54.767 +  size_t current_reserved_amount  = reserved_total (current_malloc, current_vm);
  54.768 +  size_t current_committed_amount = committed_total(current_malloc, current_vm);
  54.769  
  54.770 -    if (diff_reserved != 0) {
  54.771 -      _output->print(" %+d%s", diff_reserved, unit);
  54.772 +  // Total reserved and committed memory in early baseline
  54.773 +  size_t early_reserved_amount  = reserved_total(early_malloc, early_vm);
  54.774 +  size_t early_committed_amount = committed_total(early_malloc, early_vm);
  54.775 +
   54.776 +  // Adjust totals: fold thread stacks into mtThread and NMT's own malloc tracking overhead into mtNMT
  54.777 +  if (flag == mtThread) {
  54.778 +    const VirtualMemory* early_thread_stack_usage =
  54.779 +      _early_baseline.virtual_memory(mtThreadStack);
  54.780 +    const VirtualMemory* current_thread_stack_usage =
  54.781 +      _current_baseline.virtual_memory(mtThreadStack);
  54.782 +
  54.783 +    early_reserved_amount  += early_thread_stack_usage->reserved();
  54.784 +    early_committed_amount += early_thread_stack_usage->committed();
  54.785 +
  54.786 +    current_reserved_amount  += current_thread_stack_usage->reserved();
  54.787 +    current_committed_amount += current_thread_stack_usage->committed();
  54.788 +  } else if (flag == mtNMT) {
  54.789 +    early_reserved_amount  += _early_baseline.malloc_tracking_overhead();
  54.790 +    early_committed_amount += _early_baseline.malloc_tracking_overhead();
  54.791 +
  54.792 +    current_reserved_amount  += _current_baseline.malloc_tracking_overhead();
  54.793 +    current_committed_amount += _current_baseline.malloc_tracking_overhead();
  54.794 +  }
  54.795 +
  54.796 +  if (amount_in_current_scale(current_reserved_amount) > 0 ||
  54.797 +      diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
  54.798 +
  54.799 +    // print summary line
  54.800 +    out->print("-%26s (", NMTUtil::flag_to_name(flag));
  54.801 +    print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
  54.802 +      early_reserved_amount, early_committed_amount);
  54.803 +    out->print_cr(")");
  54.804 +
  54.805 +    // detail lines
  54.806 +    if (flag == mtClass) {
  54.807 +      // report class count
   54.808 +      out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count());
   54.809 +      int class_count_diff = (int)(_current_baseline.class_count() -
   54.810 +        _early_baseline.class_count());
   54.811 +      if (class_count_diff != 0) {
   54.812 +        out->print(" %+d", class_count_diff);
   54.813 +      }
  54.814 +      out->print_cr(")");
  54.815 +    } else if (flag == mtThread) {
  54.816 +      // report thread count
  54.817 +      out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count());
  54.818 +      int thread_count_diff = (int)(_current_baseline.thread_count() -
  54.819 +          _early_baseline.thread_count());
  54.820 +      if (thread_count_diff != 0) {
  54.821 +        out->print(" %+d", thread_count_diff);
  54.822 +      }
  54.823 +      out->print_cr(")");
  54.824 +
  54.825 +      // report thread stack
  54.826 +      const VirtualMemory* current_thread_stack =
  54.827 +          _current_baseline.virtual_memory(mtThreadStack);
  54.828 +      const VirtualMemory* early_thread_stack =
  54.829 +        _early_baseline.virtual_memory(mtThreadStack);
  54.830 +
  54.831 +      out->print("%27s (stack: ", " ");
  54.832 +      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
  54.833 +        early_thread_stack->reserved(), early_thread_stack->committed());
  54.834 +      out->print_cr(")");
  54.835      }
  54.836  
  54.837 -    size_t total_committed = cur_committed_amt + cur_malloc_amt + cur_arena_amt;
  54.838 -    _output->print(", committed=%d%s", total_committed, unit);
  54.839 -
  54.840 -    int total_committed_diff = committed_diff + malloc_diff + arena_diff;
  54.841 -    if (total_committed_diff != 0) {
  54.842 -      _output->print(" %+d%s", total_committed_diff, unit);
  54.843 +    // Report malloc'd memory
  54.844 +    size_t current_malloc_amount = current_malloc->malloc_size();
  54.845 +    size_t early_malloc_amount   = early_malloc->malloc_size();
  54.846 +    if (amount_in_current_scale(current_malloc_amount) > 0 ||
  54.847 +        diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
  54.848 +      out->print("%28s(", " ");
  54.849 +      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
  54.850 +        early_malloc_amount, early_malloc->malloc_count());
  54.851 +      out->print_cr(")");
  54.852      }
  54.853  
  54.854 -    _output->print_cr(")");
  54.855 -
  54.856 -    // special cases
  54.857 -    if (type == mtClass) {
  54.858 -      _output->print("%27s (classes #%d", " ", _num_of_classes);
  54.859 -      if (_num_of_classes_diff != 0) {
  54.860 -        _output->print(" %+d", _num_of_classes_diff);
  54.861 -      }
  54.862 -      _output->print_cr(")");
  54.863 -    } else if (type == mtThread) {
  54.864 -      // thread count
  54.865 -      _output->print("%27s (thread #%d", " ", _num_of_threads);
  54.866 -      if (_num_of_threads_diff != 0) {
  54.867 -        _output->print_cr(" %+d)", _num_of_threads_diff);
  54.868 -      } else {
  54.869 -        _output->print_cr(")");
  54.870 -      }
  54.871 -      _output->print("%27s (stack: reserved=%d%s", " ", _thread_stack_reserved, unit);
  54.872 -      if (_thread_stack_reserved_diff != 0) {
  54.873 -        _output->print(" %+d%s", _thread_stack_reserved_diff, unit);
  54.874 -      }
  54.875 -
  54.876 -      _output->print(", committed=%d%s", _thread_stack_committed, unit);
  54.877 -      if (_thread_stack_committed_diff != 0) {
  54.878 -        _output->print(" %+d%s",_thread_stack_committed_diff, unit);
  54.879 -      }
  54.880 -
  54.881 -      _output->print_cr(")");
  54.882 +    // Report virtual memory
  54.883 +    if (amount_in_current_scale(current_vm->reserved()) > 0 ||
  54.884 +        diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
  54.885 +      out->print("%27s (mmap: ", " ");
  54.886 +      print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
  54.887 +        early_vm->reserved(), early_vm->committed());
  54.888 +      out->print_cr(")");
  54.889      }
  54.890  
  54.891 -    // malloc'd memory
  54.892 -    if (cur_malloc_amt > 0) {
  54.893 -      _output->print("%27s (malloc=%d%s", " ", cur_malloc_amt, unit);
  54.894 -      if (malloc_diff != 0) {
  54.895 -        _output->print(" %+d%s", malloc_diff, unit);
  54.896 -      }
  54.897 -      if (type != mtChunk) {
  54.898 -        _output->print(", #%d", cur_malloc_count);
  54.899 -        if (malloc_count_diff) {
  54.900 -          _output->print(" %+d", malloc_count_diff);
  54.901 -        }
  54.902 -      }
  54.903 -      _output->print_cr(")");
  54.904 +    // Report arena memory
  54.905 +    if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
  54.906 +        diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
  54.907 +      out->print("%28s(", " ");
  54.908 +      print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
  54.909 +        early_malloc->arena_size(), early_malloc->arena_count());
  54.910 +      out->print_cr(")");
  54.911      }
  54.912  
  54.913 -    // mmap'd memory
  54.914 -    if (cur_reserved_amt > 0) {
  54.915 -      _output->print("%27s (mmap: reserved=%d%s", " ", cur_reserved_amt, unit);
  54.916 -      if (reserved_diff != 0) {
  54.917 -        _output->print(" %+d%s", reserved_diff, unit);
  54.918 +    // Report native memory tracking overhead
  54.919 +    if (flag == mtNMT) {
   54.920 +      size_t current_tracking_overhead =
   54.921 +        amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
   54.922 +
   54.923 +      out->print("%27s (tracking overhead=" SIZE_FORMAT "%s", " ",
   54.924 +        current_tracking_overhead, scale);
  54.925 +
  54.926 +      long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
  54.927 +           _early_baseline.malloc_tracking_overhead());
  54.928 +      if (overhead_diff != 0) {
  54.929 +        out->print(" %+ld%s", overhead_diff, scale);
  54.930        }
  54.931 -
  54.932 -      _output->print(", committed=%d%s", cur_committed_amt, unit);
  54.933 -      if (committed_diff != 0) {
  54.934 -        _output->print(" %+d%s", committed_diff, unit);
  54.935 -      }
  54.936 -      _output->print_cr(")");
  54.937 +      out->print_cr(")");
  54.938      }
  54.939 -
  54.940 -    // arena memory
  54.941 -    if (cur_arena_amt > 0) {
  54.942 -      _output->print("%27s (arena=%d%s", " ", cur_arena_amt, unit);
  54.943 -      if (arena_diff != 0) {
  54.944 -        _output->print(" %+d%s", arena_diff, unit);
  54.945 -      }
  54.946 -      _output->print(", #%d", cur_arena_count);
  54.947 -      if (arena_count_diff != 0) {
  54.948 -        _output->print(" %+d", arena_count_diff);
  54.949 -      }
  54.950 -      _output->print_cr(")");
  54.951 -    }
  54.952 -
  54.953 -    _output->print_cr(" ");
  54.954 +    out->print_cr(" ");
  54.955    }
  54.956  }
  54.957  
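Put together, diff_summary_of_type() prints one block per memory type; for mtThread the block looks roughly like this (illustrative numbers and approximate alignment, KB scale):

    -                    Thread (reserved=25059KB +1027KB, committed=25059KB +1027KB)
                                (thread #25 +1)
                                (stack: reserved=24960KB +1024KB, committed=24960KB +1024KB)
                                (malloc=71KB +2KB #133 +5)
                                (arena=28KB #48)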
  54.958 -void BaselineTTYOutputer::diff_malloc_callsite(address pc,
  54.959 -    size_t cur_malloc_amt, size_t cur_malloc_count,
  54.960 -    int malloc_diff, int malloc_count_diff) {
  54.961 -  if (malloc_diff != 0) {
  54.962 -    const char* unit = memory_unit(_scale);
  54.963 -    char buf[128];
  54.964 -    int  offset;
  54.965 -    if (pc == 0) {
  54.966 -      _output->print_cr("[BOOTSTRAP]%18s", " ");
  54.967 +void MemDetailDiffReporter::report_diff() {
  54.968 +  MemSummaryDiffReporter::report_diff();
  54.969 +  diff_malloc_sites();
  54.970 +  diff_virtual_memory_sites();
  54.971 +}
  54.972 +
  54.973 +void MemDetailDiffReporter::diff_malloc_sites() const {
  54.974 +  MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site);
  54.975 +  MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site);
  54.976 +
  54.977 +  const MallocSite* early_site   = early_itr.next();
  54.978 +  const MallocSite* current_site = current_itr.next();
  54.979 +
  54.980 +  while (early_site != NULL || current_site != NULL) {
  54.981 +    if (early_site == NULL) {
  54.982 +      new_malloc_site(current_site);
  54.983 +      current_site = current_itr.next();
  54.984 +    } else if (current_site == NULL) {
  54.985 +      old_malloc_site(early_site);
  54.986 +      early_site = early_itr.next();
  54.987      } else {
  54.988 -      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  54.989 -        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
  54.990 -        _output->print("%28s", " ");
  54.991 +      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
  54.992 +      if (compVal < 0) {
  54.993 +        new_malloc_site(current_site);
  54.994 +        current_site = current_itr.next();
  54.995 +      } else if (compVal > 0) {
  54.996 +        old_malloc_site(early_site);
  54.997 +        early_site = early_itr.next();
  54.998        } else {
  54.999 -        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
 54.1000 +        diff_malloc_site(early_site, current_site);
 54.1001 +        early_site   = early_itr.next();
 54.1002 +        current_site = current_itr.next();
 54.1003        }
 54.1004      }
 54.1005 -
 54.1006 -    _output->print("(malloc=%d%s", cur_malloc_amt, unit);
 54.1007 -    if (malloc_diff != 0) {
 54.1008 -      _output->print(" %+d%s", malloc_diff, unit);
 54.1009 -    }
 54.1010 -    _output->print(", #%d", cur_malloc_count);
 54.1011 -    if (malloc_count_diff != 0) {
 54.1012 -      _output->print(" %+d", malloc_count_diff);
 54.1013 -    }
 54.1014 -    _output->print_cr(")");
 54.1015 -    _output->print_cr(" ");
 54.1016    }
 54.1017  }
 54.1018  
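diff_malloc_sites() above (and diff_virtual_memory_sites() below) relies on both iterators returning sites sorted by call stack (MemBaseline::by_site), so a single merge-style pass classifies every site as new, old, or present in both baselines. A minimal, self-contained sketch of the same pattern, using a hypothetical Site type keyed by an integer stack id rather than the NMT classes:

    #include <cstdio>
    #include <vector>

    struct Site { int stack_id; long size; };  // stand-in for MallocSite, keyed by call stack

    // Merge-walk two site lists sorted by stack_id, mirroring MemDetailDiffReporter.
    static void diff_sites(const std::vector<Site>& early, const std::vector<Site>& current) {
      size_t e = 0, c = 0;
      while (e < early.size() || c < current.size()) {
        if (e == early.size() ||
            (c < current.size() && current[c].stack_id < early[e].stack_id)) {
          // site only exists in the current baseline
          printf("new site %d (+%ld bytes)\n", current[c].stack_id, current[c].size);
          c++;
        } else if (c == current.size() || current[c].stack_id > early[e].stack_id) {
          // site only exists in the early baseline
          printf("old site %d (-%ld bytes)\n", early[e].stack_id, early[e].size);
          e++;
        } else {
          // site exists in both baselines: report the delta
          printf("site %d %+ld bytes\n", current[c].stack_id,
                 current[c].size - early[e].size);
          e++; c++;
        }
      }
    }

    int main() {
      diff_sites({{1, 100}, {3, 50}}, {{1, 160}, {2, 30}});
      return 0;
    }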
 54.1019 -void BaselineTTYOutputer::diff_virtual_memory_callsite(address pc,
 54.1020 -    size_t cur_reserved_amt, size_t cur_committed_amt,
 54.1021 -    int reserved_diff, int committed_diff) {
 54.1022 -  if (reserved_diff != 0 || committed_diff != 0) {
 54.1023 -    const char* unit = memory_unit(_scale);
 54.1024 -    char buf[64];
 54.1025 -    int  offset;
 54.1026 -    if (pc == 0) {
 54.1027 -      _output->print_cr("[BOOSTRAP]%18s", " ");
 54.1028 +void MemDetailDiffReporter::diff_virtual_memory_sites() const {
 54.1029 +  VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
 54.1030 +  VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);
 54.1031 +
 54.1032 +  const VirtualMemoryAllocationSite* early_site   = early_itr.next();
 54.1033 +  const VirtualMemoryAllocationSite* current_site = current_itr.next();
 54.1034 +
 54.1035 +  while (early_site != NULL || current_site != NULL) {
 54.1036 +    if (early_site == NULL) {
 54.1037 +      new_virtual_memory_site(current_site);
 54.1038 +      current_site = current_itr.next();
 54.1039 +    } else if (current_site == NULL) {
 54.1040 +      old_virtual_memory_site(early_site);
 54.1041 +      early_site = early_itr.next();
 54.1042      } else {
 54.1043 -      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
 54.1044 -        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
 54.1045 -        _output->print("%28s", " ");
 54.1046 +      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
 54.1047 +      if (compVal < 0) {
 54.1048 +        new_virtual_memory_site(current_site);
 54.1049 +        current_site = current_itr.next();
 54.1050 +      } else if (compVal > 0) {
 54.1051 +        old_virtual_memory_site(early_site);
 54.1052 +        early_site = early_itr.next();
 54.1053        } else {
 54.1054 -        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
 54.1055 +        diff_virtual_memory_site(early_site, current_site);
 54.1056 +        early_site   = early_itr.next();
 54.1057 +        current_site = current_itr.next();
 54.1058        }
 54.1059      }
 54.1060 -
 54.1061 -    _output->print("(mmap: reserved=%d%s", cur_reserved_amt, unit);
 54.1062 -    if (reserved_diff != 0) {
 54.1063 -      _output->print(" %+d%s", reserved_diff, unit);
 54.1064 -    }
 54.1065 -    _output->print(", committed=%d%s", cur_committed_amt, unit);
 54.1066 -    if (committed_diff != 0) {
 54.1067 -      _output->print(" %+d%s", committed_diff, unit);
 54.1068 -    }
 54.1069 -    _output->print_cr(")");
 54.1070 -    _output->print_cr(" ");
 54.1071    }
 54.1072  }
 54.1073 +
 54.1074 +
 54.1075 +void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
 54.1076 +  diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
 54.1077 +    0, 0);
 54.1078 +}
 54.1079 +
 54.1080 +void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
 54.1081 +  diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
 54.1082 +    malloc_site->count());
 54.1083 +}
 54.1084 +
 54.1085 +void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
 54.1086 +  const MallocSite* current)  const {
 54.1087 +  diff_malloc_site(current->call_stack(), current->size(), current->count(),
 54.1088 +    early->size(), early->count());
 54.1089 +}
 54.1090 +
 54.1091 +void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
 54.1092 +  size_t current_count, size_t early_size, size_t early_count) const {
 54.1093 +  outputStream* out = output();
 54.1094 +
 54.1095 +  assert(stack != NULL, "NULL stack");
 54.1096 +
 54.1097 +  if (diff_in_current_scale(current_size, early_size) == 0) {
  54.1098 +    return;
 54.1099 +  }
 54.1100 +
 54.1101 +  stack->print_on(out);
 54.1102 +  out->print("%28s (", " ");
 54.1103 +  print_malloc_diff(current_size, current_count,
 54.1104 +    early_size, early_count);
 54.1105 +
 54.1106 +  out->print_cr(")\n");
 54.1107 +}
 54.1108 +
 54.1109 +
 54.1110 +void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
 54.1111 +  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0);
 54.1112 +}
 54.1113 +
 54.1114 +void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
 54.1115 +  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed());
 54.1116 +}
 54.1117 +
 54.1118 +void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
 54.1119 +  const VirtualMemoryAllocationSite* current) const {
 54.1120 +  diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
 54.1121 +    early->reserved(), early->committed());
 54.1122 +}
 54.1123 +
 54.1124 +void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
 54.1125 +  size_t current_committed, size_t early_reserved, size_t early_committed) const  {
 54.1126 +  outputStream* out = output();
 54.1127 +
 54.1128 +  // no change
 54.1129 +  if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
 54.1130 +      diff_in_current_scale(current_committed, early_committed) == 0) {
 54.1131 +    return;
 54.1132 +  }
 54.1133 +
 54.1134 +  stack->print_on(out);
 54.1135 +  out->print("%28s (mmap: ", " ");
 54.1136 +  print_virtual_memory_diff(current_reserved, current_committed,
 54.1137 +    early_reserved, early_committed);
 54.1138 +
 54.1139 +  out->print_cr(")\n");
  54.1140 +}
 54.1141 +
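In the detail comparison, each changed site is rendered as its native call stack (printed by NativeCallStack::print_on) followed by a single indented summary line, schematically (illustrative numbers):

    <call stack frames printed by NativeCallStack::print_on(out)>
                                 (malloc=2048KB +512KB #128 +32)

or, for virtual memory sites,

    <call stack frames printed by NativeCallStack::print_on(out)>
                                 (mmap: reserved=4096KB +1024KB, committed=2048KB)

Sites whose rounded diff is zero in the current scale are skipped entirely.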
    55.1 --- a/src/share/vm/services/memReporter.hpp	Wed Aug 27 09:36:55 2014 +0200
    55.2 +++ b/src/share/vm/services/memReporter.hpp	Wed Aug 27 08:19:12 2014 -0400
    55.3 @@ -1,5 +1,5 @@
    55.4  /*
    55.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    55.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    55.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    55.8   *
    55.9   * This code is free software; you can redistribute it and/or modify it
   55.10 @@ -25,262 +25,217 @@
   55.11  #ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP
   55.12  #define SHARE_VM_SERVICES_MEM_REPORTER_HPP
   55.13  
   55.14 -#include "runtime/mutexLocker.hpp"
   55.15 -#include "services/memBaseline.hpp"
   55.16 -#include "services/memTracker.hpp"
   55.17 -#include "utilities/ostream.hpp"
   55.18 -#include "utilities/macros.hpp"
   55.19 -
   55.20  #if INCLUDE_NMT
   55.21  
   55.22 +#include "oops/instanceKlass.hpp"
   55.23 +#include "services/memBaseline.hpp"
   55.24 +#include "services/nmtCommon.hpp"
   55.25 +#include "services/mallocTracker.hpp"
   55.26 +#include "services/virtualMemoryTracker.hpp"
   55.27 +
   55.28  /*
   55.29 - * MemBaselineReporter reports data to this outputer class,
   55.30 - * ReportOutputer is responsible for format, store and redirect
   55.31 - * the data to the final destination.
   55.32 - */
   55.33 -class BaselineOutputer : public StackObj {
    55.34 + * Base class that provides common helpers for reporting memory amounts in the requested scale.
    55.35 + */
   55.36 +class MemReporterBase : public StackObj {
   55.37 + private:
   55.38 +  size_t        _scale;  // report in this scale
   55.39 +  outputStream* _output; // destination
   55.40 +
   55.41   public:
   55.42 -  // start to report memory usage in specified scale.
   55.43 -  // if report_diff = true, the reporter reports baseline comparison
   55.44 -  // information.
   55.45 +  MemReporterBase(outputStream* out = NULL, size_t scale = K)
   55.46 +    : _scale(scale) {
   55.47 +    _output = (out == NULL) ? tty : out;
   55.48 +  }
   55.49  
   55.50 -  virtual void start(size_t scale, bool report_diff = false) = 0;
   55.51 -  // Done reporting
   55.52 -  virtual void done() = 0;
   55.53 + protected:
   55.54 +  inline outputStream* output() const {
   55.55 +    return _output;
   55.56 +  }
   55.57 +  // Current reporting scale
   55.58 +  inline const char* current_scale() const {
   55.59 +    return NMTUtil::scale_name(_scale);
   55.60 +  }
   55.61 +  // Convert memory amount in bytes to current reporting scale
   55.62 +  inline size_t amount_in_current_scale(size_t amount) const {
   55.63 +    return NMTUtil::amount_in_scale(amount, _scale);
   55.64 +  }
   55.65  
   55.66 -  /* report baseline summary information */
   55.67 -  virtual void total_usage(size_t total_reserved,
   55.68 -                           size_t total_committed) = 0;
   55.69 -  virtual void num_of_classes(size_t classes) = 0;
   55.70 -  virtual void num_of_threads(size_t threads) = 0;
   55.71 +  // Convert diff amount in bytes to current reporting scale
   55.72 +  inline long diff_in_current_scale(size_t s1, size_t s2) const {
   55.73 +    long amount = (long)(s1 - s2);
   55.74 +    long scale = (long)_scale;
   55.75 +    amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
   55.76 +    return amount / scale;
   55.77 +  }
   55.78  
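diff_in_current_scale() rounds the raw byte difference to the nearest unit of the current scale. For example, with the default scale K (1024 bytes), a change of +600 bytes reports as +1KB ((600 + 512) / 1024 = 1), a change of +400 bytes rounds to (400 + 512) / 1024 = 0 and is therefore suppressed by the callers in memReporter.cpp, and -600 bytes reports as -1KB. Note that s1 - s2 is an unsigned size_t subtraction that wraps when s2 > s1 and relies on the cast to long to recover the negative difference.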
   55.79 -  virtual void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) = 0;
   55.80 +  // Helper functions
   55.81 +  // Calculate total reserved and committed amount
   55.82 +  size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
   55.83 +  size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
   55.84  
   55.85 -  /* report baseline summary comparison */
   55.86 -  virtual void diff_total_usage(size_t total_reserved,
   55.87 -                                size_t total_committed,
   55.88 -                                int reserved_diff,
   55.89 -                                int committed_diff) = 0;
   55.90 -  virtual void diff_num_of_classes(size_t classes, int diff) = 0;
   55.91 -  virtual void diff_num_of_threads(size_t threads, int diff) = 0;
   55.92  
   55.93 -  virtual void diff_thread_info(size_t stack_reserved, size_t stack_committed,
   55.94 -        int stack_reserved_diff, int stack_committed_diff) = 0;
   55.95 +  // Print summary total, malloc and virtual memory
   55.96 +  void print_total(size_t reserved, size_t committed) const;
   55.97 +  void print_malloc(size_t amount, size_t count) const;
   55.98 +  void print_virtual_memory(size_t reserved, size_t committed) const;
   55.99  
  55.100 +  void print_malloc_line(size_t amount, size_t count) const;
  55.101 +  void print_virtual_memory_line(size_t reserved, size_t committed) const;
  55.102 +  void print_arena_line(size_t amount, size_t count) const;
  55.103  
  55.104 -  /*
  55.105 -   * memory summary by memory types.
  55.106 -   * for each memory type, following summaries are reported:
  55.107 -   *  - reserved amount, committed amount
  55.108 -   *  - malloc'd amount, malloc count
  55.109 -   *  - arena amount, arena count
  55.110 -   */
  55.111 -
  55.112 -  // start reporting memory summary by memory type
  55.113 -  virtual void start_category_summary() = 0;
  55.114 -
  55.115 -  virtual void category_summary(MEMFLAGS type, size_t reserved_amt,
  55.116 -                                size_t committed_amt,
  55.117 -                                size_t malloc_amt, size_t malloc_count,
  55.118 -                                size_t arena_amt, size_t arena_count) = 0;
  55.119 -
  55.120 -  virtual void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
  55.121 -                                size_t cur_committed_amt,
  55.122 -                                size_t cur_malloc_amt, size_t cur_malloc_count,
  55.123 -                                size_t cur_arena_amt, size_t cur_arena_count,
  55.124 -                                int reserved_diff, int committed_diff, int malloc_diff,
  55.125 -                                int malloc_count_diff, int arena_diff,
  55.126 -                                int arena_count_diff) = 0;
  55.127 -
  55.128 -  virtual void done_category_summary() = 0;
  55.129 -
  55.130 -  virtual void start_virtual_memory_map() = 0;
  55.131 -  virtual void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc) = 0;
  55.132 -  virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
  55.133 -  virtual void done_virtual_memory_map() = 0;
  55.134 -
  55.135 -  /*
  55.136 -   *  Report callsite information
  55.137 -   */
  55.138 -  virtual void start_callsite() = 0;
  55.139 -  virtual void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count) = 0;
  55.140 -  virtual void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt) = 0;
  55.141 -
  55.142 -  virtual void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
  55.143 -              int malloc_diff, int malloc_count_diff) = 0;
  55.144 -  virtual void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
  55.145 -              int reserved_diff, int committed_diff) = 0;
  55.146 -
  55.147 -  virtual void done_callsite() = 0;
  55.148 -
  55.149 -  // return current scale in "KB", "MB" or "GB"
  55.150 -  static const char* memory_unit(size_t scale);
  55.151 +  void print_virtual_memory_region(const char* type, address base, size_t size) const;
  55.152  };
  55.153  
  55.154  /*
  55.155 - * This class reports processed data from a baseline or
  55.156 - * the changes between the two baseline.
   55.157 + * This class generates the summary tracking report.
  55.158   */
  55.159 -class BaselineReporter : public StackObj {
  55.160 +class MemSummaryReporter : public MemReporterBase {
  55.161   private:
  55.162 -  BaselineOutputer&  _outputer;
  55.163 -  size_t             _scale;
  55.164 +  MallocMemorySnapshot*   _malloc_snapshot;
  55.165 +  VirtualMemorySnapshot*  _vm_snapshot;
  55.166 +  size_t                  _class_count;
  55.167  
  55.168   public:
  55.169 -  // construct a reporter that reports memory usage
  55.170 -  // in specified scale
  55.171 -  BaselineReporter(BaselineOutputer& outputer, size_t scale = K):
  55.172 -    _outputer(outputer) {
  55.173 -    _scale = scale;
  55.174 +  // Report summary tracking data from global snapshots directly.
  55.175 +  // This constructor is used for final reporting and hs_err reporting.
  55.176 +  MemSummaryReporter(MallocMemorySnapshot* malloc_snapshot,
  55.177 +    VirtualMemorySnapshot* vm_snapshot, outputStream* output,
  55.178 +    size_t class_count = 0, size_t scale = K) :
  55.179 +    MemReporterBase(output, scale),
  55.180 +    _malloc_snapshot(malloc_snapshot),
  55.181 +    _vm_snapshot(vm_snapshot) {
  55.182 +    if (class_count == 0) {
  55.183 +      _class_count = InstanceKlass::number_of_instance_classes();
  55.184 +    } else {
  55.185 +      _class_count = class_count;
  55.186 +    }
  55.187    }
  55.188 -  virtual void report_baseline(const MemBaseline& baseline, bool summary_only = false);
  55.189 -  virtual void diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
  55.190 -                              bool summary_only = false);
  55.191 +  // This constructor is for normal reporting from a recent baseline.
  55.192 +  MemSummaryReporter(MemBaseline& baseline, outputStream* output,
  55.193 +    size_t scale = K) : MemReporterBase(output, scale),
  55.194 +    _malloc_snapshot(baseline.malloc_memory_snapshot()),
  55.195 +    _vm_snapshot(baseline.virtual_memory_snapshot()),
  55.196 +    _class_count(baseline.class_count()) { }
  55.197  
  55.198 -  void set_scale(size_t scale);
  55.199 -  size_t scale() const { return _scale; }
  55.200  
  55.201 +  // Generate summary report
  55.202 +  virtual void report();
  55.203   private:
  55.204 -  void report_summaries(const MemBaseline& baseline);
  55.205 -  void report_virtual_memory_map(const MemBaseline& baseline);
  55.206 -  void report_callsites(const MemBaseline& baseline);
  55.207 -
  55.208 -  void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
  55.209 -  void diff_callsites(const MemBaseline& cur, const MemBaseline& prev);
  55.210 -
  55.211 -  // calculate memory size in current memory scale
  55.212 -  size_t amount_in_current_scale(size_t amt) const;
  55.213 -  // diff two unsigned values in current memory scale
  55.214 -  int    diff_in_current_scale(size_t value1, size_t value2) const;
  55.215 -  // diff two unsigned value
  55.216 -  int    diff(size_t value1, size_t value2) const;
  55.217 +  // Report summary for each memory type
  55.218 +  void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
  55.219 +    VirtualMemory* virtual_memory);
  55.220  };
  55.221  
  55.222  /*
  55.223 - * tty output implementation. Native memory tracking
  55.224 - * DCmd uses this outputer.
   55.225 + * This class generates the detail tracking report.
  55.226   */
  55.227 -class BaselineTTYOutputer : public BaselineOutputer {
  55.228 +class MemDetailReporter : public MemSummaryReporter {
  55.229   private:
  55.230 -  size_t         _scale;
  55.231 -
  55.232 -  size_t         _num_of_classes;
  55.233 -  size_t         _num_of_threads;
  55.234 -  size_t         _thread_stack_reserved;
  55.235 -  size_t         _thread_stack_committed;
  55.236 -
  55.237 -  int            _num_of_classes_diff;
  55.238 -  int            _num_of_threads_diff;
  55.239 -  int            _thread_stack_reserved_diff;
  55.240 -  int            _thread_stack_committed_diff;
  55.241 -
  55.242 -  outputStream*  _output;
  55.243 +  MemBaseline&   _baseline;
  55.244  
  55.245   public:
  55.246 -  BaselineTTYOutputer(outputStream* st) {
  55.247 -    _scale = K;
  55.248 -    _num_of_classes = 0;
  55.249 -    _num_of_threads = 0;
  55.250 -    _thread_stack_reserved = 0;
  55.251 -    _thread_stack_committed = 0;
  55.252 -    _num_of_classes_diff = 0;
  55.253 -    _num_of_threads_diff = 0;
  55.254 -    _thread_stack_reserved_diff = 0;
  55.255 -    _thread_stack_committed_diff = 0;
  55.256 -    _output = st;
  55.257 +  MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) :
  55.258 +    MemSummaryReporter(baseline, output, scale),
  55.259 +     _baseline(baseline) { }
  55.260 +
  55.261 +  // Generate detail report.
  55.262 +  // The report contains summary and detail sections.
  55.263 +  virtual void report() {
  55.264 +    MemSummaryReporter::report();
  55.265 +    report_virtual_memory_map();
  55.266 +    report_detail();
  55.267    }
  55.268  
  55.269 -  // begin reporting memory usage in specified scale
  55.270 -  void start(size_t scale, bool report_diff = false);
  55.271 -  // done reporting
  55.272 -  void done();
  55.273 + private:
  55.274 +  // Report detail tracking data.
  55.275 +  void report_detail();
  55.276 +  // Report virtual memory map
  55.277 +  void report_virtual_memory_map();
  55.278 +  // Report malloc allocation sites
  55.279 +  void report_malloc_sites();
  55.280 +  // Report virtual memory reservation sites
  55.281 +  void report_virtual_memory_allocation_sites();
  55.282  
  55.283 -  // total memory usage
  55.284 -  void total_usage(size_t total_reserved,
  55.285 -                   size_t total_committed);
  55.286 -  // report total loaded classes
  55.287 -  void num_of_classes(size_t classes) {
  55.288 -    _num_of_classes = classes;
  55.289 +  // Report a virtual memory region
  55.290 +  void report_virtual_memory_region(const ReservedMemoryRegion* rgn);
  55.291 +};
  55.292 +
  55.293 +/*
   55.294 + * This class generates the summary comparison report.
   55.295 + * It compares the current memory baseline against an earlier baseline.
  55.296 + */
  55.297 +class MemSummaryDiffReporter : public MemReporterBase {
  55.298 + protected:
  55.299 +  MemBaseline&      _early_baseline;
  55.300 +  MemBaseline&      _current_baseline;
  55.301 +
  55.302 + public:
  55.303 +  MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
  55.304 +    outputStream* output, size_t scale = K) : MemReporterBase(output, scale),
  55.305 +    _early_baseline(early_baseline), _current_baseline(current_baseline) {
  55.306 +    assert(early_baseline.baseline_type()   != MemBaseline::Not_baselined, "Not baselined");
  55.307 +    assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
  55.308    }
  55.309  
  55.310 -  void num_of_threads(size_t threads) {
  55.311 -    _num_of_threads = threads;
  55.312 -  }
  55.313 +  // Generate summary comparison report
  55.314 +  virtual void report_diff();
  55.315  
  55.316 -  void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) {
  55.317 -    _thread_stack_reserved = stack_reserved_amt;
  55.318 -    _thread_stack_committed = stack_committed_amt;
  55.319 -  }
  55.320 + private:
  55.321 +  // report the comparison of each memory type
  55.322 +  void diff_summary_of_type(MEMFLAGS type,
  55.323 +    const MallocMemory* early_malloc, const VirtualMemory* early_vm,
  55.324 +    const MallocMemory* current_malloc, const VirtualMemory* current_vm) const;
  55.325  
  55.326 -  void diff_total_usage(size_t total_reserved,
  55.327 -                        size_t total_committed,
  55.328 -                        int reserved_diff,
  55.329 -                        int committed_diff);
  55.330 -
  55.331 -  void diff_num_of_classes(size_t classes, int diff) {
  55.332 -    _num_of_classes = classes;
  55.333 -    _num_of_classes_diff = diff;
  55.334 -  }
  55.335 -
  55.336 -  void diff_num_of_threads(size_t threads, int diff) {
  55.337 -    _num_of_threads = threads;
  55.338 -    _num_of_threads_diff = diff;
  55.339 -  }
  55.340 -
  55.341 -  void diff_thread_info(size_t stack_reserved_amt, size_t stack_committed_amt,
  55.342 -               int stack_reserved_diff, int stack_committed_diff) {
  55.343 -    _thread_stack_reserved = stack_reserved_amt;
  55.344 -    _thread_stack_committed = stack_committed_amt;
  55.345 -    _thread_stack_reserved_diff = stack_reserved_diff;
  55.346 -    _thread_stack_committed_diff = stack_committed_diff;
  55.347 -  }
  55.348 -
  55.349 -  /*
  55.350 -   * Report memory summary categoriuzed by memory types.
  55.351 -   * For each memory type, following summaries are reported:
  55.352 -   *  - reserved amount, committed amount
  55.353 -   *  - malloc-ed amount, malloc count
  55.354 -   *  - arena amount, arena count
  55.355 -   */
  55.356 -  // start reporting memory summary by memory type
  55.357 -  void start_category_summary();
  55.358 -  void category_summary(MEMFLAGS type, size_t reserved_amt, size_t committed_amt,
  55.359 -                               size_t malloc_amt, size_t malloc_count,
  55.360 -                               size_t arena_amt, size_t arena_count);
  55.361 -
  55.362 -  void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
  55.363 -                          size_t cur_committed_amt,
  55.364 -                          size_t cur_malloc_amt, size_t cur_malloc_count,
  55.365 -                          size_t cur_arena_amt, size_t cur_arena_count,
  55.366 -                          int reserved_diff, int committed_diff, int malloc_diff,
  55.367 -                          int malloc_count_diff, int arena_diff,
  55.368 -                          int arena_count_diff);
  55.369 -
  55.370 -  void done_category_summary();
  55.371 -
  55.372 -  // virtual memory map
  55.373 -  void start_virtual_memory_map();
  55.374 -  void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc);
  55.375 -  void committed_memory_region(address base, address end, size_t size, address pc);
  55.376 -  void done_virtual_memory_map();
  55.377 -
  55.378 -
  55.379 -  /*
  55.380 -   *  Report callsite information
  55.381 -   */
  55.382 -  void start_callsite();
  55.383 -  void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count);
  55.384 -  void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt);
  55.385 -
  55.386 -  void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
  55.387 -              int malloc_diff, int malloc_count_diff);
  55.388 -  void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
  55.389 -              int reserved_diff, int committed_diff);
  55.390 -
  55.391 -  void done_callsite();
  55.392 + protected:
  55.393 +  void print_malloc_diff(size_t current_amount, size_t current_count,
  55.394 +    size_t early_amount, size_t early_count) const;
  55.395 +  void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
  55.396 +    size_t early_reserved, size_t early_committed) const;
  55.397 +  void print_arena_diff(size_t current_amount, size_t current_count,
  55.398 +    size_t early_amount, size_t early_count) const;
  55.399  };
  55.400  
  55.401 +/*
   55.402 + * This class generates the detail comparison report.
   55.403 + * It compares the current memory baseline against an earlier baseline;
   55.404 + * both baselines have to be detail baselines.
  55.405 + */
  55.406 +class MemDetailDiffReporter : public MemSummaryDiffReporter {
  55.407 + public:
  55.408 +  MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
  55.409 +    outputStream* output, size_t scale = K) :
  55.410 +    MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { }
  55.411 +
  55.412 +  // Generate detail comparison report
  55.413 +  virtual void report_diff();
  55.414 +
  55.415 +  // Malloc allocation site comparison
  55.416 +  void diff_malloc_sites() const;
   55.417 +  // Virtual memory reservation site comparison
  55.418 +  void diff_virtual_memory_sites() const;
  55.419 +
  55.420 +  // New malloc allocation site in recent baseline
  55.421 +  void new_malloc_site (const MallocSite* site) const;
   55.422 +  // Malloc allocation site that is only present in the early baseline
   55.423 +  void old_malloc_site (const MallocSite* site) const;
   55.424 +  // Compare a malloc allocation site that appears in both baselines
  55.425 +  void diff_malloc_site(const MallocSite* early, const MallocSite* current)  const;
  55.426 +
  55.427 +  // New virtual memory allocation site in recent baseline
  55.428 +  void new_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
   55.429 +  // Virtual memory allocation site that is only present in the early baseline
   55.430 +  void old_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
   55.431 +  // Compare a virtual memory allocation site that appears in both baselines
  55.432 +  void diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
  55.433 +                                const VirtualMemoryAllocationSite* current)  const;
  55.434 +
  55.435 +  void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
   55.436 +    size_t current_count, size_t early_size, size_t early_count) const;
  55.437 +  void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
  55.438 +    size_t current_committed, size_t early_reserved, size_t early_committed) const;
  55.439 +};
  55.440  
  55.441  #endif // INCLUDE_NMT
  55.442  
  55.443 -#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
  55.444 +#endif
  55.445 +
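For orientation, a hedged sketch of how these reporter classes are typically driven (the MemBaseline::baseline() call and the surrounding MemTracker/DCmd plumbing live elsewhere in this changeset, so their exact signatures here are assumptions):

    // Summary report of the current state, printed to tty in KB.
    MemBaseline current;
    current.baseline(true /* summary only; assumed MemBaseline API */);
    MemSummaryReporter summary_reporter(current, tty, K);
    summary_reporter.report();

    // Compare the current baseline against an earlier MemBaseline captured the same way.
    MemSummaryDiffReporter diff_reporter(early, current, tty, K);
    diff_reporter.report_diff();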
    56.1 --- a/src/share/vm/services/memSnapshot.cpp	Wed Aug 27 09:36:55 2014 +0200
    56.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    56.3 @@ -1,748 +0,0 @@
    56.4 -/*
    56.5 - * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    56.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    56.7 - *
    56.8 - * This code is free software; you can redistribute it and/or modify it
    56.9 - * under the terms of the GNU General Public License version 2 only, as
   56.10 - * published by the Free Software Foundation.
   56.11 - *
   56.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   56.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   56.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   56.15 - * version 2 for more details (a copy is included in the LICENSE file that
   56.16 - * accompanied this code).
   56.17 - *
   56.18 - * You should have received a copy of the GNU General Public License version
   56.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   56.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   56.21 - *
   56.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   56.23 - * or visit www.oracle.com if you need additional information or have any
   56.24 - * questions.
   56.25 - *
   56.26 - */
   56.27 -
   56.28 -#include "precompiled.hpp"
   56.29 -#include "runtime/mutexLocker.hpp"
   56.30 -#include "utilities/decoder.hpp"
   56.31 -#include "services/memBaseline.hpp"
   56.32 -#include "services/memPtr.hpp"
   56.33 -#include "services/memPtrArray.hpp"
   56.34 -#include "services/memSnapshot.hpp"
   56.35 -#include "services/memTracker.hpp"
   56.36 -
   56.37 -PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
   56.38 -
   56.39 -#ifdef ASSERT
   56.40 -
   56.41 -void decode_pointer_record(MemPointerRecord* rec) {
   56.42 -  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT  "] size = %d bytes", rec->addr(),
   56.43 -    rec->addr() + rec->size(), (int)rec->size());
   56.44 -  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
   56.45 -  if (rec->is_vm_pointer()) {
   56.46 -    if (rec->is_allocation_record()) {
   56.47 -      tty->print_cr(" (reserve)");
   56.48 -    } else if (rec->is_commit_record()) {
   56.49 -      tty->print_cr(" (commit)");
   56.50 -    } else if (rec->is_uncommit_record()) {
   56.51 -      tty->print_cr(" (uncommit)");
   56.52 -    } else if (rec->is_deallocation_record()) {
   56.53 -      tty->print_cr(" (release)");
   56.54 -    } else {
   56.55 -      tty->print_cr(" (tag)");
   56.56 -    }
   56.57 -  } else {
   56.58 -    if (rec->is_arena_memory_record()) {
   56.59 -      tty->print_cr(" (arena size)");
   56.60 -    } else if (rec->is_allocation_record()) {
   56.61 -      tty->print_cr(" (malloc)");
   56.62 -    } else {
   56.63 -      tty->print_cr(" (free)");
   56.64 -    }
   56.65 -  }
   56.66 -  if (MemTracker::track_callsite()) {
   56.67 -    char buf[1024];
   56.68 -    address pc = ((MemPointerRecordEx*)rec)->pc();
   56.69 -    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
   56.70 -      tty->print_cr("\tfrom %s", buf);
   56.71 -    } else {
   56.72 -      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
   56.73 -    }
   56.74 -  }
   56.75 -}
   56.76 -
   56.77 -void decode_vm_region_record(VMMemRegion* rec) {
   56.78 -  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
   56.79 -    rec->addr() + rec->size());
   56.80 -  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
   56.81 -  if (rec->is_allocation_record()) {
   56.82 -    tty->print_cr(" (reserved)");
   56.83 -  } else if (rec->is_commit_record()) {
   56.84 -    tty->print_cr(" (committed)");
   56.85 -  } else {
   56.86 -    ShouldNotReachHere();
   56.87 -  }
   56.88 -  if (MemTracker::track_callsite()) {
   56.89 -    char buf[1024];
   56.90 -    address pc = ((VMMemRegionEx*)rec)->pc();
   56.91 -    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
   56.92 -      tty->print_cr("\tfrom %s", buf);
   56.93 -    } else {
   56.94 -      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
   56.95 -    }
   56.96 -
   56.97 -  }
   56.98 -}
   56.99 -
  56.100 -#endif
  56.101 -
  56.102 -
  56.103 -bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  56.104 -  VMMemRegionEx new_rec;
  56.105 -  assert(rec->is_allocation_record() || rec->is_commit_record(),
  56.106 -    "Sanity check");
  56.107 -  if (MemTracker::track_callsite()) {
  56.108 -    new_rec.init((MemPointerRecordEx*)rec);
  56.109 -  } else {
  56.110 -    new_rec.init(rec);
  56.111 -  }
  56.112 -  return insert(&new_rec);
  56.113 -}
  56.114 -
  56.115 -bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  56.116 -  VMMemRegionEx new_rec;
  56.117 -  assert(rec->is_allocation_record() || rec->is_commit_record(),
  56.118 -    "Sanity check");
  56.119 -  if (MemTracker::track_callsite()) {
  56.120 -    new_rec.init((MemPointerRecordEx*)rec);
  56.121 -  } else {
  56.122 -    new_rec.init(rec);
  56.123 -  }
  56.124 -  return insert_after(&new_rec);
  56.125 -}
  56.126 -
  56.127 -// we don't consolidate reserved regions, since they may be categorized
  56.128 -// in different types.
  56.129 -bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  56.130 -  assert(rec->is_allocation_record(), "Sanity check");
  56.131 -  VMMemRegion* reserved_region = (VMMemRegion*)current();
  56.132 -
  56.133 -  // we don't have anything yet
  56.134 -  if (reserved_region == NULL) {
  56.135 -    return insert_record(rec);
  56.136 -  }
  56.137 -
  56.138 -  assert(reserved_region->is_reserved_region(), "Sanity check");
  56.139 -  // duplicated records
  56.140 -  if (reserved_region->is_same_region(rec)) {
  56.141 -    return true;
  56.142 -  }
  56.143 -  // Overlapping stack regions indicate that a JNI thread failed to
  56.144 -  // detach from the VM before exiting. This leaks the JavaThread object.
  56.145 -  if (CheckJNICalls)  {
  56.146 -      guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
  56.147 -         !reserved_region->overlaps_region(rec),
  56.148 -         "Attached JNI thread exited without being detached");
  56.149 -  }
  56.150 -  // otherwise, we should not have overlapping reserved regions
  56.151 -  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
  56.152 -    reserved_region->base() > rec->addr(), "Just check: locate()");
  56.153 -  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
  56.154 -    !reserved_region->overlaps_region(rec), "overlapping reserved regions");
  56.155 -
  56.156 -  return insert_record(rec);
  56.157 -}
  56.158 -
  56.159 -// we do consolidate committed regions
  56.160 -bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  56.161 -  assert(rec->is_commit_record(), "Sanity check");
  56.162 -  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  56.163 -  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
  56.164 -    "Sanity check");
  56.165 -
  56.166 -  // thread's native stack is always marked as "committed", ignore
  56.167 -  // the "commit" operation for creating stack guard pages
  56.168 -  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
  56.169 -      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
  56.170 -    return true;
  56.171 -  }
  56.172 -
  56.173 -  // if the reserved region has any committed regions
  56.174 -  VMMemRegion* committed_rgn  = (VMMemRegion*)next();
  56.175 -  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
  56.176 -    // duplicated commit records
  56.177 -    if(committed_rgn->contains_region(rec)) {
  56.178 -      return true;
  56.179 -    } else if (committed_rgn->overlaps_region(rec)) {
  56.180 -      // overlaps front part
  56.181 -      if (rec->addr() < committed_rgn->addr()) {
  56.182 -        committed_rgn->expand_region(rec->addr(),
  56.183 -          committed_rgn->addr() - rec->addr());
  56.184 -      } else {
  56.185 -        // overlaps tail part
  56.186 -        address committed_rgn_end = committed_rgn->addr() +
  56.187 -              committed_rgn->size();
  56.188 -        assert(committed_rgn_end < rec->addr() + rec->size(),
  56.189 -             "overlap tail part");
  56.190 -        committed_rgn->expand_region(committed_rgn_end,
  56.191 -          (rec->addr() + rec->size()) - committed_rgn_end);
  56.192 -      }
  56.193 -    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
  56.194 -      // adjunct each other
  56.195 -      committed_rgn->expand_region(rec->addr(), rec->size());
  56.196 -      VMMemRegion* next_reg = (VMMemRegion*)next();
  56.197 -      // see if we can consolidate next committed region
  56.198 -      if (next_reg != NULL && next_reg->is_committed_region() &&
  56.199 -        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
  56.200 -          committed_rgn->expand_region(next_reg->base(), next_reg->size());
  56.201 -          // delete merged region
  56.202 -          remove();
  56.203 -      }
  56.204 -      return true;
  56.205 -    } else if (committed_rgn->base() > rec->addr()) {
  56.206 -      // found the location, insert this committed region
  56.207 -      return insert_record(rec);
  56.208 -    }
  56.209 -    committed_rgn = (VMMemRegion*)next();
  56.210 -  }
  56.211 -  return insert_record(rec);
  56.212 -}
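For illustration, the consolidation performed by add_committed_region() above boils down to interval merging: a newly committed range is folded together with any committed range it overlaps or touches within the same reservation. The following standalone sketch (the Region struct, add_committed() helper and main() driver are invented for the example; this is not HotSpot code) shows that idea in isolation:

// Standalone sketch of committed-region consolidation (illustrative names only).
#include <cstddef>
#include <cstdio>
#include <vector>

struct Region { size_t base; size_t size; };   // [base, base + size)

// Insert a committed region and merge it with any overlapping or adjacent ones.
static void add_committed(std::vector<Region>& regions, Region r) {
  std::vector<Region> out;
  for (const Region& c : regions) {
    if (c.base + c.size < r.base || r.base + r.size < c.base) {
      out.push_back(c);                        // disjoint and not adjacent: keep as-is
    } else {                                   // overlap or touch: fold into r
      size_t lo = c.base < r.base ? c.base : r.base;
      size_t hi = (c.base + c.size > r.base + r.size) ? c.base + c.size : r.base + r.size;
      r.base = lo; r.size = hi - lo;
    }
  }
  out.push_back(r);
  // keep the list sorted by base address, as the snapshot does
  for (size_t i = 0; i < out.size(); i++)
    for (size_t j = i + 1; j < out.size(); j++)
      if (out[j].base < out[i].base) { Region t = out[i]; out[i] = out[j]; out[j] = t; }
  regions = out;
}

int main() {
  std::vector<Region> committed;
  add_committed(committed, Region{0x1000, 0x1000});
  add_committed(committed, Region{0x2000, 0x1000});   // adjacent: merges into one entry
  add_committed(committed, Region{0x5000, 0x1000});   // disjoint: stays a second entry
  for (const Region& c : committed)
    std::printf("[0x%zx - 0x%zx)\n", c.base, c.base + c.size);
  return 0;
}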
  56.213 -
  56.214 -bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  56.215 -  assert(rec->is_uncommit_record(), "sanity check");
  56.216 -  VMMemRegion* cur;
  56.217 -  cur = (VMMemRegion*)current();
  56.218 -  assert(cur->is_reserved_region() && cur->contains_region(rec),
  56.219 -    "Sanity check");
  56.220 -  // thread's native stack is always marked as "committed", ignore
  56.221 -  // the "commit" operation for creating stack guard pages
  56.222 -  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
  56.223 -      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
  56.224 -    return true;
  56.225 -  }
  56.226 -
  56.227 -  cur = (VMMemRegion*)next();
  56.228 -  while (cur != NULL && cur->is_committed_region()) {
  56.229 -    // region already uncommitted, must be due to duplicated record
  56.230 -    if (cur->addr() >= rec->addr() + rec->size()) {
  56.231 -      break;
  56.232 -    } else if (cur->contains_region(rec)) {
  56.233 -      // uncommit whole region
  56.234 -      if (cur->is_same_region(rec)) {
  56.235 -        remove();
  56.236 -        break;
  56.237 -      } else if (rec->addr() == cur->addr() ||
  56.238 -        rec->addr() + rec->size() == cur->addr() + cur->size()) {
  56.239 -        // uncommitted from either end of current memory region.
  56.240 -        cur->exclude_region(rec->addr(), rec->size());
  56.241 -        break;
  56.242 -      } else { // split the committed region and release the middle
  56.243 -        address high_addr = cur->addr() + cur->size();
  56.244 -        size_t sz = high_addr - rec->addr();
  56.245 -        cur->exclude_region(rec->addr(), sz);
  56.246 -        sz = high_addr - (rec->addr() + rec->size());
  56.247 -        if (MemTracker::track_callsite()) {
  56.248 -          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
  56.249 -             ((VMMemRegionEx*)cur)->pc());
  56.250 -          return insert_record_after(&tmp);
  56.251 -        } else {
  56.252 -          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
  56.253 -          return insert_record_after(&tmp);
  56.254 -        }
  56.255 -      }
  56.256 -    }
  56.257 -    cur = (VMMemRegion*)next();
  56.258 -  }
  56.259 -
  56.260 -  // we may not find committed record due to duplicated records
  56.261 -  return true;
  56.262 -}
  56.263 -
  56.264 -bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  56.265 -  assert(rec->is_deallocation_record(), "Sanity check");
  56.266 -  VMMemRegion* cur = (VMMemRegion*)current();
  56.267 -  assert(cur->is_reserved_region() && cur->contains_region(rec),
  56.268 -    "Sanity check");
  56.269 -  if (rec->is_same_region(cur)) {
  56.270 -
  56.271 -    // In the snapshot, the virtual memory records are sorted in the following order:
  56.272 -    // 1. virtual memory's base address
  56.273 -    // 2. virtual memory reservation record, followed by commit records within this reservation.
  56.274 -    //    The commit records are also in base address order.
  56.275 -    // When a reserved region is released, we want to remove the reservation record and all
  56.276 -    // commit records following it.
  56.277 -#ifdef ASSERT
  56.278 -    address low_addr = cur->addr();
  56.279 -    address high_addr = low_addr + cur->size();
  56.280 -#endif
  56.281 -    // remove virtual memory reservation record
  56.282 -    remove();
  56.283 -    // remove committed regions within above reservation
  56.284 -    VMMemRegion* next_region = (VMMemRegion*)current();
  56.285 -    while (next_region != NULL && next_region->is_committed_region()) {
  56.286 -      assert(next_region->addr() >= low_addr &&
  56.287 -             next_region->addr() + next_region->size() <= high_addr,
  56.288 -            "Range check");
  56.289 -      remove();
  56.290 -      next_region = (VMMemRegion*)current();
  56.291 -    }
  56.292 -  } else if (rec->addr() == cur->addr() ||
  56.293 -    rec->addr() + rec->size() == cur->addr() + cur->size()) {
  56.294 -    // released region is at either end of this region
  56.295 -    cur->exclude_region(rec->addr(), rec->size());
  56.296 -    assert(check_reserved_region(), "Integrity check");
  56.297 -  } else { // split the reserved region and release the middle
  56.298 -    address high_addr = cur->addr() + cur->size();
  56.299 -    size_t sz = high_addr - rec->addr();
  56.300 -    cur->exclude_region(rec->addr(), sz);
  56.301 -    sz = high_addr - rec->addr() - rec->size();
  56.302 -    if (MemTracker::track_callsite()) {
  56.303 -      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
  56.304 -        ((VMMemRegionEx*)cur)->pc());
  56.305 -      bool ret = insert_reserved_region(&tmp);
  56.306 -      assert(!ret || check_reserved_region(), "Integrity check");
  56.307 -      return ret;
  56.308 -    } else {
  56.309 -      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
  56.310 -      bool ret = insert_reserved_region(&tmp);
  56.311 -      assert(!ret || check_reserved_region(), "Integrity check");
  56.312 -      return ret;
  56.313 -    }
  56.314 -  }
  56.315 -  return true;
  56.316 -}
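The ordering described in the comment inside remove_released_region() above can be pictured with a small standalone sketch: a flat list sorted by base address in which each reserved entry is followed by its committed sub-regions, so releasing a reservation deletes a contiguous run of entries. The Rec/Kind types and release_reserved() helper below are illustrative assumptions, not the HotSpot data structures:

// Standalone sketch of the reservation-followed-by-commits record layout.
#include <cstdio>
#include <vector>

enum Kind { RESERVED, COMMITTED };
struct Rec { Kind kind; unsigned long base; unsigned long size; };

static void release_reserved(std::vector<Rec>& recs, unsigned long base) {
  for (size_t i = 0; i < recs.size(); i++) {
    if (recs[i].kind == RESERVED && recs[i].base == base) {
      size_t j = i + 1;
      while (j < recs.size() && recs[j].kind == COMMITTED) j++;   // its commits follow it
      recs.erase(recs.begin() + i, recs.begin() + j);             // drop the whole run
      return;
    }
  }
}

int main() {
  std::vector<Rec> recs = {
    {RESERVED,  0x1000, 0x4000},
    {COMMITTED, 0x1000, 0x1000},
    {COMMITTED, 0x3000, 0x1000},
    {RESERVED,  0x8000, 0x2000},
  };
  release_reserved(recs, 0x1000);
  for (const Rec& r : recs)
    std::printf("%s [0x%lx - 0x%lx)\n", r.kind == RESERVED ? "reserved" : "committed",
                r.base, r.base + r.size);
  return 0;
}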
  56.317 -
  56.318 -bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  56.319 -  // skip all 'commit' records associated with previous reserved region
  56.320 -  VMMemRegion* p = (VMMemRegion*)next();
  56.321 -  while (p != NULL && p->is_committed_region() &&
  56.322 -         p->base() + p->size() < rec->addr()) {
  56.323 -    p = (VMMemRegion*)next();
  56.324 -  }
  56.325 -  return insert_record(rec);
  56.326 -}
  56.327 -
  56.328 -bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  56.329 -  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  56.330 -  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  56.331 -  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
  56.332 -    size_t sz = rgn->size() - new_rgn_size;
  56.333 -    // the original region becomes 'new' region
  56.334 -    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
  56.335 -     // remaining becomes next region
  56.336 -    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
  56.337 -    return insert_reserved_region(&next_rgn);
  56.338 -  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
  56.339 -    rgn->exclude_region(new_rgn_addr, new_rgn_size);
  56.340 -    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
  56.341 -    return insert_reserved_region(&next_rgn);
  56.342 -  } else {
  56.343 -    // the original region will be split into three
  56.344 -    address rgn_high_addr = rgn->base() + rgn->size();
  56.345 -    // first region
  56.346 -    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
  56.347 -    // the second region is the new region
  56.348 -    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
  56.349 -    if (!insert_reserved_region(&new_rgn)) return false;
  56.350 -    // the remaining region
  56.351 -    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
  56.352 -      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
  56.353 -    return insert_reserved_region(&rem_rgn);
  56.354 -  }
  56.355 -}
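split_reserved_region() above carves a new region out of an existing reservation, producing up to three pieces (leading remainder, the new region, trailing remainder). A standalone sketch of that arithmetic follows; the Span type and split() helper are invented for the example and are not HotSpot code:

// Standalone sketch of the three-way split of a reserved interval.
#include <cstdio>
#include <vector>

struct Span { unsigned long base; unsigned long size; };

// Split 'rgn' around [new_base, new_base + new_size); returns 1-3 resulting spans.
static std::vector<Span> split(Span rgn, unsigned long new_base, unsigned long new_size) {
  std::vector<Span> parts;
  if (new_base > rgn.base)                                  // leading remainder
    parts.push_back(Span{rgn.base, new_base - rgn.base});
  parts.push_back(Span{new_base, new_size});                // the new region itself
  unsigned long high = rgn.base + rgn.size;
  if (new_base + new_size < high)                           // trailing remainder
    parts.push_back(Span{new_base + new_size, high - (new_base + new_size)});
  return parts;
}

int main() {
  // carve 0x2000..0x3000 out of the reservation 0x1000..0x5000
  for (const Span& s : split(Span{0x1000, 0x4000}, 0x2000, 0x1000))
    std::printf("[0x%lx - 0x%lx)\n", s.base, s.base + s.size);
  return 0;
}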
  56.356 -
  56.357 -static int sort_in_seq_order(const void* p1, const void* p2) {
  56.358 -  assert(p1 != NULL && p2 != NULL, "Sanity check");
  56.359 -  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  56.360 -  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  56.361 -  return (mp1->seq() - mp2->seq());
  56.362 -}
  56.363 -
  56.364 -bool StagingArea::init() {
  56.365 -  if (MemTracker::track_callsite()) {
  56.366 -    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
  56.367 -    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
  56.368 -  } else {
  56.369 -    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
  56.370 -    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
  56.371 -  }
  56.372 -
  56.373 -  if (_malloc_data != NULL && _vm_data != NULL &&
  56.374 -      !_malloc_data->out_of_memory() &&
  56.375 -      !_vm_data->out_of_memory()) {
  56.376 -    return true;
  56.377 -  } else {
  56.378 -    if (_malloc_data != NULL) delete _malloc_data;
  56.379 -    if (_vm_data != NULL) delete _vm_data;
  56.380 -    _malloc_data = NULL;
  56.381 -    _vm_data = NULL;
  56.382 -    return false;
  56.383 -  }
  56.384 -}
  56.385 -
  56.386 -
  56.387 -VMRecordIterator StagingArea::virtual_memory_record_walker() {
  56.388 -  MemPointerArray* arr = vm_data();
  56.389 -  // sort into seq number order
  56.390 -  arr->sort((FN_SORT)sort_in_seq_order);
  56.391 -  return VMRecordIterator(arr);
  56.392 -}
  56.393 -
  56.394 -
  56.395 -MemSnapshot::MemSnapshot() {
  56.396 -  if (MemTracker::track_callsite()) {
  56.397 -    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
  56.398 -    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
  56.399 -  } else {
  56.400 -    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
  56.401 -    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
  56.402 -  }
  56.403 -
  56.404 -  _staging_area.init();
  56.405 -  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  56.406 -  NOT_PRODUCT(_untracked_count = 0;)
  56.407 -  _number_of_classes = 0;
  56.408 -}
  56.409 -
  56.410 -MemSnapshot::~MemSnapshot() {
  56.411 -  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  56.412 -  {
  56.413 -    MutexLockerEx locker(_lock);
  56.414 -    if (_alloc_ptrs != NULL) {
  56.415 -      delete _alloc_ptrs;
  56.416 -      _alloc_ptrs = NULL;
  56.417 -    }
  56.418 -
  56.419 -    if (_vm_ptrs != NULL) {
  56.420 -      delete _vm_ptrs;
  56.421 -      _vm_ptrs = NULL;
  56.422 -    }
  56.423 -  }
  56.424 -
  56.425 -  if (_lock != NULL) {
  56.426 -    delete _lock;
  56.427 -    _lock = NULL;
  56.428 -  }
  56.429 -}
  56.430 -
  56.431 -
  56.432 -void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  56.433 -  assert(dest != NULL && src != NULL, "Just check");
  56.434 -  assert(dest->addr() == src->addr(), "Just check");
  56.435 -  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
  56.436 -
  56.437 -  if (MemTracker::track_callsite()) {
  56.438 -    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
  56.439 -  } else {
  56.440 -    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
  56.441 -  }
  56.442 -}
  56.443 -
  56.444 -void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
  56.445 -  assert(src != NULL && dest != NULL, "Just check");
  56.446 -  assert(dest->seq() == 0 && src->seq() >0, "cast away sequence");
  56.447 -
  56.448 -  if (MemTracker::track_callsite()) {
  56.449 -    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  56.450 -  } else {
  56.451 -    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  56.452 -  }
  56.453 -}
  56.454 -
  56.455 -// merge a recorder to the staging area
  56.456 -bool MemSnapshot::merge(MemRecorder* rec) {
  56.457 -  assert(rec != NULL && !rec->out_of_memory(), "Just check");
  56.458 -
  56.459 -  SequencedRecordIterator itr(rec->pointer_itr());
  56.460 -
  56.461 -  MutexLockerEx lock(_lock, true);
  56.462 -  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  56.463 -  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
  56.464 -  MemPointerRecord* matched_rec;
  56.465 -
  56.466 -  while (incoming_rec != NULL) {
  56.467 -    if (incoming_rec->is_vm_pointer()) {
  56.468 -      // we don't do anything with virtual memory records during merge
  56.469 -      if (!_staging_area.vm_data()->append(incoming_rec)) {
  56.470 -        return false;
  56.471 -      }
  56.472 -    } else {
  56.473 -      // locate matched record and/or also position the iterator to proper
  56.474 -      // location for this incoming record.
  56.475 -      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
  56.476 -      // we have not seen this memory block in this generation,
  56.477 -      // so just add to staging area
  56.478 -      if (matched_rec == NULL) {
  56.479 -        if (!malloc_staging_itr.insert(incoming_rec)) {
  56.480 -          return false;
  56.481 -        }
  56.482 -      } else if (incoming_rec->addr() == matched_rec->addr()) {
  56.483 -        // whoever has higher sequence number wins
  56.484 -        if (incoming_rec->seq() > matched_rec->seq()) {
  56.485 -          copy_seq_pointer(matched_rec, incoming_rec);
  56.486 -        }
  56.487 -      } else if (incoming_rec->addr() < matched_rec->addr()) {
  56.488 -        if (!malloc_staging_itr.insert(incoming_rec)) {
  56.489 -          return false;
  56.490 -        }
  56.491 -      } else {
  56.492 -        ShouldNotReachHere();
  56.493 -      }
  56.494 -    }
  56.495 -    incoming_rec = (MemPointerRecord*)itr.next();
  56.496 -  }
  56.497 -  NOT_PRODUCT(check_staging_data();)
  56.498 -  return true;
  56.499 -}
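The merge rule above - records are matched by address and, on a collision, the one with the higher sequence number wins - can be shown with a tiny standalone sketch. It uses a std::map purely for brevity; the real snapshot keeps sorted arrays, and the Rec type and merge_record() helper here are illustrative assumptions:

// Standalone sketch: higher sequence number wins when records collide on address.
#include <cstdio>
#include <map>

struct Rec { unsigned seq; unsigned long size; };

static void merge_record(std::map<unsigned long, Rec>& staging,
                         unsigned long addr, Rec incoming) {
  std::map<unsigned long, Rec>::iterator it = staging.find(addr);
  if (it == staging.end() || incoming.seq > it->second.seq) {
    staging[addr] = incoming;            // new block, or newer information
  }                                      // otherwise keep the existing record
}

int main() {
  std::map<unsigned long, Rec> staging;
  merge_record(staging, 0x1000, Rec{1, 64});
  merge_record(staging, 0x1000, Rec{3, 128});   // newer: replaces seq 1
  merge_record(staging, 0x1000, Rec{2, 32});    // stale: ignored
  std::printf("0x1000 -> seq %u, size %lu\n", staging[0x1000].seq, staging[0x1000].size);
  return 0;
}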
  56.500 -
  56.501 -
  56.502 -// promote data to next generation
  56.503 -bool MemSnapshot::promote(int number_of_classes) {
  56.504 -  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  56.505 -  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
  56.506 -         "Just check");
  56.507 -  MutexLockerEx lock(_lock, true);
  56.508 -
  56.509 -  MallocRecordIterator  malloc_itr = _staging_area.malloc_record_walker();
  56.510 -  bool promoted = false;
  56.511 -  if (promote_malloc_records(&malloc_itr)) {
  56.512 -    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
  56.513 -    if (promote_virtual_memory_records(&vm_itr)) {
  56.514 -      promoted = true;
  56.515 -    }
  56.516 -  }
  56.517 -
  56.518 -  NOT_PRODUCT(check_malloc_pointers();)
  56.519 -  _staging_area.clear();
  56.520 -  _number_of_classes = number_of_classes;
  56.521 -  return promoted;
  56.522 -}
  56.523 -
  56.524 -bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  56.525 -  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  56.526 -  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  56.527 -  MemPointerRecord* matched_rec;
  56.528 -  while (new_rec != NULL) {
  56.529 -    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
  56.530 -    // found matched memory block
  56.531 -    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
  56.532 -      // snapshot already contains 'live' records
  56.533 -      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
  56.534 -             "Sanity check");
  56.535 -      // update block states
  56.536 -      if (new_rec->is_allocation_record()) {
  56.537 -        assign_pointer(matched_rec, new_rec);
  56.538 -      } else if (new_rec->is_arena_memory_record()) {
  56.539 -        if (new_rec->size() == 0) {
  56.540 -          // remove size record once size drops to 0
  56.541 -          malloc_snapshot_itr.remove();
  56.542 -        } else {
  56.543 -          assign_pointer(matched_rec, new_rec);
  56.544 -        }
  56.545 -      } else {
  56.546 -        // a deallocation record
  56.547 -        assert(new_rec->is_deallocation_record(), "Sanity check");
  56.548 -        // an arena record can be followed by a size record, we need to remove both
  56.549 -        if (matched_rec->is_arena_record()) {
  56.550 -          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
  56.551 -          if (next != NULL && next->is_arena_memory_record() &&
  56.552 -              next->is_memory_record_of_arena(matched_rec)) {
  56.553 -            malloc_snapshot_itr.remove();
  56.554 -          }
  56.555 -        }
  56.556 -        // the memory is deallocated, remove related record(s)
  56.557 -        malloc_snapshot_itr.remove();
  56.558 -      }
  56.559 -    } else {
  56.560 -      // don't insert size 0 record
  56.561 -      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
  56.562 -        new_rec = NULL;
  56.563 -      }
  56.564 -
  56.565 -      if (new_rec != NULL) {
  56.566 -        if  (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
  56.567 -          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
  56.568 -            if (!malloc_snapshot_itr.insert_after(new_rec)) {
  56.569 -              return false;
  56.570 -            }
  56.571 -          } else {
  56.572 -            if (!malloc_snapshot_itr.insert(new_rec)) {
  56.573 -              return false;
  56.574 -            }
  56.575 -          }
  56.576 -        }
  56.577 -#ifndef PRODUCT
  56.578 -        else if (!has_allocation_record(new_rec->addr())) {
  56.579 -          // NMT cannot track some startup memory, which is allocated before NMT is turned on
  56.580 -          _untracked_count ++;
  56.581 -        }
  56.582 -#endif
  56.583 -      }
  56.584 -    }
  56.585 -    new_rec = (MemPointerRecord*)itr->next();
  56.586 -  }
  56.587 -  return true;
  56.588 -}
  56.589 -
  56.590 -bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  56.591 -  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  56.592 -  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  56.593 -  VMMemRegion*  reserved_rec;
  56.594 -  while (new_rec != NULL) {
  56.595 -    assert(new_rec->is_vm_pointer(), "Sanity check");
  56.596 -
  56.597 -    // locate a reserved region that contains the specified address, or
  56.598 -    // the nearest reserved region whose base address is just above the specified
  56.599 -    // address
  56.600 -    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
  56.601 -    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
  56.602 -      // snapshot can only have 'live' records
  56.603 -      assert(reserved_rec->is_reserved_region(), "Sanity check");
  56.604 -      if (new_rec->is_allocation_record()) {
  56.605 -        if (!reserved_rec->is_same_region(new_rec)) {
  56.606 -          // only deal with splitting a bigger reserved region into smaller regions.
  56.607 -          // So far, CDS is the only use case.
  56.608 -          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
  56.609 -            return false;
  56.610 -          }
  56.611 -        }
  56.612 -      } else if (new_rec->is_uncommit_record()) {
  56.613 -        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
  56.614 -          return false;
  56.615 -        }
  56.616 -      } else if (new_rec->is_commit_record()) {
  56.617 -        // insert or expand existing committed region to cover this
  56.618 -        // newly committed region
  56.619 -        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
  56.620 -          return false;
  56.621 -        }
  56.622 -      } else if (new_rec->is_deallocation_record()) {
  56.623 -        // release part or all memory region
  56.624 -        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
  56.625 -          return false;
  56.626 -        }
  56.627 -      } else if (new_rec->is_type_tagging_record()) {
  56.628 -        // tag this reserved virtual memory range to a memory type. Can not re-tag a memory range
  56.629 -        // to different type.
  56.630 -        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
  56.631 -               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
  56.632 -               "Sanity check");
  56.633 -        reserved_rec->tag(new_rec->flags());
  56.634 -      } else {
  56.635 -        ShouldNotReachHere();
  56.636 -      }
  56.637 -    } else {
  56.638 -      /*
  56.639 -       * The assertion failure indicates mismatched virtual memory records. The likely
  56.640 -       * scenario is that some virtual memory operations do not go through the
  56.641 -       * os::xxxx_memory() API and therefore have to be tracked manually (perfMemory is an example).
  56.642 -       */
  56.643 -      assert(new_rec->is_allocation_record(), "Sanity check");
  56.644 -      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
  56.645 -        return false;
  56.646 -      }
  56.647 -    }
  56.648 -    new_rec = (MemPointerRecord*)itr->next();
  56.649 -  }
  56.650 -  return true;
  56.651 -}
  56.652 -
  56.653 -#ifndef PRODUCT
  56.654 -void MemSnapshot::print_snapshot_stats(outputStream* st) {
  56.655 -  st->print_cr("Snapshot:");
  56.656 -  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
  56.657 -    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);
  56.658 -
  56.659 -  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
  56.660 -    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
  56.661 -
  56.662 -  st->print_cr("\tMalloc staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
  56.663 -    _staging_area.malloc_data()->capacity(),
  56.664 -    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
  56.665 -    _staging_area.malloc_data()->instance_size()/K);
  56.666 -
  56.667 -  st->print_cr("\tVirtual memory staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
  56.668 -    _staging_area.vm_data()->capacity(),
  56.669 -    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
  56.670 -    _staging_area.vm_data()->instance_size()/K);
  56.671 -
  56.672 -  st->print_cr("\tUntracked allocation: %d", _untracked_count);
  56.673 -}
  56.674 -
  56.675 -void MemSnapshot::check_malloc_pointers() {
  56.676 -  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  56.677 -  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  56.678 -  MemPointerRecord* prev = NULL;
  56.679 -  while (p != NULL) {
  56.680 -    if (prev != NULL) {
  56.681 -      assert(p->addr() >= prev->addr(), "sorting order");
  56.682 -    }
  56.683 -    prev = p;
  56.684 -    p = (MemPointerRecord*)mItr.next();
  56.685 -  }
  56.686 -}
  56.687 -
  56.688 -bool MemSnapshot::has_allocation_record(address addr) {
  56.689 -  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  56.690 -  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  56.691 -  while (cur != NULL) {
  56.692 -    if (cur->addr() == addr && cur->is_allocation_record()) {
  56.693 -      return true;
  56.694 -    }
  56.695 -    cur = (MemPointerRecord*)itr.next();
  56.696 -  }
  56.697 -  return false;
  56.698 -}
  56.699 -#endif // PRODUCT
  56.700 -
  56.701 -#ifdef ASSERT
  56.702 -void MemSnapshot::check_staging_data() {
  56.703 -  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  56.704 -  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  56.705 -  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  56.706 -  while (next != NULL) {
  56.707 -    assert((next->addr() > cur->addr()) ||
  56.708 -      ((next->flags() & MemPointerRecord::tag_masks) >
  56.709 -       (cur->flags() & MemPointerRecord::tag_masks)),
  56.710 -       "sorting order");
  56.711 -    cur = next;
  56.712 -    next = (MemPointerRecord*)itr.next();
  56.713 -  }
  56.714 -
  56.715 -  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  56.716 -  cur = (MemPointerRecord*)vm_itr.current();
  56.717 -  while (cur != NULL) {
  56.718 -    assert(cur->is_vm_pointer(), "virtual memory pointer only");
  56.719 -    cur = (MemPointerRecord*)vm_itr.next();
  56.720 -  }
  56.721 -}
  56.722 -
  56.723 -void MemSnapshot::dump_all_vm_pointers() {
  56.724 -  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  56.725 -  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  56.726 -  tty->print_cr("dump virtual memory pointers:");
  56.727 -  while (ptr != NULL) {
  56.728 -    if (ptr->is_committed_region()) {
  56.729 -      tty->print("\t");
  56.730 -    }
  56.731 -    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
  56.732 -      (ptr->addr() + ptr->size()), ptr->flags());
  56.733 -
  56.734 -    if (MemTracker::track_callsite()) {
  56.735 -      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
  56.736 -      if (ex->pc() != NULL) {
  56.737 -        char buf[1024];
  56.738 -        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
  56.739 -          tty->print_cr("\t%s", buf);
  56.740 -        } else {
  56.741 -          tty->cr();
  56.742 -        }
  56.743 -      }
  56.744 -    }
  56.745 -
  56.746 -    ptr = (VMMemRegion*)itr.next();
  56.747 -  }
  56.748 -  tty->flush();
  56.749 -}
  56.750 -#endif // ASSERT
  56.751 -
    57.1 --- a/src/share/vm/services/memSnapshot.hpp	Wed Aug 27 09:36:55 2014 +0200
    57.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    57.3 @@ -1,408 +0,0 @@
    57.4 -/*
    57.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    57.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    57.7 - *
    57.8 - * This code is free software; you can redistribute it and/or modify it
    57.9 - * under the terms of the GNU General Public License version 2 only, as
   57.10 - * published by the Free Software Foundation.
   57.11 - *
   57.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   57.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   57.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   57.15 - * version 2 for more details (a copy is included in the LICENSE file that
   57.16 - * accompanied this code).
   57.17 - *
   57.18 - * You should have received a copy of the GNU General Public License version
   57.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   57.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   57.21 - *
   57.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   57.23 - * or visit www.oracle.com if you need additional information or have any
   57.24 - * questions.
   57.25 - *
   57.26 - */
   57.27 -
   57.28 -#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
   57.29 -#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
   57.30 -
   57.31 -#include "memory/allocation.hpp"
   57.32 -#include "runtime/mutex.hpp"
   57.33 -#include "runtime/mutexLocker.hpp"
   57.34 -#include "services/memBaseline.hpp"
   57.35 -#include "services/memPtrArray.hpp"
   57.36 -
   57.37 -// Snapshot pointer array iterator
   57.38 -
   57.39 -// The pointer array contains malloc-ed pointers
   57.40 -class MemPointerIterator : public MemPointerArrayIteratorImpl {
   57.41 - public:
   57.42 -  MemPointerIterator(MemPointerArray* arr):
   57.43 -    MemPointerArrayIteratorImpl(arr) {
   57.44 -    assert(arr != NULL, "null array");
   57.45 -  }
   57.46 -
   57.47 -#ifdef ASSERT
   57.48 -  virtual bool is_dup_pointer(const MemPointer* ptr1,
   57.49 -    const MemPointer* ptr2) const {
   57.50 -    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
   57.51 -    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
   57.52 -
   57.53 -    if (p1->addr() != p2->addr()) return false;
   57.54 -    if ((p1->flags() & MemPointerRecord::tag_masks) !=
   57.55 -        (p2->flags() & MemPointerRecord::tag_masks)) {
   57.56 -      return false;
   57.57 -    }
   57.58 -    // we do see multiple commit/uncommit records on the same memory; that is ok
   57.59 -    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
   57.60 -           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   57.61 -  }
   57.62 -
   57.63 -  virtual bool insert(MemPointer* ptr) {
   57.64 -    if (_pos > 0) {
   57.65 -      MemPointer* p1 = (MemPointer*)ptr;
   57.66 -      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
   57.67 -      assert(!is_dup_pointer(p1, p2),
   57.68 -        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
   57.69 -    }
   57.70 -     if (_pos < _array->length() -1) {
   57.71 -      MemPointer* p1 = (MemPointer*)ptr;
   57.72 -      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
   57.73 -      assert(!is_dup_pointer(p1, p2),
   57.74 -        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
   57.75 -     }
   57.76 -    return _array->insert_at(ptr, _pos);
   57.77 -  }
   57.78 -
   57.79 -  virtual bool insert_after(MemPointer* ptr) {
   57.80 -    if (_pos > 0) {
   57.81 -      MemPointer* p1 = (MemPointer*)ptr;
   57.82 -      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
   57.83 -      assert(!is_dup_pointer(p1, p2),
   57.84 -        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
   57.85 -    }
   57.86 -    if (_pos < _array->length() - 1) {
   57.87 -      MemPointer* p1 = (MemPointer*)ptr;
   57.88 -      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
   57.89 -
   57.90 -      assert(!is_dup_pointer(p1, p2),
   57.91 -        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
   57.92 -     }
   57.93 -    if (_array->insert_at(ptr, _pos + 1)) {
   57.94 -      _pos ++;
   57.95 -      return true;
   57.96 -    }
   57.97 -    return false;
   57.98 -  }
   57.99 -#endif
  57.100 -
  57.101 -  virtual MemPointer* locate(address addr) {
  57.102 -    MemPointer* cur = current();
  57.103 -    while (cur != NULL && cur->addr() < addr) {
  57.104 -      cur = next();
  57.105 -    }
  57.106 -    return cur;
  57.107 -  }
  57.108 -};
  57.109 -
  57.110 -class VMMemPointerIterator : public MemPointerIterator {
  57.111 - public:
  57.112 -  VMMemPointerIterator(MemPointerArray* arr):
  57.113 -      MemPointerIterator(arr) {
  57.114 -  }
  57.115 -
  57.116 -  // locate an existing reserved memory region that contains the specified address,
  57.117 -  // or the reserved region just above this address, where the incoming
  57.118 -  // reserved region should be inserted.
  57.119 -  virtual MemPointer* locate(address addr) {
  57.120 -    reset();
  57.121 -    VMMemRegion* reg = (VMMemRegion*)current();
  57.122 -    while (reg != NULL) {
  57.123 -      if (reg->is_reserved_region()) {
  57.124 -        if (reg->contains_address(addr) || addr < reg->base()) {
  57.125 -          return reg;
  57.126 -        }
  57.127 -      }
  57.128 -      reg = (VMMemRegion*)next();
  57.129 -    }
  57.130 -    return NULL;
  57.131 -  }
  57.132 -
  57.133 -  // following methods update virtual memory in the context
  57.134 -  // of 'current' position, which is properly positioned by
  57.135 -  // callers via locate method.
  57.136 -  bool add_reserved_region(MemPointerRecord* rec);
  57.137 -  bool add_committed_region(MemPointerRecord* rec);
  57.138 -  bool remove_uncommitted_region(MemPointerRecord* rec);
  57.139 -  bool remove_released_region(MemPointerRecord* rec);
  57.140 -
  57.141 -  // split a reserved region to create a new memory region with specified base and size
  57.142 -  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
  57.143 - private:
  57.144 -  bool insert_record(MemPointerRecord* rec);
  57.145 -  bool insert_record_after(MemPointerRecord* rec);
  57.146 -
  57.147 -  bool insert_reserved_region(MemPointerRecord* rec);
  57.148 -
  57.149 -  // reset current position
  57.150 -  inline void reset() { _pos = 0; }
  57.151 -#ifdef ASSERT
  57.152 -  // check integrity of records on current reserved memory region.
  57.153 -  bool check_reserved_region() {
  57.154 -    VMMemRegion* reserved_region = (VMMemRegion*)current();
  57.155 -    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
  57.156 -          "Sanity check");
  57.157 -    // all committed regions that follow current reserved region, should all
  57.158 -    // belong to the reserved region.
  57.159 -    VMMemRegion* next_region = (VMMemRegion*)next();
  57.160 -    for (; next_region != NULL && next_region->is_committed_region();
  57.161 -         next_region = (VMMemRegion*)next() ) {
  57.162 -      if(!reserved_region->contains_region(next_region)) {
  57.163 -        return false;
  57.164 -      }
  57.165 -    }
  57.166 -    return true;
  57.167 -  }
  57.168 -
  57.169 -  virtual bool is_dup_pointer(const MemPointer* ptr1,
  57.170 -    const MemPointer* ptr2) const {
  57.171 -    VMMemRegion* p1 = (VMMemRegion*)ptr1;
  57.172 -    VMMemRegion* p2 = (VMMemRegion*)ptr2;
  57.173 -
  57.174 -    if (p1->addr() != p2->addr()) return false;
  57.175 -    if ((p1->flags() & MemPointerRecord::tag_masks) !=
  57.176 -        (p2->flags() & MemPointerRecord::tag_masks)) {
  57.177 -      return false;
  57.178 -    }
  57.179 -    // we do see multiple commit/uncommit records on the same memory; that is ok
  57.180 -    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
  57.181 -           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  57.182 -  }
  57.183 -#endif
  57.184 -};
  57.185 -
  57.186 -class MallocRecordIterator : public MemPointerArrayIterator {
  57.187 - private:
  57.188 -  MemPointerArrayIteratorImpl  _itr;
  57.189 -
  57.190 -
  57.191 -
  57.192 - public:
  57.193 -  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  57.194 -  }
  57.195 -
  57.196 -  virtual MemPointer* current() const {
  57.197 -#ifdef ASSERT
  57.198 -    MemPointer* cur_rec = _itr.current();
  57.199 -    if (cur_rec != NULL) {
  57.200 -      MemPointer* prev_rec = _itr.peek_prev();
  57.201 -      MemPointer* next_rec = _itr.peek_next();
  57.202 -      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
  57.203 -      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
  57.204 -    }
  57.205 -#endif
  57.206 -    return _itr.current();
  57.207 -  }
  57.208 -  virtual MemPointer* next() {
  57.209 -    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
  57.210 -    // arena memory record is a special case, which we have to compare
  57.211 -    // sequence number against its associated arena record.
  57.212 -    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
  57.213 -      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
  57.214 -      // if there is an associated arena record, it has to be the previous
  57.215 -      // record because of the sorting order (by address) - NMT generates a pseudo address
  57.216 -      // for arena's size record by offsetting arena's address, which guarantees
  57.217 -      // the order of an arena record and its size record.
  57.218 -      if (prev_rec != NULL && prev_rec->is_arena_record() &&
  57.219 -        next_rec->is_memory_record_of_arena(prev_rec)) {
  57.220 -        if (prev_rec->seq() > next_rec->seq()) {
  57.221 -          // Skip this arena memory record
  57.222 -          // Two scenarios:
  57.223 -          //   - if the arena record is an allocation record, this early
  57.224 -          //     size record must be left over by a previous arena,
  57.225 -          //     and the last size record should have size = 0.
  57.226 -          //   - if the arena record is a deallocation record, this
  57.227 -          //     size record should be its cleanup record, which should
  57.228 -          //     also have size = 0. In other words, an arena always resets
  57.229 -          //     its size before it goes away (see Arena's destructor)
  57.230 -          assert(next_rec->size() == 0, "size not reset");
  57.231 -          return _itr.next();
  57.232 -        } else {
  57.233 -          assert(prev_rec->is_allocation_record(),
  57.234 -            "Arena size record ahead of allocation record");
  57.235 -        }
  57.236 -      }
  57.237 -    }
  57.238 -    return next_rec;
  57.239 -  }
  57.240 -
  57.241 -  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  57.242 -  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  57.243 -  void remove()                      { ShouldNotReachHere(); }
  57.244 -  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  57.245 -  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  57.246 -};
  57.247 -
  57.248 -// collapse duplicated records. Eliminating duplicated records here is much
  57.249 -// cheaper than during the promotion phase. However, it does have a limitation - it
  57.250 -// can only eliminate duplicated records within a generation, so there are
  57.251 -// still chances of seeing duplicated records during promotion.
  57.252 -// We want to use the record with higher sequence number, because it has
  57.253 -// more accurate callsite pc.
  57.254 -class VMRecordIterator : public MemPointerArrayIterator {
  57.255 - private:
  57.256 -  MemPointerArrayIteratorImpl  _itr;
  57.257 -
  57.258 - public:
  57.259 -  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
  57.260 -    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
  57.261 -    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
  57.262 -    while (next != NULL) {
  57.263 -      assert(cur != NULL, "Sanity check");
  57.264 -      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
  57.265 -        "pre-sort order");
  57.266 -
  57.267 -      if (is_duplicated_record(cur, next)) {
  57.268 -        _itr.next();
  57.269 -        next = (MemPointerRecord*)_itr.peek_next();
  57.270 -      } else {
  57.271 -        break;
  57.272 -      }
  57.273 -    }
  57.274 -  }
  57.275 -
  57.276 -  virtual MemPointer* current() const {
  57.277 -    return _itr.current();
  57.278 -  }
  57.279 -
  57.280 -  // get next record, but skip the duplicated records
  57.281 -  virtual MemPointer* next() {
  57.282 -    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
  57.283 -    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
  57.284 -    while (next != NULL) {
  57.285 -      assert(cur != NULL, "Sanity check");
  57.286 -      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
  57.287 -        "pre-sort order");
  57.288 -
  57.289 -      if (is_duplicated_record(cur, next)) {
  57.290 -        _itr.next();
  57.291 -        cur = next;
  57.292 -        next = (MemPointerRecord*)_itr.peek_next();
  57.293 -      } else {
  57.294 -        break;
  57.295 -      }
  57.296 -    }
  57.297 -    return cur;
  57.298 -  }
  57.299 -
  57.300 -  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  57.301 -  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  57.302 -  void remove()                      { ShouldNotReachHere(); }
  57.303 -  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  57.304 -  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  57.305 -
  57.306 - private:
  57.307 -  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
  57.308 -    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
  57.309 -    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
  57.310 -    return ret;
  57.311 -  }
  57.312 -};
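The duplicate-skipping walk implemented by VMRecordIterator above amounts to: in a pre-sorted stream where duplicates of the same region sit next to each other in sequence order, return only the last (highest-sequence) record of each run. A standalone sketch under those assumptions (the Rec type and sample data are invented; this is not HotSpot code):

// Standalone sketch: keep only the last record of each duplicate run.
#include <cstdio>
#include <vector>

struct Rec { unsigned long addr; unsigned long size; unsigned seq; };

static bool same_region(const Rec& a, const Rec& b) {
  return a.addr == b.addr && a.size == b.size;
}

int main() {
  std::vector<Rec> sorted = {                      // duplicates are adjacent, seq increasing
    {0x1000, 0x100, 1}, {0x1000, 0x100, 4},        // duplicate run: keep seq 4
    {0x2000, 0x200, 2},
    {0x3000, 0x100, 3}, {0x3000, 0x100, 5},        // duplicate run: keep seq 5
  };
  for (size_t i = 0; i < sorted.size(); i++) {
    while (i + 1 < sorted.size() && same_region(sorted[i], sorted[i + 1])) i++;
    std::printf("0x%lx size 0x%lx (seq %u)\n", sorted[i].addr, sorted[i].size, sorted[i].seq);
  }
  return 0;
}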
  57.313 -
  57.314 -class StagingArea VALUE_OBJ_CLASS_SPEC {
  57.315 - private:
  57.316 -  MemPointerArray*   _malloc_data;
  57.317 -  MemPointerArray*   _vm_data;
  57.318 -
  57.319 - public:
  57.320 -  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
  57.321 -    init();
  57.322 -  }
  57.323 -
  57.324 -  ~StagingArea() {
  57.325 -    if (_malloc_data != NULL) delete _malloc_data;
  57.326 -    if (_vm_data != NULL) delete _vm_data;
  57.327 -  }
  57.328 -
  57.329 -  MallocRecordIterator malloc_record_walker() {
  57.330 -    return MallocRecordIterator(malloc_data());
  57.331 -  }
  57.332 -
  57.333 -  VMRecordIterator virtual_memory_record_walker();
  57.334 -
  57.335 -  bool init();
  57.336 -  void clear() {
  57.337 -    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
  57.338 -    _malloc_data->shrink();
  57.339 -    _malloc_data->clear();
  57.340 -    _vm_data->clear();
  57.341 -  }
  57.342 -
  57.343 -  inline MemPointerArray* malloc_data() { return _malloc_data; }
  57.344 -  inline MemPointerArray* vm_data()     { return _vm_data; }
  57.345 -};
  57.346 -
  57.347 -class MemBaseline;
  57.348 -class MemSnapshot : public CHeapObj<mtNMT> {
  57.349 - private:
  57.350 -  // the following two arrays contain records of all known lived memory blocks
  57.351 -  // live malloc-ed memory pointers
  57.352 -  MemPointerArray*      _alloc_ptrs;
  57.353 -  // live virtual memory pointers
  57.354 -  MemPointerArray*      _vm_ptrs;
  57.355 -
  57.356 -  StagingArea           _staging_area;
  57.357 -
  57.358 -  // the lock to protect this snapshot
  57.359 -  Monitor*              _lock;
  57.360 -
  57.361 -  // the number of instance classes
  57.362 -  int                   _number_of_classes;
  57.363 -
  57.364 -  NOT_PRODUCT(size_t    _untracked_count;)
  57.365 -  friend class MemBaseline;
  57.366 -
  57.367 - public:
  57.368 -  MemSnapshot();
  57.369 -  virtual ~MemSnapshot();
  57.370 -
  57.371 -  // if we are running out of native memory
  57.372 -  bool out_of_memory() {
  57.373 -    return (_alloc_ptrs == NULL ||
  57.374 -      _staging_area.malloc_data() == NULL ||
  57.375 -      _staging_area.vm_data() == NULL ||
  57.376 -      _vm_ptrs == NULL || _lock == NULL ||
  57.377 -      _alloc_ptrs->out_of_memory() ||
  57.378 -      _vm_ptrs->out_of_memory());
  57.379 -  }
  57.380 -
  57.381 -  // merge a per-thread memory recorder into staging area
  57.382 -  bool merge(MemRecorder* rec);
  57.383 -  // promote staged data to snapshot
  57.384 -  bool promote(int number_of_classes);
  57.385 -
  57.386 -  int  number_of_classes() const { return _number_of_classes; }
  57.387 -
  57.388 -  void wait(long timeout) {
  57.389 -    assert(_lock != NULL, "Just check");
  57.390 -    MonitorLockerEx locker(_lock);
  57.391 -    locker.wait(true, timeout);
  57.392 -  }
  57.393 -
  57.394 -  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  57.395 -  NOT_PRODUCT(void check_staging_data();)
  57.396 -  NOT_PRODUCT(void check_malloc_pointers();)
  57.397 -  NOT_PRODUCT(bool has_allocation_record(address addr);)
  57.398 -  // dump all virtual memory pointers in snapshot
  57.399 -  DEBUG_ONLY( void dump_all_vm_pointers();)
  57.400 -
  57.401 - private:
  57.402 -   // copy sequenced pointer from src to dest
  57.403 -   void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  57.404 -   // assign a sequenced pointer to non-sequenced pointer
  57.405 -   void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src);
  57.406 -
  57.407 -   bool promote_malloc_records(MemPointerArrayIterator* itr);
  57.408 -   bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
  57.409 -};
  57.410 -
  57.411 -#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
    58.1 --- a/src/share/vm/services/memTrackWorker.cpp	Wed Aug 27 09:36:55 2014 +0200
    58.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    58.3 @@ -1,212 +0,0 @@
    58.4 -/*
    58.5 - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    58.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    58.7 - *
    58.8 - * This code is free software; you can redistribute it and/or modify it
    58.9 - * under the terms of the GNU General Public License version 2 only, as
   58.10 - * published by the Free Software Foundation.
   58.11 - *
   58.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   58.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   58.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   58.15 - * version 2 for more details (a copy is included in the LICENSE file that
   58.16 - * accompanied this code).
   58.17 - *
   58.18 - * You should have received a copy of the GNU General Public License version
   58.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   58.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   58.21 - *
   58.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   58.23 - * or visit www.oracle.com if you need additional information or have any
   58.24 - * questions.
   58.25 - *
   58.26 - */
   58.27 -
   58.28 -#include "precompiled.hpp"
   58.29 -#include "runtime/threadCritical.hpp"
   58.30 -#include "services/memTracker.hpp"
   58.31 -#include "services/memTrackWorker.hpp"
   58.32 -#include "utilities/decoder.hpp"
   58.33 -#include "utilities/vmError.hpp"
   58.34 -
   58.35 -
   58.36 -void GenerationData::reset() {
   58.37 -  _number_of_classes = 0;
   58.38 -  while (_recorder_list != NULL) {
   58.39 -    MemRecorder* tmp = _recorder_list;
   58.40 -    _recorder_list = _recorder_list->next();
   58.41 -    MemTracker::release_thread_recorder(tmp);
   58.42 -  }
   58.43 -}
   58.44 -
   58.45 -MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) {
   58.46 -  // the created thread uses the cgc thread type for now. We should revisit
   58.47 -  // this choice, or create a new thread type.
   58.48 -  _has_error = !os::create_thread(this, os::cgc_thread);
   58.49 -  set_name("MemTrackWorker");
   58.50 -
   58.51 -  // initialize the generation circular buffer
   58.52 -  if (!has_error()) {
   58.53 -    _head = _tail = 0;
   58.54 -    for(int index = 0; index < MAX_GENERATIONS; index ++) {
   58.55 -      ::new ((void*)&_gen[index]) GenerationData();
   58.56 -    }
   58.57 -  }
   58.58 -  NOT_PRODUCT(_sync_point_count = 0;)
   58.59 -  NOT_PRODUCT(_merge_count = 0;)
   58.60 -  NOT_PRODUCT(_last_gen_in_use = 0;)
   58.61 -}
   58.62 -
   58.63 -MemTrackWorker::~MemTrackWorker() {
   58.64 -  for (int index = 0; index < MAX_GENERATIONS; index ++) {
   58.65 -    _gen[index].reset();
   58.66 -  }
   58.67 -}
   58.68 -
   58.69 -void* MemTrackWorker::operator new(size_t size) throw() {
   58.70 -  assert(false, "use nothrow version");
   58.71 -  return NULL;
   58.72 -}
   58.73 -
   58.74 -void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
   58.75 -  return allocate(size, false, mtNMT);
   58.76 -}
   58.77 -
   58.78 -void MemTrackWorker::start() {
   58.79 -  os::start_thread(this);
   58.80 -}
   58.81 -
   58.82 -/*
   58.83 - * Native memory tracking worker thread loop:
   58.84 - *   1. merge one generation of memory recorders to staging area
   58.85 - *   2. promote staging data to memory snapshot
   58.86 - *
   58.87 - * This thread can run through safepoint.
   58.88 - */
   58.89 -
   58.90 -void MemTrackWorker::run() {
   58.91 -  assert(MemTracker::is_on(), "native memory tracking is off");
   58.92 -  this->initialize_thread_local_storage();
   58.93 -  this->record_stack_base_and_size();
   58.94 -  assert(_snapshot != NULL, "Worker should not be started");
   58.95 -  MemRecorder* rec;
   58.96 -  unsigned long processing_generation = 0;
   58.97 -  bool          worker_idle = false;
   58.98 -
   58.99 -  while (!MemTracker::shutdown_in_progress()) {
  58.100 -    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
  58.101 -    {
  58.102 -      // take a recorder from earliest generation in buffer
  58.103 -      ThreadCritical tc;
  58.104 -      rec = _gen[_head].next_recorder();
  58.105 -    }
  58.106 -    if (rec != NULL) {
  58.107 -      if (rec->get_generation() != processing_generation || worker_idle) {
  58.108 -        processing_generation = rec->get_generation();
  58.109 -        worker_idle = false;
  58.110 -        MemTracker::set_current_processing_generation(processing_generation);
  58.111 -      }
  58.112 -
  58.113 -      // merge the recorder into staging area
  58.114 -      if (!_snapshot->merge(rec)) {
  58.115 -        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
  58.116 -      } else {
  58.117 -        NOT_PRODUCT(_merge_count ++;)
  58.118 -      }
  58.119 -      MemTracker::release_thread_recorder(rec);
  58.120 -    } else {
  58.121 -      // no more recorder to merge, promote staging area
  58.122 -      // to snapshot
  58.123 -      if (_head != _tail) {
  58.124 -        long number_of_classes;
  58.125 -        {
  58.126 -          ThreadCritical tc;
  58.127 -          if (_gen[_head].has_more_recorder() || _head == _tail) {
  58.128 -            continue;
  58.129 -          }
  58.130 -          number_of_classes = _gen[_head].number_of_classes();
  58.131 -          _gen[_head].reset();
  58.132 -
  58.133 -          // done with this generation, increment _head pointer
  58.134 -          _head = (_head + 1) % MAX_GENERATIONS;
  58.135 -        }
  58.136 -        // promote this generation data to snapshot
  58.137 -        if (!_snapshot->promote(number_of_classes)) {
  58.138 -          // failed to promote, means out of memory
  58.139 -          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
  58.140 -        }
  58.141 -      } else {
  58.142 -        // worker thread is idle
  58.143 -        worker_idle = true;
  58.144 -        MemTracker::report_worker_idle();
  58.145 -        _snapshot->wait(1000);
  58.146 -        ThreadCritical tc;
  58.147 -        // check if more data arrived
  58.148 -        if (!_gen[_head].has_more_recorder()) {
  58.149 -          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
  58.150 -        }
  58.151 -      }
  58.152 -    }
  58.153 -  }
  58.154 -  assert(MemTracker::shutdown_in_progress(), "just check");
  58.155 -
  58.156 -  // transits to final shutdown
  58.157 -  MemTracker::final_shutdown();
  58.158 -}
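The worker loop above drains recorders from the generation at the head of the ring buffer, then promotes that generation and advances the head once it is empty. A much-simplified, single-threaded sketch of that control flow follows (no locking, safepoints, or shutdown handling; all names invented for the example):

// Standalone sketch of the drain-then-promote worker loop.
#include <cstdio>
#include <queue>

const int MAX_GEN = 4;

int main() {
  std::queue<int> gen[MAX_GEN];      // each slot holds pending "recorders"
  int head = 0, tail = 2;            // two generations already queued
  gen[0].push(101); gen[0].push(102);
  gen[1].push(201);

  while (head != tail) {
    if (!gen[head].empty()) {
      int rec = gen[head].front(); gen[head].pop();
      std::printf("merge recorder %d from generation %d\n", rec, head);
    } else {
      std::printf("promote generation %d to snapshot\n", head);
      head = (head + 1) % MAX_GEN;   // done with this generation, advance the head
    }
  }
  return 0;
}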
  58.159 -
  58.160 -// at synchronization point, where 'safepoint visible' Java threads are blocked
  58.161 -// at a safepoint, and the rest of threads are blocked on ThreadCritical lock.
  58.162 -// The caller MemTracker::sync() already takes ThreadCritical before calling this
  58.163 -// method.
  58.164 -//
  58.165 -// Following tasks are performed:
  58.166 -//   1. add all recorders in pending queue to current generation
  58.167 -//   2. increase generation
  58.168 -
  58.169 -void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
  58.170 -  NOT_PRODUCT(_sync_point_count ++;)
  58.171 -  assert(count_recorder(rec) <= MemRecorder::_instance_count,
  58.172 -    "pending queue has infinite loop");
  58.173 -
  58.174 -  bool out_of_generation_buffer = false;
  58.175 -  // check shutdown state inside ThreadCritical
  58.176 -  if (MemTracker::shutdown_in_progress()) return;
  58.177 -
  58.178 -  _gen[_tail].set_number_of_classes(number_of_classes);
  58.179 -  // append the recorders to the end of the generation
  58.180 -  _gen[_tail].add_recorders(rec);
  58.181 -  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
  58.182 -    "after add to current generation has infinite loop");
  58.183 -  // we have collected all recorders for this generation. If there is data,
  58.184 -  // we need to increment _tail to start a new generation.
  58.185 -  if (_gen[_tail].has_more_recorder()  || _head == _tail) {
  58.186 -    _tail = (_tail + 1) % MAX_GENERATIONS;
  58.187 -    out_of_generation_buffer = (_tail == _head);
  58.188 -  }
  58.189 -
  58.190 -  if (out_of_generation_buffer) {
  58.191 -    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
  58.192 -  }
  58.193 -}
  58.194 -
  58.195 -#ifndef PRODUCT
  58.196 -int MemTrackWorker::count_recorder(const MemRecorder* head) {
  58.197 -  int count = 0;
  58.198 -  while(head != NULL) {
  58.199 -    count ++;
  58.200 -    head = head->next();
  58.201 -  }
  58.202 -  return count;
  58.203 -}
  58.204 -
  58.205 -int MemTrackWorker::count_pending_recorders() const {
  58.206 -  int count = 0;
  58.207 -  for (int index = 0; index < MAX_GENERATIONS; index ++) {
  58.208 -    MemRecorder* head = _gen[index].peek();
  58.209 -    if (head != NULL) {
  58.210 -      count += count_recorder(head);
  58.211 -    }
  58.212 -  }
  58.213 -  return count;
  58.214 -}
  58.215 -#endif
    59.1 --- a/src/share/vm/services/memTrackWorker.hpp	Wed Aug 27 09:36:55 2014 +0200
    59.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    59.3 @@ -1,118 +0,0 @@
    59.4 -/*
    59.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    59.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.7 - *
    59.8 - * This code is free software; you can redistribute it and/or modify it
    59.9 - * under the terms of the GNU General Public License version 2 only, as
   59.10 - * published by the Free Software Foundation.
   59.11 - *
   59.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   59.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   59.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   59.15 - * version 2 for more details (a copy is included in the LICENSE file that
   59.16 - * accompanied this code).
   59.17 - *
   59.18 - * You should have received a copy of the GNU General Public License version
   59.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   59.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   59.21 - *
   59.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   59.23 - * or visit www.oracle.com if you need additional information or have any
   59.24 - * questions.
   59.25 - *
   59.26 - */
   59.27 -
   59.28 -#ifndef SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
   59.29 -#define SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
   59.30 -
   59.31 -#include "memory/allocation.hpp"
   59.32 -#include "runtime/thread.hpp"
   59.33 -#include "services/memRecorder.hpp"
   59.34 -
   59.36 -// At most MAX_GENERATIONS generations of data can be tracked.
   59.36 -#define MAX_GENERATIONS  512
   59.37 -
   59.38 -class GenerationData VALUE_OBJ_CLASS_SPEC {
   59.39 - private:
   59.40 -  int           _number_of_classes;
   59.41 -  MemRecorder*  _recorder_list;
   59.42 -
   59.43 - public:
   59.44 -  GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
   59.45 -
   59.46 -  inline int  number_of_classes() const { return _number_of_classes; }
   59.47 -  inline void set_number_of_classes(long num) { _number_of_classes = num; }
   59.48 -
   59.49 -  inline MemRecorder* next_recorder() {
   59.50 -    if (_recorder_list == NULL) {
   59.51 -      return NULL;
   59.52 -    } else {
   59.53 -      MemRecorder* tmp = _recorder_list;
   59.54 -      _recorder_list = _recorder_list->next();
   59.55 -      return tmp;
   59.56 -    }
   59.57 -  }
   59.58 -
   59.59 -  inline bool has_more_recorder() const {
   59.60 -    return (_recorder_list != NULL);
   59.61 -  }
   59.62 -
   59.63 -  // add recorders to this generation
   59.64 -  void add_recorders(MemRecorder* head) {
   59.65 -    if (head != NULL) {
   59.66 -      if (_recorder_list == NULL) {
   59.67 -        _recorder_list = head;
   59.68 -      } else {
   59.69 -        MemRecorder* tmp = _recorder_list;
   59.70 -        for (; tmp->next() != NULL; tmp = tmp->next());
   59.71 -        tmp->set_next(head);
   59.72 -      }
   59.73 -    }
   59.74 -  }
   59.75 -
   59.76 -  void reset();
   59.77 -
   59.78 -  NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
   59.79 -};
   59.80 -
   59.81 -class MemTrackWorker : public NamedThread {
   59.82 - private:
   59.83 -  // circular buffer. This buffer contains generation data to be merged into global
   59.84 -  // snaphsot.
   59.85 -  // Each slot holds a generation
   59.86 -  GenerationData  _gen[MAX_GENERATIONS];
   59.87 -  int             _head, _tail; // head and tail pointers to above circular buffer
   59.88 -
   59.89 -  bool            _has_error;
   59.90 -
   59.91 -  MemSnapshot*    _snapshot;
   59.92 -
   59.93 - public:
   59.94 -  MemTrackWorker(MemSnapshot* snapshot);
   59.95 -  ~MemTrackWorker();
   59.96 -  _NOINLINE_ void* operator new(size_t size) throw();
   59.97 -  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
   59.98 -
   59.99 -  void start();
  59.100 -  void run();
  59.101 -
  59.102 -  inline bool has_error() const { return _has_error; }
  59.103 -
  59.104 -  // task at synchronization point
  59.105 -  void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
  59.106 -
  59.107 -  // for debugging purpose, they are not thread safe.
  59.108 -  NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
  59.109 -  NOT_PRODUCT(int count_pending_recorders() const;)
  59.110 -
  59.111 -  NOT_PRODUCT(int _sync_point_count;)
  59.112 -  NOT_PRODUCT(int _merge_count;)
  59.113 -  NOT_PRODUCT(int _last_gen_in_use;)
  59.114 -
  59.115 -  // how many generations are queued
  59.116 -  inline int generations_in_use() const {
  59.117 -    return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
  59.118 -  }
  59.119 -};
  59.120 -
  59.121 -#endif // SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
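
For context on the MemTrackWorker removal above: the old scheme queued per-safepoint "generations" of memory records into a fixed ring buffer of MAX_GENERATIONS slots and merged them on a dedicated worker thread, while the rewritten tracker in this changeset records allocations in place and needs neither. Below is a minimal standalone sketch, not HotSpot code and with illustrative names only, of the ring-buffer occupancy calculation that generations_in_use() performed; it shows how the buffer could fill when safepoints outpaced the worker thread.

    // Standalone sketch of the removed ring-buffer occupancy math (illustrative only).
    #include <cassert>

    static const int kMaxGenerations = 512;   // mirrors MAX_GENERATIONS above

    // Occupied slots when both head and tail index valid generations.
    static int generations_in_use(int head, int tail) {
      return (tail >= head) ? (tail - head + 1)
                            : (kMaxGenerations - (head - tail) + 1);
    }

    int main() {
      assert(generations_in_use(0, 0)   == 1);    // one generation queued
      assert(generations_in_use(0, 511) == 512);  // buffer completely full
      assert(generations_in_use(510, 3) == 6);    // wrapped past the end
      return 0;
    }
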
    60.1 --- a/src/share/vm/services/memTracker.cpp	Wed Aug 27 09:36:55 2014 +0200
    60.2 +++ b/src/share/vm/services/memTracker.cpp	Wed Aug 27 08:19:12 2014 -0400
    60.3 @@ -1,5 +1,5 @@
    60.4  /*
    60.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    60.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    60.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    60.8   *
    60.9   * This code is free software; you can redistribute it and/or modify it
   60.10 @@ -23,862 +23,308 @@
   60.11   */
   60.12  #include "precompiled.hpp"
   60.13  
   60.14 -#include "oops/instanceKlass.hpp"
   60.15 -#include "runtime/atomic.hpp"
   60.16 -#include "runtime/interfaceSupport.hpp"
   60.17 -#include "runtime/mutexLocker.hpp"
   60.18 -#include "runtime/safepoint.hpp"
   60.19 -#include "runtime/threadCritical.hpp"
   60.20 -#include "runtime/thread.inline.hpp"
   60.21 -#include "runtime/vm_operations.hpp"
   60.22 -#include "services/memPtr.hpp"
   60.23 +#include "runtime/mutex.hpp"
   60.24 +#include "services/memBaseline.hpp"
   60.25  #include "services/memReporter.hpp"
   60.26 +#include "services/mallocTracker.inline.hpp"
   60.27  #include "services/memTracker.hpp"
   60.28 -#include "utilities/decoder.hpp"
   60.29  #include "utilities/defaultStream.hpp"
   60.30 -#include "utilities/globalDefinitions.hpp"
   60.31  
   60.32 -bool NMT_track_callsite = false;
   60.33 +#ifdef SOLARIS
   60.34 +  volatile bool NMT_stack_walkable = false;
   60.35 +#else
   60.36 +  volatile bool NMT_stack_walkable = true;
   60.37 +#endif
   60.38  
   60.39 -// walk all 'known' threads at NMT sync point, and collect their recorders
   60.40 -void SyncThreadRecorderClosure::do_thread(Thread* thread) {
   60.41 -  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
   60.42 -  if (thread->is_Java_thread()) {
   60.43 -    JavaThread* javaThread = (JavaThread*)thread;
   60.44 -    MemRecorder* recorder = javaThread->get_recorder();
   60.45 -    if (recorder != NULL) {
   60.46 -      MemTracker::enqueue_pending_recorder(recorder);
   60.47 -      javaThread->set_recorder(NULL);
   60.48 +volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
   60.49 +NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;
   60.50 +
   60.51 +NativeCallStack emptyStack(0, false);
   60.52 +
   60.53 +MemBaseline MemTracker::_baseline;
   60.54 +Mutex*      MemTracker::_query_lock = NULL;
   60.55 +bool MemTracker::_is_nmt_env_valid = true;
   60.56 +
   60.57 +
   60.58 +NMT_TrackingLevel MemTracker::init_tracking_level() {
   60.59 +  NMT_TrackingLevel level = NMT_off;
   60.60 +  char buf[64];
   60.61 +  char nmt_option[64];
   60.62 +  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
   60.63 +  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
   60.64 +    if (strcmp(nmt_option, "summary") == 0) {
   60.65 +      level = NMT_summary;
   60.66 +    } else if (strcmp(nmt_option, "detail") == 0) {
   60.67 +#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
   60.68 +      level = NMT_detail;
   60.69 +#else
   60.70 +      level = NMT_summary;
   60.71 +#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
   60.72 +    } else if (strcmp(nmt_option, "off") != 0) {
   60.73 +      // The option value is invalid
   60.74 +      _is_nmt_env_valid = false;
   60.75      }
   60.76 +
   60.77 +    // Remove the environment variable to avoid leaking to child processes
   60.78 +    os::unsetenv(buf);
   60.79    }
   60.80 -  _thread_count ++;
   60.81 +
   60.82 +  if (!MallocTracker::initialize(level) ||
   60.83 +      !VirtualMemoryTracker::initialize(level)) {
   60.84 +    level = NMT_off;
   60.85 +  }
   60.86 +  return level;
   60.87  }
   60.88  
   60.89 -
   60.90 -MemRecorder* volatile           MemTracker::_global_recorder = NULL;
   60.91 -MemSnapshot*                    MemTracker::_snapshot = NULL;
   60.92 -MemBaseline                     MemTracker::_baseline;
   60.93 -Mutex*                          MemTracker::_query_lock = NULL;
   60.94 -MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
   60.95 -MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
   60.96 -MemTrackWorker*                 MemTracker::_worker_thread = NULL;
   60.97 -int                             MemTracker::_sync_point_skip_count = 0;
   60.98 -MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
   60.99 -volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
  60.100 -MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
  60.101 -int                             MemTracker::_thread_count = 255;
  60.102 -volatile jint                   MemTracker::_pooled_recorder_count = 0;
  60.103 -volatile unsigned long          MemTracker::_processing_generation = 0;
  60.104 -volatile bool                   MemTracker::_worker_thread_idle = false;
  60.105 -volatile jint                   MemTracker::_pending_op_count = 0;
  60.106 -volatile bool                   MemTracker::_slowdown_calling_thread = false;
  60.107 -debug_only(intx                 MemTracker::_main_thread_tid = 0;)
  60.108 -NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
  60.109 -
  60.110 -void MemTracker::init_tracking_options(const char* option_line) {
  60.111 -  _tracking_level = NMT_off;
  60.112 -  if (strcmp(option_line, "=summary") == 0) {
  60.113 -    _tracking_level = NMT_summary;
  60.114 -  } else if (strcmp(option_line, "=detail") == 0) {
  60.115 -    // detail relies on a stack-walking ability that may not
  60.116 -    // be available depending on platform and/or compiler flags
  60.117 -#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
  60.118 -      _tracking_level = NMT_detail;
  60.119 -#else
  60.120 -      jio_fprintf(defaultStream::error_stream(),
  60.121 -        "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
  60.122 -      _tracking_level = NMT_summary;
  60.123 -#endif
  60.124 -  } else if (strcmp(option_line, "=off") != 0) {
  60.125 -    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  60.126 -  }
  60.127 -}
  60.128 -
  60.129 -// first phase of bootstrapping, when VM is still in single-threaded mode.
  60.130 -void MemTracker::bootstrap_single_thread() {
  60.131 -  if (_tracking_level > NMT_off) {
  60.132 -    assert(_state == NMT_uninited, "wrong state");
  60.133 -
  60.134 -    // NMT is not supported with UseMallocOnly is on. NMT can NOT
  60.135 -    // handle the amount of malloc data without significantly impacting
  60.136 -    // runtime performance when this flag is on.
  60.137 -    if (UseMallocOnly) {
  60.138 -      shutdown(NMT_use_malloc_only);
  60.139 -      return;
  60.140 -    }
  60.141 -
  60.142 +void MemTracker::init() {
  60.143 +  if (tracking_level() >= NMT_summary) {
  60.144      _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
   60.145 +    // The allocation above may fail if the VM is already out of memory; unlikely, but it has to be handled.
  60.146      if (_query_lock == NULL) {
  60.147 -      shutdown(NMT_out_of_memory);
  60.148 -      return;
  60.149 -    }
  60.150 -
  60.151 -    debug_only(_main_thread_tid = os::current_thread_id();)
  60.152 -    _state = NMT_bootstrapping_single_thread;
  60.153 -    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  60.154 -  }
  60.155 -}
  60.156 -
  60.157 -// second phase of bootstrapping, when VM is about to or already entered multi-theaded mode.
  60.158 -void MemTracker::bootstrap_multi_thread() {
  60.159 -  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
  60.160 -  // create nmt lock for multi-thread execution
  60.161 -    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  60.162 -    _state = NMT_bootstrapping_multi_thread;
  60.163 -    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  60.164 -  }
  60.165 -}
  60.166 -
  60.167 -// fully start nmt
  60.168 -void MemTracker::start() {
  60.169 -  // Native memory tracking is off from command line option
  60.170 -  if (_tracking_level == NMT_off || shutdown_in_progress()) return;
  60.171 -
  60.172 -  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  60.173 -  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
  60.174 -
  60.175 -  _snapshot = new (std::nothrow)MemSnapshot();
  60.176 -  if (_snapshot != NULL) {
  60.177 -    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
  60.178 -      _state = NMT_started;
  60.179 -      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  60.180 -      return;
  60.181 -    }
  60.182 -
  60.183 -    delete _snapshot;
  60.184 -    _snapshot = NULL;
  60.185 -  }
  60.186 -
  60.187 -  // fail to start native memory tracking, shut it down
  60.188 -  shutdown(NMT_initialization);
  60.189 -}
  60.190 -
  60.191 -/**
  60.192 - * Shutting down native memory tracking.
  60.193 - * We can not shutdown native memory tracking immediately, so we just
  60.194 - * setup shutdown pending flag, every native memory tracking component
  60.195 - * should orderly shut itself down.
  60.196 - *
  60.197 - * The shutdown sequences:
  60.198 - *  1. MemTracker::shutdown() sets MemTracker to shutdown pending state
  60.199 - *  2. Worker thread calls MemTracker::final_shutdown(), which transites
  60.200 - *     MemTracker to final shutdown state.
  60.201 - *  3. At sync point, MemTracker does final cleanup, before sets memory
  60.202 - *     tracking level to off to complete shutdown.
  60.203 - */
  60.204 -void MemTracker::shutdown(ShutdownReason reason) {
  60.205 -  if (_tracking_level == NMT_off) return;
  60.206 -
  60.207 -  if (_state <= NMT_bootstrapping_single_thread) {
  60.208 -    // we still in single thread mode, there is not contention
  60.209 -    _state = NMT_shutdown_pending;
  60.210 -    _reason = reason;
  60.211 -  } else {
  60.212 -    // we want to know who initialized shutdown
  60.213 -    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
  60.214 -                                       (jint*)&_state, (jint)NMT_started)) {
  60.215 -        _reason = reason;
  60.216 +      shutdown();
  60.217      }
  60.218    }
  60.219  }
  60.220  
  60.221 -// final phase of shutdown
  60.222 -void MemTracker::final_shutdown() {
  60.223 -  // delete all pending recorders and pooled recorders
  60.224 -  delete_all_pending_recorders();
  60.225 -  delete_all_pooled_recorders();
  60.226 -
  60.227 -  {
  60.228 -    // shared baseline and snapshot are the only objects needed to
  60.229 -    // create query results
  60.230 -    MutexLockerEx locker(_query_lock, true);
  60.231 -    // cleanup baseline data and snapshot
  60.232 -    _baseline.clear();
  60.233 -    delete _snapshot;
  60.234 -    _snapshot = NULL;
  60.235 +bool MemTracker::check_launcher_nmt_support(const char* value) {
  60.236 +  if (strcmp(value, "=detail") == 0) {
  60.237 +#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
   60.238 +    jio_fprintf(defaultStream::error_stream(),
   60.239 +      "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
   60.240 +    if (MemTracker::tracking_level() != NMT_summary) {
   60.241 +      return false;
   60.242 +    }
  60.243 +#else
  60.244 +    if (MemTracker::tracking_level() != NMT_detail) {
  60.245 +      return false;
  60.246 +    }
  60.247 +#endif
  60.248 +  } else if (strcmp(value, "=summary") == 0) {
  60.249 +    if (MemTracker::tracking_level() != NMT_summary) {
  60.250 +      return false;
  60.251 +    }
  60.252 +  } else if (strcmp(value, "=off") == 0) {
  60.253 +    if (MemTracker::tracking_level() != NMT_off) {
  60.254 +      return false;
  60.255 +    }
  60.256 +  } else {
  60.257 +    _is_nmt_env_valid = false;
  60.258    }
  60.259  
  60.260 -  // shutdown shared decoder instance, since it is only
  60.261 -  // used by native memory tracking so far.
  60.262 -  Decoder::shutdown();
  60.263 -
  60.264 -  MemTrackWorker* worker = NULL;
  60.265 -  {
  60.266 -    ThreadCritical tc;
  60.267 -    // can not delete worker inside the thread critical
  60.268 -    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
  60.269 -      worker = _worker_thread;
  60.270 -      _worker_thread = NULL;
  60.271 -    }
  60.272 -  }
  60.273 -  if (worker != NULL) {
  60.274 -    delete worker;
  60.275 -  }
  60.276 -  _state = NMT_final_shutdown;
  60.277 +  return true;
  60.278  }
  60.279  
  60.280 -// delete all pooled recorders
  60.281 -void MemTracker::delete_all_pooled_recorders() {
  60.282 -  // free all pooled recorders
  60.283 -  MemRecorder* volatile cur_head = _pooled_recorders;
  60.284 -  if (cur_head != NULL) {
  60.285 -    MemRecorder* null_ptr = NULL;
  60.286 -    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
  60.287 -      (void*)&_pooled_recorders, (void*)cur_head)) {
  60.288 -      cur_head = _pooled_recorders;
  60.289 -    }
  60.290 -    if (cur_head != NULL) {
  60.291 -      delete cur_head;
  60.292 -      _pooled_recorder_count = 0;
  60.293 -    }
  60.294 +bool MemTracker::verify_nmt_option() {
  60.295 +  return _is_nmt_env_valid;
  60.296 +}
  60.297 +
  60.298 +void* MemTracker::malloc_base(void* memblock) {
  60.299 +  return MallocTracker::get_base(memblock);
  60.300 +}
  60.301 +
  60.302 +void Tracker::record(address addr, size_t size) {
  60.303 +  if (MemTracker::tracking_level() < NMT_summary) return;
  60.304 +  switch(_type) {
  60.305 +    case uncommit:
  60.306 +      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
  60.307 +      break;
  60.308 +    case release:
  60.309 +      VirtualMemoryTracker::remove_released_region(addr, size);
   60.310 +      break;
  60.311 +    default:
  60.312 +      ShouldNotReachHere();
  60.313    }
  60.314  }
  60.315  
  60.316 -// delete all recorders in pending queue
  60.317 -void MemTracker::delete_all_pending_recorders() {
  60.318 -  // free all pending recorders
  60.319 -  MemRecorder* pending_head = get_pending_recorders();
  60.320 -  if (pending_head != NULL) {
  60.321 -    delete pending_head;
  60.322 +
   60.323 +// Shutdown can only be issued via JCmd, and NMT JCmd is serialized
   60.324 +// by a lock.
   60.325 +void MemTracker::shutdown() {
   60.326 +  // We can only shut NMT down to the minimal tracking level if it has
   60.327 +  // ever been on.
   60.328 +  if (tracking_level() > NMT_minimal) {
  60.329 +    transition_to(NMT_minimal);
  60.330    }
  60.331  }
  60.332  
  60.333 -/*
  60.334 - * retrieve per-thread recorder of specified thread.
  60.335 - * if thread == NULL, it means global recorder
  60.336 - */
  60.337 -MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  60.338 -  if (shutdown_in_progress()) return NULL;
  60.339 +bool MemTracker::transition_to(NMT_TrackingLevel level) {
  60.340 +  NMT_TrackingLevel current_level = tracking_level();
  60.341  
  60.342 -  MemRecorder* rc;
  60.343 -  if (thread == NULL) {
  60.344 -    rc = _global_recorder;
  60.345 +  if (current_level == level) {
  60.346 +    return true;
  60.347 +  } else if (current_level > level) {
   60.348 +    // Downgrading the tracking level: lower _tracking_level first, so
   60.349 +    // new tracking requests stop before the trackers are transitioned.
  60.350 +    _tracking_level = level;
  60.351 +    // Make _tracking_level visible immediately.
  60.352 +    OrderAccess::fence();
  60.353 +    VirtualMemoryTracker::transition(current_level, level);
  60.354 +    MallocTracker::transition(current_level, level);
  60.355 +
  60.356 +    if (level == NMT_minimal) _baseline.reset();
  60.357    } else {
  60.358 -    rc = thread->get_recorder();
  60.359 +    VirtualMemoryTracker::transition(current_level, level);
  60.360 +    MallocTracker::transition(current_level, level);
  60.361 +
  60.362 +    _tracking_level = level;
  60.363 +    // Make _tracking_level visible immediately.
  60.364 +    OrderAccess::fence();
  60.365    }
  60.366  
  60.367 -  if (rc != NULL && rc->is_full()) {
  60.368 -    enqueue_pending_recorder(rc);
  60.369 -    rc = NULL;
  60.370 -  }
  60.371 -
  60.372 -  if (rc == NULL) {
  60.373 -    rc = get_new_or_pooled_instance();
  60.374 -    if (thread == NULL) {
  60.375 -      _global_recorder = rc;
  60.376 -    } else {
  60.377 -      thread->set_recorder(rc);
  60.378 -    }
  60.379 -  }
  60.380 -  return rc;
  60.381 +  return true;
  60.382  }
  60.383  
  60.384 -/*
  60.385 - * get a per-thread recorder from pool, or create a new one if
  60.386 - * there is not one available.
  60.387 - */
  60.388 -MemRecorder* MemTracker::get_new_or_pooled_instance() {
  60.389 -   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  60.390 -   if (cur_head == NULL) {
  60.391 -     MemRecorder* rec = new (std::nothrow)MemRecorder();
  60.392 -     if (rec == NULL || rec->out_of_memory()) {
  60.393 -       shutdown(NMT_out_of_memory);
  60.394 -       if (rec != NULL) {
  60.395 -         delete rec;
  60.396 -         rec = NULL;
  60.397 -       }
  60.398 -     }
  60.399 -     return rec;
  60.400 -   } else {
  60.401 -     MemRecorder* next_head = cur_head->next();
  60.402 -     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
  60.403 -       (void*)cur_head)) {
  60.404 -       return get_new_or_pooled_instance();
  60.405 -     }
  60.406 -     cur_head->set_next(NULL);
  60.407 -     Atomic::dec(&_pooled_recorder_count);
  60.408 -     cur_head->set_generation();
  60.409 -     return cur_head;
  60.410 +void MemTracker::final_report(outputStream* output) {
  60.411 +  assert(output != NULL, "No output stream");
  60.412 +  if (tracking_level() >= NMT_summary) {
  60.413 +    MallocMemorySnapshot* malloc_memory_snapshot =
  60.414 +      MallocMemorySummary::as_snapshot();
  60.415 +    malloc_memory_snapshot->make_adjustment();
  60.416 +
  60.417 +    VirtualMemorySnapshot* virtual_memory_snapshot =
  60.418 +      VirtualMemorySummary::as_snapshot();
  60.419 +
  60.420 +    MemSummaryReporter rptr(malloc_memory_snapshot,
  60.421 +      virtual_memory_snapshot, output);
  60.422 +    rptr.report();
   60.423 +    // Shut down NMT; from this point on the tracking data is no longer accurate.
  60.424 +    shutdown();
  60.425    }
  60.426  }
  60.427  
  60.428 -/*
  60.429 - * retrieve all recorders in pending queue, and empty the queue
  60.430 - */
  60.431 -MemRecorder* MemTracker::get_pending_recorders() {
  60.432 -  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  60.433 -  MemRecorder* null_ptr = NULL;
  60.434 -  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
  60.435 -    (void*)cur_head)) {
  60.436 -    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  60.437 -  }
  60.438 -  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  60.439 -  return cur_head;
  60.440 -}
   60.441 +// This is a walker that gathers malloc site hashtable statistics;
   60.442 +// the result is used for tuning.
  60.443 +class StatisticsWalker : public MallocSiteWalker {
  60.444 + private:
  60.445 +  enum Threshold {
  60.446 +    // aggregates statistics over this threshold into one
  60.447 +    // line item.
  60.448 +    report_threshold = 20
  60.449 +  };
  60.450  
  60.451 -/*
  60.452 - * release a recorder to recorder pool.
  60.453 - */
  60.454 -void MemTracker::release_thread_recorder(MemRecorder* rec) {
  60.455 -  assert(rec != NULL, "null recorder");
  60.456 -  // we don't want to pool too many recorders
  60.457 -  rec->set_next(NULL);
  60.458 -  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
  60.459 -    delete rec;
  60.460 -    return;
  60.461 + private:
  60.462 +  // Number of allocation sites that have all memory freed
  60.463 +  int   _empty_entries;
   60.464 +  // Total number of allocation sites, including empty sites
   60.465 +  int   _total_entries;
   60.466 +  // Distribution of captured call stack depths
  60.467 +  int   _stack_depth_distribution[NMT_TrackingStackDepth];
  60.468 +  // Hash distribution
  60.469 +  int   _hash_distribution[report_threshold];
  60.470 +  // Number of hash buckets that have entries over the threshold
  60.471 +  int   _bucket_over_threshold;
  60.472 +
   60.473 +  // The hash bucket that the walker is currently walking
  60.474 +  int   _current_hash_bucket;
  60.475 +  // The length of current hash bucket
  60.476 +  int   _current_bucket_length;
  60.477 +  // Number of hash buckets that are not empty
  60.478 +  int   _used_buckets;
  60.479 +  // Longest hash bucket length
  60.480 +  int   _longest_bucket_length;
  60.481 +
  60.482 + public:
  60.483 +  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
  60.484 +    int index = 0;
  60.485 +    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
  60.486 +      _stack_depth_distribution[index] = 0;
  60.487 +    }
  60.488 +    for (index = 0; index < report_threshold; index ++) {
  60.489 +      _hash_distribution[index] = 0;
  60.490 +    }
  60.491 +    _bucket_over_threshold = 0;
  60.492 +    _longest_bucket_length = 0;
  60.493 +    _current_hash_bucket = -1;
  60.494 +    _current_bucket_length = 0;
  60.495 +    _used_buckets = 0;
  60.496    }
  60.497  
  60.498 -  rec->clear();
  60.499 -  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  60.500 -  rec->set_next(cur_head);
  60.501 -  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
  60.502 -    (void*)cur_head)) {
  60.503 -    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  60.504 -    rec->set_next(cur_head);
  60.505 -  }
  60.506 -  Atomic::inc(&_pooled_recorder_count);
  60.507 -}
  60.508 +  virtual bool at(const MallocSite* e) {
  60.509 +    if (e->size() == 0) _empty_entries ++;
  60.510 +    _total_entries ++;
  60.511  
  60.512 -// write a record to proper recorder. No lock can be taken from this method
  60.513 -// down.
  60.514 -void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
  60.515 -    size_t size, jint seq, address pc, JavaThread* thread) {
   60.516 +    // stack depth distribution
  60.517 +    int frames = e->call_stack()->frames();
  60.518 +    _stack_depth_distribution[frames - 1] ++;
  60.519  
  60.520 -    MemRecorder* rc = get_thread_recorder(thread);
  60.521 -    if (rc != NULL) {
  60.522 -      rc->record(addr, flags, size, seq, pc);
  60.523 +    // hash distribution
  60.524 +    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
  60.525 +    if (_current_hash_bucket == -1) {
  60.526 +      _current_hash_bucket = hash_bucket;
  60.527 +      _current_bucket_length = 1;
  60.528 +    } else if (_current_hash_bucket == hash_bucket) {
  60.529 +      _current_bucket_length ++;
  60.530 +    } else {
  60.531 +      record_bucket_length(_current_bucket_length);
  60.532 +      _current_hash_bucket = hash_bucket;
  60.533 +      _current_bucket_length = 1;
  60.534      }
  60.535 -}
  60.536 -
  60.537 -/**
  60.538 - * enqueue a recorder to pending queue
  60.539 - */
  60.540 -void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  60.541 -  assert(rec != NULL, "null recorder");
  60.542 -
  60.543 -  // we are shutting down, so just delete it
  60.544 -  if (shutdown_in_progress()) {
  60.545 -    rec->set_next(NULL);
  60.546 -    delete rec;
  60.547 -    return;
  60.548 +    return true;
  60.549    }
  60.550  
  60.551 -  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  60.552 -  rec->set_next(cur_head);
  60.553 -  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
  60.554 -    (void*)cur_head)) {
  60.555 -    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  60.556 -    rec->set_next(cur_head);
  60.557 +  // walk completed
  60.558 +  void completed() {
  60.559 +    record_bucket_length(_current_bucket_length);
  60.560    }
  60.561 -  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
  60.562 -}
  60.563  
  60.564 -/*
  60.565 - * The method is called at global safepoint
  60.566 - * during it synchronization process.
  60.567 - *   1. enqueue all JavaThreads' per-thread recorders
  60.568 - *   2. enqueue global recorder
  60.569 - *   3. retrieve all pending recorders
  60.570 - *   4. reset global sequence number generator
  60.571 - *   5. call worker's sync
  60.572 - */
  60.573 -#define MAX_SAFEPOINTS_TO_SKIP     128
  60.574 -#define SAFE_SEQUENCE_THRESHOLD    30
  60.575 -#define HIGH_GENERATION_THRESHOLD  60
  60.576 -#define MAX_RECORDER_THREAD_RATIO  30
  60.577 -#define MAX_RECORDER_PER_THREAD    100
  60.578 -
  60.579 -void MemTracker::sync() {
  60.580 -  assert(_tracking_level > NMT_off, "NMT is not enabled");
  60.581 -  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  60.582 -
  60.583 -  // Some GC tests hit large number of safepoints in short period of time
  60.584 -  // without meaningful activities. We should prevent going to
  60.585 -  // sync point in these cases, which can potentially exhaust generation buffer.
  60.586 -  // Here is the factots to determine if we should go into sync point:
  60.587 -  // 1. not to overflow sequence number
  60.588 -  // 2. if we are in danger to overflow generation buffer
  60.589 -  // 3. how many safepoints we already skipped sync point
  60.590 -  if (_state == NMT_started) {
  60.591 -    // worker thread is not ready, no one can manage generation
  60.592 -    // buffer, so skip this safepoint
  60.593 -    if (_worker_thread == NULL) return;
  60.594 -
  60.595 -    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
  60.596 -      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
  60.597 -      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
  60.598 -      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
  60.599 -        _sync_point_skip_count ++;
  60.600 -        return;
  60.601 +  void report_statistics(outputStream* out) {
  60.602 +    int index;
  60.603 +    out->print_cr("Malloc allocation site table:");
  60.604 +    out->print_cr("\tTotal entries: %d", _total_entries);
  60.605 +    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
  60.606 +    out->print_cr(" ");
  60.607 +    out->print_cr("Hash distribution:");
  60.608 +    if (_used_buckets < MallocSiteTable::hash_buckets()) {
  60.609 +      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
  60.610 +    }
  60.611 +    for (index = 0; index < report_threshold; index ++) {
  60.612 +      if (_hash_distribution[index] != 0) {
  60.613 +        if (index == 0) {
  60.614 +          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
  60.615 +        } else if (index < 9) { // single digit
  60.616 +          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
  60.617 +        } else {
  60.618 +          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
  60.619 +        }
  60.620        }
  60.621      }
  60.622 -    {
  60.623 -      // This method is running at safepoint, with ThreadCritical lock,
  60.624 -      // it should guarantee that NMT is fully sync-ed.
  60.625 -      ThreadCritical tc;
  60.626 -
  60.627 -      // We can NOT execute NMT sync-point if there are pending tracking ops.
  60.628 -      if (_pending_op_count == 0) {
  60.629 -        SequenceGenerator::reset();
  60.630 -        _sync_point_skip_count = 0;
  60.631 -
  60.632 -        // walk all JavaThreads to collect recorders
  60.633 -        SyncThreadRecorderClosure stc;
  60.634 -        Threads::threads_do(&stc);
  60.635 -
  60.636 -        _thread_count = stc.get_thread_count();
  60.637 -        MemRecorder* pending_recorders = get_pending_recorders();
  60.638 -
  60.639 -        if (_global_recorder != NULL) {
  60.640 -          _global_recorder->set_next(pending_recorders);
  60.641 -          pending_recorders = _global_recorder;
  60.642 -          _global_recorder = NULL;
  60.643 -        }
  60.644 -
  60.645 -        // see if NMT has too many outstanding recorder instances, it usually
  60.646 -        // means that worker thread is lagging behind in processing them.
  60.647 -        if (!AutoShutdownNMT) {
  60.648 -          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
  60.649 -        } else {
  60.650 -          // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM
  60.651 -          if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) {
  60.652 -            shutdown(NMT_out_of_memory);
  60.653 -          }
  60.654 -        }
  60.655 -
  60.656 -        // check _worker_thread with lock to avoid racing condition
  60.657 -        if (_worker_thread != NULL) {
  60.658 -          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
  60.659 -        }
  60.660 -        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
  60.661 -      } else {
  60.662 -        _sync_point_skip_count ++;
  60.663 +    if (_bucket_over_threshold > 0) {
  60.664 +      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
  60.665 +    }
  60.666 +    out->print_cr("most entries: %d", _longest_bucket_length);
  60.667 +    out->print_cr(" ");
  60.668 +    out->print_cr("Call stack depth distribution:");
  60.669 +    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
  60.670 +      if (_stack_depth_distribution[index] > 0) {
  60.671 +        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
  60.672        }
  60.673      }
  60.674    }
  60.675  
  60.676 -  // now, it is the time to shut whole things off
  60.677 -  if (_state == NMT_final_shutdown) {
  60.678 -    // walk all JavaThreads to delete all recorders
  60.679 -    SyncThreadRecorderClosure stc;
  60.680 -    Threads::threads_do(&stc);
  60.681 -    // delete global recorder
  60.682 -    {
  60.683 -      ThreadCritical tc;
  60.684 -      if (_global_recorder != NULL) {
  60.685 -        delete _global_recorder;
  60.686 -        _global_recorder = NULL;
  60.687 -      }
  60.688 + private:
  60.689 +  void record_bucket_length(int length) {
  60.690 +    _used_buckets ++;
  60.691 +    if (length <= report_threshold) {
  60.692 +      _hash_distribution[length - 1] ++;
  60.693 +    } else {
  60.694 +      _bucket_over_threshold ++;
  60.695      }
  60.696 -    MemRecorder* pending_recorders = get_pending_recorders();
  60.697 -    if (pending_recorders != NULL) {
  60.698 -      delete pending_recorders;
  60.699 -    }
  60.700 -    // try at a later sync point to ensure MemRecorder instance drops to zero to
  60.701 -    // completely shutdown NMT
  60.702 -    if (MemRecorder::_instance_count == 0) {
  60.703 -      _state = NMT_shutdown;
  60.704 -      _tracking_level = NMT_off;
  60.705 -    }
  60.706 +    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  60.707    }
  60.708 +};
  60.709 +
  60.710 +
  60.711 +void MemTracker::tuning_statistics(outputStream* out) {
  60.712 +  // NMT statistics
  60.713 +  StatisticsWalker walker;
  60.714 +  MallocSiteTable::walk_malloc_site(&walker);
  60.715 +  walker.completed();
  60.716 +
  60.717 +  out->print_cr("Native Memory Tracking Statistics:");
  60.718 +  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  60.719 +  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  60.720 +  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  60.721 +  out->print_cr(" ");
  60.722 +  walker.report_statistics(out);
  60.723  }
  60.724  
  60.725 -/*
  60.726 - * Start worker thread.
  60.727 - */
  60.728 -bool MemTracker::start_worker(MemSnapshot* snapshot) {
  60.729 -  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
  60.730 -  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
  60.731 -  if (_worker_thread == NULL) {
  60.732 -    return false;
  60.733 -  } else if (_worker_thread->has_error()) {
  60.734 -    delete _worker_thread;
  60.735 -    _worker_thread = NULL;
  60.736 -    return false;
  60.737 -  }
  60.738 -  _worker_thread->start();
  60.739 -  return true;
  60.740 -}
  60.741 -
  60.742 -/*
  60.743 - * We need to collect a JavaThread's per-thread recorder
  60.744 - * before it exits.
  60.745 - */
  60.746 -void MemTracker::thread_exiting(JavaThread* thread) {
  60.747 -  if (is_on()) {
  60.748 -    MemRecorder* rec = thread->get_recorder();
  60.749 -    if (rec != NULL) {
  60.750 -      enqueue_pending_recorder(rec);
  60.751 -      thread->set_recorder(NULL);
  60.752 -    }
  60.753 -  }
  60.754 -}
  60.755 -
  60.756 -// baseline current memory snapshot
  60.757 -bool MemTracker::baseline() {
  60.758 -  MutexLocker lock(_query_lock);
  60.759 -  MemSnapshot* snapshot = get_snapshot();
  60.760 -  if (snapshot != NULL) {
  60.761 -    return _baseline.baseline(*snapshot, false);
  60.762 -  }
  60.763 -  return false;
  60.764 -}
  60.765 -
  60.766 -// print memory usage from current snapshot
  60.767 -bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  60.768 -  MemBaseline  baseline;
  60.769 -  MutexLocker  lock(_query_lock);
  60.770 -  MemSnapshot* snapshot = get_snapshot();
  60.771 -  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
  60.772 -    BaselineReporter reporter(out, unit);
  60.773 -    reporter.report_baseline(baseline, summary_only);
  60.774 -    return true;
  60.775 -  }
  60.776 -  return false;
  60.777 -}
  60.778 -
  60.779 -// Whitebox API for blocking until the current generation of NMT data has been merged
  60.780 -bool MemTracker::wbtest_wait_for_data_merge() {
  60.781 -  // NMT can't be shutdown while we're holding _query_lock
  60.782 -  MutexLocker lock(_query_lock);
  60.783 -  assert(_worker_thread != NULL, "Invalid query");
  60.784 -  // the generation at query time, so NMT will spin till this generation is processed
  60.785 -  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  60.786 -  unsigned long current_processing_generation = _processing_generation;
  60.787 -  // if generation counter overflown
  60.788 -  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  60.789 -  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  60.790 -  // spin
  60.791 -  while (!shutdown_in_progress()) {
  60.792 -    if (!generation_overflown) {
  60.793 -      if (current_processing_generation > generation_at_query_time) {
  60.794 -        return true;
  60.795 -      }
  60.796 -    } else {
  60.797 -      assert(generations_to_wrap >= 0, "Sanity check");
  60.798 -      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  60.799 -      assert(current_generations_to_wrap >= 0, "Sanity check");
  60.800 -      // to overflow an unsigned long should take long time, so to_wrap check should be sufficient
  60.801 -      if (current_generations_to_wrap > generations_to_wrap &&
  60.802 -          current_processing_generation > generation_at_query_time) {
  60.803 -        return true;
  60.804 -      }
  60.805 -    }
  60.806 -
  60.807 -    // if worker thread is idle, but generation is not advancing, that means
  60.808 -    // there is not safepoint to let NMT advance generation, force one.
  60.809 -    if (_worker_thread_idle) {
  60.810 -      VM_ForceSafepoint vfs;
  60.811 -      VMThread::execute(&vfs);
  60.812 -    }
  60.813 -    MemSnapshot* snapshot = get_snapshot();
  60.814 -    if (snapshot == NULL) {
  60.815 -      return false;
  60.816 -    }
  60.817 -    snapshot->wait(1000);
  60.818 -    current_processing_generation = _processing_generation;
  60.819 -  }
  60.820 -  // We end up here if NMT is shutting down before our data has been merged
  60.821 -  return false;
  60.822 -}
  60.823 -
  60.824 -// compare memory usage between current snapshot and baseline
  60.825 -bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  60.826 -  MutexLocker lock(_query_lock);
  60.827 -  if (_baseline.baselined()) {
  60.828 -    MemBaseline baseline;
  60.829 -    MemSnapshot* snapshot = get_snapshot();
  60.830 -    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
  60.831 -      BaselineReporter reporter(out, unit);
  60.832 -      reporter.diff_baselines(baseline, _baseline, summary_only);
  60.833 -      return true;
  60.834 -    }
  60.835 -  }
  60.836 -  return false;
  60.837 -}
  60.838 -
  60.839 -#ifndef PRODUCT
  60.840 -void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  60.841 -  int cur_len = 0;
  60.842 -  char tmp[1024];
  60.843 -  address pc;
  60.844 -
  60.845 -  while (cur_len < len) {
  60.846 -    pc = os::get_caller_pc(toSkip + 1);
  60.847 -    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
  60.848 -      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
  60.849 -      cur_len = (int)strlen(buf);
  60.850 -    } else {
  60.851 -      buf[cur_len] = '\0';
  60.852 -      break;
  60.853 -    }
  60.854 -    toSkip ++;
  60.855 -  }
  60.856 -}
  60.857 -
  60.858 -void MemTracker::print_tracker_stats(outputStream* st) {
  60.859 -  st->print_cr("\nMemory Tracker Stats:");
  60.860 -  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  60.861 -  st->print_cr("\tthead count = %d", _thread_count);
  60.862 -  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  60.863 -  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  60.864 -  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  60.865 -  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  60.866 -  if (_worker_thread != NULL) {
  60.867 -    st->print_cr("\tWorker thread:");
  60.868 -    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
  60.869 -    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
  60.870 -    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  60.871 -  } else {
  60.872 -    st->print_cr("\tWorker thread is not started");
  60.873 -  }
  60.874 -  st->print_cr(" ");
  60.875 -
  60.876 -  if (_snapshot != NULL) {
  60.877 -    _snapshot->print_snapshot_stats(st);
  60.878 -  } else {
  60.879 -    st->print_cr("No snapshot");
  60.880 -  }
  60.881 -}
  60.882 -#endif
  60.883 -
  60.884 -
  60.885 -// Tracker Implementation
  60.886 -
  60.887 -/*
  60.888 - * Create a tracker.
  60.889 - * This is a fairly complicated constructor, as it has to make two important decisions:
  60.890 - *   1) Does it need to take ThreadCritical lock to write tracking record
  60.891 - *   2) Does it need to pre-reserve a sequence number for the tracking record
  60.892 - *
  60.893 - * The rules to determine if ThreadCritical is needed:
  60.894 - *   1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
  60.895 - *      still in single thread mode.
  60.896 - *   2. For all threads other than JavaThread, ThreadCritical is needed
  60.897 - *      to write to recorders to global recorder.
  60.898 - *   3. For JavaThreads that are no longer visible by safepoint, also
  60.899 - *      need to take ThreadCritical and records are written to global
  60.900 - *      recorders, since these threads are NOT walked by Threads.do_thread().
  60.901 - *   4. JavaThreads that are running in safepoint-safe states do not stop
  60.902 - *      for safepoints, ThreadCritical lock should be taken to write
  60.903 - *      memory records.
  60.904 - *   5. JavaThreads that are running in VM state do not need any lock and
  60.905 - *      records are written to per-thread recorders.
  60.906 - *   6. For a thread has yet to attach VM 'Thread', they need to take
  60.907 - *      ThreadCritical to write to global recorder.
  60.908 - *
  60.909 - *  The memory operations that need pre-reserve sequence numbers:
  60.910 - *    The memory operations that "release" memory blocks and the
  60.911 - *    operations can fail, need to pre-reserve sequence number. They
  60.912 - *    are realloc, uncommit and release.
  60.913 - *
  60.914 - *  The reason for pre-reserve sequence number, is to prevent race condition:
  60.915 - *    Thread 1                      Thread 2
  60.916 - *    <release>
  60.917 - *                                  <allocate>
  60.918 - *                                  <write allocate record>
  60.919 - *   <write release record>
  60.920 - *   if Thread 2 happens to obtain the memory address Thread 1 just released,
  60.921 - *   then NMT can mistakenly report the memory is free.
  60.922 - *
  60.923 - *  Noticeably, free() does not need pre-reserve sequence number, because the call
  60.924 - *  does not fail, so we can alway write "release" record before the memory is actaully
  60.925 - *  freed.
  60.926 - *
  60.927 - *  For realloc, uncommit and release, following coding pattern should be used:
  60.928 - *
  60.929 - *     MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
  60.930 - *     ptr = ::realloc(...);
  60.931 - *     if (ptr == NULL) {
  60.932 - *       tkr.record(...)
  60.933 - *     } else {
  60.934 - *       tkr.discard();
  60.935 - *     }
  60.936 - *
  60.937 - *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
  60.938 - *     if (uncommit(...)) {
  60.939 - *       tkr.record(...);
  60.940 - *     } else {
  60.941 - *       tkr.discard();
  60.942 - *     }
  60.943 - *
  60.944 - *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  60.945 - *     if (release(...)) {
  60.946 - *       tkr.record(...);
  60.947 - *     } else {
  60.948 - *       tkr.discard();
  60.949 - *     }
  60.950 - *
  60.951 - * Since pre-reserved sequence number is only good for the generation that it is acquired,
  60.952 - * when there is pending Tracker that reserved sequence number, NMT sync-point has
  60.953 - * to be skipped to prevent from advancing generation. This is done by inc and dec
  60.954 - * MemTracker::_pending_op_count, when MemTracker::_pending_op_count > 0, NMT sync-point is skipped.
  60.955 - * Not all pre-reservation of sequence number will increment pending op count. For JavaThreads
  60.956 - * that honor safepoints, safepoint can not occur during the memory operations, so the
  60.957 - * pre-reserved sequence number won't cross the generation boundry.
  60.958 - */
  60.959 -MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
  60.960 -  _op = NoOp;
  60.961 -  _seq = 0;
  60.962 -  if (MemTracker::is_on()) {
  60.963 -    _java_thread = NULL;
  60.964 -    _op = op;
  60.965 -
  60.966 -    // figure out if ThreadCritical lock is needed to write this operation
  60.967 -    // to MemTracker
  60.968 -    if (MemTracker::is_single_threaded_bootstrap()) {
  60.969 -      thr = NULL;
  60.970 -    } else if (thr == NULL) {
  60.971 -      // don't use Thread::current(), since it is possible that
  60.972 -      // the calling thread has yet to attach to VM 'Thread',
  60.973 -      // which will result assertion failure
  60.974 -      thr = ThreadLocalStorage::thread();
  60.975 -    }
  60.976 -
  60.977 -    if (thr != NULL) {
  60.978 -      // Check NMT load
  60.979 -      MemTracker::check_NMT_load(thr);
  60.980 -
  60.981 -      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
  60.982 -        _java_thread = (JavaThread*)thr;
  60.983 -        JavaThreadState  state = _java_thread->thread_state();
  60.984 -        // JavaThreads that are safepoint safe, can run through safepoint,
  60.985 -        // so ThreadCritical is needed to ensure no threads at safepoint create
  60.986 -        // new records while the records are being gathered and the sequence number is changing
  60.987 -        _need_thread_critical_lock =
  60.988 -          SafepointSynchronize::safepoint_safe(_java_thread, state);
  60.989 -      } else {
  60.990 -        _need_thread_critical_lock = true;
  60.991 -      }
  60.992 -    } else {
  60.993 -       _need_thread_critical_lock
  60.994 -         = !MemTracker::is_single_threaded_bootstrap();
  60.995 -    }
  60.996 -
  60.997 -    // see if we need to pre-reserve sequence number for this operation
  60.998 -    if (_op == Realloc || _op == Uncommit || _op == Release) {
  60.999 -      if (_need_thread_critical_lock) {
 60.1000 -        ThreadCritical tc;
 60.1001 -        MemTracker::inc_pending_op_count();
 60.1002 -        _seq = SequenceGenerator::next();
 60.1003 -      } else {
 60.1004 -        // for the threads that honor safepoints, no safepoint can occur
 60.1005 -        // during the lifespan of tracker, so we don't need to increase
 60.1006 -        // pending op count.
 60.1007 -        _seq = SequenceGenerator::next();
 60.1008 -      }
 60.1009 -    }
 60.1010 -  }
 60.1011 -}
 60.1012 -
 60.1013 -void MemTracker::Tracker::discard() {
 60.1014 -  if (MemTracker::is_on() && _seq != 0) {
 60.1015 -    if (_need_thread_critical_lock) {
 60.1016 -      ThreadCritical tc;
 60.1017 -      MemTracker::dec_pending_op_count();
 60.1018 -    }
 60.1019 -    _seq = 0;
 60.1020 -  }
 60.1021 -}
 60.1022 -
 60.1023 -
 60.1024 -void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
 60.1025 -  MEMFLAGS flags, address pc) {
 60.1026 -  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
 60.1027 -  assert(_op == Realloc || _op == NoOp, "Wrong call");
 60.1028 -  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
 60.1029 -    assert(_seq > 0, "Need pre-reserve sequence number");
 60.1030 -    if (_need_thread_critical_lock) {
 60.1031 -      ThreadCritical tc;
 60.1032 -      // free old address, use pre-reserved sequence number
 60.1033 -      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
 60.1034 -        0, _seq, pc, _java_thread);
 60.1035 -      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
 60.1036 -        size, SequenceGenerator::next(), pc, _java_thread);
 60.1037 -      // decrement MemTracker pending_op_count
 60.1038 -      MemTracker::dec_pending_op_count();
 60.1039 -    } else {
 60.1040 -      // free old address, use pre-reserved sequence number
 60.1041 -      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
 60.1042 -        0, _seq, pc, _java_thread);
 60.1043 -      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
 60.1044 -        size, SequenceGenerator::next(), pc, _java_thread);
 60.1045 -    }
 60.1046 -    _seq = 0;
 60.1047 -  }
 60.1048 -}
 60.1049 -
 60.1050 -void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
 60.1051 -  // OOM already?
 60.1052 -  if (addr == NULL) return;
 60.1053 -
 60.1054 -  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
 60.1055 -    bool pre_reserved_seq = (_seq != 0);
 60.1056 -    address  pc = CALLER_CALLER_PC;
 60.1057 -    MEMFLAGS orig_flags = flags;
 60.1058 -
 60.1059 -    // or the tagging flags
 60.1060 -    switch(_op) {
 60.1061 -      case Malloc:
 60.1062 -        flags |= MemPointerRecord::malloc_tag();
 60.1063 -        break;
 60.1064 -      case Free:
 60.1065 -        flags = MemPointerRecord::free_tag();
 60.1066 -        break;
 60.1067 -      case Realloc:
 60.1068 -        fatal("Use the other Tracker::record()");
 60.1069 -        break;
 60.1070 -      case Reserve:
 60.1071 -      case ReserveAndCommit:
 60.1072 -        flags |= MemPointerRecord::virtual_memory_reserve_tag();
 60.1073 -        break;
 60.1074 -      case Commit:
 60.1075 -        flags = MemPointerRecord::virtual_memory_commit_tag();
 60.1076 -        break;
 60.1077 -      case Type:
 60.1078 -        flags |= MemPointerRecord::virtual_memory_type_tag();
 60.1079 -        break;
 60.1080 -      case Uncommit:
 60.1081 -        assert(pre_reserved_seq, "Need pre-reserve sequence number");
 60.1082 -        flags = MemPointerRecord::virtual_memory_uncommit_tag();
 60.1083 -        break;
 60.1084 -      case Release:
 60.1085 -        assert(pre_reserved_seq, "Need pre-reserve sequence number");
 60.1086 -        flags = MemPointerRecord::virtual_memory_release_tag();
 60.1087 -        break;
 60.1088 -      case ArenaSize:
 60.1089 -        // a bit of hack here, add a small postive offset to arena
 60.1090 -        // address for its size record, so the size record is sorted
 60.1091 -        // right after arena record.
 60.1092 -        flags = MemPointerRecord::arena_size_tag();
 60.1093 -        addr += sizeof(void*);
 60.1094 -        break;
 60.1095 -      case StackRelease:
 60.1096 -        flags = MemPointerRecord::virtual_memory_release_tag();
 60.1097 -        break;
 60.1098 -      default:
 60.1099 -        ShouldNotReachHere();
 60.1100 -    }
 60.1101 -
 60.1102 -    // write memory tracking record
 60.1103 -    if (_need_thread_critical_lock) {
 60.1104 -      ThreadCritical tc;
 60.1105 -      if (_seq == 0) _seq = SequenceGenerator::next();
 60.1106 -      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
 60.1107 -      if (_op == ReserveAndCommit) {
 60.1108 -        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
 60.1109 -          size, SequenceGenerator::next(), pc, _java_thread);
 60.1110 -      }
 60.1111 -      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
 60.1112 -    } else {
 60.1113 -      if (_seq == 0) _seq = SequenceGenerator::next();
 60.1114 -      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
 60.1115 -      if (_op == ReserveAndCommit) {
 60.1116 -        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
 60.1117 -          size, SequenceGenerator::next(), pc, _java_thread);
 60.1118 -      }
 60.1119 -    }
 60.1120 -    _seq = 0;
 60.1121 -  }
 60.1122 -}
 60.1123 -
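
The memTracker.cpp rewrite above replaces the recorder/worker pipeline with direct tracking plus a small bootstrap handshake: the launcher publishes the requested level in an NMT_LEVEL_<pid> environment variable, init_tracking_level() consumes and unsets it, check_launcher_nmt_support() validates the launcher's view against the actual level, and shutdown() can only lower the level to NMT_minimal. A minimal standalone sketch of that handshake follows; it is not HotSpot code, and the names and the POSIX setenv/getenv/unsetenv calls are used purely for illustration.

    // Standalone sketch of the launcher-to-VM NMT level handshake (illustrative only).
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <string>
    #include <unistd.h>

    enum TrackingLevel { kOff = 0, kMinimal, kSummary, kDetail };

    static TrackingLevel parse_level(const char* v) {
      if (strcmp(v, "summary") == 0) return kSummary;
      if (strcmp(v, "detail") == 0)  return kDetail;
      return kOff;                        // "off" or an unrecognized value
    }

    int main() {
      // Launcher side: request summary tracking for this process id.
      std::string var = "NMT_LEVEL_" + std::to_string(getpid());
      setenv(var.c_str(), "summary", 1);

      // VM side: read the request once, then unset it so it does not leak
      // into child processes (mirrors the os::unsetenv() call above).
      TrackingLevel level = kOff;
      if (const char* opt = getenv(var.c_str())) {
        level = parse_level(opt);
        unsetenv(var.c_str());
      }

      // A later shutdown can only lower the level, never raise it,
      // as in MemTracker::shutdown() above.
      if (level > kMinimal) level = kMinimal;
      printf("tracking level after shutdown: %d\n", level);
      return 0;
    }
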
    61.1 --- a/src/share/vm/services/memTracker.hpp	Wed Aug 27 09:36:55 2014 +0200
    61.2 +++ b/src/share/vm/services/memTracker.hpp	Wed Aug 27 08:19:12 2014 -0400
    61.3 @@ -1,5 +1,5 @@
    61.4  /*
    61.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    61.6 + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    61.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    61.8   *
    61.9   * This code is free software; you can redistribute it and/or modify it
   61.10 @@ -25,574 +25,289 @@
   61.11  #ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
   61.12  #define SHARE_VM_SERVICES_MEM_TRACKER_HPP
   61.13  
   61.14 -#include "utilities/macros.hpp"
   61.15 +#include "services/nmtCommon.hpp"
   61.16 +
   61.17 +class NativeCallStack;
   61.18 +extern NativeCallStack emptyStack;
   61.19  
   61.20  #if !INCLUDE_NMT
   61.21  
   61.22 -#include "utilities/ostream.hpp"
   61.23 +#define CURRENT_PC   emptyStack
   61.24 +#define CALLER_PC    emptyStack
   61.25  
   61.26 -class BaselineOutputer : public StackObj {
   61.27 -
   61.28 -};
   61.29 -
   61.30 -class BaselineTTYOutputer : public BaselineOutputer {
   61.31 -  public:
   61.32 -    BaselineTTYOutputer(outputStream* st) { }
   61.33 +class Tracker : public StackObj {
   61.34 + public:
   61.35 +  Tracker() { }
   61.36 +  void record(address addr, size_t size) { }
   61.37  };
   61.38  
   61.39  class MemTracker : AllStatic {
   61.40 -  public:
   61.41 -   enum ShutdownReason {
   61.42 -      NMT_shutdown_none,     // no shutdown requested
   61.43 -      NMT_shutdown_user,     // user requested shutdown
   61.44 -      NMT_normal,            // normal shutdown, process exit
   61.45 -      NMT_out_of_memory,     // shutdown due to out of memory
   61.46 -      NMT_initialization,    // shutdown due to initialization failure
   61.47 -      NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
   61.48 -      NMT_error_reporting,   // shutdown by vmError::report_and_die()
   61.49 -      NMT_out_of_generation, // running out of generation queue
   61.50 -      NMT_sequence_overflow  // overflow the sequence number
   61.51 -   };
   61.52 + public:
   61.53 +  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
   61.54 +  static inline void shutdown() { }
   61.55 +  static inline void init() { }
   61.56 +  static bool check_launcher_nmt_support(const char* value) { return true; }
   61.57 +  static bool verify_nmt_option() { return true; }
   61.58  
   61.59 -  class Tracker {
   61.60 -   public:
   61.61 -    void discard() { }
   61.62 +  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
   61.63 +    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
   61.64 +  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
   61.65 +  static inline size_t malloc_header_size(void* memblock) { return 0; }
   61.66 +  static inline void* malloc_base(void* memblock) { return memblock; }
   61.67 +  static inline void* record_free(void* memblock) { return memblock; }
   61.68  
   61.69 -    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
   61.70 -    void record(address old_addr, address new_addr, size_t size,
   61.71 -      MEMFLAGS flags, address pc = NULL) { }
   61.72 -  };
   61.73 +  static inline void record_new_arena(MEMFLAGS flag) { }
   61.74 +  static inline void record_arena_free(MEMFLAGS flag) { }
   61.75 +  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
   61.76 +  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
   61.77 +                       MEMFLAGS flag = mtNone) { }
   61.78 +  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
   61.79 +    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
   61.80 +  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
   61.81 +  static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
    61.82 +  static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
   61.83 +  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
   61.84 +  static inline void record_thread_stack(void* addr, size_t size) { }
   61.85 +  static inline void release_thread_stack(void* addr, size_t size) { }
   61.86  
   61.87 -  private:
   61.88 -   static Tracker  _tkr;
   61.89 -
   61.90 -
   61.91 -  public:
   61.92 -   static inline void init_tracking_options(const char* option_line) { }
   61.93 -   static inline bool is_on()   { return false; }
   61.94 -   static const char* reason()  { return "Native memory tracking is not implemented"; }
   61.95 -   static inline bool can_walk_stack() { return false; }
   61.96 -
   61.97 -   static inline void bootstrap_single_thread() { }
   61.98 -   static inline void bootstrap_multi_thread() { }
   61.99 -   static inline void start() { }
  61.100 -
  61.101 -   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
  61.102 -        address pc = 0, Thread* thread = NULL) { }
  61.103 -   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
  61.104 -   static inline void record_arena_size(address addr, size_t size) { }
  61.105 -   static inline void record_virtual_memory_reserve(address addr, size_t size,
  61.106 -        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
  61.107 -   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
  61.108 -        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
  61.109 -   static inline void record_virtual_memory_commit(address addr, size_t size,
  61.110 -        address pc = 0, Thread* thread = NULL) { }
  61.111 -   static inline void record_virtual_memory_release(address addr, size_t size,
  61.112 -        Thread* thread = NULL) { }
  61.113 -   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
  61.114 -        Thread* thread = NULL) { }
  61.115 -   static inline Tracker get_realloc_tracker() { return _tkr; }
  61.116 -   static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
  61.117 -   static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
  61.118 -   static inline bool baseline() { return false; }
  61.119 -   static inline bool has_baseline() { return false; }
  61.120 -
  61.121 -   static inline void set_autoShutdown(bool value) { }
  61.122 -   static void shutdown(ShutdownReason reason) { }
  61.123 -   static inline bool shutdown_in_progress() { return false; }
  61.124 -   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
  61.125 -            bool summary_only = true) { return false; }
  61.126 -   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
  61.127 -            bool summary_only = true) { return false; }
  61.128 -
  61.129 -   static bool wbtest_wait_for_data_merge() { return false; }
  61.130 -
  61.131 -   static inline void sync() { }
  61.132 -   static inline void thread_exiting(JavaThread* thread) { }
  61.133 +  static void final_report(outputStream*) { }
  61.134  };
  61.135  
  61.136 +#else
  61.137  
  61.138 -#else // !INCLUDE_NMT
  61.139 +#include "runtime/atomic.hpp"
  61.140 +#include "runtime/threadCritical.hpp"
  61.141 +#include "services/mallocTracker.hpp"
  61.142 +#include "services/virtualMemoryTracker.hpp"
  61.143  
  61.144 -#include "memory/allocation.hpp"
  61.145 -#include "runtime/globals.hpp"
  61.146 -#include "runtime/mutex.hpp"
  61.147 -#include "runtime/os.hpp"
  61.148 -#include "runtime/thread.hpp"
  61.149 -#include "services/memPtr.hpp"
  61.150 -#include "services/memRecorder.hpp"
  61.151 -#include "services/memSnapshot.hpp"
  61.152 -#include "services/memTrackWorker.hpp"
  61.153 +extern volatile bool NMT_stack_walkable;
  61.154  
  61.155 -extern bool NMT_track_callsite;
  61.156 +#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
  61.157 +                    NativeCallStack(0, true) : emptyStack)
  61.158 +#define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ?  \
  61.159 +                    NativeCallStack(1, true) : emptyStack)
  61.160  
  61.161 -#ifndef MAX_UNSIGNED_LONG
  61.162 -#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
  61.163 -#endif
  61.164 +class MemBaseline;
  61.165 +class Mutex;
  61.166  
  61.167 -#ifdef ASSERT
  61.168 -  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  61.169 -#else
  61.170 -  #define DEBUG_CALLER_PC  0
  61.171 -#endif
  61.172 -
  61.173 -// The thread closure walks threads to collect per-thread
  61.174 -// memory recorders at NMT sync point
  61.175 -class SyncThreadRecorderClosure : public ThreadClosure {
  61.176 - private:
  61.177 -  int _thread_count;
  61.178 -
   61.179 +// Tracker guards the 'release' semantics of a virtual memory operation: it prevents
   61.180 +// another thread from obtaining and recording the same region that the current thread
   61.181 +// has just 'released' but has not yet recorded.
  61.182 +class Tracker : public StackObj {
  61.183   public:
  61.184 -  SyncThreadRecorderClosure() {
  61.185 -    _thread_count =0;
  61.186 -  }
  61.187 -
  61.188 -  void do_thread(Thread* thread);
  61.189 -  int  get_thread_count() const {
  61.190 -    return _thread_count;
  61.191 -  }
  61.192 -};
  61.193 -
  61.194 -class BaselineOutputer;
  61.195 -class MemSnapshot;
  61.196 -class MemTrackWorker;
  61.197 -class Thread;
  61.198 -/*
  61.199 - * MemTracker is the 'gate' class to native memory tracking runtime.
  61.200 - */
  61.201 -class MemTracker : AllStatic {
  61.202 -  friend class GenerationData;
  61.203 -  friend class MemTrackWorker;
  61.204 -  friend class MemSnapshot;
  61.205 -  friend class SyncThreadRecorderClosure;
  61.206 -
  61.207 -  // NMT state
  61.208 -  enum NMTStates {
  61.209 -    NMT_uninited,                        // not yet initialized
  61.210 -    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
  61.211 -    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
  61.212 -    NMT_started,                         // NMT fully started
  61.213 -    NMT_shutdown_pending,                // shutdown pending
  61.214 -    NMT_final_shutdown,                  // in final phase of shutdown
  61.215 -    NMT_shutdown                         // shutdown
  61.216 +  enum TrackerType {
  61.217 +     uncommit,
  61.218 +     release
  61.219    };
  61.220  
  61.221   public:
  61.222 -  class Tracker : public StackObj {
  61.223 -    friend class MemTracker;
  61.224 -   public:
  61.225 -    enum MemoryOperation {
  61.226 -      NoOp,                   // no op
  61.227 -      Malloc,                 // malloc
  61.228 -      Realloc,                // realloc
  61.229 -      Free,                   // free
  61.230 -      Reserve,                // virtual memory reserve
  61.231 -      Commit,                 // virtual memory commit
  61.232 -      ReserveAndCommit,       // virtual memory reserve and commit
  61.233 -      StackAlloc = ReserveAndCommit, // allocate thread stack
  61.234 -      Type,                   // assign virtual memory type
  61.235 -      Uncommit,               // virtual memory uncommit
  61.236 -      Release,                // virtual memory release
  61.237 -      ArenaSize,              // set arena size
  61.238 -      StackRelease            // release thread stack
  61.239 -    };
  61.240 +  Tracker(enum TrackerType type) : _type(type) { }
  61.241 +  void record(address addr, size_t size);
  61.242 + private:
  61.243 +  enum TrackerType  _type;
  61.244 +  // Virtual memory tracking data structures are protected by ThreadCritical lock.
  61.245 +  ThreadCritical    _tc;
  61.246 +};
  61.247  
  61.248 -
  61.249 -   protected:
  61.250 -    Tracker(MemoryOperation op, Thread* thr = NULL);
  61.251 -
  61.252 -   public:
  61.253 -    void discard();
  61.254 -
  61.255 -    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
  61.256 -    void record(address old_addr, address new_addr, size_t size,
  61.257 -      MEMFLAGS flags, address pc = NULL);
  61.258 -
  61.259 -   private:
  61.260 -    bool            _need_thread_critical_lock;
  61.261 -    JavaThread*     _java_thread;
  61.262 -    MemoryOperation _op;          // memory operation
  61.263 -    jint            _seq;         // reserved sequence number
  61.264 -  };
  61.265 -
  61.266 -
  61.267 +class MemTracker : AllStatic {
  61.268   public:
  61.269 -  // native memory tracking level
  61.270 -  enum NMTLevel {
  61.271 -    NMT_off,              // native memory tracking is off
  61.272 -    NMT_summary,          // don't track callsite
  61.273 -    NMT_detail            // track callsite also
  61.274 -  };
  61.275 -
  61.276 -   enum ShutdownReason {
  61.277 -     NMT_shutdown_none,     // no shutdown requested
  61.278 -     NMT_shutdown_user,     // user requested shutdown
  61.279 -     NMT_normal,            // normal shutdown, process exit
  61.280 -     NMT_out_of_memory,     // shutdown due to out of memory
  61.281 -     NMT_initialization,    // shutdown due to initialization failure
  61.282 -     NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
  61.283 -     NMT_error_reporting,   // shutdown by vmError::report_and_die()
  61.284 -     NMT_out_of_generation, // running out of generation queue
  61.285 -     NMT_sequence_overflow  // overflow the sequence number
  61.286 -   };
  61.287 -
  61.288 - public:
  61.289 -  // initialize NMT tracking level from command line options, called
  61.290 -   // from VM command line parsing code
  61.291 -  static void init_tracking_options(const char* option_line);
  61.292 -
  61.293 -  // if NMT is enabled to record memory activities
  61.294 -  static inline bool is_on() {
  61.295 -    return (_tracking_level >= NMT_summary &&
  61.296 -      _state >= NMT_bootstrapping_single_thread);
  61.297 -  }
  61.298 -
  61.299 -  static inline enum NMTLevel tracking_level() {
  61.300 +  static inline NMT_TrackingLevel tracking_level() {
  61.301 +    if (_tracking_level == NMT_unknown) {
   61.302 +      // No fencing is needed here, since the JVM is in
   61.303 +      // single-threaded mode.
  61.304 +      _tracking_level = init_tracking_level();
  61.305 +      _cmdline_tracking_level = _tracking_level;
  61.306 +    }
  61.307      return _tracking_level;
  61.308    }
  61.309  
  61.310 -  // user readable reason for shutting down NMT
  61.311 -  static const char* reason() {
  61.312 -    switch(_reason) {
  61.313 -      case NMT_shutdown_none:
  61.314 -        return "Native memory tracking is not enabled";
  61.315 -      case NMT_shutdown_user:
  61.316 -        return "Native memory tracking has been shutdown by user";
  61.317 -      case NMT_normal:
  61.318 -        return "Native memory tracking has been shutdown due to process exiting";
  61.319 -      case NMT_out_of_memory:
  61.320 -        return "Native memory tracking has been shutdown due to out of native memory";
  61.321 -      case NMT_initialization:
  61.322 -        return "Native memory tracking failed to initialize";
  61.323 -      case NMT_error_reporting:
  61.324 -        return "Native memory tracking has been shutdown due to error reporting";
  61.325 -      case NMT_out_of_generation:
  61.326 -        return "Native memory tracking has been shutdown due to running out of generation buffer";
  61.327 -      case NMT_sequence_overflow:
  61.328 -        return "Native memory tracking has been shutdown due to overflow the sequence number";
  61.329 -      case NMT_use_malloc_only:
  61.330 -        return "Native memory tracking is not supported when UseMallocOnly is on";
  61.331 -      default:
  61.332 -        ShouldNotReachHere();
  61.333 -        return NULL;
   61.334 +  // Late initialization, for work that cannot be done in
   61.335 +  // init_tracking_level(), which must not malloc
   61.336 +  // any memory.
  61.337 +  static void init();
  61.338 +
  61.339 +  // Shutdown native memory tracking
  61.340 +  static void shutdown();
  61.341 +
  61.342 +  // Verify native memory tracking command line option.
   61.343 +  // This check allows the JVM to detect whether a compatible
   61.344 +  // launcher is used.
   61.345 +  // If an incompatible launcher is used, NMT may not be
   61.346 +  // able to start, even if it is enabled by a command line option.
   61.347 +  // A warning message should be printed if this is detected.
  61.348 +  static bool check_launcher_nmt_support(const char* value);
  61.349 +
   61.350 +  // This method checks the native memory tracking environment
   61.351 +  // variable value passed by the launcher.
   61.352 +  // The launcher is only obligated to pass the native memory tracking
   61.353 +  // option value, not to validate it, and the launcher may drop the
   61.354 +  // native memory tracking option from the command line once it has
   61.355 +  // set up the environment variable, so NMT has to catch a bad value
   61.356 +  // here.
  61.357 +  static bool verify_nmt_option();
  61.358 +
   61.359 +  // Transition the tracking level to the specified level
  61.360 +  static bool transition_to(NMT_TrackingLevel level);
  61.361 +
  61.362 +  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
  61.363 +    const NativeCallStack& stack, NMT_TrackingLevel level) {
  61.364 +    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
  61.365 +  }
  61.366 +
  61.367 +  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
  61.368 +    return MallocTracker::malloc_header_size(level);
  61.369 +  }
  61.370 +
  61.371 +  static size_t malloc_header_size(void* memblock) {
  61.372 +    if (tracking_level() != NMT_off) {
  61.373 +      return MallocTracker::get_header_size(memblock);
  61.374 +    }
  61.375 +    return 0;
  61.376 +  }
  61.377 +
   61.378 +  // Returns the malloc base address, which is the starting address
   61.379 +  // of the malloc tracking header if tracking is enabled.
   61.380 +  // Otherwise, it returns the address unchanged.
  61.381 +  static void* malloc_base(void* memblock);
  61.382 +
   61.383 +  // Record a malloc free and return the malloc base address
  61.384 +  static inline void* record_free(void* memblock) {
  61.385 +    return MallocTracker::record_free(memblock);
  61.386 +  }
  61.387 +
  61.388 +
  61.389 +  // Record creation of an arena
  61.390 +  static inline void record_new_arena(MEMFLAGS flag) {
  61.391 +    if (tracking_level() < NMT_summary) return;
  61.392 +    MallocTracker::record_new_arena(flag);
  61.393 +  }
  61.394 +
  61.395 +  // Record destruction of an arena
  61.396 +  static inline void record_arena_free(MEMFLAGS flag) {
  61.397 +    if (tracking_level() < NMT_summary) return;
  61.398 +    MallocTracker::record_arena_free(flag);
  61.399 +  }
  61.400 +
   61.401 +  // Record arena size change. Arena size is the total size of all arena
   61.402 +  // chunks backing the arena.
  61.403 +  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
  61.404 +    if (tracking_level() < NMT_summary) return;
  61.405 +    MallocTracker::record_arena_size_change(diff, flag);
  61.406 +  }
  61.407 +
  61.408 +  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
  61.409 +    MEMFLAGS flag = mtNone) {
  61.410 +    if (tracking_level() < NMT_summary) return;
  61.411 +    if (addr != NULL) {
  61.412 +      ThreadCritical tc;
  61.413 +      // Recheck to avoid potential racing during NMT shutdown
  61.414 +      if (tracking_level() < NMT_summary) return;
  61.415 +      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
  61.416      }
  61.417    }
  61.418  
  61.419 -  // test if we can walk native stack
  61.420 -  static bool can_walk_stack() {
  61.421 -  // native stack is not walkable during bootstrapping on sparc
  61.422 -#if defined(SPARC)
  61.423 -    return (_state == NMT_started);
  61.424 -#else
  61.425 -    return (_state >= NMT_bootstrapping_single_thread && _state  <= NMT_started);
  61.426 -#endif
  61.427 -  }
  61.428 -
  61.429 -  // if native memory tracking tracks callsite
  61.430 -  static inline bool track_callsite() { return _tracking_level == NMT_detail; }
  61.431 -
  61.432 -  // NMT automatically shuts itself down under extreme situation by default.
  61.433 -  // When the value is set to false,  NMT will try its best to stay alive,
  61.434 -  // even it has to slow down VM.
  61.435 -  static inline void set_autoShutdown(bool value) {
  61.436 -    AutoShutdownNMT = value;
  61.437 -    if (AutoShutdownNMT && _slowdown_calling_thread) {
  61.438 -      _slowdown_calling_thread = false;
  61.439 +  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
  61.440 +    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
  61.441 +    if (tracking_level() < NMT_summary) return;
  61.442 +    if (addr != NULL) {
  61.443 +      ThreadCritical tc;
  61.444 +      if (tracking_level() < NMT_summary) return;
  61.445 +      VirtualMemoryTracker::add_reserved_region((address)addr, size,
  61.446 +        stack, flag, true);
  61.447      }
  61.448    }
  61.449  
  61.450 -  // shutdown native memory tracking capability. Native memory tracking
  61.451 -  // can be shutdown by VM when it encounters low memory scenarios.
  61.452 -  // Memory tracker should gracefully shutdown itself, and preserve the
  61.453 -  // latest memory statistics for post morten diagnosis.
  61.454 -  static void shutdown(ShutdownReason reason);
  61.455 -
  61.456 -  // if there is shutdown requested
  61.457 -  static inline bool shutdown_in_progress() {
  61.458 -    return (_state >= NMT_shutdown_pending);
  61.459 -  }
  61.460 -
  61.461 -  // bootstrap native memory tracking, so it can start to collect raw data
  61.462 -  // before worker thread can start
  61.463 -
  61.464 -  // the first phase of bootstrapping, when VM still in single-threaded mode
  61.465 -  static void bootstrap_single_thread();
  61.466 -  // the second phase of bootstrapping, VM is about or already in multi-threaded mode
  61.467 -  static void bootstrap_multi_thread();
  61.468 -
  61.469 -
  61.470 -  // start() has to be called when VM still in single thread mode, but after
  61.471 -  // command line option parsing is done.
  61.472 -  static void start();
  61.473 -
  61.474 -  // record a 'malloc' call
  61.475 -  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
  61.476 -                            address pc = 0, Thread* thread = NULL) {
  61.477 -    Tracker tkr(Tracker::Malloc, thread);
  61.478 -    tkr.record(addr, size, flags, pc);
  61.479 -  }
  61.480 -  // record a 'free' call
  61.481 -  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
  61.482 -    Tracker tkr(Tracker::Free, thread);
  61.483 -    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
  61.484 -  }
  61.485 -
  61.486 -  static inline void record_arena_size(address addr, size_t size) {
  61.487 -    Tracker tkr(Tracker::ArenaSize);
  61.488 -    tkr.record(addr, size);
  61.489 -  }
  61.490 -
  61.491 -  // record a virtual memory 'reserve' call
  61.492 -  static inline void record_virtual_memory_reserve(address addr, size_t size,
  61.493 -                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
  61.494 -    assert(size > 0, "Sanity check");
  61.495 -    Tracker tkr(Tracker::Reserve, thread);
  61.496 -    tkr.record(addr, size, flags, pc);
  61.497 -  }
  61.498 -
  61.499 -  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
  61.500 -                           address pc = 0) {
  61.501 -    Tracker tkr(Tracker::StackAlloc, thr);
  61.502 -    tkr.record(addr, size, mtThreadStack, pc);
  61.503 -  }
  61.504 -
  61.505 -  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
  61.506 -    Tracker tkr(Tracker::StackRelease, thr);
  61.507 -    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
  61.508 -  }
  61.509 -
  61.510 -  // record a virtual memory 'commit' call
  61.511 -  static inline void record_virtual_memory_commit(address addr, size_t size,
  61.512 -                            address pc, Thread* thread = NULL) {
  61.513 -    Tracker tkr(Tracker::Commit, thread);
  61.514 -    tkr.record(addr, size, mtNone, pc);
  61.515 -  }
  61.516 -
  61.517 -  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
  61.518 -    MEMFLAGS flags, address pc, Thread* thread = NULL) {
  61.519 -    Tracker tkr(Tracker::ReserveAndCommit, thread);
  61.520 -    tkr.record(addr, size, flags, pc);
  61.521 -  }
  61.522 -
  61.523 -  static inline void record_virtual_memory_release(address addr, size_t size,
  61.524 -      Thread* thread = NULL) {
  61.525 -    if (is_on()) {
  61.526 -      Tracker tkr(Tracker::Release, thread);
  61.527 -      tkr.record(addr, size);
  61.528 +  static inline void record_virtual_memory_commit(void* addr, size_t size,
  61.529 +    const NativeCallStack& stack) {
  61.530 +    if (tracking_level() < NMT_summary) return;
  61.531 +    if (addr != NULL) {
  61.532 +      ThreadCritical tc;
  61.533 +      if (tracking_level() < NMT_summary) return;
  61.534 +      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
  61.535      }
  61.536    }
  61.537  
  61.538 -  // record memory type on virtual memory base address
  61.539 -  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
  61.540 -                            Thread* thread = NULL) {
  61.541 -    Tracker tkr(Tracker::Type);
  61.542 -    tkr.record(base, 0, flags);
  61.543 -  }
  61.544 -
  61.545 -  // Get memory trackers for memory operations that can result race conditions.
  61.546 -  // The memory tracker has to be obtained before realloc, virtual memory uncommit
  61.547 -  // and virtual memory release, and call tracker.record() method if operation
  61.548 -  // succeeded, or tracker.discard() to abort the tracking.
  61.549 -  static inline Tracker get_realloc_tracker() {
  61.550 -    return Tracker(Tracker::Realloc);
  61.551 -  }
  61.552 -
  61.553    static inline Tracker get_virtual_memory_uncommit_tracker() {
  61.554 -    return Tracker(Tracker::Uncommit);
  61.555 +    assert(tracking_level() >= NMT_summary, "Check by caller");
  61.556 +    return Tracker(Tracker::uncommit);
  61.557    }
  61.558  
  61.559    static inline Tracker get_virtual_memory_release_tracker() {
  61.560 -    return Tracker(Tracker::Release);
  61.561 +    assert(tracking_level() >= NMT_summary, "Check by caller");
  61.562 +    return Tracker(Tracker::release);
  61.563    }
  61.564  
  61.565 -
  61.566 -  // create memory baseline of current memory snapshot
  61.567 -  static bool baseline();
  61.568 -  // is there a memory baseline
  61.569 -  static bool has_baseline() {
  61.570 -    return _baseline.baselined();
  61.571 -  }
  61.572 -
  61.573 -  // print memory usage from current snapshot
  61.574 -  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
  61.575 -           bool summary_only = true);
  61.576 -  // compare memory usage between current snapshot and baseline
  61.577 -  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
  61.578 -           bool summary_only = true);
  61.579 -
  61.580 -  // the version for whitebox testing support, it ensures that all memory
  61.581 -  // activities before this method call, are reflected in the snapshot
  61.582 -  // database.
  61.583 -  static bool wbtest_wait_for_data_merge();
  61.584 -
  61.585 -  // sync is called within global safepoint to synchronize nmt data
  61.586 -  static void sync();
  61.587 -
  61.588 -  // called when a thread is about to exit
  61.589 -  static void thread_exiting(JavaThread* thread);
  61.590 -
  61.591 -  // retrieve global snapshot
  61.592 -  static MemSnapshot* get_snapshot() {
  61.593 -    if (shutdown_in_progress()) {
  61.594 -      return NULL;
  61.595 -    }
  61.596 -    return _snapshot;
  61.597 -  }
  61.598 -
  61.599 -  // print tracker stats
  61.600 -  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
  61.601 -  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)
  61.602 -
  61.603 - private:
  61.604 -  // start native memory tracking worker thread
  61.605 -  static bool start_worker(MemSnapshot* snapshot);
  61.606 -
  61.607 -  // called by worker thread to complete shutdown process
  61.608 -  static void final_shutdown();
  61.609 -
  61.610 - protected:
  61.611 -  // retrieve per-thread recorder of the specified thread.
  61.612 -  // if the recorder is full, it will be enqueued to overflow
  61.613 -  // queue, a new recorder is acquired from recorder pool or a
  61.614 -  // new instance is created.
  61.615 -  // when thread == NULL, it means global recorder
  61.616 -  static MemRecorder* get_thread_recorder(JavaThread* thread);
  61.617 -
  61.618 -  // per-thread recorder pool
  61.619 -  static void release_thread_recorder(MemRecorder* rec);
  61.620 -  static void delete_all_pooled_recorders();
  61.621 -
  61.622 -  // pending recorder queue. Recorders are queued to pending queue
  61.623 -  // when they are overflowed or collected at nmt sync point.
  61.624 -  static void enqueue_pending_recorder(MemRecorder* rec);
  61.625 -  static MemRecorder* get_pending_recorders();
  61.626 -  static void delete_all_pending_recorders();
  61.627 -
  61.628 -  // write a memory tracking record in recorder
  61.629 -  static void write_tracking_record(address addr, MEMFLAGS type,
  61.630 -    size_t size, jint seq, address pc, JavaThread* thread);
  61.631 -
  61.632 -  static bool is_single_threaded_bootstrap() {
  61.633 -    return _state == NMT_bootstrapping_single_thread;
  61.634 -  }
  61.635 -
  61.636 -  static void check_NMT_load(Thread* thr) {
  61.637 -    assert(thr != NULL, "Sanity check");
  61.638 -    if (_slowdown_calling_thread && thr != _worker_thread) {
  61.639 -#ifdef _WINDOWS
  61.640 -      // On Windows, os::NakedYield() does not work as well
  61.641 -      // as os::yield_all()
  61.642 -      os::yield_all();
  61.643 -#else
  61.644 -     // On Solaris, os::yield_all() depends on os::sleep()
  61.645 -     // which requires JavaTherad in _thread_in_vm state.
  61.646 -     // Transits thread to _thread_in_vm state can be dangerous
  61.647 -     // if caller holds lock, as it may deadlock with Threads_lock.
  61.648 -     // So use NaKedYield instead.
  61.649 -     //
  61.650 -     // Linux and BSD, NakedYield() and yield_all() implementations
  61.651 -     // are the same.
  61.652 -      os::NakedYield();
  61.653 -#endif
  61.654 +  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
  61.655 +    if (tracking_level() < NMT_summary) return;
  61.656 +    if (addr != NULL) {
  61.657 +      ThreadCritical tc;
  61.658 +      if (tracking_level() < NMT_summary) return;
  61.659 +      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
  61.660      }
  61.661    }
  61.662  
  61.663 -  static void inc_pending_op_count() {
  61.664 -    Atomic::inc(&_pending_op_count);
  61.665 +  static inline void record_thread_stack(void* addr, size_t size) {
  61.666 +    if (tracking_level() < NMT_summary) return;
  61.667 +    if (addr != NULL) {
   61.668 +      // use the thread stack malloc slot to keep count of the number of threads
  61.669 +      MallocMemorySummary::record_malloc(0, mtThreadStack);
  61.670 +      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
  61.671 +    }
  61.672    }
  61.673  
  61.674 -  static void dec_pending_op_count() {
  61.675 -    Atomic::dec(&_pending_op_count);
  61.676 -    assert(_pending_op_count >= 0, "Sanity check");
  61.677 +  static inline void release_thread_stack(void* addr, size_t size) {
  61.678 +    if (tracking_level() < NMT_summary) return;
  61.679 +    if (addr != NULL) {
   61.680 +      // use the thread stack malloc slot to keep count of the number of threads
  61.681 +      MallocMemorySummary::record_free(0, mtThreadStack);
  61.682 +      ThreadCritical tc;
  61.683 +      if (tracking_level() < NMT_summary) return;
  61.684 +      VirtualMemoryTracker::remove_released_region((address)addr, size);
  61.685 +    }
  61.686    }
  61.687  
   61.688 +  // The query lock is used to synchronize access to tracking data.
   61.689 +  // So far, it is only used by the JCmd query, but it may be used by
   61.690 +  // other tools.
  61.691 +  static inline Mutex* query_lock() { return _query_lock; }
  61.692 +
  61.693 +  // Make a final report and shutdown.
   61.694 +  // This function generates a summary report without creating snapshots,
   61.695 +  // to avoid additional memory allocation. It uses the native memory summary
   61.696 +  // counters and makes adjustments to them; once the adjustment is made,
   61.697 +  // the counters are no longer accurate. As a result, this function
   61.698 +  // should only be used for final reporting before shutting down.
  61.699 +  static void final_report(outputStream*);
  61.700 +
  61.701 +  // Stored baseline
  61.702 +  static inline MemBaseline& get_baseline() {
  61.703 +    return _baseline;
  61.704 +  }
  61.705 +
  61.706 +  static NMT_TrackingLevel cmdline_tracking_level() {
  61.707 +    return _cmdline_tracking_level;
  61.708 +  }
  61.709 +
  61.710 +  static void tuning_statistics(outputStream* out);
  61.711  
  61.712   private:
  61.713 -  // retrieve a pooled memory record or create new one if there is not
  61.714 -  // one available
  61.715 -  static MemRecorder* get_new_or_pooled_instance();
  61.716 -  static void create_memory_record(address addr, MEMFLAGS type,
  61.717 -                   size_t size, address pc, Thread* thread);
  61.718 -  static void create_record_in_recorder(address addr, MEMFLAGS type,
  61.719 -                   size_t size, address pc, JavaThread* thread);
  61.720 -
  61.721 -  static void set_current_processing_generation(unsigned long generation) {
  61.722 -    _worker_thread_idle = false;
  61.723 -    _processing_generation = generation;
  61.724 -  }
  61.725 -
  61.726 -  static void report_worker_idle() {
  61.727 -    _worker_thread_idle = true;
  61.728 -  }
  61.729 +  static NMT_TrackingLevel init_tracking_level();
  61.730  
  61.731   private:
  61.732 -  // global memory snapshot
  61.733 -  static MemSnapshot*     _snapshot;
  61.734 -
  61.735 -  // a memory baseline of snapshot
  61.736 +  // Tracking level
  61.737 +  static volatile NMT_TrackingLevel   _tracking_level;
   61.738 +  // Whether the NMT option value passed by the launcher through the
   61.739 +  // environment variable is valid
  61.740 +  static bool                         _is_nmt_env_valid;
  61.741 +  // command line tracking level
  61.742 +  static NMT_TrackingLevel            _cmdline_tracking_level;
  61.743 +  // Stored baseline
  61.744    static MemBaseline      _baseline;
  61.745 -
  61.746 -  // query lock
  61.747 +  // Query lock
  61.748    static Mutex*           _query_lock;
  61.749 -
  61.750 -  // a thread can start to allocate memory before it is attached
  61.751 -  // to VM 'Thread', those memory activities are recorded here.
  61.752 -  // ThreadCritical is required to guard this global recorder.
  61.753 -  static MemRecorder* volatile _global_recorder;
  61.754 -
  61.755 -  // main thread id
  61.756 -  debug_only(static intx   _main_thread_tid;)
  61.757 -
  61.758 -  // pending recorders to be merged
  61.759 -  static MemRecorder* volatile     _merge_pending_queue;
  61.760 -
  61.761 -  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
  61.762 -
  61.763 -  // pooled memory recorders
  61.764 -  static MemRecorder* volatile     _pooled_recorders;
  61.765 -
  61.766 -  // memory recorder pool management, uses following
  61.767 -  // counter to determine if a released memory recorder
  61.768 -  // should be pooled
  61.769 -
  61.770 -  // latest thread count
  61.771 -  static int               _thread_count;
  61.772 -  // pooled recorder count
  61.773 -  static volatile jint     _pooled_recorder_count;
  61.774 -
  61.775 -
  61.776 -  // worker thread to merge pending recorders into snapshot
  61.777 -  static MemTrackWorker*  _worker_thread;
  61.778 -
  61.779 -  // how many safepoints we skipped without entering sync point
  61.780 -  static int              _sync_point_skip_count;
  61.781 -
  61.782 -  // if the tracker is properly intialized
  61.783 -  static bool             _is_tracker_ready;
  61.784 -  // tracking level (off, summary and detail)
  61.785 -  static enum NMTLevel    _tracking_level;
  61.786 -
  61.787 -  // current nmt state
  61.788 -  static volatile enum NMTStates   _state;
  61.789 -  // the reason for shutting down nmt
  61.790 -  static enum ShutdownReason       _reason;
  61.791 -  // the generation that NMT is processing
  61.792 -  static volatile unsigned long    _processing_generation;
  61.793 -  // although NMT is still procesing current generation, but
  61.794 -  // there is not more recorder to process, set idle state
  61.795 -  static volatile bool             _worker_thread_idle;
  61.796 -
  61.797 -  // if NMT should slow down calling thread to allow
  61.798 -  // worker thread to catch up
  61.799 -  static volatile bool             _slowdown_calling_thread;
  61.800 -
  61.801 -  // pending memory op count.
  61.802 -  // Certain memory ops need to pre-reserve sequence number
  61.803 -  // before memory operation can happen to avoid race condition.
  61.804 -  // See MemTracker::Tracker for detail
  61.805 -  static volatile jint             _pending_op_count;
  61.806  };
  61.807  
  61.808 -#endif // !INCLUDE_NMT
  61.809 +#endif // INCLUDE_NMT
  61.810  
  61.811  #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
  61.812 +
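
The MemTracker interface above replaces the old recorder/worker pipeline with in-place bookkeeping: the allocator over-allocates by a per-level header size and the tracking header is filled in at allocation time. The sketch below is illustrative only and is not part of the changeset; it shows the calling pattern the os::malloc/os::free changes elsewhere in this changeset are expected to follow. The helper names nmt_track_malloc/nmt_track_free are invented for the example; MEMFLAGS and NativeCallStack are existing HotSpot types.

// Illustrative sketch (not from the patch): malloc-side NMT bookkeeping.
static void* nmt_track_malloc(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t header_size = MemTracker::malloc_header_size(level);   // 0 when tracking is off
  void* mem_base = ::malloc(size + header_size);
  if (mem_base == NULL) return NULL;
  // record_malloc fills in the tracking header (when tracking is on) and
  // returns the pointer that should be handed out to the caller.
  return MemTracker::record_malloc(mem_base, size, flags, stack, level);
}

static void nmt_track_free(void* memblock) {
  // record_free updates the malloc counters and returns the original malloc base.
  ::free(MemTracker::record_free(memblock));
}
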
    62.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    62.2 +++ b/src/share/vm/services/nmtCommon.cpp	Wed Aug 27 08:19:12 2014 -0400
    62.3 @@ -0,0 +1,73 @@
    62.4 +/*
    62.5 + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    62.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.7 + *
    62.8 + * This code is free software; you can redistribute it and/or modify it
    62.9 + * under the terms of the GNU General Public License version 2 only, as
   62.10 + * published by the Free Software Foundation.
   62.11 + *
   62.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   62.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   62.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   62.15 + * version 2 for more details (a copy is included in the LICENSE file that
   62.16 + * accompanied this code).
   62.17 + *
   62.18 + * You should have received a copy of the GNU General Public License version
   62.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   62.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   62.21 + *
   62.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   62.23 + * or visit www.oracle.com if you need additional information or have any
   62.24 + * questions.
   62.25 + *
   62.26 + */
   62.27 +#include "precompiled.hpp"
   62.28 +#include "services/nmtCommon.hpp"
   62.29 +
   62.30 +const char* NMTUtil::_memory_type_names[] = {
   62.31 +  "Java Heap",
   62.32 +  "Class",
   62.33 +  "Thread",
   62.34 +  "Thread Stack",
   62.35 +  "Code",
   62.36 +  "GC",
   62.37 +  "Compiler",
   62.38 +  "Internal",
   62.39 +  "Other",
   62.40 +  "Symbol",
   62.41 +  "Native Memory Tracking",
   62.42 +  "Shared class space",
   62.43 +  "Arena Chunk",
   62.44 +  "Test",
   62.45 +  "Tracing",
   62.46 +  "Unknown"
   62.47 +};
   62.48 +
   62.49 +
   62.50 +const char* NMTUtil::scale_name(size_t scale) {
   62.51 +  switch(scale) {
   62.52 +    case K: return "KB";
   62.53 +    case M: return "MB";
   62.54 +    case G: return "GB";
   62.55 +  }
   62.56 +  ShouldNotReachHere();
   62.57 +  return NULL;
   62.58 +}
   62.59 +
   62.60 +size_t NMTUtil::scale_from_name(const char* scale) {
   62.61 +  assert(scale != NULL, "Null pointer check");
   62.62 +  if (strncmp(scale, "KB", 2) == 0 ||
   62.63 +      strncmp(scale, "kb", 2) == 0) {
   62.64 +    return K;
   62.65 +  } else if (strncmp(scale, "MB", 2) == 0 ||
   62.66 +             strncmp(scale, "mb", 2) == 0) {
   62.67 +    return M;
   62.68 +  } else if (strncmp(scale, "GB", 2) == 0 ||
   62.69 +             strncmp(scale, "gb", 2) == 0) {
   62.70 +    return G;
   62.71 +  } else {
   62.72 +    return 0; // Invalid value
   62.73 +  }
   62.74 +  return K;
   62.75 +}
   62.76 +
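
A small illustrative example, not part of the patch, of how reporting code is expected to combine the scale helpers above; tty and SIZE_FORMAT are existing HotSpot globals/macros:

// Illustrative only: print a byte count in the scale selected by the user.
size_t scale = NMTUtil::scale_from_name("MB");   // returns M, or 0 for an invalid name
if (scale != 0) {
  // amount_in_scale() rounds to the nearest unit: 3 MB + 700 KB prints as 4MB.
  size_t amount = NMTUtil::amount_in_scale(3 * M + 700 * K, scale);
  tty->print_cr(SIZE_FORMAT "%s", amount, NMTUtil::scale_name(scale));
}
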
    63.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    63.2 +++ b/src/share/vm/services/nmtCommon.hpp	Wed Aug 27 08:19:12 2014 -0400
    63.3 @@ -0,0 +1,87 @@
    63.4 +/*
    63.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    63.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.7 + *
    63.8 + * This code is free software; you can redistribute it and/or modify it
    63.9 + * under the terms of the GNU General Public License version 2 only, as
   63.10 + * published by the Free Software Foundation.
   63.11 + *
   63.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   63.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   63.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   63.15 + * version 2 for more details (a copy is included in the LICENSE file that
   63.16 + * accompanied this code).
   63.17 + *
   63.18 + * You should have received a copy of the GNU General Public License version
   63.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   63.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   63.21 + *
   63.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   63.23 + * or visit www.oracle.com if you need additional information or have any
   63.24 + * questions.
   63.25 + *
   63.26 + */
   63.27 +
   63.28 +#ifndef SHARE_VM_SERVICES_NMT_COMMON_HPP
   63.29 +#define SHARE_VM_SERVICES_NMT_COMMON_HPP
   63.30 +
   63.31 +#include "memory/allocation.hpp"
   63.32 +#include "utilities/globalDefinitions.hpp"
   63.33 +
   63.34 +#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type))
   63.35 +
   63.36 +// Data type for memory counters
   63.37 +#ifdef _LP64
   63.38 +  typedef jlong    MemoryCounterType;
   63.39 +#else
   63.40 +  typedef jint     MemoryCounterType;
   63.41 +#endif
   63.42 +
   63.43 +// Native memory tracking level
   63.44 +enum NMT_TrackingLevel {
   63.45 +  NMT_unknown = 0xFF,
   63.46 +  NMT_off     = 0x00,
   63.47 +  NMT_minimal = 0x01,
   63.48 +  NMT_summary = 0x02,
   63.49 +  NMT_detail  = 0x03
   63.50 +};
   63.51 +
   63.52 +// Number of stack frames to capture. This is a
   63.53 +// build time decision.
   63.54 +const int NMT_TrackingStackDepth = 4;
   63.55 +
   63.56 +class NativeCallStack;
   63.57 +extern NativeCallStack emptyStack;
   63.58 +
   63.59 +// A few common utilities for native memory tracking
   63.60 +class NMTUtil : AllStatic {
   63.61 + public:
   63.62 +  // Map memory type to index
   63.63 +  static inline int flag_to_index(MEMFLAGS flag) {
   63.64 +    return (flag & 0xff);
   63.65 +  }
   63.66 +
   63.67 +  // Map memory type to human readable name
   63.68 +  static const char* flag_to_name(MEMFLAGS flag) {
   63.69 +    return _memory_type_names[flag_to_index(flag)];
   63.70 +  }
   63.71 +
   63.72 +  // Map an index to memory type
   63.73 +  static MEMFLAGS index_to_flag(int index) {
   63.74 +    return (MEMFLAGS)index;
   63.75 +  }
   63.76 +
   63.77 +  // Memory size scale
   63.78 +  static const char* scale_name(size_t scale);
   63.79 +  static size_t scale_from_name(const char* scale);
   63.80 +
   63.81 +  // Translate memory size in specified scale
   63.82 +  static size_t amount_in_scale(size_t amount, size_t scale) {
   63.83 +    return (amount + scale / 2) / scale;
   63.84 +  }
   63.85 + private:
   63.86 +  static const char* _memory_type_names[mt_number_of_types];
   63.87 +};
   63.88 +
   63.89 +
   63.90 +#endif
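
The tracking levels and emptyStack declared here feed the CURRENT_PC/CALLER_PC macros in memTracker.hpp. Below is a hedged sketch, not part of the changeset, of how a virtual memory reserve/release path is expected to use them; platform_reserve and platform_release are made-up stand-ins for the platform-specific calls in the os_*.cpp files listed in this changeset.

// Illustrative only: record a successful reservation with the caller's stack.
static char* reserve_with_tracking(size_t bytes, MEMFLAGS flags) {
  char* addr = platform_reserve(bytes);                 // hypothetical reservation call
  if (addr != NULL) {
    // CALLER_PC captures a NativeCallStack only at detail level, otherwise emptyStack.
    MemTracker::record_virtual_memory_reserve(addr, bytes, CALLER_PC, flags);
  }
  return addr;
}

// Illustrative only: the Tracker guard keeps other threads from re-reserving and
// recording the same range before the release has been recorded.
static bool release_with_tracking(char* addr, size_t bytes) {
  if (MemTracker::tracking_level() < NMT_summary) {
    return platform_release(addr, bytes);               // hypothetical release call
  }
  Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  bool result = platform_release(addr, bytes);
  if (result) {
    tkr.record((address)addr, bytes);
  }
  return result;
}
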
    64.1 --- a/src/share/vm/services/nmtDCmd.cpp	Wed Aug 27 09:36:55 2014 +0200
    64.2 +++ b/src/share/vm/services/nmtDCmd.cpp	Wed Aug 27 08:19:12 2014 -0400
    64.3 @@ -22,6 +22,8 @@
    64.4   *
    64.5   */
    64.6  #include "precompiled.hpp"
    64.7 +
    64.8 +#include "runtime/mutexLocker.hpp"
    64.9  #include "services/nmtDCmd.hpp"
   64.10  #include "services/memReporter.hpp"
   64.11  #include "services/memTracker.hpp"
   64.12 @@ -49,13 +51,8 @@
   64.13    _shutdown("shutdown", "request runtime to shutdown itself and free the " \
   64.14              "memory used by runtime.",
   64.15              "BOOLEAN", false, "false"),
   64.16 -  _auto_shutdown("autoShutdown", "automatically shutdown itself under "    \
   64.17 -            "stress situation",
   64.18 -            "BOOLEAN", true, "true"),
   64.19 -#ifndef PRODUCT
   64.20 -  _debug("debug", "print tracker statistics. Debug only, not thread safe", \
   64.21 +  _statistics("statistics", "print tracker statistics for tuning purpose.", \
   64.22              "BOOLEAN", false, "false"),
   64.23 -#endif
   64.24    _scale("scale", "Memory usage in which scale, KB, MB or GB",
   64.25         "STRING", false, "KB") {
   64.26    _dcmdparser.add_dcmd_option(&_summary);
   64.27 @@ -64,25 +61,30 @@
   64.28    _dcmdparser.add_dcmd_option(&_summary_diff);
   64.29    _dcmdparser.add_dcmd_option(&_detail_diff);
   64.30    _dcmdparser.add_dcmd_option(&_shutdown);
   64.31 -  _dcmdparser.add_dcmd_option(&_auto_shutdown);
   64.32 -#ifndef PRODUCT
   64.33 -  _dcmdparser.add_dcmd_option(&_debug);
   64.34 -#endif
   64.35 +  _dcmdparser.add_dcmd_option(&_statistics);
   64.36    _dcmdparser.add_dcmd_option(&_scale);
   64.37  }
   64.38  
   64.39 +
   64.40 +size_t NMTDCmd::get_scale(const char* scale) const {
   64.41 +  if (scale == NULL) return 0;
   64.42 +  return NMTUtil::scale_from_name(scale);
   64.43 +}
   64.44 +
   64.45  void NMTDCmd::execute(DCmdSource source, TRAPS) {
    64.46 +  // Check NMT state:
    64.47 +  // native memory tracking has to be on
   64.48 +  if (MemTracker::tracking_level() == NMT_off) {
   64.49 +    output()->print_cr("Native memory tracking is not enabled");
   64.50 +    return;
   64.51 +  } else if (MemTracker::tracking_level() == NMT_minimal) {
   64.52 +     output()->print_cr("Native memory tracking has been shutdown");
   64.53 +     return;
   64.54 +  }
   64.55 +
   64.56    const char* scale_value = _scale.value();
   64.57 -  size_t scale_unit;
   64.58 -  if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) {
   64.59 -    scale_unit = K;
   64.60 -  } else if (strcmp(scale_value, "MB") == 0 ||
   64.61 -             strcmp(scale_value, "mb") == 0) {
   64.62 -    scale_unit = M;
   64.63 -  } else if (strcmp(scale_value, "GB") == 0 ||
   64.64 -             strcmp(scale_value, "gb") == 0) {
   64.65 -    scale_unit = G;
   64.66 -  } else {
   64.67 +  size_t scale_unit = get_scale(scale_value);
   64.68 +  if (scale_unit == 0) {
   64.69      output()->print_cr("Incorrect scale value: %s", scale_value);
   64.70      return;
   64.71    }
   64.72 @@ -94,19 +96,11 @@
   64.73    if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
   64.74    if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
   64.75    if (_shutdown.is_set() && _shutdown.value()) { ++nopt; }
   64.76 -  if (_auto_shutdown.is_set()) { ++nopt; }
   64.77 -
   64.78 -#ifndef PRODUCT
   64.79 -  if (_debug.is_set() && _debug.value()) { ++nopt; }
   64.80 -#endif
   64.81 +  if (_statistics.is_set() && _statistics.value()) { ++nopt; }
   64.82  
   64.83    if (nopt > 1) {
   64.84        output()->print_cr("At most one of the following option can be specified: " \
   64.85 -        "summary, detail, baseline, summary.diff, detail.diff, shutdown"
   64.86 -#ifndef PRODUCT
   64.87 -        ", debug"
   64.88 -#endif
   64.89 -      );
   64.90 +        "summary, detail, baseline, summary.diff, detail.diff, shutdown");
   64.91        return;
   64.92    } else if (nopt == 0) {
   64.93      if (_summary.is_set()) {
   64.94 @@ -117,53 +111,47 @@
   64.95      }
   64.96    }
   64.97  
   64.98 -#ifndef PRODUCT
   64.99 -  if (_debug.value()) {
  64.100 -    output()->print_cr("debug command is NOT thread-safe, may cause crash");
  64.101 -    MemTracker::print_tracker_stats(output());
  64.102 +  // Serialize NMT query
  64.103 +  MutexLocker locker(MemTracker::query_lock());
  64.104 +
  64.105 +  if (_summary.value()) {
  64.106 +    report(true, scale_unit);
  64.107 +  } else if (_detail.value()) {
  64.108 +    if (!check_detail_tracking_level(output())) {
  64.109      return;
  64.110    }
  64.111 -#endif
  64.112 -
  64.113 -  // native memory tracking has to be on
  64.114 -  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
  64.115 -    // if it is not on, what's the reason?
  64.116 -    output()->print_cr("%s", MemTracker::reason());
  64.117 +    report(false, scale_unit);
  64.118 +  } else if (_baseline.value()) {
  64.119 +    MemBaseline& baseline = MemTracker::get_baseline();
  64.120 +    if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) {
  64.121 +      output()->print_cr("Baseline failed");
  64.122 +    } else {
  64.123 +      output()->print_cr("Baseline succeeded");
  64.124 +    }
  64.125 +  } else if (_summary_diff.value()) {
  64.126 +    MemBaseline& baseline = MemTracker::get_baseline();
  64.127 +    if (baseline.baseline_type() >= MemBaseline::Summary_baselined) {
  64.128 +      report_diff(true, scale_unit);
  64.129 +    } else {
  64.130 +      output()->print_cr("No baseline for comparison");
  64.131 +    }
  64.132 +  } else if (_detail_diff.value()) {
  64.133 +    if (!check_detail_tracking_level(output())) {
  64.134      return;
  64.135    }
  64.136 -
  64.137 -  if (_summary.value()) {
  64.138 -    BaselineTTYOutputer outputer(output());
  64.139 -    MemTracker::print_memory_usage(outputer, scale_unit, true);
  64.140 -  } else if (_detail.value()) {
  64.141 -    BaselineTTYOutputer outputer(output());
  64.142 -    MemTracker::print_memory_usage(outputer, scale_unit, false);
  64.143 -  } else if (_baseline.value()) {
  64.144 -    if (MemTracker::baseline()) {
  64.145 -      output()->print_cr("Successfully baselined.");
  64.146 +    MemBaseline& baseline = MemTracker::get_baseline();
  64.147 +    if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
  64.148 +      report_diff(false, scale_unit);
  64.149      } else {
  64.150 -      output()->print_cr("Baseline failed.");
  64.151 -    }
  64.152 -  } else if (_summary_diff.value()) {
  64.153 -    if (MemTracker::has_baseline()) {
  64.154 -      BaselineTTYOutputer outputer(output());
  64.155 -      MemTracker::compare_memory_usage(outputer, scale_unit, true);
  64.156 -    } else {
  64.157 -      output()->print_cr("No baseline to compare, run 'baseline' command first");
  64.158 -    }
  64.159 -  } else if (_detail_diff.value()) {
  64.160 -    if (MemTracker::has_baseline()) {
  64.161 -      BaselineTTYOutputer outputer(output());
  64.162 -      MemTracker::compare_memory_usage(outputer, scale_unit, false);
  64.163 -    } else {
  64.164 -      output()->print_cr("No baseline to compare to, run 'baseline' command first");
  64.165 +      output()->print_cr("No detail baseline for comparison");
  64.166      }
  64.167    } else if (_shutdown.value()) {
  64.168 -    MemTracker::shutdown(MemTracker::NMT_shutdown_user);
  64.169 -    output()->print_cr("Shutdown is in progress, it will take a few moments to " \
  64.170 -      "completely shutdown");
  64.171 -  } else if (_auto_shutdown.is_set()) {
  64.172 -    MemTracker::set_autoShutdown(_auto_shutdown.value());
  64.173 +    MemTracker::shutdown();
  64.174 +    output()->print_cr("Native memory tracking has been turned off");
  64.175 +  } else if (_statistics.value()) {
  64.176 +    if (check_detail_tracking_level(output())) {
  64.177 +      MemTracker::tuning_statistics(output());
  64.178 +    }
  64.179    } else {
  64.180      ShouldNotReachHere();
  64.181      output()->print_cr("Unknown command");
  64.182 @@ -181,3 +169,46 @@
  64.183    }
  64.184  }
  64.185  
  64.186 +void NMTDCmd::report(bool summaryOnly, size_t scale_unit) {
  64.187 +  MemBaseline baseline;
  64.188 +  if (baseline.baseline(summaryOnly)) {
  64.189 +    if (summaryOnly) {
  64.190 +      MemSummaryReporter rpt(baseline, output(), scale_unit);
  64.191 +      rpt.report();
  64.192 +    } else {
  64.193 +      MemDetailReporter rpt(baseline, output(), scale_unit);
  64.194 +      rpt.report();
  64.195 +    }
  64.196 +  }
  64.197 +}
  64.198 +
  64.199 +void NMTDCmd::report_diff(bool summaryOnly, size_t scale_unit) {
  64.200 +  MemBaseline& early_baseline = MemTracker::get_baseline();
  64.201 +  assert(early_baseline.baseline_type() != MemBaseline::Not_baselined,
  64.202 +    "Not yet baselined");
  64.203 +  assert(summaryOnly || early_baseline.baseline_type() == MemBaseline::Detail_baselined,
  64.204 +    "Not a detail baseline");
  64.205 +
  64.206 +  MemBaseline baseline;
  64.207 +  if (baseline.baseline(summaryOnly)) {
  64.208 +    if (summaryOnly) {
  64.209 +      MemSummaryDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
  64.210 +      rpt.report_diff();
  64.211 +    } else {
  64.212 +      MemDetailDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
  64.213 +      rpt.report_diff();
  64.214 +    }
  64.215 +  }
  64.216 +}
  64.217 +
  64.218 +bool NMTDCmd::check_detail_tracking_level(outputStream* out) {
  64.219 +  if (MemTracker::tracking_level() == NMT_detail) {
  64.220 +    return true;
  64.221 +  } else if (MemTracker::cmdline_tracking_level() == NMT_detail) {
  64.222 +    out->print_cr("Tracking level has been downgraded due to lack of resources");
  64.223 +    return false;
  64.224 +  } else {
  64.225 +    out->print_cr("Detail tracking is not enabled");
  64.226 +    return false;
  64.227 +  }
  64.228 +}
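
With the reworked execute() above, the diagnostic command keeps its jcmd surface while the old debug and autoShutdown options are replaced by statistics. For example, assuming the standard VM.native_memory jcmd command that this class implements: jcmd <pid> VM.native_memory summary scale=MB prints a summary report, jcmd <pid> VM.native_memory baseline stores a baseline so that a later summary.diff or detail.diff request reports the change, and jcmd <pid> VM.native_memory shutdown now turns tracking off immediately rather than starting an asynchronous shutdown.
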
    65.1 --- a/src/share/vm/services/nmtDCmd.hpp	Wed Aug 27 09:36:55 2014 +0200
    65.2 +++ b/src/share/vm/services/nmtDCmd.hpp	Wed Aug 27 08:19:12 2014 -0400
    65.3 @@ -1,5 +1,5 @@
    65.4  /*
    65.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    65.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    65.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    65.8   *
    65.9   * This code is free software; you can redistribute it and/or modify it
   65.10 @@ -25,8 +25,12 @@
   65.11  #ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP
   65.12  #define SHARE_VM_SERVICES_NMT_DCMD_HPP
   65.13  
   65.14 +#if INCLUDE_NMT
   65.15 +
   65.16  #include "services/diagnosticArgument.hpp"
   65.17  #include "services/diagnosticFramework.hpp"
   65.18 +#include "services/memBaseline.hpp"
   65.19 +#include "services/mallocTracker.hpp"
   65.20  
   65.21  /**
   65.22   * Native memory tracking DCmd implementation
   65.23 @@ -39,10 +43,7 @@
   65.24    DCmdArgument<bool>  _summary_diff;
   65.25    DCmdArgument<bool>  _detail_diff;
   65.26    DCmdArgument<bool>  _shutdown;
   65.27 -  DCmdArgument<bool>  _auto_shutdown;
   65.28 -#ifndef PRODUCT
   65.29 -  DCmdArgument<bool>  _debug;
   65.30 -#endif
   65.31 +  DCmdArgument<bool>  _statistics;
   65.32    DCmdArgument<char*> _scale;
   65.33  
   65.34   public:
   65.35 @@ -61,6 +62,17 @@
   65.36    }
   65.37    static int num_arguments();
   65.38    virtual void execute(DCmdSource source, TRAPS);
   65.39 +
   65.40 + private:
   65.41 +  void report(bool summaryOnly, size_t scale);
   65.42 +  void report_diff(bool summaryOnly, size_t scale);
   65.43 +
   65.44 +  size_t get_scale(const char* scale) const;
   65.45 +
    65.46 +  // check if NMT is running at the detail tracking level
   65.47 +  bool check_detail_tracking_level(outputStream* out);
   65.48  };
   65.49  
   65.50 +#endif // INCLUDE_NMT
   65.51 +
   65.52  #endif // SHARE_VM_SERVICES_NMT_DCMD_HPP
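
Note that the queries above require NMT to have been enabled at launch, for example with -XX:NativeMemoryTracking=summary or -XX:NativeMemoryTracking=detail, the existing command line option that maps to the NMT_summary and NMT_detail levels. check_detail_tracking_level() uses cmdline_tracking_level() to distinguish the case where detail tracking was never requested from the case where it was requested but later downgraded for lack of resources.
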
    66.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    66.2 +++ b/src/share/vm/services/virtualMemoryTracker.cpp	Wed Aug 27 08:19:12 2014 -0400
    66.3 @@ -0,0 +1,448 @@
    66.4 +/*
    66.5 + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    66.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    66.7 + *
    66.8 + * This code is free software; you can redistribute it and/or modify it
    66.9 + * under the terms of the GNU General Public License version 2 only, as
   66.10 + * published by the Free Software Foundation.
   66.11 + *
   66.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   66.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   66.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   66.15 + * version 2 for more details (a copy is included in the LICENSE file that
   66.16 + * accompanied this code).
   66.17 + *
   66.18 + * You should have received a copy of the GNU General Public License version
   66.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   66.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   66.21 + *
   66.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   66.23 + * or visit www.oracle.com if you need additional information or have any
   66.24 + * questions.
   66.25 + *
   66.26 + */
   66.27 +#include "precompiled.hpp"
   66.28 +
   66.29 +#include "runtime/threadCritical.hpp"
   66.30 +#include "services/virtualMemoryTracker.hpp"
   66.31 +
   66.32 +size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
   66.33 +
   66.34 +void VirtualMemorySummary::initialize() {
   66.35 +  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
   66.36 +  // Use placement operator new to initialize static data area.
   66.37 +  ::new ((void*)_snapshot) VirtualMemorySnapshot();
   66.38 +}
   66.39 +
   66.40 +SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> VirtualMemoryTracker::_reserved_regions;
   66.41 +
   66.42 +int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
   66.43 +  return r1.compare(r2);
   66.44 +}
   66.45 +
   66.46 +int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
   66.47 +  return r1.compare(r2);
   66.48 +}
   66.49 +
   66.50 +bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
   66.51 +  assert(addr != NULL, "Invalid address");
   66.52 +  assert(size > 0, "Invalid size");
   66.53 +  assert(contain_region(addr, size), "Not contain this region");
   66.54 +
   66.55 +  if (all_committed()) return true;
   66.56 +
   66.57 +  CommittedMemoryRegion committed_rgn(addr, size, stack);
   66.58 +  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
   66.59 +  if (node != NULL) {
   66.60 +    CommittedMemoryRegion* rgn = node->data();
   66.61 +    if (rgn->same_region(addr, size)) {
   66.62 +      return true;
   66.63 +    }
   66.64 +
   66.65 +    if (rgn->adjacent_to(addr, size)) {
    66.66 +      // Check if the next region covers this committed region;
    66.67 +      // the regions may not have been merged due to different call stacks.
   66.68 +      LinkedListNode<CommittedMemoryRegion>* next =
   66.69 +        node->next();
   66.70 +      if (next != NULL && next->data()->contain_region(addr, size)) {
   66.71 +        if (next->data()->same_region(addr, size)) {
   66.72 +          next->data()->set_call_stack(stack);
   66.73 +        }
   66.74 +        return true;
   66.75 +      }
   66.76 +      if (rgn->call_stack()->equals(stack)) {
   66.77 +        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
    66.78 +        // The two adjacent regions have the same call stack, so merge them.
   66.79 +        rgn->expand_region(addr, size);
   66.80 +        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
   66.81 +        return true;
   66.82 +      }
   66.83 +      VirtualMemorySummary::record_committed_memory(size, flag());
   66.84 +      if (rgn->base() > addr) {
   66.85 +        return _committed_regions.insert_before(committed_rgn, node) != NULL;
   66.86 +      } else {
   66.87 +        return _committed_regions.insert_after(committed_rgn, node) != NULL;
   66.88 +      }
   66.89 +    }
   66.90 +    assert(rgn->contain_region(addr, size), "Must cover this region");
   66.91 +    return true;
   66.92 +  } else {
   66.93 +    // New committed region
   66.94 +    VirtualMemorySummary::record_committed_memory(size, flag());
   66.95 +    return add_committed_region(committed_rgn);
   66.96 +  }
   66.97 +}
   66.98 +
   66.99 +void ReservedMemoryRegion::set_all_committed(bool b) {
  66.100 +  if (all_committed() != b) {
  66.101 +    _all_committed = b;
  66.102 +    if (b) {
  66.103 +      VirtualMemorySummary::record_committed_memory(size(), flag());
  66.104 +    }
  66.105 +  }
  66.106 +}
  66.107 +
  66.108 +bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  66.109 +  address addr, size_t size) {
  66.110 +  assert(addr != NULL, "Invalid address");
  66.111 +  assert(size > 0, "Invalid size");
  66.112 +
  66.113 +  CommittedMemoryRegion* rgn = node->data();
  66.114 +  assert(rgn->contain_region(addr, size), "Has to be contained");
  66.115 +  assert(!rgn->same_region(addr, size), "Can not be the same region");
  66.116 +
  66.117 +  if (rgn->base() == addr ||
  66.118 +      rgn->end() == addr + size) {
  66.119 +    rgn->exclude_region(addr, size);
  66.120 +    return true;
  66.121 +  } else {
  66.122 +    // split this region
   66.123 +    address top = rgn->end();
  66.124 +    // use this region for lower part
  66.125 +    size_t exclude_size = rgn->end() - addr;
  66.126 +    rgn->exclude_region(addr, exclude_size);
  66.127 +
  66.128 +    // higher part
  66.129 +    address high_base = addr + size;
  66.130 +    size_t  high_size = top - high_base;
  66.131 +
  66.132 +    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
  66.133 +    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
  66.134 +    assert(high_node == NULL || node->next() == high_node, "Should be right after");
  66.135 +    return (high_node != NULL);
  66.136 +  }
  66.137 +
  66.138 +  return false;
  66.139 +}
  66.140 +
  66.141 +bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
   66.142 +  // Partial uncommits of a thread stack region are stack guard pages; they are not tracked.
  66.143 +  if (flag() == mtThreadStack && !same_region(addr, sz)) {
  66.144 +    return true;
  66.145 +  }
  66.146 +
  66.147 +  assert(addr != NULL, "Invalid address");
  66.148 +  assert(sz > 0, "Invalid size");
  66.149 +
  66.150 +  if (all_committed()) {
  66.151 +    assert(_committed_regions.is_empty(), "Sanity check");
  66.152 +    assert(contain_region(addr, sz), "Reserved region does not contain this region");
  66.153 +    set_all_committed(false);
  66.154 +    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
  66.155 +    if (same_region(addr, sz)) {
  66.156 +      return true;
  66.157 +    } else {
  66.158 +      CommittedMemoryRegion rgn(base(), size(), *call_stack());
  66.159 +      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
  66.160 +        rgn.exclude_region(addr, sz);
  66.161 +        return add_committed_region(rgn);
  66.162 +      } else {
  66.163 +        // split this region
  66.164 +        // top of the whole region
   66.165 +        address top = rgn.end();
  66.166 +        // use this region for lower part
  66.167 +        size_t exclude_size = rgn.end() - addr;
  66.168 +        rgn.exclude_region(addr, exclude_size);
  66.169 +        if (add_committed_region(rgn)) {
  66.170 +          // higher part
  66.171 +          address high_base = addr + sz;
  66.172 +          size_t  high_size = top - high_base;
  66.173 +          CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack);
  66.174 +          return add_committed_region(high_rgn);
  66.175 +        } else {
  66.176 +          return false;
  66.177 +        }
  66.178 +      }
  66.179 +    }
  66.180 +  } else {
   66.181 +    // We have to walk the whole list to remove the committed regions in
   66.182 +    // the specified range.
  66.183 +    LinkedListNode<CommittedMemoryRegion>* head =
  66.184 +      _committed_regions.head();
  66.185 +    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  66.186 +    VirtualMemoryRegion uncommitted_rgn(addr, sz);
  66.187 +
  66.188 +    while (head != NULL && !uncommitted_rgn.is_empty()) {
  66.189 +      CommittedMemoryRegion* crgn = head->data();
   66.190 +      // This committed region overlaps the region to uncommit.
  66.191 +      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
  66.192 +        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
   66.193 +          // Found an exact match; removing the node is sufficient.
  66.194 +          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
  66.195 +          _committed_regions.remove_after(prev);
  66.196 +          return true;
  66.197 +        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
   66.198 +          // This committed region contains the whole region to uncommit.
  66.199 +          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
  66.200 +          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
  66.201 +        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
   66.202 +          // This committed region lies entirely within the region to uncommit; remove it.
  66.203 +          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
  66.204 +          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
  66.205 +          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
  66.206 +          LinkedListNode<CommittedMemoryRegion>* tmp = head;
  66.207 +          head = head->next();
  66.208 +          _committed_regions.remove_after(prev);
  66.209 +          continue;
  66.210 +        } else if (crgn->contain_address(uncommitted_rgn.base())) {
  66.211 +          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
  66.212 +          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
  66.213 +          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
  66.214 +          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
  66.215 +        } else if (uncommitted_rgn.contain_address(crgn->base())) {
  66.216 +          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
  66.217 +          crgn->exclude_region(crgn->base(), toUncommitted);
  66.218 +          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
  66.219 +            toUncommitted);
  66.220 +          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
  66.221 +        }
  66.222 +      }
  66.223 +      prev = head;
  66.224 +      head = head->next();
  66.225 +    }
  66.226 +  }
  66.227 +
  66.228 +  return true;
  66.229 +}
  66.230 +
  66.231 +void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  66.232 +  assert(addr != NULL, "Invalid address");
  66.233 +
  66.234 +  // split committed regions
  66.235 +  LinkedListNode<CommittedMemoryRegion>* head =
  66.236 +    _committed_regions.head();
  66.237 +  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  66.238 +
  66.239 +  while (head != NULL) {
  66.240 +    if (head->data()->base() >= addr) {
  66.241 +      break;
  66.242 +    }
  66.243 +    prev = head;
  66.244 +    head = head->next();
  66.245 +  }
  66.246 +
  66.247 +  if (head != NULL) {
  66.248 +    if (prev != NULL) {
  66.249 +      prev->set_next(head->next());
  66.250 +    } else {
  66.251 +      _committed_regions.set_head(NULL);
  66.252 +    }
  66.253 +  }
  66.254 +
  66.255 +  rgn._committed_regions.set_head(head);
  66.256 +}
  66.257 +
  66.258 +size_t ReservedMemoryRegion::committed_size() const {
  66.259 +  if (all_committed()) {
  66.260 +    return size();
  66.261 +  } else {
  66.262 +    size_t committed = 0;
  66.263 +    LinkedListNode<CommittedMemoryRegion>* head =
  66.264 +      _committed_regions.head();
  66.265 +    while (head != NULL) {
  66.266 +      committed += head->data()->size();
  66.267 +      head = head->next();
  66.268 +    }
  66.269 +    return committed;
  66.270 +  }
  66.271 +}
  66.272 +
  66.273 +void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  66.274 +  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  66.275 +  if (flag() != f) {
  66.276 +    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
  66.277 +    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
  66.278 +    _flag = f;
  66.279 +  }
  66.280 +}
  66.281 +
  66.282 +bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  66.283 +  if (level >= NMT_summary) {
  66.284 +    VirtualMemorySummary::initialize();
  66.285 +  }
  66.286 +  return true;
  66.287 +}
  66.288 +
  66.289 +bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
  66.290 +   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  66.291 +  assert(base_addr != NULL, "Invalid address");
  66.292 +  assert(size > 0, "Invalid size");
  66.293 +
  66.294 +  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  66.295 +  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  66.296 +  LinkedListNode<ReservedMemoryRegion>* node;
  66.297 +  if (reserved_rgn == NULL) {
  66.298 +    VirtualMemorySummary::record_reserved_memory(size, flag);
  66.299 +    node = _reserved_regions.add(rgn);
  66.300 +    if (node != NULL) {
  66.301 +      node->data()->set_all_committed(all_committed);
  66.302 +      return true;
  66.303 +    } else {
  66.304 +      return false;
  66.305 +    }
  66.306 +  } else {
  66.307 +    if (reserved_rgn->same_region(base_addr, size)) {
  66.308 +      reserved_rgn->set_call_stack(stack);
  66.309 +      reserved_rgn->set_flag(flag);
  66.310 +      return true;
  66.311 +    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
  66.312 +      VirtualMemorySummary::record_reserved_memory(size, flag);
  66.313 +      reserved_rgn->expand_region(base_addr, size);
  66.314 +      reserved_rgn->set_call_stack(stack);
  66.315 +      return true;
  66.316 +    } else {
  66.317 +      // Overlapped reservation.
   66.318 +      // This can happen when the regions are thread stacks: a JNI
   66.319 +      // thread that exits without detaching from the VM leaks its
   66.320 +      // JavaThread object.
  66.321 +      if (reserved_rgn->flag() == mtThreadStack) {
  66.322 +        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
  66.323 +        // Overwrite with new region
  66.324 +
  66.325 +        // Release old region
  66.326 +        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
  66.327 +        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
  66.328 +
  66.329 +        // Add new region
  66.330 +        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
  66.331 +
  66.332 +        *reserved_rgn = rgn;
  66.333 +        return true;
  66.334 +      } else {
  66.335 +        ShouldNotReachHere();
  66.336 +        return false;
  66.337 +      }
  66.338 +    }
  66.339 +  }
  66.340 +}
  66.341 +
  66.342 +void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  66.343 +  assert(addr != NULL, "Invalid address");
  66.344 +
  66.345 +  ReservedMemoryRegion   rgn(addr, 1);
  66.346 +  ReservedMemoryRegion*  reserved_rgn = _reserved_regions.find(rgn);
  66.347 +  if (reserved_rgn != NULL) {
  66.348 +    assert(reserved_rgn->contain_address(addr), "Containment");
  66.349 +    if (reserved_rgn->flag() != flag) {
  66.350 +      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
  66.351 +      reserved_rgn->set_flag(flag);
  66.352 +    }
  66.353 +  }
  66.354 +}
  66.355 +
  66.356 +bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  66.357 +  const NativeCallStack& stack) {
  66.358 +  assert(addr != NULL, "Invalid address");
  66.359 +  assert(size > 0, "Invalid size");
  66.360 +  ReservedMemoryRegion  rgn(addr, size);
  66.361 +  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  66.362 +
  66.363 +  assert(reserved_rgn != NULL, "No reserved region");
  66.364 +  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  66.365 +  return reserved_rgn->add_committed_region(addr, size, stack);
  66.366 +}
  66.367 +
  66.368 +bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  66.369 +  assert(addr != NULL, "Invalid address");
  66.370 +  assert(size > 0, "Invalid size");
  66.371 +  ReservedMemoryRegion  rgn(addr, size);
  66.372 +  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  66.373 +  assert(reserved_rgn != NULL, "No reserved region");
  66.374 +  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  66.375 +  return reserved_rgn->remove_uncommitted_region(addr, size);
  66.376 +}
  66.377 +
  66.378 +bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  66.379 +  assert(addr != NULL, "Invalid address");
  66.380 +  assert(size > 0, "Invalid size");
  66.381 +
  66.382 +  ReservedMemoryRegion  rgn(addr, size);
  66.383 +  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  66.384 +
  66.385 +  assert(reserved_rgn != NULL, "No reserved region");
  66.386 +
  66.387 +  // uncommit regions within the released region
  66.388 +  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
  66.389 +    return false;
  66.390 +  }
  66.391 +
  66.392 +
  66.393 +  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
  66.394 +
  66.395 +  if (reserved_rgn->same_region(addr, size)) {
  66.396 +    return _reserved_regions.remove(rgn);
  66.397 +  } else {
  66.398 +    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  66.399 +    if (reserved_rgn->base() == addr ||
  66.400 +        reserved_rgn->end() == addr + size) {
  66.401 +        reserved_rgn->exclude_region(addr, size);
  66.402 +      return true;
  66.403 +    } else {
  66.404 +      address top = reserved_rgn->end();
  66.405 +      address high_base = addr + size;
  66.406 +      ReservedMemoryRegion high_rgn(high_base, top - high_base,
  66.407 +        *reserved_rgn->call_stack(), reserved_rgn->flag());
  66.408 +
   66.409 +      // Use the original region for the lower part.
  66.410 +      reserved_rgn->exclude_region(addr, top - addr);
  66.411 +      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions.add(high_rgn);
  66.412 +      if (new_rgn == NULL) {
  66.413 +        return false;
  66.414 +      } else {
  66.415 +        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
  66.416 +        return true;
  66.417 +      }
  66.418 +    }
  66.419 +  }
  66.420 +}
  66.421 +
  66.422 +
  66.423 +bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  66.424 +  ThreadCritical tc;
  66.425 +  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions.head();
  66.426 +  while (head != NULL) {
  66.427 +    const ReservedMemoryRegion* rgn = head->peek();
  66.428 +    if (!walker->do_allocation_site(rgn)) {
  66.429 +      return false;
  66.430 +    }
  66.431 +    head = head->next();
  66.432 +  }
  66.433 +  return true;
  66.434 +}
  66.435 +
  66.436 +// Transition virtual memory tracking level.
  66.437 +bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  66.438 +  if (from == NMT_minimal) {
  66.439 +    assert(to == NMT_summary || to == NMT_detail, "Just check");
  66.440 +    VirtualMemorySummary::reset();
  66.441 +  } else if (to == NMT_minimal) {
  66.442 +    assert(from == NMT_summary || from == NMT_detail, "Just check");
  66.443 +    // Clean up virtual memory tracking data structures.
  66.444 +    ThreadCritical tc;
  66.445 +    _reserved_regions.clear();
  66.446 +  }
  66.447 +
  66.448 +  return true;
  66.449 +}
  66.450 +
  66.451 +
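A minimal usage sketch, not part of this changeset: it shows how a caller in the os:: layer is
expected to drive VirtualMemoryTracker through a reserve/commit/release cycle. The mtJavaHeap
tag, the sizes and the function name are illustrative assumptions only.

  #include "services/virtualMemoryTracker.hpp"
  #include "utilities/nativeCallStack.hpp"

  static void track_reserve_commit_release_example(address base) {
    const size_t reserved_size  = 4 * M;   // illustrative sizes
    const size_t committed_size = 1 * M;
    NativeCallStack stack(0, true);        // capture the native frames of this call site

    // Reserve first; the region stays tagged mtNone until its use is known.
    VirtualMemoryTracker::add_reserved_region(base, reserved_size, stack, mtNone);
    VirtualMemoryTracker::set_reserved_region_type(base, mtJavaHeap);

    // Commit a sub-range; the committed bytes are folded into the summary by flag.
    VirtualMemoryTracker::add_committed_region(base, committed_size, stack);

    // Releasing the whole reservation uncommits what is left and drops the region node.
    VirtualMemoryTracker::remove_released_region(base, reserved_size);
  }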
    67.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    67.2 +++ b/src/share/vm/services/virtualMemoryTracker.hpp	Wed Aug 27 08:19:12 2014 -0400
    67.3 @@ -0,0 +1,437 @@
    67.4 +/*
    67.5 + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    67.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    67.7 + *
    67.8 + * This code is free software; you can redistribute it and/or modify it
    67.9 + * under the terms of the GNU General Public License version 2 only, as
   67.10 + * published by the Free Software Foundation.
   67.11 + *
   67.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   67.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   67.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   67.15 + * version 2 for more details (a copy is included in the LICENSE file that
   67.16 + * accompanied this code).
   67.17 + *
   67.18 + * You should have received a copy of the GNU General Public License version
   67.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   67.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   67.21 + *
   67.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   67.23 + * or visit www.oracle.com if you need additional information or have any
   67.24 + * questions.
   67.25 + *
   67.26 + */
   67.27 +
   67.28 +#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
   67.29 +#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
   67.30 +
   67.31 +#if INCLUDE_NMT
   67.32 +
   67.33 +#include "memory/allocation.hpp"
   67.34 +#include "services/allocationSite.hpp"
   67.35 +#include "services/nmtCommon.hpp"
   67.36 +#include "utilities/linkedlist.hpp"
   67.37 +#include "utilities/nativeCallStack.hpp"
   67.38 +#include "utilities/ostream.hpp"
   67.39 +
   67.40 +
   67.41 +/*
   67.42 + * Virtual memory counter
   67.43 + */
   67.44 +class VirtualMemory VALUE_OBJ_CLASS_SPEC {
   67.45 + private:
   67.46 +  size_t     _reserved;
   67.47 +  size_t     _committed;
   67.48 +
   67.49 + public:
   67.50 +  VirtualMemory() : _reserved(0), _committed(0) { }
   67.51 +
   67.52 +  inline void reserve_memory(size_t sz) { _reserved += sz; }
   67.53 +  inline void commit_memory (size_t sz) {
   67.54 +    _committed += sz;
   67.55 +    assert(_committed <= _reserved, "Sanity check");
   67.56 +  }
   67.57 +
   67.58 +  inline void release_memory (size_t sz) {
   67.59 +    assert(_reserved >= sz, "Negative amount");
   67.60 +    _reserved -= sz;
   67.61 +  }
   67.62 +
   67.63 +  inline void uncommit_memory(size_t sz) {
   67.64 +    assert(_committed >= sz, "Negative amount");
   67.65 +    _committed -= sz;
   67.66 +  }
   67.67 +
   67.68 +  void reset() {
   67.69 +    _reserved  = 0;
   67.70 +    _committed = 0;
   67.71 +  }
   67.72 +
   67.73 +  inline size_t reserved()  const { return _reserved;  }
   67.74 +  inline size_t committed() const { return _committed; }
   67.75 +};
   67.76 +
    67.77 +// Virtual memory allocation site; keeps track of where the virtual memory is reserved.
   67.78 +class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
   67.79 + public:
   67.80 +  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
   67.81 +    AllocationSite<VirtualMemory>(stack) { }
   67.82 +
   67.83 +  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
   67.84 +  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
   67.85 +  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
   67.86 +  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
   67.87 +  inline size_t reserved() const  { return peek()->reserved(); }
   67.88 +  inline size_t committed() const { return peek()->committed(); }
   67.89 +};
   67.90 +
   67.91 +class VirtualMemorySummary;
   67.92 +
   67.93 +// This class represents a snapshot of virtual memory at a given time.
   67.94 +// The latest snapshot is saved in a static area.
   67.95 +class VirtualMemorySnapshot : public ResourceObj {
   67.96 +  friend class VirtualMemorySummary;
   67.97 +
   67.98 + private:
   67.99 +  VirtualMemory  _virtual_memory[mt_number_of_types];
  67.100 +
  67.101 + public:
  67.102 +  inline VirtualMemory* by_type(MEMFLAGS flag) {
  67.103 +    int index = NMTUtil::flag_to_index(flag);
  67.104 +    return &_virtual_memory[index];
  67.105 +  }
  67.106 +
  67.107 +  inline VirtualMemory* by_index(int index) {
   67.108 +    assert(index >= 0, "Index out of bounds");
   67.109 +    assert(index < mt_number_of_types, "Index out of bounds");
  67.110 +    return &_virtual_memory[index];
  67.111 +  }
  67.112 +
  67.113 +  inline size_t total_reserved() const {
  67.114 +    size_t amount = 0;
  67.115 +    for (int index = 0; index < mt_number_of_types; index ++) {
  67.116 +      amount += _virtual_memory[index].reserved();
  67.117 +    }
  67.118 +    return amount;
  67.119 +  }
  67.120 +
  67.121 +  inline size_t total_committed() const {
  67.122 +    size_t amount = 0;
  67.123 +    for (int index = 0; index < mt_number_of_types; index ++) {
  67.124 +      amount += _virtual_memory[index].committed();
  67.125 +    }
  67.126 +    return amount;
  67.127 +  }
  67.128 +
  67.129 +  inline void reset() {
  67.130 +    for (int index = 0; index < mt_number_of_types; index ++) {
  67.131 +      _virtual_memory[index].reset();
  67.132 +    }
  67.133 +  }
  67.134 +
  67.135 +  void copy_to(VirtualMemorySnapshot* s) {
  67.136 +    for (int index = 0; index < mt_number_of_types; index ++) {
  67.137 +      s->_virtual_memory[index] = _virtual_memory[index];
  67.138 +    }
  67.139 +  }
  67.140 +};
  67.141 +
  67.142 +class VirtualMemorySummary : AllStatic {
  67.143 + public:
  67.144 +  static void initialize();
  67.145 +
  67.146 +  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
  67.147 +    as_snapshot()->by_type(flag)->reserve_memory(size);
  67.148 +  }
  67.149 +
  67.150 +  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
  67.151 +    as_snapshot()->by_type(flag)->commit_memory(size);
  67.152 +  }
  67.153 +
  67.154 +  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
  67.155 +    as_snapshot()->by_type(flag)->uncommit_memory(size);
  67.156 +  }
  67.157 +
  67.158 +  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
  67.159 +    as_snapshot()->by_type(flag)->release_memory(size);
  67.160 +  }
  67.161 +
  67.162 +  // Move virtual memory from one memory type to another.
  67.163 +  // Virtual memory can be reserved before it is associated with a memory type, and tagged
   67.164 +  // as 'unknown'. Once the memory is tagged, the virtual memory will be moved from the
   67.165 +  // 'unknown' type to the specified memory type.
  67.166 +  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
  67.167 +    as_snapshot()->by_type(from)->release_memory(size);
  67.168 +    as_snapshot()->by_type(to)->reserve_memory(size);
  67.169 +  }
  67.170 +
  67.171 +  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
  67.172 +    as_snapshot()->by_type(from)->uncommit_memory(size);
  67.173 +    as_snapshot()->by_type(to)->commit_memory(size);
  67.174 +  }
  67.175 +
  67.176 +  static inline void snapshot(VirtualMemorySnapshot* s) {
  67.177 +    as_snapshot()->copy_to(s);
  67.178 +  }
  67.179 +
  67.180 +  static inline void reset() {
  67.181 +    as_snapshot()->reset();
  67.182 +  }
  67.183 +
  67.184 +  static VirtualMemorySnapshot* as_snapshot() {
  67.185 +    return (VirtualMemorySnapshot*)_snapshot;
  67.186 +  }
  67.187 +
  67.188 + private:
  67.189 +  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
  67.190 +};
  67.191 +
  67.192 +
  67.193 +
  67.194 +/*
  67.195 + * A virtual memory region
  67.196 + */
  67.197 +class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
  67.198 + private:
  67.199 +  address      _base_address;
  67.200 +  size_t       _size;
  67.201 +
  67.202 + public:
  67.203 +  VirtualMemoryRegion(address addr, size_t size) :
  67.204 +    _base_address(addr), _size(size) {
  67.205 +     assert(addr != NULL, "Invalid address");
  67.206 +     assert(size > 0, "Invalid size");
  67.207 +   }
  67.208 +
  67.209 +  inline address base() const { return _base_address;   }
  67.210 +  inline address end()  const { return base() + size(); }
  67.211 +  inline size_t  size() const { return _size;           }
  67.212 +
  67.213 +  inline bool is_empty() const { return size() == 0; }
  67.214 +
  67.215 +  inline bool contain_address(address addr) const {
  67.216 +    return (addr >= base() && addr < end());
  67.217 +  }
  67.218 +
  67.219 +
  67.220 +  inline bool contain_region(address addr, size_t size) const {
  67.221 +    return contain_address(addr) && contain_address(addr + size - 1);
  67.222 +  }
  67.223 +
  67.224 +  inline bool same_region(address addr, size_t sz) const {
  67.225 +    return (addr == base() && sz == size());
  67.226 +  }
  67.227 +
  67.228 +
  67.229 +  inline bool overlap_region(address addr, size_t sz) const {
  67.230 +    VirtualMemoryRegion rgn(addr, sz);
  67.231 +    return contain_address(addr) ||
  67.232 +           contain_address(addr + sz - 1) ||
  67.233 +           rgn.contain_address(base()) ||
  67.234 +           rgn.contain_address(end() - 1);
  67.235 +  }
  67.236 +
  67.237 +  inline bool adjacent_to(address addr, size_t sz) const {
  67.238 +    return (addr == end() || (addr + sz) == base());
  67.239 +  }
  67.240 +
  67.241 +  void exclude_region(address addr, size_t sz) {
  67.242 +    assert(contain_region(addr, sz), "Not containment");
  67.243 +    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
  67.244 +    size_t new_size = size() - sz;
  67.245 +
  67.246 +    if (addr == base()) {
  67.247 +      set_base(addr + sz);
  67.248 +    }
  67.249 +    set_size(new_size);
  67.250 +  }
  67.251 +
  67.252 +  void expand_region(address addr, size_t sz) {
  67.253 +    assert(adjacent_to(addr, sz), "Not adjacent regions");
  67.254 +    if (base() == addr + sz) {
  67.255 +      set_base(addr);
  67.256 +    }
  67.257 +    set_size(size() + sz);
  67.258 +  }
  67.259 +
  67.260 + protected:
  67.261 +  void set_base(address base) {
  67.262 +    assert(base != NULL, "Sanity check");
  67.263 +    _base_address = base;
  67.264 +  }
  67.265 +
  67.266 +  void set_size(size_t  size) {
  67.267 +    assert(size > 0, "Sanity check");
  67.268 +    _size = size;
  67.269 +  }
  67.270 +};
  67.271 +
  67.272 +
  67.273 +class CommittedMemoryRegion : public VirtualMemoryRegion {
  67.274 + private:
  67.275 +  NativeCallStack  _stack;
  67.276 +
  67.277 + public:
  67.278 +  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
  67.279 +    VirtualMemoryRegion(addr, size), _stack(stack) { }
  67.280 +
  67.281 +  inline int compare(const CommittedMemoryRegion& rgn) const {
  67.282 +    if (overlap_region(rgn.base(), rgn.size()) ||
  67.283 +        adjacent_to   (rgn.base(), rgn.size())) {
  67.284 +      return 0;
  67.285 +    } else {
  67.286 +      if (base() == rgn.base()) {
  67.287 +        return 0;
  67.288 +      } else if (base() > rgn.base()) {
  67.289 +        return 1;
  67.290 +      } else {
  67.291 +        return -1;
  67.292 +      }
  67.293 +    }
  67.294 +  }
  67.295 +
  67.296 +  inline bool equals(const CommittedMemoryRegion& rgn) const {
  67.297 +    return compare(rgn) == 0;
  67.298 +  }
  67.299 +
  67.300 +  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  67.301 +  inline const NativeCallStack* call_stack() const         { return &_stack; }
  67.302 +};
  67.303 +
  67.304 +
  67.305 +typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
  67.306 +
  67.307 +int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
  67.308 +class ReservedMemoryRegion : public VirtualMemoryRegion {
  67.309 + private:
  67.310 +  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
  67.311 +    _committed_regions;
  67.312 +
  67.313 +  NativeCallStack  _stack;
  67.314 +  MEMFLAGS         _flag;
  67.315 +
  67.316 +  bool             _all_committed;
  67.317 +
  67.318 + public:
  67.319 +  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
  67.320 +    MEMFLAGS flag = mtNone) :
  67.321 +    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
  67.322 +    _all_committed(false) { }
  67.323 +
  67.324 +
  67.325 +  ReservedMemoryRegion(address base, size_t size) :
  67.326 +    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
  67.327 +    _all_committed(false) { }
  67.328 +
  67.329 +  // Copy constructor
  67.330 +  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
  67.331 +    VirtualMemoryRegion(rr.base(), rr.size()) {
  67.332 +    *this = rr;
  67.333 +  }
  67.334 +
  67.335 +  inline void  set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  67.336 +  inline const NativeCallStack* call_stack() const          { return &_stack;  }
  67.337 +
  67.338 +  void  set_flag(MEMFLAGS flag);
  67.339 +  inline MEMFLAGS flag() const            { return _flag;  }
  67.340 +
  67.341 +  inline int compare(const ReservedMemoryRegion& rgn) const {
  67.342 +    if (overlap_region(rgn.base(), rgn.size())) {
  67.343 +      return 0;
  67.344 +    } else {
  67.345 +      if (base() == rgn.base()) {
  67.346 +        return 0;
  67.347 +      } else if (base() > rgn.base()) {
  67.348 +        return 1;
  67.349 +      } else {
  67.350 +        return -1;
  67.351 +      }
  67.352 +    }
  67.353 +  }
  67.354 +
  67.355 +  inline bool equals(const ReservedMemoryRegion& rgn) const {
  67.356 +    return compare(rgn) == 0;
  67.357 +  }
  67.358 +
  67.359 +  bool    add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  67.360 +  bool    remove_uncommitted_region(address addr, size_t size);
  67.361 +
  67.362 +  size_t  committed_size() const;
  67.363 +
   67.364 +  // Move committed regions that are higher than the specified address to
   67.365 +  // the new region.
  67.366 +  void    move_committed_regions(address addr, ReservedMemoryRegion& rgn);
  67.367 +
  67.368 +  inline bool all_committed() const { return _all_committed; }
  67.369 +  void        set_all_committed(bool b);
  67.370 +
  67.371 +  CommittedRegionIterator iterate_committed_regions() const {
  67.372 +    return CommittedRegionIterator(_committed_regions.head());
  67.373 +  }
  67.374 +
  67.375 +  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
  67.376 +    set_base(other.base());
  67.377 +    set_size(other.size());
  67.378 +
  67.379 +    _stack =         *other.call_stack();
  67.380 +    _flag  =         other.flag();
  67.381 +    _all_committed = other.all_committed();
  67.382 +    if (other.all_committed()) {
  67.383 +      set_all_committed(true);
  67.384 +    } else {
  67.385 +      CommittedRegionIterator itr = other.iterate_committed_regions();
  67.386 +      const CommittedMemoryRegion* rgn = itr.next();
  67.387 +      while (rgn != NULL) {
  67.388 +        _committed_regions.add(*rgn);
  67.389 +        rgn = itr.next();
  67.390 +      }
  67.391 +    }
  67.392 +    return *this;
  67.393 +  }
  67.394 +
  67.395 + private:
   67.396 +  // The committed region contains the region to uncommit; subtract the uncommitted
   67.397 +  // region from this committed region.
  67.398 +  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  67.399 +    address addr, size_t sz);
  67.400 +
  67.401 +  bool add_committed_region(const CommittedMemoryRegion& rgn) {
  67.402 +    assert(rgn.base() != NULL, "Invalid base address");
   67.403 +    assert(rgn.size() > 0, "Invalid size");
  67.404 +    return _committed_regions.add(rgn) != NULL;
  67.405 +  }
  67.406 +};
  67.407 +
  67.408 +int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
  67.409 +
  67.410 +class VirtualMemoryWalker : public StackObj {
  67.411 + public:
  67.412 +   virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
  67.413 +};
  67.414 +
  67.415 +// Main class called from MemTracker to track virtual memory allocations, commits and releases.
  67.416 +class VirtualMemoryTracker : AllStatic {
  67.417 + public:
  67.418 +  static bool initialize(NMT_TrackingLevel level);
  67.419 +
  67.420 +  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
  67.421 +    MEMFLAGS flag = mtNone, bool all_committed = false);
  67.422 +
  67.423 +  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  67.424 +  static bool remove_uncommitted_region (address base_addr, size_t size);
  67.425 +  static bool remove_released_region    (address base_addr, size_t size);
  67.426 +  static void set_reserved_region_type  (address addr, MEMFLAGS flag);
  67.427 +
  67.428 +  // Walk virtual memory data structure for creating baseline, etc.
  67.429 +  static bool walk_virtual_memory(VirtualMemoryWalker* walker);
  67.430 +
  67.431 +  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
  67.432 +
  67.433 + private:
  67.434 +  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
  67.435 +};
  67.436 +
  67.437 +
  67.438 +#endif // INCLUDE_NMT
  67.439 +
  67.440 +#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
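A minimal sketch, not part of this changeset, of a VirtualMemoryWalker implementation, the hook
that reporting code uses to consume the region list above. The class name and the output format
are assumptions; only walk_virtual_memory() and the ReservedMemoryRegion accessors come from
the header.

  class CommittedSizeWalker : public VirtualMemoryWalker {
   private:
    outputStream* _out;
   public:
    CommittedSizeWalker(outputStream* out) : _out(out) { }

    virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
      // Report reserved vs. committed bytes for each region. Individual committed
      // sub-regions could also be visited via rgn->iterate_committed_regions().
      _out->print_cr("reserved " SIZE_FORMAT " bytes, committed " SIZE_FORMAT " bytes",
                     rgn->size(), rgn->committed_size());
      return true;   // returning false stops walk_virtual_memory()
    }
  };

  // Usage:
  //   CommittedSizeWalker walker(tty);
  //   VirtualMemoryTracker::walk_virtual_memory(&walker);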
    68.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    68.2 +++ b/src/share/vm/utilities/linkedlist.cpp	Wed Aug 27 08:19:12 2014 -0400
    68.3 @@ -0,0 +1,114 @@
    68.4 +/*
    68.5 + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
    68.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    68.7 + *
    68.8 + * This code is free software; you can redistribute it and/or modify it
    68.9 + * under the terms of the GNU General Public License version 2 only, as
   68.10 + * published by the Free Software Foundation.
   68.11 + *
   68.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   68.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   68.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   68.15 + * version 2 for more details (a copy is included in the LICENSE file that
   68.16 + * accompanied this code).
   68.17 + *
   68.18 + * You should have received a copy of the GNU General Public License version
   68.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   68.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   68.21 + *
   68.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   68.23 + * or visit www.oracle.com if you need additional information or have any
   68.24 + * questions.
   68.25 + *
   68.26 + */
   68.27 +
   68.28 +#include "precompiled.hpp"
   68.29 +
   68.30 +/////////////// Unit tests ///////////////
   68.31 +
   68.32 +#ifndef PRODUCT
   68.33 +
   68.34 +#include "runtime/os.hpp"
   68.35 +#include "utilities/linkedlist.hpp"
   68.36 +#include "memory/allocation.hpp"
   68.37 +#include "memory/allocation.inline.hpp"
   68.38 +
   68.39 +class Integer : public StackObj {
   68.40 + private:
   68.41 +  int  _value;
   68.42 + public:
   68.43 +  Integer(int i) : _value(i) { }
   68.44 +
   68.45 +  int   value() const { return _value; }
   68.46 +  bool  equals(const Integer& i) const {
   68.47 +   return _value == i.value();
   68.48 +  }
   68.49 +};
   68.50 +
   68.51 +int compare_Integer(const Integer& i1, const Integer& i2) {
   68.52 +  return i1.value() - i2.value();
   68.53 +}
   68.54 +
   68.55 +void check_list_values(const int* expected, const LinkedList<Integer>* list) {
   68.56 +  LinkedListNode<Integer>* head = list->head();
   68.57 +  int index = 0;
   68.58 +  while (head != NULL) {
   68.59 +    assert(head->peek()->value() == expected[index], "Unexpected value");
   68.60 +    head = head->next();
   68.61 +    index ++;
   68.62 +  }
   68.63 +}
   68.64 +
   68.65 +void Test_linked_list() {
   68.66 +  LinkedListImpl<Integer, ResourceObj::C_HEAP, mtTest>  ll;
   68.67 +
   68.68 +
   68.69 +  // Test regular linked list
   68.70 +  assert(ll.is_empty(), "Start with empty list");
   68.71 +  Integer one(1), two(2), three(3), four(4), five(5), six(6);
   68.72 +
   68.73 +  ll.add(six);
   68.74 +  assert(!ll.is_empty(), "Should not be empty");
   68.75 +
   68.76 +  Integer* i = ll.find(six);
   68.77 +  assert(i != NULL, "Should find it");
   68.78 +
   68.79 +  i = ll.find(three);
   68.80 +  assert(i == NULL, "Not in the list");
   68.81 +
   68.82 +  LinkedListNode<Integer>* node = ll.find_node(six);
   68.83 +  assert(node != NULL, "6 is in the list");
   68.84 +
   68.85 +  ll.insert_after(three, node);
   68.86 +  ll.insert_before(one, node);
   68.87 +  int expected[3] = {1, 6, 3};
   68.88 +  check_list_values(expected, &ll);
   68.89 +
   68.90 +  ll.add(two);
   68.91 +  ll.add(four);
   68.92 +  ll.add(five);
   68.93 +
   68.94 +  // Test sorted linked list
   68.95 +  SortedLinkedList<Integer, compare_Integer, ResourceObj::C_HEAP, mtTest> sl;
   68.96 +  assert(sl.is_empty(), "Start with empty list");
   68.97 +
   68.98 +  size_t ll_size = ll.size();
   68.99 +  sl.move(&ll);
  68.100 +  size_t sl_size = sl.size();
  68.101 +
  68.102 +  assert(ll_size == sl_size, "Should be the same size");
  68.103 +  assert(ll.is_empty(), "No more entires");
  68.104 +
  68.105 +  // sorted result
  68.106 +  int sorted_result[] = {1, 2, 3, 4, 5, 6};
  68.107 +  check_list_values(sorted_result, &sl);
  68.108 +
  68.109 +  node = sl.find_node(four);
  68.110 +  assert(node != NULL, "4 is in the list");
  68.111 +  sl.remove_before(node);
  68.112 +  sl.remove_after(node);
  68.113 +  int remains[] = {1, 2, 4, 6};
  68.114 +  check_list_values(remains, &sl);
  68.115 +}
  68.116 +#endif // PRODUCT
  68.117 +
    69.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    69.2 +++ b/src/share/vm/utilities/linkedlist.hpp	Wed Aug 27 08:19:12 2014 -0400
    69.3 @@ -0,0 +1,416 @@
    69.4 +/*
    69.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    69.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.7 + *
    69.8 + * This code is free software; you can redistribute it and/or modify it
    69.9 + * under the terms of the GNU General Public License version 2 only, as
   69.10 + * published by the Free Software Foundation.
   69.11 + *
   69.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   69.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   69.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   69.15 + * version 2 for more details (a copy is included in the LICENSE file that
   69.16 + * accompanied this code).
   69.17 + *
   69.18 + * You should have received a copy of the GNU General Public License version
   69.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   69.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   69.21 + *
   69.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   69.23 + * or visit www.oracle.com if you need additional information or have any
   69.24 + * questions.
   69.25 + *
   69.26 + */
   69.27 +
   69.28 +#ifndef SHARE_VM_UTILITIES_LINKED_LIST_HPP
   69.29 +#define SHARE_VM_UTILITIES_LINKED_LIST_HPP
   69.30 +
   69.31 +#include "memory/allocation.hpp"
   69.32 +
   69.33 +/*
    69.34 + * The implementation of a generic linked list, which can use various
    69.35 + * types of backing storage, such as C heap, arena, and resource area.
   69.36 + */
   69.37 +
   69.38 +
   69.39 +// An entry in a linked list. It should use the same backing storage
   69.40 +// as the linked list that contains this entry.
   69.41 +template <class E> class LinkedListNode : public ResourceObj {
   69.42 + private:
   69.43 +  E                       _data;  // embedded content
   69.44 +  LinkedListNode<E>*      _next;  // next entry
   69.45 +
   69.46 + protected:
   69.47 +  LinkedListNode() : _next(NULL) { }
   69.48 +
   69.49 + public:
   69.50 +  LinkedListNode(const E& e): _data(e), _next(NULL) { }
   69.51 +
   69.52 +  inline void set_next(LinkedListNode<E>* node) { _next = node; }
   69.53 +  inline LinkedListNode<E> * next() const       { return _next; }
   69.54 +
   69.55 +  E*  data() { return &_data; }
   69.56 +  const E* peek() const { return &_data; }
   69.57 +};
   69.58 +
   69.59 +// A linked list interface. It does not specify
   69.60 +// any storage type it uses, so all methods involving
   69.61 +// memory allocation or deallocation are pure virtual
   69.62 +template <class E> class LinkedList : public ResourceObj {
   69.63 + protected:
   69.64 +  LinkedListNode<E>*    _head;
   69.65 +
   69.66 + public:
   69.67 +  LinkedList() : _head(NULL) { }
   69.68 +
   69.69 +  inline void set_head(LinkedListNode<E>* h) { _head = h; }
   69.70 +  inline LinkedListNode<E>* head() const     { return _head; }
   69.71 +  inline bool is_empty()           const     { return head() == NULL; }
   69.72 +
   69.73 +  inline size_t size() const {
   69.74 +    LinkedListNode<E>* p;
   69.75 +    size_t count = 0;
   69.76 +    for (p = head(); p != NULL; count++, p = p->next());
   69.77 +    return count;
    69.78 +  }
   69.79 +
   69.80 +  // Move all entries from specified linked list to this one
   69.81 +  virtual void move(LinkedList<E>* list) = 0;
   69.82 +
   69.83 +  // Add an entry to this linked list
   69.84 +  virtual LinkedListNode<E>* add(const E& e) = 0;
    69.85 +  // Add a node to this linked list
   69.86 +  virtual void add(LinkedListNode<E>* node) = 0;
   69.87 +
   69.88 +  // Add a linked list to this linked list
   69.89 +  virtual bool  add(const LinkedList<E>* list) = 0;
   69.90 +
   69.91 +  // Search entry in the linked list
   69.92 +  virtual LinkedListNode<E>* find_node(const E& e) = 0;
   69.93 +  virtual E* find(const E& e) = 0;
   69.94 +
   69.95 +  // Insert entry to the linked list
   69.96 +  virtual LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref) = 0;
   69.97 +  virtual LinkedListNode<E>* insert_after (const E& e, LinkedListNode<E>* ref) = 0;
   69.98 +
   69.99 +  // Remove entry from the linked list
  69.100 +  virtual bool               remove(const E& e) = 0;
  69.101 +  virtual bool               remove(LinkedListNode<E>* node) = 0;
  69.102 +  virtual bool               remove_before(LinkedListNode<E>* ref) = 0;
  69.103 +  virtual bool               remove_after(LinkedListNode<E>*  ref) = 0;
  69.104 +
  69.105 +  LinkedListNode<E>* unlink_head() {
  69.106 +    LinkedListNode<E>* h = this->head();
  69.107 +    if (h != NULL) {
  69.108 +      this->set_head(h->next());
  69.109 +    }
  69.110 +    return h;
  69.111 +  }
  69.112 +
  69.113 +  DEBUG_ONLY(virtual ResourceObj::allocation_type storage_type() = 0;)
  69.114 +};
  69.115 +
  69.116 +// A linked list implementation.
  69.117 +// The linked list can be allocated in various type of memory: C heap, arena and resource area, etc.
  69.118 +template <class E, ResourceObj::allocation_type T = ResourceObj::C_HEAP,
  69.119 +  MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
  69.120 +  class LinkedListImpl : public LinkedList<E> {
  69.121 + protected:
  69.122 +  Arena*                 _arena;
  69.123 + public:
  69.124 +  LinkedListImpl() :  _arena(NULL) { }
  69.125 +  LinkedListImpl(Arena* a) : _arena(a) { }
  69.126 +
  69.127 +  virtual ~LinkedListImpl() {
  69.128 +    clear();
  69.129 +  }
  69.130 +
  69.131 +  virtual void clear() {
  69.132 +    LinkedListNode<E>* p = this->head();
  69.133 +    this->set_head(NULL);
  69.134 +    while (p != NULL) {
  69.135 +      LinkedListNode<E>* to_delete = p;
  69.136 +      p = p->next();
  69.137 +      delete_node(to_delete);
  69.138 +    }
  69.139 +  }
  69.140 +
  69.141 +  // Add an entry to the linked list
  69.142 +  virtual LinkedListNode<E>* add(const E& e)  {
  69.143 +    LinkedListNode<E>* node = this->new_node(e);
  69.144 +    if (node != NULL) {
  69.145 +      this->add(node);
  69.146 +    }
  69.147 +
  69.148 +    return node;
  69.149 +  }
  69.150 +
  69.151 +  virtual void add(LinkedListNode<E>* node) {
  69.152 +    assert(node != NULL, "NULL pointer");
  69.153 +    node->set_next(this->head());
  69.154 +    this->set_head(node);
  69.155 +  }
  69.156 +
   69.157 +  // Move a linked list into this linked list; both have to be allocated in the
   69.158 +  // same type of storage.
  69.159 +  virtual void move(LinkedList<E>* list) {
  69.160 +    assert(list->storage_type() == this->storage_type(), "Different storage type");
  69.161 +    LinkedListNode<E>* node = this->head();
  69.162 +    while (node != NULL && node->next() != NULL) {
  69.163 +      node = node->next();
  69.164 +    }
  69.165 +    if (node == NULL) {
  69.166 +      this->set_head(list->head());
  69.167 +    } else {
  69.168 +      node->set_next(list->head());
  69.169 +    }
  69.170 +    // All entries are moved
  69.171 +    list->set_head(NULL);
  69.172 +  }
  69.173 +
  69.174 +  virtual bool add(const LinkedList<E>* list) {
  69.175 +    LinkedListNode<E>* node = list->head();
  69.176 +    while (node != NULL) {
  69.177 +      if (this->add(*node->peek()) == NULL) {
  69.178 +        return false;
  69.179 +      }
  69.180 +      node = node->next();
  69.181 +    }
  69.182 +    return true;
  69.183 +  }
  69.184 +
  69.185 +
  69.186 +  virtual LinkedListNode<E>* find_node(const E& e) {
  69.187 +    LinkedListNode<E>* p = this->head();
  69.188 +    while (p != NULL && !p->peek()->equals(e)) {
  69.189 +      p = p->next();
  69.190 +    }
  69.191 +    return p;
  69.192 +  }
  69.193 +
  69.194 +  E* find(const E& e) {
  69.195 +    LinkedListNode<E>* node = find_node(e);
  69.196 +    return (node == NULL) ? NULL : node->data();
  69.197 +  }
  69.198 +
  69.199 +
  69.200 +  // Add an entry in front of the reference entry
  69.201 +  LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref_node) {
  69.202 +    LinkedListNode<E>* node = this->new_node(e);
  69.203 +    if (node == NULL) return NULL;
  69.204 +    if (ref_node == this->head()) {
  69.205 +      node->set_next(ref_node);
  69.206 +      this->set_head(node);
  69.207 +    } else {
  69.208 +      LinkedListNode<E>* p = this->head();
  69.209 +      while (p != NULL && p->next() != ref_node) {
  69.210 +        p = p->next();
  69.211 +      }
  69.212 +      assert(p != NULL, "ref_node not in the list");
  69.213 +      node->set_next(ref_node);
  69.214 +      p->set_next(node);
  69.215 +    }
  69.216 +    return node;
  69.217 +  }
  69.218 +
  69.219 +   // Add an entry behind the reference entry
  69.220 +   LinkedListNode<E>* insert_after(const E& e, LinkedListNode<E>* ref_node) {
  69.221 +     LinkedListNode<E>* node = this->new_node(e);
  69.222 +     if (node == NULL) return NULL;
  69.223 +     node->set_next(ref_node->next());
  69.224 +     ref_node->set_next(node);
  69.225 +     return node;
  69.226 +   }
  69.227 +
  69.228 +   // Remove an entry from the linked list.
  69.229 +   // Return true if the entry is successfully removed
  69.230 +   virtual bool remove(const E& e) {
  69.231 +     LinkedListNode<E>* tmp = this->head();
  69.232 +     LinkedListNode<E>* prev = NULL;
  69.233 +
  69.234 +     while (tmp != NULL) {
  69.235 +       if (tmp->peek()->equals(e)) {
  69.236 +         return remove_after(prev);
  69.237 +       }
  69.238 +       prev = tmp;
  69.239 +       tmp = tmp->next();
  69.240 +     }
  69.241 +     return false;
  69.242 +  }
  69.243 +
  69.244 +  // Remove the node after the reference entry
  69.245 +  virtual bool remove_after(LinkedListNode<E>* prev) {
  69.246 +    LinkedListNode<E>* to_delete;
  69.247 +    if (prev == NULL) {
  69.248 +      to_delete = this->unlink_head();
  69.249 +    } else {
  69.250 +      to_delete = prev->next();
  69.251 +      if (to_delete != NULL) {
  69.252 +        prev->set_next(to_delete->next());
  69.253 +      }
  69.254 +    }
  69.255 +
  69.256 +    if (to_delete != NULL) {
  69.257 +      delete_node(to_delete);
  69.258 +      return true;
  69.259 +    }
  69.260 +    return false;
  69.261 +  }
  69.262 +
  69.263 +  virtual bool remove(LinkedListNode<E>* node) {
  69.264 +    LinkedListNode<E>* p = this->head();
  69.265 +    while (p != NULL && p->next() != node) {
  69.266 +      p = p->next();
  69.267 +    }
  69.268 +    if (p != NULL) {
  69.269 +      p->set_next(node->next());
  69.270 +      delete_node(node);
  69.271 +      return true;
  69.272 +    } else {
  69.273 +      return false;
  69.274 +    }
  69.275 +  }
  69.276 +
  69.277 +  virtual bool remove_before(LinkedListNode<E>* ref) {
  69.278 +    assert(ref != NULL, "NULL pointer");
  69.279 +    LinkedListNode<E>* p = this->head();
  69.280 +    LinkedListNode<E>* to_delete = NULL; // to be deleted
  69.281 +    LinkedListNode<E>* prev = NULL;      // node before the node to be deleted
  69.282 +    while (p != NULL && p != ref) {
  69.283 +      prev = to_delete;
  69.284 +      to_delete = p;
  69.285 +      p = p->next();
  69.286 +    }
  69.287 +    if (p == NULL || to_delete == NULL) return false;
  69.288 +    assert(to_delete->next() == ref, "Wrong node to delete");
  69.289 +    assert(prev == NULL || prev->next() == to_delete,
  69.290 +      "Sanity check");
  69.291 +    if (prev == NULL) {
  69.292 +      assert(to_delete == this->head(), "Must be head");
  69.293 +      this->set_head(to_delete->next());
  69.294 +    } else {
  69.295 +      prev->set_next(to_delete->next());
  69.296 +    }
  69.297 +    delete_node(to_delete);
  69.298 +    return true;
  69.299 +  }
  69.300 +
  69.301 +  DEBUG_ONLY(ResourceObj::allocation_type storage_type() { return T; })
  69.302 + protected:
  69.303 +  // Create new linked list node object in specified storage
  69.304 +  LinkedListNode<E>* new_node(const E& e) const {
  69.305 +     switch(T) {
  69.306 +       case ResourceObj::ARENA: {
  69.307 +         assert(_arena != NULL, "Arena not set");
  69.308 +         return new(_arena) LinkedListNode<E>(e);
  69.309 +       }
  69.310 +       case ResourceObj::RESOURCE_AREA:
  69.311 +       case ResourceObj::C_HEAP: {
  69.312 +         if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
  69.313 +           return new(std::nothrow, T, F) LinkedListNode<E>(e);
  69.314 +         } else {
  69.315 +           return new(T, F) LinkedListNode<E>(e);
  69.316 +         }
  69.317 +       }
  69.318 +       default:
  69.319 +         ShouldNotReachHere();
  69.320 +     }
  69.321 +     return NULL;
  69.322 +  }
  69.323 +
  69.324 +  // Delete linked list node object
  69.325 +  void delete_node(LinkedListNode<E>* node) {
  69.326 +    if (T == ResourceObj::C_HEAP) {
  69.327 +      delete node;
  69.328 +    }
  69.329 +  }
  69.330 +};
  69.331 +
  69.332 +// Sorted linked list. The linked list maintains sorting order specified by the comparison
  69.333 +// function
  69.334 +template <class E, int (*FUNC)(const E&, const E&),
  69.335 +  ResourceObj::allocation_type T = ResourceObj::C_HEAP,
  69.336 +  MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
  69.337 +  class SortedLinkedList : public LinkedListImpl<E, T, F, alloc_failmode> {
  69.338 + public:
  69.339 +  SortedLinkedList() { }
  69.340 +  SortedLinkedList(Arena* a) : LinkedListImpl<E, T, F, alloc_failmode>(a) { }
  69.341 +
  69.342 +  virtual LinkedListNode<E>* add(const E& e) {
  69.343 +    return LinkedListImpl<E, T, F, alloc_failmode>::add(e);
  69.344 +  }
  69.345 +
  69.346 +  virtual void move(LinkedList<E>* list) {
  69.347 +    assert(list->storage_type() == this->storage_type(), "Different storage type");
  69.348 +    LinkedListNode<E>* node;
  69.349 +    while ((node = list->unlink_head()) != NULL) {
  69.350 +      this->add(node);
  69.351 +    }
  69.352 +    assert(list->is_empty(), "All entries are moved");
  69.353 +  }
  69.354 +
  69.355 +  virtual void add(LinkedListNode<E>* node) {
  69.356 +    assert(node != NULL, "NULL pointer");
  69.357 +    LinkedListNode<E>* tmp = this->head();
  69.358 +    LinkedListNode<E>* prev = NULL;
  69.359 +
  69.360 +    int cmp_val;
  69.361 +    while (tmp != NULL) {
  69.362 +      cmp_val = FUNC(*tmp->peek(), *node->peek());
  69.363 +      if (cmp_val >= 0) {
  69.364 +        break;
  69.365 +      }
  69.366 +      prev = tmp;
  69.367 +      tmp = tmp->next();
  69.368 +    }
  69.369 +
  69.370 +    if (prev != NULL) {
  69.371 +      node->set_next(prev->next());
  69.372 +      prev->set_next(node);
  69.373 +    } else {
  69.374 +      node->set_next(this->head());
  69.375 +      this->set_head(node);
  69.376 +    }
  69.377 +  }
  69.378 +
  69.379 +  virtual bool add(const LinkedList<E>* list) {
  69.380 +    return LinkedListImpl<E, T, F, alloc_failmode>::add(list);
  69.381 +  }
  69.382 +
  69.383 +  virtual LinkedListNode<E>* find_node(const E& e) {
  69.384 +    LinkedListNode<E>* p = this->head();
  69.385 +
  69.386 +    while (p != NULL) {
  69.387 +      int comp_val = FUNC(*p->peek(), e);
  69.388 +      if (comp_val == 0) {
  69.389 +        return p;
  69.390 +      } else if (comp_val > 0) {
  69.391 +        return NULL;
  69.392 +      }
  69.393 +      p = p->next();
  69.394 +    }
  69.395 +    return NULL;
  69.396 +  }
  69.397 +};
  69.398 +
  69.399 +// Iterates all entries in the list
  69.400 +template <class E> class LinkedListIterator : public StackObj {
  69.401 + private:
  69.402 +  LinkedListNode<E>* _p;
  69.403 +  bool               _is_empty;
  69.404 + public:
  69.405 +  LinkedListIterator(LinkedListNode<E>* head) : _p(head) {
  69.406 +    _is_empty = (head == NULL);
  69.407 +  }
  69.408 +
  69.409 +  bool is_empty() const { return _is_empty; }
  69.410 +
  69.411 +  const E* next() {
  69.412 +    if (_p == NULL) return NULL;
  69.413 +    const E* e = _p->peek();
  69.414 +    _p = _p->next();
  69.415 +    return e;
  69.416 +  }
  69.417 +};
  69.418 +
  69.419 +#endif
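A short usage sketch, not part of this changeset, for the sorted list and iterator defined
above. The Range type and compare_range function are illustrative; they only satisfy the
equals() method used by LinkedListImpl and the comparison function expected by SortedLinkedList.

  struct Range {
    int _lo, _hi;
    Range(int lo, int hi) : _lo(lo), _hi(hi) { }
    bool equals(const Range& other) const {
      return _lo == other._lo && _hi == other._hi;   // used by find_node()/remove()
    }
  };

  static int compare_range(const Range& r1, const Range& r2) {
    return r1._lo - r2._lo;
  }

  static int sorted_list_example() {
    // Defaults: C-heap backed, mtNMT, returns NULL instead of aborting on allocation failure.
    SortedLinkedList<Range, compare_range> list;
    list.add(Range(10, 20));
    list.add(Range(0, 5));                 // inserted before (10, 20) by compare_range

    int total = 0;
    LinkedListIterator<Range> itr(list.head());
    for (const Range* r = itr.next(); r != NULL; r = itr.next()) {
      total += r->_hi - r->_lo;            // entries are visited in ascending order of _lo
    }
    return total;
  }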
    70.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    70.2 +++ b/src/share/vm/utilities/nativeCallStack.cpp	Wed Aug 27 08:19:12 2014 -0400
    70.3 @@ -0,0 +1,118 @@
    70.4 +/*
    70.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    70.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    70.7 + *
    70.8 + * This code is free software; you can redistribute it and/or modify it
    70.9 + * under the terms of the GNU General Public License version 2 only, as
   70.10 + * published by the Free Software Foundation.
   70.11 + *
   70.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   70.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   70.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   70.15 + * version 2 for more details (a copy is included in the LICENSE file that
   70.16 + * accompanied this code).
   70.17 + *
   70.18 + * You should have received a copy of the GNU General Public License version
   70.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   70.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   70.21 + *
   70.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   70.23 + * or visit www.oracle.com if you need additional information or have any
   70.24 + * questions.
   70.25 + *
   70.26 + */
   70.27 +
   70.28 +#include "precompiled.hpp"
   70.29 +#include "runtime/os.hpp"
   70.30 +#include "utilities/globalDefinitions.hpp"
   70.31 +#include "utilities/nativeCallStack.hpp"
   70.32 +
   70.33 +
   70.34 +NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
   70.35 +  _hash_value(0) {
   70.36 +
   70.37 +#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
   70.38 +  fillStack = false;
   70.39 +#endif
   70.40 +
   70.41 +  if (fillStack) {
   70.42 +    os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip);
   70.43 +  } else {
   70.44 +    for (int index = 0; index < NMT_TrackingStackDepth; index ++) {
   70.45 +      _stack[index] = NULL;
   70.46 +    }
   70.47 +  }
   70.48 +}
   70.49 +
    70.50 +NativeCallStack::NativeCallStack(address* pc, int frameCount) : _hash_value(0) {
   70.51 +  int frameToCopy = (frameCount < NMT_TrackingStackDepth) ?
   70.52 +    frameCount : NMT_TrackingStackDepth;
   70.53 +  int index;
   70.54 +  for (index = 0; index < frameToCopy; index ++) {
   70.55 +    _stack[index] = pc[index];
   70.56 +  }
   70.57 +  for (; index < NMT_TrackingStackDepth; index ++) {
   70.58 +    _stack[index] = NULL;
   70.59 +  }
   70.60 +}
   70.61 +
   70.62 +// number of stack frames captured
   70.63 +int NativeCallStack::frames() const {
   70.64 +  int index;
   70.65 +  for (index = 0; index < NMT_TrackingStackDepth; index ++) {
   70.66 +    if (_stack[index] == NULL) {
   70.67 +      break;
   70.68 +    }
   70.69 +  }
   70.70 +  return index;
   70.71 +}
   70.72 +
   70.73 +// Hash code. Any better algorithm?
   70.74 +int NativeCallStack::hash() const {
   70.75 +  long hash_val = _hash_value;
   70.76 +  if (hash_val == 0) {
   70.77 +    long pc;
   70.78 +    int  index;
   70.79 +    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
   70.80 +      pc = (long)_stack[index];
   70.81 +      if (pc == 0) break;
   70.82 +      hash_val += pc;
   70.83 +    }
   70.84 +
   70.85 +    NativeCallStack* p = const_cast<NativeCallStack*>(this);
   70.86 +    p->_hash_value = (int)(hash_val & 0xFFFFFFFF);
   70.87 +  }
   70.88 +  return _hash_value;
   70.89 +}
   70.90 +
   70.91 +void NativeCallStack::print_on(outputStream* out) const {
   70.92 +  print_on(out, 0);
   70.93 +}
   70.94 +
   70.95 +// Decode and print this call path
   70.96 +void NativeCallStack::print_on(outputStream* out, int indent) const {
   70.97 +  address pc;
   70.98 +  char    buf[1024];
   70.99 +  int     offset;
  70.100 +  if (is_empty()) {
  70.101 +    for (int index = 0; index < indent; index ++) out->print(" ");
  70.102 +#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
  70.103 +    out->print("[BOOTSTRAP]");
  70.104 +#else
  70.105 +    out->print("[No stack]");
  70.106 +#endif
  70.107 +  } else {
  70.108 +    for (int frame = 0; frame < NMT_TrackingStackDepth; frame ++) {
  70.109 +      pc = get_frame(frame);
  70.110 +      if (pc == NULL) break;
  70.111 +      // Print indent
  70.112 +      for (int index = 0; index < indent; index ++) out->print(" ");
  70.113 +      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
  70.114 +        out->print_cr("[" PTR_FORMAT "] %s+0x%x", p2i(pc), buf, offset);
  70.115 +      } else {
  70.116 +        out->print_cr("[" PTR_FORMAT "]", p2i(pc));
  70.117 +      }
  70.118 +    }
  70.119 +  }
  70.120 +}
  70.121 +
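The second constructor copies at most NMT_TrackingStackDepth caller-supplied PCs and NULL-fills the remaining slots, which is what allows a test to hand NMT a pseudo call stack; hash() lazily caches the sum of the non-NULL frame addresses truncated to 32 bits. A small sketch, not part of the changeset, with fabricated frame addresses and a hypothetical function name:

#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"

void pseudo_stack_sketch() {
  // Two fabricated PCs stand in for real return addresses.
  address frames[2] = { (address)0x1000, (address)0x2000 };
  NativeCallStack stack(frames, 2);   // remaining slots are NULL-filled

  // frames() stops at the first NULL slot, so it reports 2 here;
  // hash() sums the non-NULL frame PCs and caches the truncated result.
  tty->print_cr("frames = %d, hash = 0x%x", stack.frames(), stack.hash());

  // Symbols cannot be resolved for fabricated PCs, so print_on() emits
  // only the raw address for each frame.
  stack.print_on(tty);
}
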
    71.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    71.2 +++ b/src/share/vm/utilities/nativeCallStack.hpp	Wed Aug 27 08:19:12 2014 -0400
    71.3 @@ -0,0 +1,95 @@
    71.4 +/*
    71.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    71.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.7 + *
    71.8 + * This code is free software; you can redistribute it and/or modify it
    71.9 + * under the terms of the GNU General Public License version 2 only, as
   71.10 + * published by the Free Software Foundation.
   71.11 + *
   71.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   71.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   71.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   71.15 + * version 2 for more details (a copy is included in the LICENSE file that
   71.16 + * accompanied this code).
   71.17 + *
   71.18 + * You should have received a copy of the GNU General Public License version
   71.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   71.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   71.21 + *
   71.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   71.23 + * or visit www.oracle.com if you need additional information or have any
   71.24 + * questions.
   71.25 + *
   71.26 + */
   71.27 +
   71.28 +#ifndef SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
   71.29 +#define SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
   71.30 +
   71.31 +#include "memory/allocation.hpp"
   71.32 +#include "services/nmtCommon.hpp"
   71.33 +#include "utilities/ostream.hpp"
   71.34 +
   71.35 +/*
    71.36 + * This class represents a native call path (it does not include Java frames).
    71.37 + *
    71.38 + * This class was developed in the context of native memory tracking; it can
    71.39 + * also be a useful tool for debugging purposes.
    71.40 + *
    71.41 + * For example, the following code prints out the current native call path:
   71.42 + *
   71.43 + *   ....
    71.44 + *   NativeCallStack here(0, true);
   71.45 + *   here.print_on(tty);
   71.46 + *   ....
   71.47 + *
    71.48 + * However, there are a couple of restrictions on this class. If the restrictions
    71.49 + * are not strictly followed, native memory tracking may break badly.
    71.50 + *
    71.51 + * 1. The number of stack frames to capture is defined by native memory tracking.
    71.52 + *    This number affects how much memory is used by native
    71.53 + *    memory tracking.
    71.54 + * 2. The class is strictly a stack object; no heap or virtual memory can be
    71.55 + *    allocated for it.
   71.56 + */
   71.57 +class NativeCallStack : public StackObj {
   71.58 + private:
   71.59 +  address   _stack[NMT_TrackingStackDepth];
   71.60 +  int       _hash_value;
   71.61 +
   71.62 + public:
   71.63 +  NativeCallStack(int toSkip = 0, bool fillStack = false);
   71.64 +  NativeCallStack(address* pc, int frameCount);
   71.65 +
   71.66 +
    71.67 +  // whether this is an empty stack
   71.68 +  inline bool is_empty() const {
   71.69 +    return _stack[0] == NULL;
   71.70 +  }
   71.71 +
   71.72 +  // number of stack frames captured
   71.73 +  int frames() const;
   71.74 +
   71.75 +  inline int compare(const NativeCallStack& other) const {
   71.76 +    return memcmp(_stack, other._stack, sizeof(_stack));
   71.77 +  }
   71.78 +
   71.79 +  inline bool equals(const NativeCallStack& other) const {
   71.80 +    // compare hash values
   71.81 +    if (hash() != other.hash()) return false;
   71.82 +    // compare each frame
   71.83 +    return compare(other) == 0;
   71.84 +  }
   71.85 +
   71.86 +  inline address get_frame(int index) const {
   71.87 +    assert(index >= 0 && index < NMT_TrackingStackDepth, "Index out of bound");
   71.88 +    return _stack[index];
   71.89 +  }
   71.90 +
   71.91 +  // Hash code. Any better algorithm?
   71.92 +  int hash() const;
   71.93 +
   71.94 +  void print_on(outputStream* out) const;
   71.95 +  void print_on(outputStream* out, int indent) const;
   71.96 +};
   71.97 +
   71.98 +#endif
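Per the restrictions in the header comment, a NativeCallStack is only ever a stack-allocated value whose size is fixed by NMT_TrackingStackDepth. A hedged sketch of capturing and comparing call paths, not part of the changeset; record_call_site is a hypothetical caller and the skip count is illustrative:

#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"

void record_call_site() {
  // Capture the current native call path, skipping one frame near the
  // capture point. The second argument must be true, otherwise the frame
  // array stays empty (and it is forced to false on platforms without
  // native stack walking support).
  NativeCallStack here(1, true);

  NativeCallStack empty;         // default constructor leaves all frames NULL

  if (!here.equals(empty)) {
    // equals() first compares the cached hash values, then memcmp()s the
    // fixed-size frame arrays via compare().
    here.print_on(tty, 2);       // print the frames with a two-space indent
  }
}
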
    72.1 --- a/src/share/vm/utilities/vmError.cpp	Wed Aug 27 09:36:55 2014 +0200
    72.2 +++ b/src/share/vm/utilities/vmError.cpp	Wed Aug 27 08:19:12 2014 -0400
    72.3 @@ -772,6 +772,11 @@
    72.4         st->cr();
    72.5       }
    72.6  
    72.7 +  STEP(228, "(Native Memory Tracking)" )
    72.8 +     if (_verbose) {
    72.9 +       MemTracker::final_report(st);
   72.10 +     }
   72.11 +
   72.12    STEP(230, "" )
   72.13  
   72.14       if (_verbose) {
   72.15 @@ -895,9 +900,6 @@
   72.16    static bool log_done = false;         // done saving error log
   72.17    static bool transmit_report_done = false; // done error reporting
   72.18  
   72.19 -  // disble NMT to avoid further exception
   72.20 -  MemTracker::shutdown(MemTracker::NMT_error_reporting);
   72.21 -
   72.22    if (SuppressFatalErrorMessage) {
   72.23        os::abort();
   72.24    }
    73.1 --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Aug 27 09:36:55 2014 +0200
    73.2 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Aug 27 08:19:12 2014 -0400
    73.3 @@ -93,7 +93,8 @@
    73.4    public native void NMTCommitMemory(long addr, long size);
    73.5    public native void NMTUncommitMemory(long addr, long size);
    73.6    public native void NMTReleaseMemory(long addr, long size);
    73.7 -  public native boolean NMTWaitForDataMerge();
    73.8 +  public native void NMTOverflowHashBucket(long num);
    73.9 +  public native long NMTMallocWithPseudoStack(long size, int index);
   73.10    public native boolean NMTIsDetailSupported();
   73.11  
   73.12    // Compiler
