src/share/vm/services/mallocSiteTable.hpp

Wed, 03 Jul 2019 20:42:37 +0800

author
aoqi
date
Wed, 03 Jul 2019 20:42:37 +0800
changeset 9637
eef07cd490d4
parent 9485
7a6239517d46
child 9778
bf6ea7319424
permissions
-rw-r--r--

Merge

/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 private:
  MEMFLAGS _flags;

 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(NativeCallStack::empty_stack()), _flags(mtNone) {}

  MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
    AllocationSite<MemoryCounter>(stack), _flags(flags) {}

  void allocate(size_t size)      { data()->allocate(size);   }
  void deallocate(size_t size)    { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size()  const { return peek()->size(); }
  // The number of calls that were made from this code path
  size_t count() const { return peek()->count(); }
  MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
};
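
// A minimal usage sketch (hypothetical caller code, not part of this header;
// it assumes MemoryCounter tracks live allocations, per mallocTracker.hpp):
//
//   NativeCallStack stack;            // the call path being tracked
//   MallocSite site(stack, mtNMT);
//   site.allocate(64);                // size() == 64, count() == 1
//   site.allocate(32);                // size() == 96, count() == 2
//   site.deallocate(64);              // size() == 32, count() == 1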

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                _malloc_site;
  MallocSiteHashtableEntry* _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags):
    _malloc_site(stack, flags), _next(NULL) {
    assert(flags != mtNone, "Expect a real memory type");
  }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Return true if the entry is inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
      NULL) == NULL);
  }

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size() const  { return _malloc_site.size();  }
  inline size_t count() const { return _malloc_site.count(); }
};
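
// Illustrative bucket-chain insertion (a sketch only; the real lookup-and-insert
// logic lives in MallocSiteTable::lookup_or_add in mallocSiteTable.cpp).
// atomic_insert() only succeeds when _next is still NULL, i.e. at the current
// tail of the chain; a CAS failure means another thread appended first, so the
// caller advances to the new tail and retries:
//
//   MallocSiteHashtableEntry* e = head;               // bucket head (hypothetical)
//   MallocSiteHashtableEntry* entry =
//       new MallocSiteHashtableEntry(stack, mtNMT);   // entry to append
//   while (!e->atomic_insert(entry)) {
//     e = (MallocSiteHashtableEntry*)e->next();       // chase the new tail
//   }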

// The walker walks every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
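
// A minimal concrete walker (hypothetical example). Returning false from
// do_malloc_site() is expected to abort the walk, per the table's walk()
// implementation in the .cpp file:
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();
//       return true;              // keep walking
//     }
//     size_t total() const { return _total; }
//   };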

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics data can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 : 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio of around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
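
  // For example, assuming the default NMT_TrackingStackDepth of 4 (see
  // nmtCommon.hpp), table_size = 128 * 4 - 1 = 511 buckets.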

  // This is a very special lock that allows multiple shared accesses (sharedLock),
  // but once exclusive access (exclusiveLock) is requested, all shared accesses
  // are rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow"
    // this number is to have more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState      _lock_state;
    volatile int*  _lock;

   public:
    AccessLock(volatile int* lock) :
      _lock(lock), _lock_state(NoLock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
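
  // Illustrative lock semantics (a sketch; exclusiveLock() is implemented in
  // mallocSiteTable.cpp). The lock word counts active readers; exclusive access
  // drives it negative, so every later sharedLock() sees a negative result,
  // backs out its increment, and fails permanently:
  //
  //   volatile int lock = 0;
  //   AccessLock reader(&lock);
  //   reader.sharedLock();        // lock: 0 -> 1, granted (result >= 0)
  //                               // reader's destructor decrements lock to 0
  //
  //   AccessLock writer(&lock);
  //   writer.exclusiveLock();     // lock goes negative; from now on, every
  //                               // sharedLock() on this word returns false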

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets()      { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock must be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Return true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Return false only under rare scenarios:
  //  1. out of memory
  //  2. hash bucket overflow
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record memory deallocation. bucket_idx and pos_idx indicate where the
  // allocation information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
  static void reset();

  // Delete a bucket linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline unsigned int hash_to_index(unsigned int hash) {
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter for counting concurrent access
  static volatile int                _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry*   _table[table_size];

  // Reserve enough memory for placing the objects below

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
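
  // Note (a sketch of intent; the actual construction happens in
  // MallocSiteTable::initialize() in mallocSiteTable.cpp): the size_t arrays
  // above are raw, statically reserved storage, so the two objects can be
  // built with placement new instead of malloc() -- which would recurse into
  // the tracker itself -- roughly like:
  //
  //   NativeCallStack* stack =
  //       ::new ((void*)_hash_entry_allocation_stack) NativeCallStack();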

  NOT_PRODUCT(static int     _peak_count;)
};
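
// Illustrative end-to-end flow (hypothetical caller code; in the real VM,
// MallocTracker drives these calls when NMT detail tracking is enabled and
// remembers (bucket_idx, pos_idx) in the block's malloc header so the matching
// free() can credit the same site):
//
//   size_t bucket_idx, pos_idx;
//   NativeCallStack stack(0, true);  // capture the current call stack (assumed ctor)
//   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx, mtNMT)) {
//     // ... later, when the same block is freed:
//     MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);
//   }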

#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
