src/share/vm/services/mallocSiteTable.hpp

changeset 7074:833b0f92429a
child     7078:c6211b707068
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/services/mallocSiteTable.hpp	Wed Aug 27 08:19:12 2014 -0400
     1.3 @@ -0,0 +1,268 @@
     1.4 +/*
     1.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
    1.29 +#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
    1.30 +
    1.31 +#if INCLUDE_NMT
    1.32 +
    1.33 +#include "memory/allocation.hpp"
    1.34 +#include "runtime/atomic.hpp"
    1.35 +#include "services/allocationSite.hpp"
    1.36 +#include "services/mallocTracker.hpp"
    1.37 +#include "services/nmtCommon.hpp"
    1.38 +
    1.39 +// MallocSite represents a code path that eventually calls
    1.40 +// os::malloc() to allocate memory
    1.41 +class MallocSite : public AllocationSite<MemoryCounter> {
    1.42 + public:
    1.43 +  MallocSite() :
    1.44 +    AllocationSite<MemoryCounter>(emptyStack) { }
    1.45 +
    1.46 +  MallocSite(const NativeCallStack& stack) :
    1.47 +    AllocationSite<MemoryCounter>(stack) { }
    1.48 +
    1.49 +  void allocate(size_t size)      { data()->allocate(size);   }
    1.50 +  void deallocate(size_t size)    { data()->deallocate(size); }
    1.51 +
    1.52 +  // Memory allocated from this code path
    1.53 +  size_t size()  const { return peek()->size(); }
     1.54 +  // The number of calls made from this code path
    1.55 +  size_t count() const { return peek()->count(); }
    1.56 +};
    1.57 +
    1.58 +// Malloc site hashtable entry
    1.59 +class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
    1.60 + private:
    1.61 +  MallocSite                _malloc_site;
    1.62 +  MallocSiteHashtableEntry* _next;
    1.63 +
    1.64 + public:
    1.65 +  MallocSiteHashtableEntry() : _next(NULL) { }
    1.66 +
    1.67 +  MallocSiteHashtableEntry(NativeCallStack stack):
    1.68 +    _malloc_site(stack), _next(NULL) { }
    1.69 +
    1.70 +  inline const MallocSiteHashtableEntry* next() const {
    1.71 +    return _next;
    1.72 +  }
    1.73 +
    1.74 +  // Insert an entry atomically.
    1.75 +  // Return true if the entry is inserted successfully.
     1.76 +  // The operation can fail due to contention from another thread; the lookup sketch after this class shows how a caller retries.
    1.77 +  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    1.78 +    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
    1.79 +      NULL) == NULL);
    1.80 +  }
    1.81 +
    1.82 +  void set_callsite(const MallocSite& site) {
    1.83 +    _malloc_site = site;
    1.84 +  }
    1.85 +
    1.86 +  inline const MallocSite* peek() const { return &_malloc_site; }
    1.87 +  inline MallocSite* data()             { return &_malloc_site; }
    1.88 +
    1.89 +  inline long hash() const { return _malloc_site.hash(); }
    1.90 +  inline bool equals(const NativeCallStack& stack) const {
    1.91 +    return _malloc_site.equals(stack);
    1.92 +  }
    1.93 +  // Allocation/deallocation on this allocation site
    1.94 +  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
    1.95 +  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
    1.96 +  // Memory counters
    1.97 +  inline size_t size() const  { return _malloc_site.size();  }
    1.98 +  inline size_t count() const { return _malloc_site.count(); }
    1.99 +};
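The chain-append race that atomic_insert() guards against is easiest to see from a caller's perspective. The following is an illustrative sketch only, not code from this changeset (the real lookup_or_add() lives in mallocSiteTable.cpp and also handles the empty-bucket and table-overflow cases, which are omitted here): walk the bucket chain looking for a matching call stack; if none is found, try to append a new entry, and if another thread wins the race, discard the speculative entry and continue from the new tail.

MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key,
                                           size_t* bucket_idx, size_t* pos_idx) {
  int index = hash_to_index(key.hash());
  *bucket_idx = (size_t)index;
  *pos_idx = 0;
  MallocSiteHashtableEntry* head = _table[index];    // empty-bucket case omitted
  while (head != NULL) {
    if (head->equals(key)) {
      return head->data();                  // existing site for this call stack
    }
    if (head->next() == NULL) {
      MallocSiteHashtableEntry* entry = new_entry(key);
      if (entry == NULL) return NULL;       // out of memory
      if (head->atomic_insert(entry)) {
        (*pos_idx)++;
        return entry->data();               // we won the race to append
      }
      delete entry;                         // another thread appended first; retry
    }
    head = (MallocSiteHashtableEntry*)head->next();
    (*pos_idx)++;
  }
  return NULL;
}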
   1.100 +
    1.101 +// The walker walks every entry in the MallocSiteTable
   1.102 +class MallocSiteWalker : public StackObj {
   1.103 + public:
   1.104 +   virtual bool do_malloc_site(const MallocSite* e) { return false; }
   1.105 +};
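As an example of the walker interface, a hypothetical walker (not part of this changeset) that totals the outstanding malloc'd bytes over all recorded sites could look like this; walk_malloc_site() is assumed to invoke do_malloc_site() once per entry and to stop early when it returns false.

class MallocSiteSizeWalker : public MallocSiteWalker {
 private:
  size_t _total;
 public:
  MallocSiteSizeWalker() : _total(0) { }
  virtual bool do_malloc_site(const MallocSite* e) {
    _total += e->size();   // bytes currently allocated from this call path
    return true;           // keep walking
  }
  size_t total() const { return _total; }
};

// Usage:
//   MallocSiteSizeWalker walker;
//   MallocSiteTable::walk_malloc_site(&walker);
//   size_t outstanding = walker.total();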
   1.106 +
   1.107 +/*
   1.108 + * Native memory tracking call site table.
   1.109 + * The table is only needed when detail tracking is enabled.
   1.110 + */
   1.111 +class MallocSiteTable : AllStatic {
   1.112 + private:
    1.113 +  // The number of hash buckets in this hashtable. The number should
    1.114 +  // be tuned if malloc activities change significantly.
    1.115 +  // The statistics can be obtained via jcmd:
    1.116 +  //   jcmd <pid> VM.native_memory statistics.
    1.117 +
    1.118 +  // Currently, the (number of buckets / number of entries) ratio is
    1.119 +  // about 1 / 6.
   1.120 +  enum {
    1.121 +    table_base_size = 128,   // The base size is calculated from statistics to give
    1.122 +                             // a table ratio of around 1:6
   1.123 +    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
   1.124 +  };
   1.125 +
   1.126 +
    1.127 +  // This is a very special lock that allows multiple shared accesses (sharedLock), but
    1.128 +  // once exclusive access (exclusiveLock) is requested, all further shared accesses are
    1.129 +  // rejected forever.
   1.130 +  class AccessLock : public StackObj {
   1.131 +    enum LockState {
   1.132 +      NoLock,
   1.133 +      SharedLock,
   1.134 +      ExclusiveLock
   1.135 +    };
   1.136 +
   1.137 +   private:
    1.138 +    // A very large negative number. The only way to "overflow"
    1.139 +    // this number is to have more than -min_jint threads in
    1.140 +    // this process, which is not going to happen in the foreseeable future.
   1.141 +    const static int _MAGIC_ = min_jint;
   1.142 +
   1.143 +    LockState      _lock_state;
   1.144 +    volatile int*  _lock;
   1.145 +   public:
   1.146 +    AccessLock(volatile int* lock) :
   1.147 +      _lock(lock), _lock_state(NoLock) {
   1.148 +    }
   1.149 +
   1.150 +    ~AccessLock() {
   1.151 +      if (_lock_state == SharedLock) {
   1.152 +        Atomic::dec((volatile jint*)_lock);
   1.153 +      }
   1.154 +    }
   1.155 +    // Acquire shared lock.
   1.156 +    // Return true if shared access is granted.
   1.157 +    inline bool sharedLock() {
   1.158 +      jint res = Atomic::add(1, _lock);
   1.159 +      if (res < 0) {
   1.160 +        Atomic::add(-1, _lock);
   1.161 +        return false;
   1.162 +      }
   1.163 +      _lock_state = SharedLock;
   1.164 +      return true;
   1.165 +    }
   1.166 +    // Acquire exclusive lock
   1.167 +    void exclusiveLock();
   1.168 + };
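exclusiveLock() itself is defined in mallocSiteTable.cpp. A minimal sketch of how the contract above can be met, and only a sketch, is to drive the counter to the large negative _MAGIC_ value so that every later sharedLock() sees a negative result, then wait for the existing shared holders to drain; os::naked_yield() is assumed to be available via runtime/os.hpp.

void MallocSiteTable::AccessLock::exclusiveLock() {
  jint target;
  jint val;
  // Make the counter a very large negative value so that every subsequent
  // sharedLock() observes a negative count and is rejected.
  do {
    val = *_lock;
    target = _MAGIC_ + val;
  } while (Atomic::cmpxchg(target, (volatile jint*)_lock, val) != val);
  // Wait until all shared holders have released (counter is back to _MAGIC_).
  while (*_lock != _MAGIC_) {
    os::naked_yield();
  }
  _lock_state = ExclusiveLock;
}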
   1.169 +
   1.170 + public:
   1.171 +  static bool initialize();
   1.172 +  static void shutdown();
   1.173 +
   1.174 +  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
   1.175 +
   1.176 +  // Number of hash buckets
   1.177 +  static inline int hash_buckets()      { return (int)table_size; }
   1.178 +
    1.179 +  // Access and copy a call stack from this table. A shared lock should be
    1.180 +  // acquired before accessing the entry.
   1.181 +  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
   1.182 +    size_t pos_idx) {
   1.183 +    AccessLock locker(&_access_count);
   1.184 +    if (locker.sharedLock()) {
   1.185 +      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
   1.186 +      MallocSite* site = malloc_site(bucket_idx, pos_idx);
   1.187 +      if (site != NULL) {
   1.188 +        stack = *site->call_stack();
   1.189 +        return true;
   1.190 +      }
   1.191 +    }
   1.192 +    return false;
   1.193 +  }
   1.194 +
    1.195 +  // Record a new allocation from the specified call path.
    1.196 +  // Returns true if the allocation is recorded successfully; bucket_idx
    1.197 +  // and pos_idx are updated to indicate the entry where the allocation
    1.198 +  // information was recorded.
    1.199 +  // Returns false only under rare scenarios:
    1.200 +  //  1. out of memory
    1.201 +  //  2. hash bucket overflow
   1.202 +  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
   1.203 +    size_t* bucket_idx, size_t* pos_idx) {
   1.204 +    AccessLock locker(&_access_count);
   1.205 +    if (locker.sharedLock()) {
   1.206 +      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
   1.207 +      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
   1.208 +      if (site != NULL) site->allocate(size);
   1.209 +      return site != NULL;
   1.210 +    }
   1.211 +    return false;
   1.212 +  }
   1.213 +
   1.214 +  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
   1.215 +  // information was recorded.
   1.216 +  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
   1.217 +    AccessLock locker(&_access_count);
   1.218 +    if (locker.sharedLock()) {
   1.219 +      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
   1.220 +      MallocSite* site = malloc_site(bucket_idx, pos_idx);
   1.221 +      if (site != NULL) {
   1.222 +        site->deallocate(size);
   1.223 +        return true;
   1.224 +      }
   1.225 +    }
   1.226 +    return false;
   1.227 +  }
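For context, a simplified and purely hypothetical caller (the helper names below are not from this changeset; the real bookkeeping lives in the malloc tracker) would pair the two calls like this, keeping the indices returned by allocation_at() so the matching free can find the same entry:

static bool record_malloc_site(const NativeCallStack& stack, size_t size,
                               size_t* bucket_idx, size_t* pos_idx) {
  // On success, *bucket_idx / *pos_idx identify the table entry that now
  // carries this allocation; they must be remembered for the matching free.
  return MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
}

static void record_free_site(size_t size, size_t bucket_idx, size_t pos_idx) {
  // deallocation_at() returns false if shared access is rejected (e.g. the
  // table has been exclusively locked) or the entry cannot be found.
  MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);
}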
   1.228 +
   1.229 +  // Walk this table.
   1.230 +  static bool walk_malloc_site(MallocSiteWalker* walker);
   1.231 +
   1.232 + private:
   1.233 +  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
   1.234 +  static void reset();
   1.235 +
   1.236 +  // Delete a bucket linked list
   1.237 +  static void delete_linked_list(MallocSiteHashtableEntry* head);
   1.238 +
   1.239 +  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
   1.240 +  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
   1.241 +  static bool walk(MallocSiteWalker* walker);
   1.242 +
   1.243 +  static inline int hash_to_index(int  hash) {
   1.244 +    hash = (hash > 0) ? hash : (-hash);
   1.245 +    return (hash % table_size);
   1.246 +  }
   1.247 +
   1.248 +  static inline const NativeCallStack* hash_entry_allocation_stack() {
   1.249 +    return (NativeCallStack*)_hash_entry_allocation_stack;
   1.250 +  }
   1.251 +
   1.252 + private:
    1.253 +  // Counter for concurrent accesses
   1.254 +  static volatile int                _access_count;
   1.255 +
   1.256 +  // The callsite hashtable. It has to be a static table,
    1.257 +  // since malloc calls can come from the C runtime linker.
   1.258 +  static MallocSiteHashtableEntry*   _table[table_size];
   1.259 +
   1.260 +
    1.261 +  // Statically reserved memory for placing the objects below
   1.262 +
   1.263 +  // The memory for hashtable entry allocation stack object
   1.264 +  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
   1.265 +  // The memory for hashtable entry allocation callsite object
   1.266 +  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
   1.267 +  NOT_PRODUCT(static int     _peak_count;)
   1.268 +};
   1.269 +
   1.270 +#endif // INCLUDE_NMT
   1.271 +#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
