src/share/vm/services/memTracker.hpp

author:      dsamersoff
date:        Sat, 19 Oct 2013 21:29:57 +0400
changeset:   5968:996d1f2f056f
parent:      5578:4c84d351cca9
child:       6876:710a3c8b516e
child:       7074:833b0f92429a
permissions: -rw-r--r--

8026930: In ManagementAgent.start it should be possible to set the jdp.name parameter (hotspot part)
Summary: Pass one more property from Agent to JdpController
Reviewed-by: jbachorik, sla
     1 /*
     2  * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
    26 #define SHARE_VM_SERVICES_MEM_TRACKER_HPP
    28 #include "utilities/macros.hpp"
    30 #if !INCLUDE_NMT
    32 #include "utilities/ostream.hpp"
    34 class BaselineOutputer : public StackObj {
    36 };
    38 class BaselineTTYOutputer : public BaselineOutputer {
    39   public:
    40     BaselineTTYOutputer(outputStream* st) { }
    41 };
    43 class MemTracker : AllStatic {
    44   public:
    45    enum ShutdownReason {
    46       NMT_shutdown_none,     // no shutdown requested
    47       NMT_shutdown_user,     // user requested shutdown
    48       NMT_normal,            // normal shutdown, process exit
    49       NMT_out_of_memory,     // shutdown due to out of memory
    50       NMT_initialization,    // shutdown due to initialization failure
    51       NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
    52       NMT_error_reporting,   // shutdown by vmError::report_and_die()
    53       NMT_out_of_generation, // running out of generation queue
    54       NMT_sequence_overflow  // overflow the sequence number
    55    };
    57   class Tracker {
    58    public:
    59     void discard() { }
    61     void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
    62     void record(address old_addr, address new_addr, size_t size,
    63       MEMFLAGS flags, address pc = NULL) { }
    64   };
    66   private:
    67    static Tracker  _tkr;
    70   public:
    71    static inline void init_tracking_options(const char* option_line) { }
    72    static inline bool is_on()   { return false; }
    73    static const char* reason()  { return "Native memory tracking is not implemented"; }
    74    static inline bool can_walk_stack() { return false; }
    76    static inline void bootstrap_single_thread() { }
    77    static inline void bootstrap_multi_thread() { }
    78    static inline void start() { }
    80    static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
    81         address pc = 0, Thread* thread = NULL) { }
    82    static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
    83    static inline void record_arena_size(address addr, size_t size) { }
    84    static inline void record_virtual_memory_reserve(address addr, size_t size,
    85         MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
    86    static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
    87         MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
    88    static inline void record_virtual_memory_commit(address addr, size_t size,
    89         address pc = 0, Thread* thread = NULL) { }
    90    static inline void record_virtual_memory_release(address addr, size_t size,
    91         Thread* thread = NULL) { }
    92    static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
    93         Thread* thread = NULL) { }
    94    static inline Tracker get_realloc_tracker() { return _tkr; }
    95    static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
    96    static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
    97    static inline bool baseline() { return false; }
    98    static inline bool has_baseline() { return false; }
   100    static inline void set_autoShutdown(bool value) { }
   101    static void shutdown(ShutdownReason reason) { }
   102    static inline bool shutdown_in_progress() { return false; }
   103    static bool print_memory_usage(BaselineOutputer& out, size_t unit,
   104             bool summary_only = true) { return false; }
   105    static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
   106             bool summary_only = true) { return false; }
   108    static bool wbtest_wait_for_data_merge() { return false; }
   110    static inline void sync() { }
   111    static inline void thread_exiting(JavaThread* thread) { }
   112 };
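       // When NMT is compiled out (!INCLUDE_NMT), the stubs above are empty
       // no-ops with the same signatures as the real implementation below, so
       // call sites can invoke MemTracker unconditionally without needing
       // #if INCLUDE_NMT guards of their own.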
   115 #else // !INCLUDE_NMT
   117 #include "memory/allocation.hpp"
   118 #include "runtime/globals.hpp"
   119 #include "runtime/mutex.hpp"
   120 #include "runtime/os.hpp"
   121 #include "runtime/thread.hpp"
   122 #include "services/memPtr.hpp"
   123 #include "services/memRecorder.hpp"
   124 #include "services/memSnapshot.hpp"
   125 #include "services/memTrackWorker.hpp"
   127 extern bool NMT_track_callsite;
   129 #ifndef MAX_UNSIGNED_LONG
   130 #define MAX_UNSIGNED_LONG    (unsigned long)(-1)
   131 #endif
   133 #ifdef ASSERT
   134   #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
   135 #else
   136   #define DEBUG_CALLER_PC  0
   137 #endif
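       // Illustrative use of DEBUG_CALLER_PC (a sketch of the pattern used by
       // record_free() below): in debug builds with callsite tracking enabled it
       // captures the caller's pc, otherwise it expands to 0.
       //   Tracker tkr(Tracker::Free, thread);
       //   tkr.record(addr, 0, flags, DEBUG_CALLER_PC);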
   139 // The thread closure walks threads to collect per-thread
   140 // memory recorders at NMT sync point
   141 class SyncThreadRecorderClosure : public ThreadClosure {
   142  private:
   143   int _thread_count;
   145  public:
   146   SyncThreadRecorderClosure() {
   147     _thread_count = 0;
   148   }
   150   void do_thread(Thread* thread);
   151   int  get_thread_count() const {
   152     return _thread_count;
   153   }
   154 };
   156 class BaselineOutputer;
   157 class MemSnapshot;
   158 class MemTrackWorker;
   159 class Thread;
   160 /*
   161  * MemTracker is the 'gate' class to the native memory tracking runtime.
   162  */
   163 class MemTracker : AllStatic {
   164   friend class GenerationData;
   165   friend class MemTrackWorker;
   166   friend class MemSnapshot;
   167   friend class SyncThreadRecorderClosure;
   169   // NMT state
   170   enum NMTStates {
   171     NMT_uninited,                        // not yet initialized
   172     NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
   173     NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
   174     NMT_started,                         // NMT fully started
   175     NMT_shutdown_pending,                // shutdown pending
   176     NMT_final_shutdown,                  // in final phase of shutdown
   177     NMT_shutdown                         // shutdown
   178   };
   180  public:
   181   class Tracker : public StackObj {
   182     friend class MemTracker;
   183    public:
   184     enum MemoryOperation {
   185       NoOp,                   // no op
   186       Malloc,                 // malloc
   187       Realloc,                // realloc
   188       Free,                   // free
   189       Reserve,                // virtual memory reserve
   190       Commit,                 // virtual memory commit
   191       ReserveAndCommit,       // virtual memory reserve and commit
   192       StackAlloc = ReserveAndCommit, // allocate thread stack
   193       Type,                   // assign virtual memory type
   194       Uncommit,               // virtual memory uncommit
   195       Release,                // virtual memory release
   196       ArenaSize,              // set arena size
   197       StackRelease            // release thread stack
   198     };
   201    protected:
   202     Tracker(MemoryOperation op, Thread* thr = NULL);
   204    public:
   205     void discard();
   207     void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
   208     void record(address old_addr, address new_addr, size_t size,
   209       MEMFLAGS flags, address pc = NULL);
   211    private:
   212     bool            _need_thread_critical_lock;
   213     JavaThread*     _java_thread;
   214     MemoryOperation _op;          // memory operation
   215     jint            _seq;         // reserved sequence number
   216   };
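         // Tracker instances are created either indirectly through the record_*()
         // helpers below, or obtained from the get_*_tracker() factory methods for
         // operations that must pre-reserve a sequence number (see the
         // _pending_op_count comment at the end of this class).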
   219  public:
   220   // native memory tracking level
   221   enum NMTLevel {
   222     NMT_off,              // native memory tracking is off
   223     NMT_summary,          // don't track callsite
   224     NMT_detail            // track callsite also
   225   };
   227    enum ShutdownReason {
   228      NMT_shutdown_none,     // no shutdown requested
   229      NMT_shutdown_user,     // user requested shutdown
   230      NMT_normal,            // normal shutdown, process exit
   231      NMT_out_of_memory,     // shutdown due to out of memory
   232      NMT_initialization,    // shutdown due to initialization failure
   233      NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
   234      NMT_error_reporting,   // shutdown by vmError::report_and_die()
   235      NMT_out_of_generation, // running out of generation queue
   236      NMT_sequence_overflow  // overflow the sequence number
   237    };
   239  public:
   240   // initialize NMT tracking level from command line options, called
   241   // from VM command line parsing code
   242   static void init_tracking_options(const char* option_line);
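         // The option line typically comes from -XX:NativeMemoryTracking=summary|detail|off;
         // as a hedged illustration, "summary" selects NMT_summary and "detail" selects
         // NMT_detail (the actual parsing lives in the corresponding .cpp file).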
   244   // if NMT is enabled to record memory activities
   245   static inline bool is_on() {
   246     return (_tracking_level >= NMT_summary &&
   247       _state >= NMT_bootstrapping_single_thread);
   248   }
   250   static inline enum NMTLevel tracking_level() {
   251     return _tracking_level;
   252   }
   254   // user readable reason for shutting down NMT
   255   static const char* reason() {
   256     switch(_reason) {
   257       case NMT_shutdown_none:
   258         return "Native memory tracking is not enabled";
   259       case NMT_shutdown_user:
   260         return "Native memory tracking has been shut down by the user";
   261       case NMT_normal:
   262         return "Native memory tracking has been shut down due to process exit";
   263       case NMT_out_of_memory:
   264         return "Native memory tracking has been shut down due to running out of native memory";
   265       case NMT_initialization:
   266         return "Native memory tracking failed to initialize";
   267       case NMT_error_reporting:
   268         return "Native memory tracking has been shut down due to error reporting";
   269       case NMT_out_of_generation:
   270         return "Native memory tracking has been shut down due to running out of generation buffer";
   271       case NMT_sequence_overflow:
   272         return "Native memory tracking has been shut down due to sequence number overflow";
   273       case NMT_use_malloc_only:
   274         return "Native memory tracking is not supported when UseMallocOnly is on";
   275       default:
   276         ShouldNotReachHere();
   277         return NULL;
   278     }
   279   }
   281   // test if we can walk native stack
   282   static bool can_walk_stack() {
   283     // native stack is not walkable during bootstrapping on sparc
   284 #if defined(SPARC)
   285     return (_state == NMT_started);
   286 #else
   287     return (_state >= NMT_bootstrapping_single_thread && _state  <= NMT_started);
   288 #endif
   289   }
   291   // if native memory tracking tracks callsite
   292   static inline bool track_callsite() { return _tracking_level == NMT_detail; }
   294   // NMT automatically shuts itself down under extreme situations by default.
   295   // When the value is set to false, NMT will try its best to stay alive,
   296   // even if it has to slow down the VM.
   297   static inline void set_autoShutdown(bool value) {
   298     AutoShutdownNMT = value;
   299     if (AutoShutdownNMT && _slowdown_calling_thread) {
   300       _slowdown_calling_thread = false;
   301     }
   302   }
   304   // shut down the native memory tracking capability. Native memory tracking
   305   // can be shut down by the VM when it encounters low memory scenarios.
   306   // The memory tracker should gracefully shut itself down and preserve the
   307   // latest memory statistics for post-mortem diagnosis.
   308   static void shutdown(ShutdownReason reason);
   310   // whether a shutdown has been requested
   311   static inline bool shutdown_in_progress() {
   312     return (_state >= NMT_shutdown_pending);
   313   }
   315   // bootstrap native memory tracking, so it can start to collect raw data
   316   // before the worker thread can start
   318   // the first phase of bootstrapping, when the VM is still in single-threaded mode
   319   static void bootstrap_single_thread();
   320   // the second phase of bootstrapping, when the VM is about to enter, or is already in, multi-threaded mode
   321   static void bootstrap_multi_thread();
   324   // start() has to be called while the VM is still in single-thread mode, but after
   325   // command line option parsing is done.
   326   static void start();
   328   // record a 'malloc' call
   329   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
   330                             address pc = 0, Thread* thread = NULL) {
   331     Tracker tkr(Tracker::Malloc, thread);
   332     tkr.record(addr, size, flags, pc);
   333   }
   334   // record a 'free' call
   335   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
   336     Tracker tkr(Tracker::Free, thread);
   337     tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
   338   }
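         // A hedged sketch of a typical call site (in HotSpot the actual recording
         // is wired into the os::malloc()/os::free() wrappers; local names here are
         // illustrative only):
         //   void* p = ::malloc(size);
         //   if (p != NULL) {
         //     MemTracker::record_malloc((address)p, size, memflags, caller_pc);
         //   }
         //   ...
         //   MemTracker::record_free((address)p, memflags);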
   340   static inline void record_arena_size(address addr, size_t size) {
   341     Tracker tkr(Tracker::ArenaSize);
   342     tkr.record(addr, size);
   343   }
   345   // record a virtual memory 'reserve' call
   346   static inline void record_virtual_memory_reserve(address addr, size_t size,
   347                      MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
   348     assert(size > 0, "Sanity check");
   349     Tracker tkr(Tracker::Reserve, thread);
   350     tkr.record(addr, size, flags, pc);
   351   }
   353   static inline void record_thread_stack(address addr, size_t size, Thread* thr,
   354                            address pc = 0) {
   355     Tracker tkr(Tracker::StackAlloc, thr);
   356     tkr.record(addr, size, mtThreadStack, pc);
   357   }
   359   static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
   360     Tracker tkr(Tracker::StackRelease, thr);
   361     tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
   362   }
   364   // record a virtual memory 'commit' call
   365   static inline void record_virtual_memory_commit(address addr, size_t size,
   366                             address pc, Thread* thread = NULL) {
   367     Tracker tkr(Tracker::Commit, thread);
   368     tkr.record(addr, size, mtNone, pc);
   369   }
   371   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
   372     MEMFLAGS flags, address pc, Thread* thread = NULL) {
   373     Tracker tkr(Tracker::ReserveAndCommit, thread);
   374     tkr.record(addr, size, flags, pc);
   375   }
   377   static inline void record_virtual_memory_release(address addr, size_t size,
   378       Thread* thread = NULL) {
   379     if (is_on()) {
   380       Tracker tkr(Tracker::Release, thread);
   381       tkr.record(addr, size);
   382     }
   383   }
   385   // record memory type on virtual memory base address
   386   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
   387                             Thread* thread = NULL) {
   388     Tracker tkr(Tracker::Type);
   389     tkr.record(base, 0, flags);
   390   }
   392   // Get memory trackers for memory operations that can result in race conditions.
   393   // The memory tracker has to be obtained before realloc, virtual memory uncommit
   394   // and virtual memory release; call the tracker.record() method if the operation
   395   // succeeded, or tracker.discard() to abort the tracking (see the sketch below
   396   // the getters).
   396   static inline Tracker get_realloc_tracker() {
   397     return Tracker(Tracker::Realloc);
   398   }
   400   static inline Tracker get_virtual_memory_uncommit_tracker() {
   401     return Tracker(Tracker::Uncommit);
   402   }
   404   static inline Tracker get_virtual_memory_release_tracker() {
   405     return Tracker(Tracker::Release);
   406   }
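         // A sketch of the get/record/discard protocol described above (an
         // illustrative realloc call site; variable names are hypothetical):
         //   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
         //   void* new_ptr = ::realloc(old_ptr, new_size);
         //   if (new_ptr != NULL) {
         //     tkr.record((address)old_ptr, (address)new_ptr, new_size, flags);
         //   } else {
         //     tkr.discard();
         //   }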
   409   // create memory baseline of current memory snapshot
   410   static bool baseline();
   411   // is there a memory baseline
   412   static bool has_baseline() {
   413     return _baseline.baselined();
   414   }
   416   // print memory usage from current snapshot
   417   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
   418            bool summary_only = true);
   419   // compare memory usage between current snapshot and baseline
   420   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
   421            bool summary_only = true);
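         // A sketch of how a diagnostic command might drive these (illustrative;
         // BaselineTTYOutputer is assumed from the services reporting code, and
         // K is the usual 1K scaling unit):
         //   BaselineTTYOutputer outputer(tty);
         //   MemTracker::print_memory_usage(outputer, K, true /* summary_only */);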
   423   // the version for whitebox testing support; it ensures that all memory
   424   // activities before this method call are reflected in the snapshot
   425   // database.
   426   static bool wbtest_wait_for_data_merge();
   428   // sync is called within global safepoint to synchronize nmt data
   429   static void sync();
   431   // called when a thread is about to exit
   432   static void thread_exiting(JavaThread* thread);
   434   // retrieve global snapshot
   435   static MemSnapshot* get_snapshot() {
   436     if (shutdown_in_progress()) {
   437       return NULL;
   438     }
   439     return _snapshot;
   440   }
   442   // print tracker stats
   443   NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
   444   NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)
   446  private:
   447   // start native memory tracking worker thread
   448   static bool start_worker(MemSnapshot* snapshot);
   450   // called by worker thread to complete shutdown process
   451   static void final_shutdown();
   453  protected:
   454   // retrieve the per-thread recorder of the specified thread.
   455   // if the recorder is full, it will be enqueued to the overflow
   456   // queue, and a new recorder is acquired from the recorder pool or a
   457   // new instance is created.
   458   // when thread == NULL, it means the global recorder
   459   static MemRecorder* get_thread_recorder(JavaThread* thread);
   461   // per-thread recorder pool
   462   static void release_thread_recorder(MemRecorder* rec);
   463   static void delete_all_pooled_recorders();
   465   // pending recorder queue. Recorders are queued to the pending queue
   466   // when they overflow or are collected at an NMT sync point.
   467   static void enqueue_pending_recorder(MemRecorder* rec);
   468   static MemRecorder* get_pending_recorders();
   469   static void delete_all_pending_recorders();
   471   // write a memory tracking record in recorder
   472   static void write_tracking_record(address addr, MEMFLAGS type,
   473     size_t size, jint seq, address pc, JavaThread* thread);
   475   static bool is_single_threaded_bootstrap() {
   476     return _state == NMT_bootstrapping_single_thread;
   477   }
   479   static void check_NMT_load(Thread* thr) {
   480     assert(thr != NULL, "Sanity check");
   481     if (_slowdown_calling_thread && thr != _worker_thread) {
   482 #ifdef _WINDOWS
   483       // On Windows, os::NakedYield() does not work as well
   484       // as os::yield_all()
   485       os::yield_all();
   486 #else
   487       // On Solaris, os::yield_all() depends on os::sleep(),
   488       // which requires a JavaThread in _thread_in_vm state.
   489       // Transitioning a thread to _thread_in_vm state can be dangerous
   490       // if the caller holds a lock, as it may deadlock with Threads_lock.
   491       // So use NakedYield() instead.
   492       //
   493       // On Linux and BSD, the NakedYield() and yield_all() implementations
   494       // are the same.
   495       os::NakedYield();
   496 #endif
   497     }
   498   }
   500   static void inc_pending_op_count() {
   501     Atomic::inc(&_pending_op_count);
   502   }
   504   static void dec_pending_op_count() {
   505     Atomic::dec(&_pending_op_count);
   506     assert(_pending_op_count >= 0, "Sanity check");
   507   }
   510  private:
   511   // retrieve a pooled memory recorder or create a new one if none
   512   // is available
   513   static MemRecorder* get_new_or_pooled_instance();
   514   static void create_memory_record(address addr, MEMFLAGS type,
   515                    size_t size, address pc, Thread* thread);
   516   static void create_record_in_recorder(address addr, MEMFLAGS type,
   517                    size_t size, address pc, JavaThread* thread);
   519   static void set_current_processing_generation(unsigned long generation) {
   520     _worker_thread_idle = false;
   521     _processing_generation = generation;
   522   }
   524   static void report_worker_idle() {
   525     _worker_thread_idle = true;
   526   }
   528  private:
   529   // global memory snapshot
   530   static MemSnapshot*     _snapshot;
   532   // a memory baseline of snapshot
   533   static MemBaseline      _baseline;
   535   // query lock
   536   static Mutex*           _query_lock;
   538   // a thread can start to allocate memory before it is attached
   539   // to a VM 'Thread'; those memory activities are recorded here.
   540   // ThreadCritical is required to guard this global recorder.
   541   static MemRecorder* volatile _global_recorder;
   543   // main thread id
   544   debug_only(static intx   _main_thread_tid;)
   546   // pending recorders to be merged
   547   static MemRecorder* volatile     _merge_pending_queue;
   549   NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
   551   // pooled memory recorders
   552   static MemRecorder* volatile     _pooled_recorders;
   554   // memory recorder pool management uses the following
   555   // counters to determine whether a released memory recorder
   556   // should be pooled
   558   // latest thread count
   559   static int               _thread_count;
   560   // pooled recorder count
   561   static volatile jint     _pooled_recorder_count;
   564   // worker thread to merge pending recorders into snapshot
   565   static MemTrackWorker*  _worker_thread;
   567   // how many safepoints we skipped without entering sync point
   568   static int              _sync_point_skip_count;
   570   // whether the tracker is properly initialized
   571   static bool             _is_tracker_ready;
   572   // tracking level (off, summary and detail)
   573   static enum NMTLevel    _tracking_level;
   575   // current nmt state
   576   static volatile enum NMTStates   _state;
   577   // the reason for shutting down nmt
   578   static enum ShutdownReason       _reason;
   579   // the generation that NMT is processing
   580   static volatile unsigned long    _processing_generation;
   581   // NMT is still processing the current generation, but
   582   // there are no more recorders to process, so set the idle state
   583   static volatile bool             _worker_thread_idle;
   585   // whether NMT should slow down the calling thread to allow the
   586   // worker thread to catch up
   587   static volatile bool             _slowdown_calling_thread;
   589   // pending memory op count.
   590   // Certain memory ops need to pre-reserve a sequence number
   591   // before the memory operation can happen, to avoid race conditions.
   592   // See MemTracker::Tracker for details.
   593   static volatile jint             _pending_op_count;
   594 };
   596 #endif // !INCLUDE_NMT
   598 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
