src/share/vm/runtime/fprofiler.hpp

changeset 0: f90c822e73f8
child 6876: 710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/fprofiler.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,319 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
    1.29 +#define SHARE_VM_RUNTIME_FPROFILER_HPP
    1.30 +
    1.31 +#include "utilities/macros.hpp"
    1.32 +#include "runtime/timer.hpp"
    1.33 +
    1.34 +// a simple flat profiler for Java
    1.35 +
    1.36 +
    1.37 +// Forward declaration of classes defined in this header file
    1.38 +class ThreadProfiler;
    1.39 +class ThreadProfilerMark;
    1.40 +class FlatProfiler;
    1.41 +class IntervalData;
    1.42 +
    1.43 +// Declarations of classes defined only in the implementation.
    1.44 +class ProfilerNode;
    1.45 +class FlatProfilerTask;
    1.46 +
    1.47 +enum TickPosition {
    1.48 +  tp_code,
    1.49 +  tp_native
    1.50 +};
    1.51 +
     1.52 +// One of these objects is constructed as we enter an interesting region
     1.53 +// and destroyed as we exit it.  While we are in the region,
     1.54 +// ticks are allotted to the region.
    1.55 +class ThreadProfilerMark: public StackObj {
    1.56 +public:
    1.57 +  // For now, the only thread-specific region is the class loader.
    1.58 +  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
    1.59 +
    1.60 +  ThreadProfilerMark(Region)  NOT_FPROF_RETURN;
    1.61 +  ~ThreadProfilerMark()       NOT_FPROF_RETURN;
    1.62 +
    1.63 +private:
    1.64 +  ThreadProfiler* _pp;
    1.65 +  Region _r;
    1.66 +};
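          +
          +// Illustrative usage (editorial sketch, not part of the original changeset):
          +// per the comment above, the class loader is the one thread-specific region,
          +// so ticks taken while a mark is live are attributed to classLoaderRegion:
          +//
          +//   {
          +//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
          +//     // ... load and define the class; ticks land in class_loader_ticks
          +//   }   // destructor closes the region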
    1.67 +
    1.68 +#if INCLUDE_FPROF
    1.69 +
    1.70 +class IntervalData VALUE_OBJ_CLASS_SPEC {
     1.71 +  // Groups the per-interval tick counters together
    1.72 +private:
    1.73 +  int _interpreted;
    1.74 +  int _compiled;
    1.75 +  int _native;
    1.76 +  int _compiling;
    1.77 +public:
    1.78 +  int interpreted() {
    1.79 +    return _interpreted;
    1.80 +  }
    1.81 +  int compiled() {
    1.82 +    return _compiled;
    1.83 +  }
    1.84 +  int native() {
    1.85 +    return _native;
    1.86 +  }
    1.87 +  int compiling() {
    1.88 +    return _compiling;
    1.89 +  }
    1.90 +  int total() {
    1.91 +    return (interpreted() + compiled() + native() + compiling());
    1.92 +  }
    1.93 +  void inc_interpreted() {
    1.94 +    _interpreted += 1;
    1.95 +  }
    1.96 +  void inc_compiled() {
    1.97 +    _compiled += 1;
    1.98 +  }
    1.99 +  void inc_native() {
   1.100 +    _native += 1;
   1.101 +  }
   1.102 +  void inc_compiling() {
   1.103 +    _compiling += 1;
   1.104 +  }
   1.105 +  void reset() {
   1.106 +    _interpreted = 0;
   1.107 +    _compiled = 0;
   1.108 +    _native = 0;
   1.109 +    _compiling = 0;
   1.110 +  }
   1.111 +  static void print_header(outputStream* st);
   1.112 +  void print_data(outputStream* st);
   1.113 +};
   1.114 +#endif // INCLUDE_FPROF
   1.115 +
   1.116 +class ThreadProfiler: public CHeapObj<mtInternal> {
   1.117 +public:
   1.118 +  ThreadProfiler()    NOT_FPROF_RETURN;
   1.119 +  ~ThreadProfiler()   NOT_FPROF_RETURN;
   1.120 +
   1.121 +  // Resets the profiler
   1.122 +  void reset()        NOT_FPROF_RETURN;
   1.123 +
    1.124 +  // Activates the profiler for the owning thread
   1.125 +  void engage()       NOT_FPROF_RETURN;
   1.126 +
   1.127 +  // Deactivates the profiler
   1.128 +  void disengage()    NOT_FPROF_RETURN;
   1.129 +
   1.130 +  // Prints the collected profiling information
   1.131 +  void print(const char* thread_name) NOT_FPROF_RETURN;
   1.132 +
   1.133 +  // Garbage Collection Support
   1.134 +  void oops_do(OopClosure* f)         NOT_FPROF_RETURN;
   1.135 +
   1.136 +#if INCLUDE_FPROF
   1.137 +private:
   1.138 +  // for recording ticks.
   1.139 +  friend class ProfilerNode;
   1.140 +  char* area_bottom; // preallocated area for pnodes
   1.141 +  char* area_top;
   1.142 +  char* area_limit;
   1.143 +  static int            table_size;
   1.144 +  ProfilerNode** table;
   1.145 +
   1.146 +private:
   1.147 +  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
   1.148 +  void record_compiled_tick   (JavaThread* thread, frame fr, TickPosition where);
   1.149 +  void interpreted_update(Method* method, TickPosition where);
   1.150 +  void compiled_update   (Method* method, TickPosition where);
   1.151 +  void stub_update       (Method* method, const char* name, TickPosition where);
   1.152 +  void adapter_update    (TickPosition where);
   1.153 +
   1.154 +  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
   1.155 +  void unknown_compiled_update    (const CodeBlob* cb, TickPosition where);
   1.156 +
   1.157 +  void vm_update    (TickPosition where);
   1.158 +  void vm_update    (const char* name, TickPosition where);
   1.159 +
   1.160 +  void record_tick_for_running_frame(JavaThread* thread, frame fr);
   1.161 +  void record_tick_for_calling_frame(JavaThread* thread, frame fr);
   1.162 +
   1.163 +  void initialize();
   1.164 +
   1.165 +  static int  entry(int value);
   1.166 +
   1.167 +
   1.168 +private:
   1.169 +  friend class FlatProfiler;
   1.170 +  void record_tick(JavaThread* thread);
   1.171 +  bool engaged;
    1.172 +  // Tick counts so we can compute percentages for this thread and make quick activity checks
   1.173 +  int thread_ticks;
   1.174 +  int compiler_ticks;
   1.175 +  int interpreter_ticks;
   1.176 +
   1.177 +public:
   1.178 +  void inc_thread_ticks() { thread_ticks += 1; }
   1.179 +
   1.180 +private:
   1.181 +  friend class ThreadProfilerMark;
   1.182 +  // counters for thread-specific regions
   1.183 +  bool region_flag[ThreadProfilerMark::maxRegion];
   1.184 +  int class_loader_ticks;
   1.185 +  int extra_ticks;
   1.186 +
   1.187 +private:
   1.188 +  // other thread-specific regions
   1.189 +  int blocked_ticks;
   1.190 +  enum UnknownTickSites {
   1.191 +      ut_null_method,
   1.192 +      ut_vtable_stubs,
   1.193 +      ut_running_frame,
   1.194 +      ut_calling_frame,
   1.195 +      ut_no_pc,
   1.196 +      ut_no_last_Java_frame,
   1.197 +      ut_unknown_thread_state,
   1.198 +      ut_end
   1.199 +  };
   1.200 +  int unknown_ticks_array[ut_end];
   1.201 +  int unknown_ticks() {
   1.202 +    int result = 0;
   1.203 +    for (int ut = 0; ut < ut_end; ut += 1) {
   1.204 +      result += unknown_ticks_array[ut];
   1.205 +    }
   1.206 +    return result;
   1.207 +  }
   1.208 +
   1.209 +  elapsedTimer timer;
   1.210 +
   1.211 +  // For interval timing
   1.212 +private:
   1.213 +  IntervalData _interval_data;
   1.214 +  IntervalData interval_data() {
   1.215 +    return _interval_data;
   1.216 +  }
   1.217 +  IntervalData* interval_data_ref() {
   1.218 +    return &_interval_data;
   1.219 +  }
   1.220 +#endif // INCLUDE_FPROF
   1.221 +};
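          +
          +// Typical per-thread lifecycle (editorial sketch; in practice these calls are
          +// driven by FlatProfiler and its periodic FlatProfilerTask, not by user code):
          +//
          +//   ThreadProfiler* tp = new ThreadProfiler();
          +//   tp->engage();                 // start attributing ticks to this thread
          +//   // ... each profiler tick ends up in tp->record_tick(thread) ...
          +//   tp->disengage();
          +//   tp->print("main");            // print this thread's profile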
   1.222 +
   1.223 +class FlatProfiler: AllStatic {
   1.224 +public:
   1.225 +  static void reset() NOT_FPROF_RETURN ;
   1.226 +  static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ;
   1.227 +  static void disengage() NOT_FPROF_RETURN ;
   1.228 +  static void print(int unused) NOT_FPROF_RETURN ;
   1.229 +  static bool is_active() NOT_FPROF_RETURN_(false) ;
   1.230 +
   1.231 +  // This is NULL if each thread has its own thread profiler,
   1.232 +  // else this is the single thread profiler used by all threads.
   1.233 +  // In particular it makes a difference during garbage collection,
   1.234 +  // where you only want to traverse each thread profiler once.
   1.235 +  static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL);
   1.236 +
   1.237 +  // Garbage Collection Support
   1.238 +  static void oops_do(OopClosure* f) NOT_FPROF_RETURN ;
   1.239 +
   1.240 +  // Support for disassembler to inspect the PCRecorder
   1.241 +
   1.242 +  // Returns the start address for a given pc
   1.243 +  // NULL is returned if the PCRecorder is inactive
   1.244 +  static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL);
   1.245 +
    1.246 +  enum { MillisecsPerTick = 10 };   // ms per profiling tick
   1.247 +
   1.248 +  // Returns the number of ticks recorded for the bucket
   1.249 +  // pc belongs to.
   1.250 +  static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0);
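          +
          +// Illustrative use from a disassembler (editorial sketch; st stands for an
          +// assumed outputStream*): annotate an instruction with its bucket's ticks.
          +//
          +//   if (FlatProfiler::bucket_start_for(pc) != NULL) {   // PCRecorder active?
          +//     int ticks = FlatProfiler::bucket_count_for(pc);
          +//     if (ticks > 0) st->print(" ;; %d profiler ticks", ticks);
          +//   }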
   1.251 +
   1.252 +#if INCLUDE_FPROF
   1.253 +
   1.254 + private:
   1.255 +  static bool full_profile() {
   1.256 +    return full_profile_flag;
   1.257 +  }
   1.258 +
   1.259 +  friend class ThreadProfiler;
    1.260 +  // The following group of ticks covers everything that is not attributed to individual Java methods
   1.261 +  static int  received_gc_ticks;      // ticks during which gc was active
   1.262 +  static int vm_operation_ticks;      // total ticks in vm_operations other than GC
   1.263 +  static int threads_lock_ticks;      // the number of times we couldn't get the Threads_lock without blocking
   1.264 +  static int      blocked_ticks;      // ticks when the thread was blocked.
   1.265 +  static int class_loader_ticks;      // total ticks in class loader
    1.266 +  static int        extra_ticks;      // total ticks in extra, temporary measurements
   1.267 +  static int     compiler_ticks;      // total ticks in compilation
   1.268 +  static int  interpreter_ticks;      // ticks in unknown interpreted method
   1.269 +  static int        deopt_ticks;      // ticks in deoptimization
   1.270 +  static int      unknown_ticks;      // ticks that cannot be categorized
   1.271 +  static int     received_ticks;      // ticks that were received by task
   1.272 +  static int    delivered_ticks;      // ticks that were delivered by task
   1.273 +  static int non_method_ticks() {
   1.274 +    return
   1.275 +      ( received_gc_ticks
   1.276 +      + vm_operation_ticks
   1.277 +      + deopt_ticks
   1.278 +      + threads_lock_ticks
   1.279 +      + blocked_ticks
   1.280 +      + compiler_ticks
   1.281 +      + interpreter_ticks
   1.282 +      + unknown_ticks );
   1.283 +  }
   1.284 +  static elapsedTimer timer;
   1.285 +
    1.286 +  // Tick counts for each of the bytecodes
   1.287 +  static int*           bytecode_ticks;
   1.288 +  static int*           bytecode_ticks_stub;
   1.289 +  static void print_byte_code_statistics();
   1.290 +
   1.291 +  // the ticks below are for continuous profiling (to adjust recompilation, etc.)
   1.292 +  static int          all_ticks;      // total count of ticks received so far
   1.293 +  static int      all_int_ticks;      // ticks in interpreter
   1.294 +  static int     all_comp_ticks;      // ticks in compiled code (+ native)
   1.295 +  static bool full_profile_flag;      // collecting full profile?
   1.296 +
   1.297 +  // to accumulate thread-specific data
   1.298 +  // if we aren't profiling individual threads.
   1.299 +  static ThreadProfiler* thread_profiler;
   1.300 +  static ThreadProfiler* vm_thread_profiler;
   1.301 +
   1.302 +  static void allocate_table();
   1.303 +
   1.304 +  // The task that periodically interrupts things.
   1.305 +  friend class FlatProfilerTask;
   1.306 +  static FlatProfilerTask* task;
   1.307 +  static void record_vm_operation();
   1.308 +  static void record_vm_tick();
   1.309 +  static void record_thread_ticks();
   1.310 +
   1.311 +  // For interval analysis
   1.312 + private:
   1.313 +  static int interval_ticks_previous;  // delivered_ticks from the last interval
   1.314 +  static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
   1.315 +  static void interval_print();       // print interval data.
   1.316 +  static void interval_reset();       // reset interval data.
   1.317 +  enum {interval_print_size = 10};
   1.318 +  static IntervalData* interval_data;
   1.319 +#endif // INCLUDE_FPROF
   1.320 +};
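          +
          +// Overall lifecycle (editorial sketch, not part of the original changeset):
          +// with -Xprof the VM engages the flat profiler during startup and prints the
          +// accumulated profile at exit, roughly:
          +//
          +//   FlatProfiler::engage(main_thread, /* fullProfile */ true);  // starts FlatProfilerTask
          +//   // ... application runs; ticks accumulate every MillisecsPerTick ms ...
          +//   FlatProfiler::disengage();
          +//   FlatProfiler::print(0);       // the argument is unused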
   1.321 +
   1.322 +#endif // SHARE_VM_RUNTIME_FPROFILER_HPP
