src/share/vm/runtime/fprofiler.hpp

author       zgu
date         Thu, 28 Jun 2012 17:03:16 -0400
changeset    3900:d2a62e0f25eb
parent       3156:f08d439fab8c
child        4037:da91efe96a93
permissions  -rw-r--r--

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain

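The change to this file is confined to ThreadProfiler's allocation base class: it now derives from CHeapObj<mtInternal> (the zgu@3900 line below), so the profiler's C-heap allocations are tagged with the mtInternal memory type and can be attributed by native memory tracking. As an illustrative example of the DCmd named in 7151532 (not part of this file), a tracking report can be requested with:

    jcmd <pid> VM.native_memory summary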
duke@435 1 /*
trims@2708 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
stefank@2314 26 #define SHARE_VM_RUNTIME_FPROFILER_HPP
stefank@2314 27
stefank@2314 28 #include "runtime/timer.hpp"
stefank@2314 29 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 30 # include "thread_linux.inline.hpp"
stefank@2314 31 #endif
stefank@2314 32 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 33 # include "thread_solaris.inline.hpp"
stefank@2314 34 #endif
stefank@2314 35 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 36 # include "thread_windows.inline.hpp"
stefank@2314 37 #endif
never@3156 38 #ifdef TARGET_OS_FAMILY_bsd
never@3156 39 # include "thread_bsd.inline.hpp"
never@3156 40 #endif
stefank@2314 41
duke@435 42 // a simple flat profiler for Java
duke@435 43
duke@435 44
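// Overview sketch (drawn from this header; the -Xprof flag is mentioned only as
// background and is defined elsewhere): while the profiler is engaged, a
// FlatProfilerTask fires roughly every FlatProfiler::MillisecsPerTick
// milliseconds and charges one tick to whatever each sampled Java thread is
// doing (interpreted, compiled, native, or compiling). Typical use:
//
//   java -Xprof MyApp        // prints per-thread flat profiles at VM exit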
duke@435 45 // Forward declaration of classes defined in this header file
duke@435 46 class ThreadProfiler;
duke@435 47 class ThreadProfilerMark;
duke@435 48 class FlatProfiler;
duke@435 49 class IntervalData;
duke@435 50
duke@435 51 // Declarations of classes defined only in the implementation.
duke@435 52 class ProfilerNode;
duke@435 53 class FlatProfilerTask;
duke@435 54
duke@435 55 enum TickPosition {
duke@435 56 tp_code,
duke@435 57 tp_native
duke@435 58 };
duke@435 59
duke@435 60 // One of these objects is constructed as we enter an interesting region
duke@435 61 // and destructed as we exit the region. While we are in the region,
duke@435 62 // ticks are allotted to the region.
duke@435 63 class ThreadProfilerMark: public StackObj {
duke@435 64 public:
duke@435 65 // For now, the only thread-specific region is the class loader.
duke@435 66 enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
duke@435 67
duke@435 68 ThreadProfilerMark(Region) KERNEL_RETURN;
duke@435 69 ~ThreadProfilerMark() KERNEL_RETURN;
duke@435 70
duke@435 71 private:
duke@435 72 ThreadProfiler* _pp;
duke@435 73 Region _r;
duke@435 74 };
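// Usage sketch for ThreadProfilerMark (illustrative; the surrounding class
// loading code is an assumption, only the mark itself comes from this header):
//
//   {
//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
//     // ... load the class; ticks taken in here are charged to the region
//   }   // destructor closes the region when the scope exits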
duke@435 75
duke@435 76 #ifndef FPROF_KERNEL
duke@435 77
duke@435 78 class IntervalData VALUE_OBJ_CLASS_SPEC {
duke@435 79 // Just to keep these things all together
duke@435 80 private:
duke@435 81 int _interpreted;
duke@435 82 int _compiled;
duke@435 83 int _native;
duke@435 84 int _compiling;
duke@435 85 public:
duke@435 86 int interpreted() {
duke@435 87 return _interpreted;
duke@435 88 }
duke@435 89 int compiled() {
duke@435 90 return _compiled;
duke@435 91 }
duke@435 92 int native() {
duke@435 93 return _native;
duke@435 94 }
duke@435 95 int compiling() {
duke@435 96 return _compiling;
duke@435 97 }
duke@435 98 int total() {
duke@435 99 return (interpreted() + compiled() + native() + compiling());
duke@435 100 }
duke@435 101 void inc_interpreted() {
duke@435 102 _interpreted += 1;
duke@435 103 }
duke@435 104 void inc_compiled() {
duke@435 105 _compiled += 1;
duke@435 106 }
duke@435 107 void inc_native() {
duke@435 108 _native += 1;
duke@435 109 }
duke@435 110 void inc_compiling() {
duke@435 111 _compiling += 1;
duke@435 112 }
duke@435 113 void reset() {
duke@435 114 _interpreted = 0;
duke@435 115 _compiled = 0;
duke@435 116 _native = 0;
duke@435 117 _compiling = 0;
duke@435 118 }
duke@435 119 static void print_header(outputStream* st);
duke@435 120 void print_data(outputStream* st);
duke@435 121 };
duke@435 122 #endif // FPROF_KERNEL
duke@435 123
zgu@3900 124 class ThreadProfiler: public CHeapObj<mtInternal> {
duke@435 125 public:
duke@435 126 ThreadProfiler() KERNEL_RETURN;
duke@435 127 ~ThreadProfiler() KERNEL_RETURN;
duke@435 128
duke@435 129 // Resets the profiler
duke@435 130 void reset() KERNEL_RETURN;
duke@435 131
duke@435 132 // Activates the profiler for a certain thread
duke@435 133 void engage() KERNEL_RETURN;
duke@435 134
duke@435 135 // Deactivates the profiler
duke@435 136 void disengage() KERNEL_RETURN;
duke@435 137
duke@435 138 // Prints the collected profiling information
duke@435 139 void print(const char* thread_name) KERNEL_RETURN;
duke@435 140
duke@435 141 // Garbage Collection Support
duke@435 142 void oops_do(OopClosure* f) KERNEL_RETURN;
duke@435 143
duke@435 144 #ifndef FPROF_KERNEL
duke@435 145 private:
duke@435 146 // for recording ticks.
duke@435 147 friend class ProfilerNode;
duke@435 148 char* area_bottom; // preallocated area for pnodes
duke@435 149 char* area_top;
duke@435 150 char* area_limit;
duke@435 151 static int table_size;
duke@435 152 ProfilerNode** table;
duke@435 153
duke@435 154 private:
sgoldman@542 155 void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
duke@435 156 void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
duke@435 157 void interpreted_update(methodOop method, TickPosition where);
duke@435 158 void compiled_update (methodOop method, TickPosition where);
duke@435 159 void stub_update (methodOop method, const char* name, TickPosition where);
duke@435 160 void adapter_update (TickPosition where);
duke@435 161
duke@435 162 void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
duke@435 163 void unknown_compiled_update (const CodeBlob* cb, TickPosition where);
duke@435 164
duke@435 165 void vm_update (TickPosition where);
duke@435 166 void vm_update (const char* name, TickPosition where);
duke@435 167
duke@435 168 void record_tick_for_running_frame(JavaThread* thread, frame fr);
duke@435 169 void record_tick_for_calling_frame(JavaThread* thread, frame fr);
duke@435 170
duke@435 171 void initialize();
duke@435 172
duke@435 173 static int entry(int value);
duke@435 174
duke@435 175
duke@435 176 private:
duke@435 177 friend class FlatProfiler;
duke@435 178 void record_tick(JavaThread* thread);
duke@435 179 bool engaged;
duke@435 180 // so we can do percentages for this thread, and quick checks for activity
duke@435 181 int thread_ticks;
duke@435 182 int compiler_ticks;
duke@435 183 int interpreter_ticks;
duke@435 184
duke@435 185 public:
duke@435 186 void inc_thread_ticks() { thread_ticks += 1; }
duke@435 187
duke@435 188 private:
duke@435 189 friend class ThreadProfilerMark;
duke@435 190 // counters for thread-specific regions
duke@435 191 bool region_flag[ThreadProfilerMark::maxRegion];
duke@435 192 int class_loader_ticks;
duke@435 193 int extra_ticks;
duke@435 194
duke@435 195 private:
duke@435 196 // other thread-specific regions
duke@435 197 int blocked_ticks;
duke@435 198 enum UnknownTickSites {
duke@435 199 ut_null_method,
duke@435 200 ut_vtable_stubs,
duke@435 201 ut_running_frame,
duke@435 202 ut_calling_frame,
duke@435 203 ut_no_pc,
duke@435 204 ut_no_last_Java_frame,
duke@435 205 ut_unknown_thread_state,
duke@435 206 ut_end
duke@435 207 };
duke@435 208 int unknown_ticks_array[ut_end];
duke@435 209 int unknown_ticks() {
duke@435 210 int result = 0;
duke@435 211 for (int ut = 0; ut < ut_end; ut += 1) {
duke@435 212 result += unknown_ticks_array[ut];
duke@435 213 }
duke@435 214 return result;
duke@435 215 }
duke@435 216
duke@435 217 elapsedTimer timer;
duke@435 218
duke@435 219 // For interval timing
duke@435 220 private:
duke@435 221 IntervalData _interval_data;
duke@435 222 IntervalData interval_data() {
duke@435 223 return _interval_data;
duke@435 224 }
duke@435 225 IntervalData* interval_data_ref() {
duke@435 226 return &_interval_data;
duke@435 227 }
duke@435 228 #endif // FPROF_KERNEL
duke@435 229 };
duke@435 230
duke@435 231 class FlatProfiler: AllStatic {
duke@435 232 public:
duke@435 233 static void reset() KERNEL_RETURN ;
duke@435 234 static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
duke@435 235 static void disengage() KERNEL_RETURN ;
duke@435 236 static void print(int unused) KERNEL_RETURN ;
kamg@2511 237 static bool is_active() KERNEL_RETURN_(false) ;
duke@435 238
duke@435 239 // This is NULL if each thread has its own thread profiler,
duke@435 240 // else this is the single thread profiler used by all threads.
duke@435 241 // In particular it makes a difference during garbage collection,
duke@435 242 // where you only want to traverse each thread profiler once.
kamg@2511 243 static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);
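// Illustrative dispatch based on the comment above (a sketch, not the
// implementation):
//
//   ThreadProfiler* pp = FlatProfiler::get_thread_profiler();
//   if (pp != NULL) {
//     pp->oops_do(f);    // one shared profiler: traverse it exactly once
//   } else {
//     // each JavaThread owns its own ThreadProfiler; traverse per thread
//   }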
duke@435 244
duke@435 245 // Garbage Collection Support
duke@435 246 static void oops_do(OopClosure* f) KERNEL_RETURN ;
duke@435 247
duke@435 248 // Support for disassembler to inspect the PCRecorder
duke@435 249
duke@435 250 // Returns the start address for a given pc
duke@435 251 // NULL is returned if the PCRecorder is inactive
kamg@2511 252 static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);
duke@435 253
duke@435 254 enum { MillisecsPerTick = 10 }; // ms per profiling tick
duke@435 255
duke@435 256 // Returns the number of ticks recorded for the bucket
duke@435 257 // pc belongs to.
kamg@2511 258 static int bucket_count_for(address pc) KERNEL_RETURN_(0);
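// Illustrative query sequence for a disassembler-style client (a sketch;
// the real callers live outside this header):
//
//   address start = FlatProfiler::bucket_start_for(pc);
//   if (start != NULL) {                     // PCRecorder is active
//     int ticks = FlatProfiler::bucket_count_for(pc);
//     // annotate the code at 'start' with 'ticks' recorded samples
//   }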
duke@435 259
duke@435 260 #ifndef FPROF_KERNEL
duke@435 261
duke@435 262 private:
duke@435 263 static bool full_profile() {
duke@435 264 return full_profile_flag;
duke@435 265 }
duke@435 266
duke@435 267 friend class ThreadProfiler;
duke@435 268 // the following group of ticks covers everything that's not attributed to individual Java methods
duke@435 269 static int received_gc_ticks; // ticks during which gc was active
duke@435 270 static int vm_operation_ticks; // total ticks in vm_operations other than GC
duke@435 271 static int threads_lock_ticks; // the number of times we couldn't get the Threads_lock without blocking
duke@435 272 static int blocked_ticks; // ticks when the thread was blocked.
duke@435 273 static int class_loader_ticks; // total ticks in class loader
duke@435 274 static int extra_ticks; // total ticks in an extra temporary measurement region
duke@435 275 static int compiler_ticks; // total ticks in compilation
duke@435 276 static int interpreter_ticks; // ticks in unknown interpreted method
duke@435 277 static int deopt_ticks; // ticks in deoptimization
duke@435 278 static int unknown_ticks; // ticks that cannot be categorized
duke@435 279 static int received_ticks; // ticks that were received by task
duke@435 280 static int delivered_ticks; // ticks that were delivered by task
duke@435 281 static int non_method_ticks() {
duke@435 282 return
duke@435 283 ( received_gc_ticks
duke@435 284 + vm_operation_ticks
duke@435 285 + deopt_ticks
duke@435 286 + threads_lock_ticks
duke@435 287 + blocked_ticks
duke@435 288 + compiler_ticks
duke@435 289 + interpreter_ticks
duke@435 290 + unknown_ticks );
duke@435 291 }
duke@435 292 static elapsedTimer timer;
duke@435 293
duke@435 294 // Counts of each of the byte codes
duke@435 295 static int* bytecode_ticks;
duke@435 296 static int* bytecode_ticks_stub;
duke@435 297 static void print_byte_code_statistics();
duke@435 298
duke@435 299 // the ticks below are for continuous profiling (to adjust recompilation, etc.)
duke@435 300 static int all_ticks; // total count of ticks received so far
duke@435 301 static int all_int_ticks; // ticks in interpreter
duke@435 302 static int all_comp_ticks; // ticks in compiled code (+ native)
duke@435 303 static bool full_profile_flag; // collecting full profile?
duke@435 304
duke@435 305 // to accumulate thread-specific data
duke@435 306 // if we aren't profiling individual threads.
duke@435 307 static ThreadProfiler* thread_profiler;
duke@435 308 static ThreadProfiler* vm_thread_profiler;
duke@435 309
duke@435 310 static void allocate_table();
duke@435 311
duke@435 312 // The task that periodically interrupts things.
duke@435 313 friend class FlatProfilerTask;
duke@435 314 static FlatProfilerTask* task;
duke@435 315 static void record_vm_operation();
duke@435 316 static void record_vm_tick();
duke@435 317 static void record_thread_ticks();
duke@435 318
duke@435 319 // For interval analysis
duke@435 320 private:
duke@435 321 static int interval_ticks_previous; // delivered_ticks from the last interval
duke@435 322 static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
duke@435 323 static void interval_print(); // print interval data.
duke@435 324 static void interval_reset(); // reset interval data.
duke@435 325 enum {interval_print_size = 10};
duke@435 326 static IntervalData* interval_data;
duke@435 327 #endif // FPROF_KERNEL
duke@435 328 };
stefank@2314 329
stefank@2314 330 #endif // SHARE_VM_RUNTIME_FPROFILER_HPP
