src/share/vm/runtime/fprofiler.hpp

author       coleenp
date         Tue, 26 Apr 2011 14:04:43 -0400
changeset    2804:01147d8aac1d
parent       2708:1d1603768966
child        3156:f08d439fab8c
permissions  -rw-r--r--

7009923: JSR 292: VM crash in JavaThread::last_frame
Summary: Handle stack overflow before the first frame is called, by printing out the called method and not walking the stack.
Reviewed-by: dholmes, phh, dsamersoff

duke@435 1 /*
trims@2708 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
stefank@2314 26 #define SHARE_VM_RUNTIME_FPROFILER_HPP
stefank@2314 27
stefank@2314 28 #include "runtime/timer.hpp"
stefank@2314 29 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 30 # include "thread_linux.inline.hpp"
stefank@2314 31 #endif
stefank@2314 32 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 33 # include "thread_solaris.inline.hpp"
stefank@2314 34 #endif
stefank@2314 35 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 36 # include "thread_windows.inline.hpp"
stefank@2314 37 #endif
stefank@2314 38
duke@435 39 // a simple flat profiler for Java
duke@435 40
duke@435 41
duke@435 42 // Forward declaration of classes defined in this header file
duke@435 43 class ThreadProfiler;
duke@435 44 class ThreadProfilerMark;
duke@435 45 class FlatProfiler;
duke@435 46 class IntervalData;
duke@435 47
duke@435 48 // Declarations of classes defined only in the implementation.
duke@435 49 class ProfilerNode;
duke@435 50 class FlatProfilerTask;
duke@435 51
duke@435 52 enum TickPosition {
duke@435 53 tp_code,
duke@435 54 tp_native
duke@435 55 };
duke@435 56
duke@435 57 // A ThreadProfilerMark is constructed as we enter an interesting region
duke@435 58 // and destructed as we exit the region. While we are in the region,
duke@435 59 // ticks are allotted to the region.
duke@435 60 class ThreadProfilerMark: public StackObj {
duke@435 61 public:
duke@435 62 // For now, the only thread-specific region is the class loader.
duke@435 63 enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
duke@435 64
duke@435 65 ThreadProfilerMark(Region) KERNEL_RETURN;
duke@435 66 ~ThreadProfilerMark() KERNEL_RETURN;
duke@435 67
duke@435 68 private:
duke@435 69 ThreadProfiler* _pp;
duke@435 70 Region _r;
duke@435 71 };
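// Usage sketch (illustration only, not part of the original file): because
// ThreadProfilerMark is a StackObj, a region is marked simply by declaring
// one; ticks that arrive while it is live are attributed to the chosen
// region, and attribution ends when the mark goes out of scope.
//
//   void load_class_example() {                          // hypothetical caller
//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
//     // ... class-loading work; ticks land in class_loader_ticks ...
//   }                                                    // tpm destructed here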
duke@435 72
duke@435 73 #ifndef FPROF_KERNEL
duke@435 74
duke@435 75 class IntervalData VALUE_OBJ_CLASS_SPEC {
duke@435 76 // Just to keep these things all together
duke@435 77 private:
duke@435 78 int _interpreted;
duke@435 79 int _compiled;
duke@435 80 int _native;
duke@435 81 int _compiling;
duke@435 82 public:
duke@435 83 int interpreted() {
duke@435 84 return _interpreted;
duke@435 85 }
duke@435 86 int compiled() {
duke@435 87 return _compiled;
duke@435 88 }
duke@435 89 int native() {
duke@435 90 return _native;
duke@435 91 }
duke@435 92 int compiling() {
duke@435 93 return _compiling;
duke@435 94 }
duke@435 95 int total() {
duke@435 96 return (interpreted() + compiled() + native() + compiling());
duke@435 97 }
duke@435 98 void inc_interpreted() {
duke@435 99 _interpreted += 1;
duke@435 100 }
duke@435 101 void inc_compiled() {
duke@435 102 _compiled += 1;
duke@435 103 }
duke@435 104 void inc_native() {
duke@435 105 _native += 1;
duke@435 106 }
duke@435 107 void inc_compiling() {
duke@435 108 _compiling += 1;
duke@435 109 }
duke@435 110 void reset() {
duke@435 111 _interpreted = 0;
duke@435 112 _compiled = 0;
duke@435 113 _native = 0;
duke@435 114 _compiling = 0;
duke@435 115 }
duke@435 116 static void print_header(outputStream* st);
duke@435 117 void print_data(outputStream* st);
duke@435 118 };
duke@435 119 #endif // FPROF_KERNEL
duke@435 120
duke@435 121 class ThreadProfiler: public CHeapObj {
duke@435 122 public:
duke@435 123 ThreadProfiler() KERNEL_RETURN;
duke@435 124 ~ThreadProfiler() KERNEL_RETURN;
duke@435 125
duke@435 126 // Resets the profiler
duke@435 127 void reset() KERNEL_RETURN;
duke@435 128
duke@435 129 // Activates the profiler for a certain thread
duke@435 130 void engage() KERNEL_RETURN;
duke@435 131
duke@435 132 // Deactivates the profiler
duke@435 133 void disengage() KERNEL_RETURN;
duke@435 134
duke@435 135 // Prints the collected profiling information
duke@435 136 void print(const char* thread_name) KERNEL_RETURN;
duke@435 137
duke@435 138 // Garbage Collection Support
duke@435 139 void oops_do(OopClosure* f) KERNEL_RETURN;
duke@435 140
duke@435 141 #ifndef FPROF_KERNEL
duke@435 142 private:
duke@435 143 // for recording ticks.
duke@435 144 friend class ProfilerNode;
duke@435 145 char* area_bottom; // preallocated area for pnodes
duke@435 146 char* area_top;
duke@435 147 char* area_limit;
duke@435 148 static int table_size;
duke@435 149 ProfilerNode** table;
duke@435 150
duke@435 151 private:
sgoldman@542 152 void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
duke@435 153 void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
duke@435 154 void interpreted_update(methodOop method, TickPosition where);
duke@435 155 void compiled_update (methodOop method, TickPosition where);
duke@435 156 void stub_update (methodOop method, const char* name, TickPosition where);
duke@435 157 void adapter_update (TickPosition where);
duke@435 158
duke@435 159 void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
duke@435 160 void unknown_compiled_update (const CodeBlob* cb, TickPosition where);
duke@435 161
duke@435 162 void vm_update (TickPosition where);
duke@435 163 void vm_update (const char* name, TickPosition where);
duke@435 164
duke@435 165 void record_tick_for_running_frame(JavaThread* thread, frame fr);
duke@435 166 void record_tick_for_calling_frame(JavaThread* thread, frame fr);
duke@435 167
duke@435 168 void initialize();
duke@435 169
duke@435 170 static int entry(int value);
duke@435 171
duke@435 172
duke@435 173 private:
duke@435 174 friend class FlatProfiler;
duke@435 175 void record_tick(JavaThread* thread);
duke@435 176 bool engaged;
duke@435 177 // so we can do percentages for this thread, and quick checks for activity
duke@435 178 int thread_ticks;
duke@435 179 int compiler_ticks;
duke@435 180 int interpreter_ticks;
duke@435 181
duke@435 182 public:
duke@435 183 void inc_thread_ticks() { thread_ticks += 1; }
duke@435 184
duke@435 185 private:
duke@435 186 friend class ThreadProfilerMark;
duke@435 187 // counters for thread-specific regions
duke@435 188 bool region_flag[ThreadProfilerMark::maxRegion];
duke@435 189 int class_loader_ticks;
duke@435 190 int extra_ticks;
duke@435 191
duke@435 192 private:
duke@435 193 // other thread-specific regions
duke@435 194 int blocked_ticks;
duke@435 195 enum UnknownTickSites {
duke@435 196 ut_null_method,
duke@435 197 ut_vtable_stubs,
duke@435 198 ut_running_frame,
duke@435 199 ut_calling_frame,
duke@435 200 ut_no_pc,
duke@435 201 ut_no_last_Java_frame,
duke@435 202 ut_unknown_thread_state,
duke@435 203 ut_end
duke@435 204 };
duke@435 205 int unknown_ticks_array[ut_end];
duke@435 206 int unknown_ticks() {
duke@435 207 int result = 0;
duke@435 208 for (int ut = 0; ut < ut_end; ut += 1) {
duke@435 209 result += unknown_ticks_array[ut];
duke@435 210 }
duke@435 211 return result;
duke@435 212 }
duke@435 213
duke@435 214 elapsedTimer timer;
duke@435 215
duke@435 216 // For interval timing
duke@435 217 private:
duke@435 218 IntervalData _interval_data;
duke@435 219 IntervalData interval_data() {
duke@435 220 return _interval_data;
duke@435 221 }
duke@435 222 IntervalData* interval_data_ref() {
duke@435 223 return &_interval_data;
duke@435 224 }
duke@435 225 #endif // FPROF_KERNEL
duke@435 226 };
duke@435 227
duke@435 228 class FlatProfiler: AllStatic {
duke@435 229 public:
duke@435 230 static void reset() KERNEL_RETURN ;
duke@435 231 static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
duke@435 232 static void disengage() KERNEL_RETURN ;
duke@435 233 static void print(int unused) KERNEL_RETURN ;
kamg@2511 234 static bool is_active() KERNEL_RETURN_(false) ;
duke@435 235
duke@435 236 // This is NULL if each thread has its own thread profiler,
duke@435 237 // else this is the single thread profiler used by all threads.
duke@435 238 // In particular it makes a difference during garbage collection,
duke@435 239 // where you only want to traverse each thread profiler once.
kamg@2511 240 static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);
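// Illustrative sketch of the distinction described above (hypothetical, not
// the code in fprofiler.cpp): during GC root scanning the shared profiler,
// if there is one, is traversed exactly once; otherwise each thread visits
// its own ThreadProfiler.
//
//   ThreadProfiler* shared = FlatProfiler::get_thread_profiler();
//   if (shared != NULL) {
//     shared->oops_do(f);      // single shared profiler: visit it once here
//   } else {
//     // per-thread profilers are visited from each thread's own oops_do
//   }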
duke@435 241
duke@435 242 // Garbage Collection Support
duke@435 243 static void oops_do(OopClosure* f) KERNEL_RETURN ;
duke@435 244
duke@435 245 // Support for disassembler to inspect the PCRecorder
duke@435 246
duke@435 247 // Returns the start address for a given pc
duke@435 248 // NULL is returned if the PCRecorder is inactive
kamg@2511 249 static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);
duke@435 250
duke@435 251 enum { MillisecsPerTick = 10 }; // ms per profiling tick
duke@435 252
duke@435 253 // Returns the number of ticks recorded for the bucket
duke@435 254 // pc belongs to.
kamg@2511 255 static int bucket_count_for(address pc) KERNEL_RETURN_(0);
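// Worked example (illustrative only): with MillisecsPerTick == 10, a
// bucket_count_for(pc) of 25 corresponds to roughly 25 * 10 = 250 ms of
// samples attributed to pc's bucket over the profiled run.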
duke@435 256
duke@435 257 #ifndef FPROF_KERNEL
duke@435 258
duke@435 259 private:
duke@435 260 static bool full_profile() {
duke@435 261 return full_profile_flag;
duke@435 262 }
duke@435 263
duke@435 264 friend class ThreadProfiler;
duke@435 265 // the following group of ticks covers everything that's not attributed to individual Java methods
duke@435 266 static int received_gc_ticks; // ticks during which gc was active
duke@435 267 static int vm_operation_ticks; // total ticks in vm_operations other than GC
duke@435 268 static int threads_lock_ticks; // the number of times we couldn't get the Threads_lock without blocking
duke@435 269 static int blocked_ticks; // ticks when the thread was blocked.
duke@435 270 static int class_loader_ticks; // total ticks in class loader
duke@435 271 static int extra_ticks; // total ticks in the extra region (temporary measurements)
duke@435 272 static int compiler_ticks; // total ticks in compilation
duke@435 273 static int interpreter_ticks; // ticks in unknown interpreted method
duke@435 274 static int deopt_ticks; // ticks in deoptimization
duke@435 275 static int unknown_ticks; // ticks that cannot be categorized
duke@435 276 static int received_ticks; // ticks that were received by task
duke@435 277 static int delivered_ticks; // ticks that were delivered by task
duke@435 278 static int non_method_ticks() {
duke@435 279 return
duke@435 280 ( received_gc_ticks
duke@435 281 + vm_operation_ticks
duke@435 282 + deopt_ticks
duke@435 283 + threads_lock_ticks
duke@435 284 + blocked_ticks
duke@435 285 + compiler_ticks
duke@435 286 + interpreter_ticks
duke@435 287 + unknown_ticks );
duke@435 288 }
duke@435 289 static elapsedTimer timer;
duke@435 290
duke@435 291 // Counts of each of the byte codes
duke@435 292 static int* bytecode_ticks;
duke@435 293 static int* bytecode_ticks_stub;
duke@435 294 static void print_byte_code_statistics();
duke@435 295
duke@435 296 // the ticks below are for continuous profiling (to adjust recompilation, etc.)
duke@435 297 static int all_ticks; // total count of ticks received so far
duke@435 298 static int all_int_ticks; // ticks in interpreter
duke@435 299 static int all_comp_ticks; // ticks in compiled code (+ native)
duke@435 300 static bool full_profile_flag; // collecting full profile?
duke@435 301
duke@435 302 // to accumulate thread-specific data
duke@435 303 // if we aren't profiling individual threads.
duke@435 304 static ThreadProfiler* thread_profiler;
duke@435 305 static ThreadProfiler* vm_thread_profiler;
duke@435 306
duke@435 307 static void allocate_table();
duke@435 308
duke@435 309 // The task that periodically interrupts things.
duke@435 310 friend class FlatProfilerTask;
duke@435 311 static FlatProfilerTask* task;
duke@435 312 static void record_vm_operation();
duke@435 313 static void record_vm_tick();
duke@435 314 static void record_thread_ticks();
duke@435 315
duke@435 316 // For interval analysis
duke@435 317 private:
duke@435 318 static int interval_ticks_previous; // delivered_ticks from the last interval
duke@435 319 static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
duke@435 320 static void interval_print(); // print interval data.
duke@435 321 static void interval_reset(); // reset interval data.
duke@435 322 enum {interval_print_size = 10};
duke@435 323 static IntervalData* interval_data;
duke@435 324 #endif // FPROF_KERNEL
duke@435 325 };
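// Lifecycle sketch (illustration only, assuming a hypothetical driver; the
// real driver is the VM's -Xprof support): the profiler is engaged once, a
// FlatProfilerTask delivers a tick every MillisecsPerTick (10 ms), and the
// accumulated data is printed after disengaging.
//
//   void profile_run_example(JavaThread* main_thread) {
//     FlatProfiler::engage(main_thread, /* fullProfile */ true);
//     // ... run the application; ticks accumulate ...
//     FlatProfiler::disengage();
//     FlatProfiler::print(0);  // argument is unused, per the declaration above
//   }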
stefank@2314 326
stefank@2314 327 #endif // SHARE_VM_RUNTIME_FPROFILER_HPP
