Wed, 16 Jan 2013 16:30:04 +0100
8006403: Regression: jstack failed due to the FieldInfo regression in SA
Reviewed-by: sla, dholmes
Contributed-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
26 #define SHARE_VM_RUNTIME_FPROFILER_HPP
28 #include "runtime/timer.hpp"
30 // a simple flat profiler for Java
33 // Forward declaration of classes defined in this header file
34 class ThreadProfiler;
35 class ThreadProfilerMark;
36 class FlatProfiler;
37 class IntervalData;
39 // Declarations of classes defined only in the implementation.
40 class ProfilerNode;
41 class FlatProfilerTask;
// Classifies where a profiling tick landed: in generated (interpreted or
// compiled) code, or in native code.
enum TickPosition {
  tp_code,
  tp_native
};
// One of these guys is constructed as we enter interesting regions
// and destructed as we exit the region. While we are in the region
// ticks are allotted to the region.
// RAII-style stack object: the constructor flags the region as active on the
// current thread's ThreadProfiler and the destructor clears it.
// NOTE(review): when INCLUDE_FPROF is off, NOT_FPROF_RETURN presumably expands
// the ctor/dtor to empty bodies — confirm against the macro's definition.
class ThreadProfilerMark: public StackObj {
 public:
  // For now, the only thread-specific region is the class loader.
  enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };

  ThreadProfilerMark(Region) NOT_FPROF_RETURN;
  ~ThreadProfilerMark() NOT_FPROF_RETURN;

 private:
  ThreadProfiler* _pp;  // profiler of the thread this mark was created on
  Region _r;            // region entered; restored/cleared on destruction
};
64 #if INCLUDE_FPROF
66 class IntervalData VALUE_OBJ_CLASS_SPEC {
67 // Just to keep these things all together
68 private:
69 int _interpreted;
70 int _compiled;
71 int _native;
72 int _compiling;
73 public:
74 int interpreted() {
75 return _interpreted;
76 }
77 int compiled() {
78 return _compiled;
79 }
80 int native() {
81 return _native;
82 }
83 int compiling() {
84 return _compiling;
85 }
86 int total() {
87 return (interpreted() + compiled() + native() + compiling());
88 }
89 void inc_interpreted() {
90 _interpreted += 1;
91 }
92 void inc_compiled() {
93 _compiled += 1;
94 }
95 void inc_native() {
96 _native += 1;
97 }
98 void inc_compiling() {
99 _compiling += 1;
100 }
101 void reset() {
102 _interpreted = 0;
103 _compiled = 0;
104 _native = 0;
105 _compiling = 0;
106 }
107 static void print_header(outputStream* st);
108 void print_data(outputStream* st);
109 };
110 #endif // INCLUDE_FPROF
// Per-thread flat profiler: accumulates tick counts for one JavaThread.
// All profiling state is compiled in only when INCLUDE_FPROF is set;
// otherwise the public methods collapse via NOT_FPROF_RETURN.
class ThreadProfiler: public CHeapObj<mtInternal> {
 public:
  ThreadProfiler() NOT_FPROF_RETURN;
  ~ThreadProfiler() NOT_FPROF_RETURN;

  // Resets the profiler
  void reset() NOT_FPROF_RETURN;

  // Activates the profiler for a certain thread
  void engage() NOT_FPROF_RETURN;

  // Deactivates the profiler
  void disengage() NOT_FPROF_RETURN;

  // Prints the collected profiling information
  void print(const char* thread_name) NOT_FPROF_RETURN;

  // Garbage Collection Support
  void oops_do(OopClosure* f) NOT_FPROF_RETURN;

#if INCLUDE_FPROF
 private:
  // for recording ticks.
  friend class ProfilerNode;
  char* area_bottom; // preallocated area for pnodes
  char* area_top;    // current bump-allocation pointer within the area
  char* area_limit;  // end of the preallocated area
  static int table_size;  // number of buckets in the pnode hash table
  ProfilerNode** table;   // hash table of recorded methods/stubs

 private:
  // Tick-recording helpers; each classifies the frame and updates the
  // matching ProfilerNode entry. Defined in fprofiler.cpp.
  void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
  void record_compiled_tick   (JavaThread* thread, frame fr, TickPosition where);
  void interpreted_update(Method* method, TickPosition where);
  void compiled_update   (Method* method, TickPosition where);
  void stub_update       (Method* method, const char* name, TickPosition where);
  void adapter_update    (TickPosition where);

  void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
  void unknown_compiled_update    (const CodeBlob* cb, TickPosition where);

  void vm_update    (TickPosition where);
  void vm_update    (const char* name, TickPosition where);

  void record_tick_for_running_frame(JavaThread* thread, frame fr);
  void record_tick_for_calling_frame(JavaThread* thread, frame fr);

  void initialize();

  // Hash function mapping a value into the pnode table.
  static int entry(int value);

 private:
  friend class FlatProfiler;
  void record_tick(JavaThread* thread);
  bool engaged;  // true while this profiler is actively collecting ticks
  // so we can do percentages for this thread, and quick checks for activity
  int thread_ticks;
  int compiler_ticks;
  int interpreter_ticks;

 public:
  void inc_thread_ticks() { thread_ticks += 1; }

 private:
  friend class ThreadProfilerMark;
  // counters for thread-specific regions
  bool region_flag[ThreadProfilerMark::maxRegion];
  int class_loader_ticks;
  int extra_ticks;

 private:
  // other thread-specific regions
  int blocked_ticks;
  // Reasons a tick could not be attributed to a known location.
  enum UnknownTickSites {
      ut_null_method,
      ut_vtable_stubs,
      ut_running_frame,
      ut_calling_frame,
      ut_no_pc,
      ut_no_last_Java_frame,
      ut_unknown_thread_state,
      ut_end
  };
  int unknown_ticks_array[ut_end];  // one counter per unknown-tick site
  // Total of all unknown-tick counters.
  int unknown_ticks() {
    int result = 0;
    for (int ut = 0; ut < ut_end; ut += 1) {
      result += unknown_ticks_array[ut];
    }
    return result;
  }

  elapsedTimer timer;  // wall-clock time this profiler has been engaged

  // For interval timing
 private:
  IntervalData _interval_data;
  // By-value snapshot of the current interval's counters.
  IntervalData interval_data() {
    return _interval_data;
  }
  // Mutable reference for in-place updates/reset.
  IntervalData* interval_data_ref() {
    return &_interval_data;
  }
#endif // INCLUDE_FPROF
};
// Process-wide coordinator for the flat profiler: owns the periodic
// sampling task and the global tick counters that are not attributable
// to individual Java methods. All members are static (AllStatic).
class FlatProfiler: AllStatic {
 public:
  static void reset() NOT_FPROF_RETURN ;
  static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ;
  static void disengage() NOT_FPROF_RETURN ;
  static void print(int unused) NOT_FPROF_RETURN ;
  static bool is_active() NOT_FPROF_RETURN_(false) ;

  // This is NULL if each thread has its own thread profiler,
  // else this is the single thread profiler used by all threads.
  // In particular it makes a difference during garbage collection,
  // where you only want to traverse each thread profiler once.
  static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL);

  // Garbage Collection Support
  static void oops_do(OopClosure* f) NOT_FPROF_RETURN ;

  // Support for disassembler to inspect the PCRecorder

  // Returns the start address for a given pc
  // NULL is returned if the PCRecorder is inactive
  static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL);

  enum { MillisecsPerTick = 10 };   // ms per profiling ticks

  // Returns the number of ticks recorded for the bucket
  // pc belongs to.
  static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0);

#if INCLUDE_FPROF

 private:
  // True when a full (rather than sampled/partial) profile was requested.
  static bool full_profile() {
    return full_profile_flag;
  }

  friend class ThreadProfiler;
  // the following group of ticks cover everything that's not attributed to individual Java methods
  static int  received_gc_ticks;      // ticks during which gc was active
  static int vm_operation_ticks;      // total ticks in vm_operations other than GC
  static int threads_lock_ticks;      // the number of times we couldn't get the Threads_lock without blocking
  static int      blocked_ticks;      // ticks when the thread was blocked.
  static int class_loader_ticks;      // total ticks in class loader
  static int        extra_ticks;      // total ticks an extra temporary measuring
  static int     compiler_ticks;      // total ticks in compilation
  static int  interpreter_ticks;      // ticks in unknown interpreted method
  static int        deopt_ticks;      // ticks in deoptimization
  static int      unknown_ticks;      // ticks that cannot be categorized
  static int     received_ticks;      // ticks that were received by task
  static int    delivered_ticks;      // ticks that were delivered by task
  // Sum of all non-method tick categories above (except the raw
  // received/delivered bookkeeping counters).
  static int non_method_ticks() {
    return
      ( received_gc_ticks
      + vm_operation_ticks
      + deopt_ticks
      + threads_lock_ticks
      + blocked_ticks
      + compiler_ticks
      + interpreter_ticks
      + unknown_ticks );
  }
  static elapsedTimer timer;  // total wall-clock time the profiler has run

  // Counts of each of the byte codes
  static int*           bytecode_ticks;
  static int*           bytecode_ticks_stub;
  static void print_byte_code_statistics();

  // the ticks below are for continuous profiling (to adjust recompilation, etc.)
  static int          all_ticks;      // total count of ticks received so far
  static int      all_int_ticks;      // ticks in interpreter
  static int     all_comp_ticks;      // ticks in compiled code (+ native)
  static bool full_profile_flag;      // collecting full profile?

  // to accumulate thread-specific data
  // if we aren't profiling individual threads.
  static ThreadProfiler* thread_profiler;
  static ThreadProfiler* vm_thread_profiler;

  static void allocate_table();

  // The task that periodically interrupts things.
  friend class FlatProfilerTask;
  static FlatProfilerTask* task;
  static void record_vm_operation();
  static void record_vm_tick();
  static void record_thread_ticks();

  // For interval analysis
 private:
  static int interval_ticks_previous;  // delivered_ticks from the last interval
  static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
  static void interval_print();       // print interval data.
  static void interval_reset();       // reset interval data.
  enum {interval_print_size = 10};    // number of top entries printed per interval
  static IntervalData* interval_data; // per-interval accumulation buffer
#endif // INCLUDE_FPROF
};
318 #endif // SHARE_VM_RUNTIME_FPROFILER_HPP