src/share/vm/runtime/fprofiler.cpp

author       drchase
date         Thu, 22 May 2014 15:52:41 -0400
changeset    6680:78bbf4d43a14
parent       5617:491de79915eb
child        6876:710a3c8b516e
child        6911:ce8f6bb717c9
permissions  -rw-r--r--

8037816: Fix for 8036122 breaks build with Xcode5/clang
8043029: Change 8037816 breaks HS build with older GCC versions which don't support diagnostic pragmas
8043164: Format warning in traceStream.hpp
Summary: Backport of main fix + two corrections, enables clang compilation, turns on format attributes, corrects/mutes warnings
Reviewed-by: kvn, coleenp, iveresov, twisti
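
The warning work above rests on two standard compiler features: printf-style format attributes on the VM's printing entry points, which let GCC and clang type-check variable arguments, and diagnostic pragmas used to mute the resulting format warnings in files (such as this one) where they are silenced rather than fixed. A minimal sketch of the mechanism, using hypothetical names rather than the actual HotSpot macro definitions:

  // Illustrative only -- not the HotSpot definitions.
  // Format attribute: the compiler checks arguments against the format string.
  void my_print(const char* fmt, ...) __attribute__((format(printf, 1, 2)));

  // Diagnostic pragma: mute -Wformat for the remainder of the file.
  // Older GCC releases lack this pragma, which is what 8043029 addresses.
  #pragma GCC diagnostic ignored "-Wformat"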

     1 /*
     2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/classLoader.hpp"
    27 #include "code/vtableStubs.hpp"
    28 #include "gc_interface/collectedHeap.inline.hpp"
    29 #include "interpreter/interpreter.hpp"
    30 #include "memory/allocation.inline.hpp"
    31 #include "memory/universe.inline.hpp"
    32 #include "oops/oop.inline.hpp"
    33 #include "oops/oop.inline2.hpp"
    34 #include "oops/symbol.hpp"
    35 #include "runtime/deoptimization.hpp"
    36 #include "runtime/fprofiler.hpp"
    37 #include "runtime/mutexLocker.hpp"
    38 #include "runtime/stubCodeGenerator.hpp"
    39 #include "runtime/stubRoutines.hpp"
    40 #include "runtime/task.hpp"
    41 #include "runtime/vframe.hpp"
    42 #include "utilities/macros.hpp"
    44 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
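       // The PRAGMA_* indirection exists because older GCC releases do not support
       // diagnostic pragmas (8043029); where supported, this mutes format warnings
       // for the rest of this file.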
    46 // Static fields of FlatProfiler
    47 int               FlatProfiler::received_gc_ticks   = 0;
    48 int               FlatProfiler::vm_operation_ticks  = 0;
    49 int               FlatProfiler::threads_lock_ticks  = 0;
    50 int               FlatProfiler::class_loader_ticks  = 0;
    51 int               FlatProfiler::extra_ticks         = 0;
    52 int               FlatProfiler::blocked_ticks       = 0;
    53 int               FlatProfiler::deopt_ticks         = 0;
    54 int               FlatProfiler::unknown_ticks       = 0;
    55 int               FlatProfiler::interpreter_ticks   = 0;
    56 int               FlatProfiler::compiler_ticks      = 0;
    57 int               FlatProfiler::received_ticks      = 0;
    58 int               FlatProfiler::delivered_ticks     = 0;
    59 int*              FlatProfiler::bytecode_ticks      = NULL;
    60 int*              FlatProfiler::bytecode_ticks_stub = NULL;
    61 int               FlatProfiler::all_int_ticks       = 0;
    62 int               FlatProfiler::all_comp_ticks      = 0;
    63 int               FlatProfiler::all_ticks           = 0;
    64 bool              FlatProfiler::full_profile_flag   = false;
    65 ThreadProfiler*   FlatProfiler::thread_profiler     = NULL;
    66 ThreadProfiler*   FlatProfiler::vm_thread_profiler  = NULL;
    67 FlatProfilerTask* FlatProfiler::task                = NULL;
    68 elapsedTimer      FlatProfiler::timer;
    69 int               FlatProfiler::interval_ticks_previous = 0;
    70 IntervalData*     FlatProfiler::interval_data       = NULL;
    72 ThreadProfiler::ThreadProfiler() {
    73   // Space for the ProfilerNodes
    74   const int area_size = 1 * ProfilerNodeSize * 1024;
    75   area_bottom = AllocateHeap(area_size, mtInternal);
    76   area_top    = area_bottom;
    77   area_limit  = area_bottom + area_size;
    79   // ProfilerNode pointer table
    80   table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal);
    81   initialize();
    82   engaged = false;
    83 }
    85 ThreadProfiler::~ThreadProfiler() {
    86   FreeHeap(area_bottom);
    87   area_bottom = NULL;
    88   area_top = NULL;
    89   area_limit = NULL;
    90   FreeHeap(table);
    91   table = NULL;
    92 }
    94 // Statics for ThreadProfiler
    95 int ThreadProfiler::table_size = 1024;
    97 int ThreadProfiler::entry(int  value) {
    98   value = (value > 0) ? value : -value;
    99   return value % table_size;
   100 }
   102 ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
   103   _r = r;
   104   _pp = NULL;
   105   assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
   106   Thread* tp = Thread::current();
   107   if (tp != NULL && tp->is_Java_thread()) {
   108     JavaThread* jtp = (JavaThread*) tp;
   109     ThreadProfiler* pp = jtp->get_thread_profiler();
   110     _pp = pp;
   111     if (pp != NULL) {
   112       pp->region_flag[r] = true;
   113     }
   114   }
   115 }
   117 ThreadProfilerMark::~ThreadProfilerMark() {
   118   if (_pp != NULL) {
   119     _pp->region_flag[_r] = false;
   120   }
   121   _pp = NULL;
   122 }
   124 // Random other statics
   125 static const int col1 = 2;      // position of output column 1
   126 static const int col2 = 11;     // position of output column 2
   127 static const int col3 = 25;     // position of output column 3
   128 static const int col4 = 55;     // position of output column 4
   131 // Used for detailed profiling of nmethods.
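       // PCs are mapped onto fixed-size buckets (bucket_size bytes) spanning the code cache;
       // record() bumps the bucket holding the sampled pc, and print() lists the code blobs
       // whose buckets collected more than ProfilerPCTickThreshold ticks.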
   132 class PCRecorder : AllStatic {
   133  private:
   134   static int*    counters;
   135   static address base;
   136   enum {
   137    bucket_size = 16
   138   };
   139   static int     index_for(address pc) { return (pc - base)/bucket_size;   }
   140   static address pc_for(int index)     { return base + (index * bucket_size); }
   141   static int     size() {
   142     return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
   143   }
   144  public:
   145   static address bucket_start_for(address pc) {
   146     if (counters == NULL) return NULL;
   147     return pc_for(index_for(pc));
   148   }
   149   static int bucket_count_for(address pc)  { return counters[index_for(pc)]; }
   150   static void init();
   151   static void record(address pc);
   152   static void print();
   153   static void print_blobs(CodeBlob* cb);
   154 };
   156 int*    PCRecorder::counters = NULL;
   157 address PCRecorder::base     = NULL;
   159 void PCRecorder::init() {
   160   MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   161   int s = size();
   162   counters = NEW_C_HEAP_ARRAY(int, s, mtInternal);
   163   for (int index = 0; index < s; index++) {
   164     counters[index] = 0;
   165   }
   166   base = CodeCache::first_address();
   167 }
   169 void PCRecorder::record(address pc) {
   170   if (counters == NULL) return;
   171   assert(CodeCache::contains(pc), "must be in CodeCache");
   172   counters[index_for(pc)]++;
   173 }
   176 address FlatProfiler::bucket_start_for(address pc) {
   177   return PCRecorder::bucket_start_for(pc);
   178 }
   180 int FlatProfiler::bucket_count_for(address pc) {
   181   return PCRecorder::bucket_count_for(pc);
   182 }
   184 void PCRecorder::print() {
   185   if (counters == NULL) return;
   187   tty->cr();
   188   tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold);
   189   tty->print_cr("===================================================================");
   190   tty->cr();
   192   GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);
   195   int s;
   196   {
   197     MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   198     s = size();
   199   }
   201   for (int index = 0; index < s; index++) {
   202     int count = counters[index];
   203     if (count > ProfilerPCTickThreshold) {
   204       address pc = pc_for(index);
   205       CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
   206       if (cb != NULL && candidates->find(cb) < 0) {
   207         candidates->push(cb);
   208       }
   209     }
   210   }
   211   for (int i = 0; i < candidates->length(); i++) {
   212     print_blobs(candidates->at(i));
   213   }
   214 }
   216 void PCRecorder::print_blobs(CodeBlob* cb) {
   217   if (cb != NULL) {
   218     cb->print();
   219     if (cb->is_nmethod()) {
   220       ((nmethod*)cb)->print_code();
   221     }
   222     tty->cr();
   223   } else {
   224     tty->print_cr("stub code");
   225   }
   226 }
   228 class tick_counter {            // holds tick info for one node
   229  public:
   230   int ticks_in_code;
   231   int ticks_in_native;
   233   tick_counter()                     {  ticks_in_code = ticks_in_native = 0; }
   234   tick_counter(int code, int native) {  ticks_in_code = code; ticks_in_native = native; }
   236   int total() const {
   237     return (ticks_in_code + ticks_in_native);
   238   }
   240   void add(tick_counter* a) {
   241     ticks_in_code += a->ticks_in_code;
   242     ticks_in_native += a->ticks_in_native;
   243   }
   245   void update(TickPosition where) {
   246     switch(where) {
   247       case tp_code:     ticks_in_code++;       break;
   248       case tp_native:   ticks_in_native++;      break;
   249     }
   250   }
   252   void print_code(outputStream* st, int total_ticks) {
   253     st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
   254   }
   256   void print_native(outputStream* st) {
   257     st->print(" + %5d ", ticks_in_native);
   258   }
   259 };
   261 class ProfilerNode {
   262  private:
   263   ProfilerNode* _next;
   264  public:
   265   tick_counter ticks;
   267  public:
   269   void* operator new(size_t size, ThreadProfiler* tp) throw();
   270   void  operator delete(void* p);
   272   ProfilerNode() {
   273     _next = NULL;
   274   }
   276   virtual ~ProfilerNode() {
   277     if (_next)
   278       delete _next;
   279   }
   281   void set_next(ProfilerNode* n) { _next = n; }
   282   ProfilerNode* next()           { return _next; }
   284   void update(TickPosition where) { ticks.update(where);}
   285   int total_ticks() { return ticks.total(); }
   287   virtual bool is_interpreted() const { return false; }
   288   virtual bool is_compiled()    const { return false; }
   289   virtual bool is_stub()        const { return false; }
   290   virtual bool is_runtime_stub() const{ return false; }
   291   virtual void oops_do(OopClosure* f) = 0;
   293   virtual bool interpreted_match(Method* m) const { return false; }
   294   virtual bool compiled_match(Method* m ) const { return false; }
   295   virtual bool stub_match(Method* m, const char* name) const { return false; }
   296   virtual bool adapter_match() const { return false; }
   297   virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
   298   virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }
   300   static void print_title(outputStream* st) {
   301     st->print(" + native");
   302     st->fill_to(col3);
   303     st->print("Method");
   304     st->fill_to(col4);
   305     st->cr();
   306   }
   308   static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
   309     t->print_code(st, total);
   310     st->fill_to(col2);
   311     t->print_native(st);
   312     st->fill_to(col3);
   313     st->print("%s", msg);
   314     st->cr();
   315   }
   317   virtual Method* method()         = 0;
   319   virtual void print_method_on(outputStream* st) {
   320     int limit;
   321     int i;
   322     Method* m = method();
   323     Symbol* k = m->klass_name();
   324     // Print the class name with dots instead of slashes
   325     limit = k->utf8_length();
   326     for (i = 0 ; i < limit ; i += 1) {
   327       char c = (char) k->byte_at(i);
   328       if (c == '/') {
   329         c = '.';
   330       }
   331       st->print("%c", c);
   332     }
   333     if (limit > 0) {
   334       st->print(".");
   335     }
   336     Symbol* n = m->name();
   337     limit = n->utf8_length();
   338     for (i = 0 ; i < limit ; i += 1) {
   339       char c = (char) n->byte_at(i);
   340       st->print("%c", c);
   341     }
   342     if (Verbose || WizardMode) {
   343       // Disambiguate overloaded methods
   344       Symbol* sig = m->signature();
   345       sig->print_symbol_on(st);
   346     } else if (MethodHandles::is_signature_polymorphic(m->intrinsic_id()))
   347       // compare with Method::print_short_name
   348       MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true);
   349   }
   351   virtual void print(outputStream* st, int total_ticks) {
   352     ticks.print_code(st, total_ticks);
   353     st->fill_to(col2);
   354     ticks.print_native(st);
   355     st->fill_to(col3);
   356     print_method_on(st);
   357     st->cr();
   358   }
   360   // for hashing into the table
   361   static int hash(Method* method) {
   362       // The point here is to try to make something fairly unique
   363       // out of the fields we can read without grabbing any locks
   364       // since the method may be locked when we need the hash.
   365       return (
   366           method->code_size() ^
   367           method->max_stack() ^
   368           method->max_locals() ^
   369           method->size_of_parameters());
   370   }
   372   // for sorting
   373   static int compare(ProfilerNode** a, ProfilerNode** b) {
   374     return (*b)->total_ticks() - (*a)->total_ticks();
   375   }
   376 };
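       // Nodes are bump-allocated from the owning ThreadProfiler's arena (area_bottom..area_limit);
       // operator delete is a no-op because the arena is released wholesale in ~ThreadProfiler.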
   378 void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
   379   void* result = (void*) tp->area_top;
   380   tp->area_top += size;
   382   if (tp->area_top > tp->area_limit) {
   383     fatal("flat profiler buffer overflow");
   384   }
   385   return result;
   386 }
   388 void ProfilerNode::operator delete(void* p){
   389 }
   391 class interpretedNode : public ProfilerNode {
   392  private:
   393    Method* _method;
   394    oop       _class_loader;  // needed to keep metadata for the method alive
   395  public:
   396    interpretedNode(Method* method, TickPosition where) : ProfilerNode() {
   397      _method = method;
   398      _class_loader = method->method_holder()->class_loader();
   399      update(where);
   400    }
   402    bool is_interpreted() const { return true; }
   404    bool interpreted_match(Method* m) const {
   405       return _method == m;
   406    }
   408    void oops_do(OopClosure* f) {
   409      f->do_oop(&_class_loader);
   410    }
   412    Method* method() { return _method; }
   414    static void print_title(outputStream* st) {
   415      st->fill_to(col1);
   416      st->print("%11s", "Interpreted");
   417      ProfilerNode::print_title(st);
   418    }
   420    void print(outputStream* st, int total_ticks) {
   421      ProfilerNode::print(st, total_ticks);
   422    }
   424    void print_method_on(outputStream* st) {
   425      ProfilerNode::print_method_on(st);
   426      MethodCounters* mcs = method()->method_counters();
   427      if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short();
   428    }
   429 };
   431 class compiledNode : public ProfilerNode {
   432  private:
   433    Method* _method;
   434    oop       _class_loader;  // needed to keep metadata for the method alive
   435  public:
   436    compiledNode(Method* method, TickPosition where) : ProfilerNode() {
   437      _method = method;
   438      _class_loader = method->method_holder()->class_loader();
   439      update(where);
   440   }
   441   bool is_compiled()    const { return true; }
   443   bool compiled_match(Method* m) const {
   444     return _method == m;
   445   }
   447   Method* method()         { return _method; }
   449   void oops_do(OopClosure* f) {
   450     f->do_oop(&_class_loader);
   451   }
   453   static void print_title(outputStream* st) {
   454     st->fill_to(col1);
   455     st->print("%11s", "Compiled");
   456     ProfilerNode::print_title(st);
   457   }
   459   void print(outputStream* st, int total_ticks) {
   460     ProfilerNode::print(st, total_ticks);
   461   }
   463   void print_method_on(outputStream* st) {
   464     ProfilerNode::print_method_on(st);
   465   }
   466 };
   468 class stubNode : public ProfilerNode {
   469  private:
   470   Method* _method;
   471   oop       _class_loader;  // needed to keep metadata for the method alive
   472   const char* _symbol;   // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
   473  public:
   474    stubNode(Method* method, const char* name, TickPosition where) : ProfilerNode() {
   475      _method = method;
   476      _class_loader = method->method_holder()->class_loader();
   477      _symbol = name;
   478      update(where);
   479    }
   481    bool is_stub() const { return true; }
   483    void oops_do(OopClosure* f) {
   484      f->do_oop(&_class_loader);
   485    }
   487    bool stub_match(Method* m, const char* name) const {
   488      return (_method == m) && (_symbol == name);
   489    }
   491    Method* method() { return _method; }
   493    static void print_title(outputStream* st) {
   494      st->fill_to(col1);
   495      st->print("%11s", "Stub");
   496      ProfilerNode::print_title(st);
   497    }
   499    void print(outputStream* st, int total_ticks) {
   500      ProfilerNode::print(st, total_ticks);
   501    }
   503    void print_method_on(outputStream* st) {
   504      ProfilerNode::print_method_on(st);
   505      print_symbol_on(st);
   506    }
   508   void print_symbol_on(outputStream* st) {
   509     if(_symbol) {
   510       st->print("  (%s)", _symbol);
   511     }
   512   }
   513 };
   515 class adapterNode : public ProfilerNode {
   516  public:
   517    adapterNode(TickPosition where) : ProfilerNode() {
   518      update(where);
   519   }
   520   bool is_compiled()    const { return true; }
   522   bool adapter_match() const { return true; }
   524   Method* method()         { return NULL; }
   526   void oops_do(OopClosure* f) {
   527     ;
   528   }
   530   void print(outputStream* st, int total_ticks) {
   531     ProfilerNode::print(st, total_ticks);
   532   }
   534   void print_method_on(outputStream* st) {
   535     st->print("%s", "adapters");
   536   }
   537 };
   539 class runtimeStubNode : public ProfilerNode {
   540  private:
   541    const CodeBlob* _stub;
   542   const char* _symbol;     // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
   543  public:
   544    runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub),  _symbol(name) {
   545      assert(stub->is_runtime_stub(), "wrong code blob");
   546      update(where);
   547    }
   549   bool is_runtime_stub() const { return true; }
   551   bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
   552     assert(stub->is_runtime_stub(), "wrong code blob");
   553     return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
   554             (_symbol == name);
   555   }
   557   Method* method() { return NULL; }
   559   static void print_title(outputStream* st) {
   560     st->fill_to(col1);
   561     st->print("%11s", "Runtime stub");
   562     ProfilerNode::print_title(st);
   563   }
   565   void oops_do(OopClosure* f) {
   566     ;
   567   }
   569   void print(outputStream* st, int total_ticks) {
   570     ProfilerNode::print(st, total_ticks);
   571   }
   573   void print_method_on(outputStream* st) {
   574     st->print("%s", ((RuntimeStub*)_stub)->name());
   575     print_symbol_on(st);
   576   }
   578   void print_symbol_on(outputStream* st) {
   579     if(_symbol) {
   580       st->print("  (%s)", _symbol);
   581     }
   582   }
   583 };
   586 class unknown_compiledNode : public ProfilerNode {
   587  const char *_name;
   588  public:
   589    unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
   590      if ( cb->is_buffer_blob() )
   591        _name = ((BufferBlob*)cb)->name();
   592      else
   593        _name = ((SingletonBlob*)cb)->name();
   594      update(where);
   595   }
   596   bool is_compiled()    const { return true; }
   598   bool unknown_compiled_match(const CodeBlob* cb) const {
   599      if ( cb->is_buffer_blob() )
   600        return !strcmp(((BufferBlob*)cb)->name(), _name);
   601      else
   602        return !strcmp(((SingletonBlob*)cb)->name(), _name);
   603   }
   605   Method* method()         { return NULL; }
   607   void oops_do(OopClosure* f) {
   608     ;
   609   }
   611   void print(outputStream* st, int total_ticks) {
   612     ProfilerNode::print(st, total_ticks);
   613   }
   615   void print_method_on(outputStream* st) {
   616     st->print("%s", _name);
   617   }
   618 };
   620 class vmNode : public ProfilerNode {
   621  private:
   622   const char* _name; // "optional" name obtained by os means such as dll lookup
   623  public:
   624   vmNode(const TickPosition where) : ProfilerNode() {
   625     _name = NULL;
   626     update(where);
   627   }
   629   vmNode(const char* name, const TickPosition where) : ProfilerNode() {
   630     _name = name;
   631     update(where);
   632   }
   634   const char *name()    const { return _name; }
   635   bool is_compiled()    const { return true; }
   637   bool vm_match(const char* name) const { return strcmp(name, _name) == 0; }
   639   Method* method()          { return NULL; }
   641   static int hash(const char* name){
   642     // Compute a simple hash
   643     const char* cp = name;
   644     int h = 0;
   646     if(name != NULL){
   647       while(*cp != '\0'){
   648         h = (h << 1) ^ *cp;
   649         cp++;
   650       }
   651     }
   652     return h;
   653   }
   655   void oops_do(OopClosure* f) {
   656     ;
   657   }
   659   void print(outputStream* st, int total_ticks) {
   660     ProfilerNode::print(st, total_ticks);
   661   }
   663   void print_method_on(outputStream* st) {
   664     if(_name==NULL){
   665       st->print("%s", "unknown code");
   666     }
   667     else {
   668       st->print("%s", _name);
   669     }
   670   }
   671 };
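       // The *_update methods below share one pattern: hash into the node table, walk the
       // collision chain for a node matching this method/stub/blob and bump its tick counter,
       // otherwise append a new node allocated from this profiler's arena. Adapter, runtime-stub
       // and unknown-blob ticks all use slot 0.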
   673 void ThreadProfiler::interpreted_update(Method* method, TickPosition where) {
   674   int index = entry(ProfilerNode::hash(method));
   675   if (!table[index]) {
   676     table[index] = new (this) interpretedNode(method, where);
   677   } else {
   678     ProfilerNode* prev = table[index];
   679     for(ProfilerNode* node = prev; node; node = node->next()) {
   680       if (node->interpreted_match(method)) {
   681         node->update(where);
   682         return;
   683       }
   684       prev = node;
   685     }
   686     prev->set_next(new (this) interpretedNode(method, where));
   687   }
   688 }
   690 void ThreadProfiler::compiled_update(Method* method, TickPosition where) {
   691   int index = entry(ProfilerNode::hash(method));
   692   if (!table[index]) {
   693     table[index] = new (this) compiledNode(method, where);
   694   } else {
   695     ProfilerNode* prev = table[index];
   696     for(ProfilerNode* node = prev; node; node = node->next()) {
   697       if (node->compiled_match(method)) {
   698         node->update(where);
   699         return;
   700       }
   701       prev = node;
   702     }
   703     prev->set_next(new (this) compiledNode(method, where));
   704   }
   705 }
   707 void ThreadProfiler::stub_update(Method* method, const char* name, TickPosition where) {
   708   int index = entry(ProfilerNode::hash(method));
   709   if (!table[index]) {
   710     table[index] = new (this) stubNode(method, name, where);
   711   } else {
   712     ProfilerNode* prev = table[index];
   713     for(ProfilerNode* node = prev; node; node = node->next()) {
   714       if (node->stub_match(method, name)) {
   715         node->update(where);
   716         return;
   717       }
   718       prev = node;
   719     }
   720     prev->set_next(new (this) stubNode(method, name, where));
   721   }
   722 }
   724 void ThreadProfiler::adapter_update(TickPosition where) {
   725   int index = 0;
   726   if (!table[index]) {
   727     table[index] = new (this) adapterNode(where);
   728   } else {
   729     ProfilerNode* prev = table[index];
   730     for(ProfilerNode* node = prev; node; node = node->next()) {
   731       if (node->adapter_match()) {
   732         node->update(where);
   733         return;
   734       }
   735       prev = node;
   736     }
   737     prev->set_next(new (this) adapterNode(where));
   738   }
   739 }
   741 void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
   742   int index = 0;
   743   if (!table[index]) {
   744     table[index] = new (this) runtimeStubNode(stub, name, where);
   745   } else {
   746     ProfilerNode* prev = table[index];
   747     for(ProfilerNode* node = prev; node; node = node->next()) {
   748       if (node->runtimeStub_match(stub, name)) {
   749         node->update(where);
   750         return;
   751       }
   752       prev = node;
   753     }
   754     prev->set_next(new (this) runtimeStubNode(stub, name, where));
   755   }
   756 }
   759 void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
   760   int index = 0;
   761   if (!table[index]) {
   762     table[index] = new (this) unknown_compiledNode(cb, where);
   763   } else {
   764     ProfilerNode* prev = table[index];
   765     for(ProfilerNode* node = prev; node; node = node->next()) {
   766       if (node->unknown_compiled_match(cb)) {
   767         node->update(where);
   768         return;
   769       }
   770       prev = node;
   771     }
   772     prev->set_next(new (this) unknown_compiledNode(cb, where));
   773   }
   774 }
   776 void ThreadProfiler::vm_update(TickPosition where) {
   777   vm_update(NULL, where);
   778 }
   780 void ThreadProfiler::vm_update(const char* name, TickPosition where) {
   781   int index = entry(vmNode::hash(name));
   782   assert(index >= 0, "Must be positive");
   783   // Note that we call strdup below since the symbol may be resource allocated
   784   if (!table[index]) {
   785     table[index] = new (this) vmNode(os::strdup(name), where);
   786   } else {
   787     ProfilerNode* prev = table[index];
   788     for(ProfilerNode* node = prev; node; node = node->next()) {
   789       if (((vmNode *)node)->vm_match(name)) {
   790         node->update(where);
   791         return;
   792       }
   793       prev = node;
   794     }
   795     prev->set_next(new (this) vmNode(os::strdup(name), where));
   796   }
   797 }
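       // FlatProfilerTask is a PeriodicTask: task() runs on the WatcherThread once per profiling
       // interval (FlatProfiler::engage enrolls it with WatcherThread::delay_interval) and
       // delivers one tick.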
   800 class FlatProfilerTask : public PeriodicTask {
   801 public:
   802   FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
   803   void task();
   804 };
   806 void FlatProfiler::record_vm_operation() {
   807   if (Universe::heap()->is_gc_active()) {
   808     FlatProfiler::received_gc_ticks += 1;
   809     return;
   810   }
   812   if (DeoptimizationMarker::is_active()) {
   813     FlatProfiler::deopt_ticks += 1;
   814     return;
   815   }
   817   FlatProfiler::vm_operation_ticks += 1;
   818 }
   820 void FlatProfiler::record_vm_tick() {
   821   // Profile the VM Thread itself if needed
   822   // This is done without getting the Threads_lock and we can go deep
   823   // inside Safepoint, etc.
   824   if( ProfileVM  ) {
   825     ResourceMark rm;
   826     ExtendedPC epc;
   827     const char *name = NULL;
   828     char buf[256];
   829     buf[0] = '\0';
   831     vm_thread_profiler->inc_thread_ticks();
   833     // Get a snapshot of a current VMThread pc (and leave it running!)
   834     // The call may fail if, for instance the VM thread is interrupted while
   835     // holding the Interrupt_lock or for other reasons.
   836     epc = os::get_thread_pc(VMThread::vm_thread());
   837     if(epc.pc() != NULL) {
   838       if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
   839          name = buf;
   840       }
   841     }
   842     if (name != NULL) {
   843       vm_thread_profiler->vm_update(name, tp_native);
   844     }
   845   }
   846 }
   848 void FlatProfiler::record_thread_ticks() {
   850   int maxthreads, suspendedthreadcount;
   851   JavaThread** threadsList;
   852   bool interval_expired = false;
   854   if (ProfileIntervals &&
   855       (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
   856     interval_expired = true;
   857     interval_ticks_previous = FlatProfiler::received_ticks;
   858   }
   860   // Try not to wait for the Threads_lock
   861   if (Threads_lock->try_lock()) {
   862     {  // Threads_lock scope
   863       maxthreads = Threads::number_of_threads();
   864       threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal);
   865       suspendedthreadcount = 0;
   866       for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
   867         if (tp->is_Compiler_thread()) {
   868           // Only record ticks for active compiler threads
   869           CompilerThread* cthread = (CompilerThread*)tp;
   870           if (cthread->task() != NULL) {
   871             // The compiler is active.  If we need to access any of the fields
   872             // of the compiler task we should suspend the CompilerThread first.
   873             FlatProfiler::compiler_ticks += 1;
   874             continue;
   875           }
   876         }
   878         // First externally suspend all threads by marking each for
   879         // external suspension - so it will stop at its next transition
   880         // Then do a safepoint
   881         ThreadProfiler* pp = tp->get_thread_profiler();
   882         if (pp != NULL && pp->engaged) {
   883           MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
   884           if (!tp->is_external_suspend() && !tp->is_exiting()) {
   885             tp->set_external_suspend();
   886             threadsList[suspendedthreadcount++] = tp;
   887           }
   888         }
   889       }
   890       Threads_lock->unlock();
   891     }
   892     // Suspend each thread. This call should just return
   893     // for any threads that have already self-suspended
   894     // Net result should be one safepoint
   895     for (int j = 0; j < suspendedthreadcount; j++) {
   896       JavaThread *tp = threadsList[j];
   897       if (tp) {
   898         tp->java_suspend();
   899       }
   900     }
   902     // We are responsible for resuming any thread on this list
   903     for (int i = 0; i < suspendedthreadcount; i++) {
   904       JavaThread *tp = threadsList[i];
   905       if (tp) {
   906         ThreadProfiler* pp = tp->get_thread_profiler();
   907         if (pp != NULL && pp->engaged) {
   908           HandleMark hm;
   909           FlatProfiler::delivered_ticks += 1;
   910           if (interval_expired) {
   911           FlatProfiler::interval_record_thread(pp);
   912           }
   913           // This is the place where we check to see if a user thread is
   914           // blocked waiting for compilation.
   915           if (tp->blocked_on_compilation()) {
   916             pp->compiler_ticks += 1;
   917             pp->interval_data_ref()->inc_compiling();
   918           } else {
   919             pp->record_tick(tp);
   920           }
   921         }
   922         MutexLocker ml(Threads_lock);
   923         tp->java_resume();
   924       }
   925     }
   926     if (interval_expired) {
   927       FlatProfiler::interval_print();
   928       FlatProfiler::interval_reset();
   929     }
   931     FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
   932   } else {
   933     // Couldn't get the threads lock, just record that rather than blocking
   934     FlatProfiler::threads_lock_ticks += 1;
   935   }
   937 }
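       // Per-tick entry point: count the tick, sample the VM thread when ProfileVM is set,
       // attribute ticks observed during a VM operation (GC, deoptimization, other) to the VM,
       // and record per-thread ticks by briefly suspending the engaged Java threads.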
   939 void FlatProfilerTask::task() {
   940   FlatProfiler::received_ticks += 1;
   942   if (ProfileVM) {
   943     FlatProfiler::record_vm_tick();
   944   }
   946   VM_Operation* op = VMThread::vm_operation();
   947   if (op != NULL) {
   948     FlatProfiler::record_vm_operation();
   949     if (SafepointSynchronize::is_at_safepoint()) {
   950       return;
   951     }
   952   }
   953   FlatProfiler::record_thread_ticks();
   954 }
   956 void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
   957   FlatProfiler::all_int_ticks++;
   958   if (!FlatProfiler::full_profile()) {
   959     return;
   960   }
   962   if (!fr.is_interpreted_frame_valid(thread)) {
   963     // tick came at a bad time
   964     interpreter_ticks += 1;
   965     FlatProfiler::interpreter_ticks += 1;
   966     return;
   967   }
   969   // The frame has been fully validated so we can trust the method and bci
   971   Method* method = *fr.interpreter_frame_method_addr();
   973   interpreted_update(method, where);
   975   // update byte code table
   976   InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
   977   if (desc != NULL && desc->bytecode() >= 0) {
   978     ticks[desc->bytecode()]++;
   979   }
   980 }
   982 void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
   983   const char *name = NULL;
   984   TickPosition localwhere = where;
   986   FlatProfiler::all_comp_ticks++;
   987   if (!FlatProfiler::full_profile()) return;
   989   CodeBlob* cb = fr.cb();
   991 // For runtime stubs, record as native rather than as compiled
   992    if (cb->is_runtime_stub()) {
   993         RegisterMap map(thread, false);
   994         fr = fr.sender(&map);
   995         cb = fr.cb();
   996         localwhere = tp_native;
   997   }
   998   Method* method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
   999                                           (Method*)NULL;
  1001   if (method == NULL) {
  1002     if (cb->is_runtime_stub())
  1003       runtime_stub_update(cb, name, localwhere);
  1004     else
  1005       unknown_compiled_update(cb, localwhere);
  1006   }
  1007   else {
  1008     if (method->is_native()) {
  1009       stub_update(method, name, localwhere);
  1010     } else {
  1011       compiled_update(method, localwhere);
  1012     }
  1013   }
  1014 }
  1016 extern "C" void find(int x);
  1019 void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
  1020   // The tick happened in real code -> non VM code
  1021   if (fr.is_interpreted_frame()) {
  1022     interval_data_ref()->inc_interpreted();
  1023     record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
  1024     return;
  1025   }
  1027   if (CodeCache::contains(fr.pc())) {
  1028     interval_data_ref()->inc_compiled();
  1029     PCRecorder::record(fr.pc());
  1030     record_compiled_tick(thread, fr, tp_code);
  1031     return;
  1032   }
  1034   if (VtableStubs::stub_containing(fr.pc()) != NULL) {
  1035     unknown_ticks_array[ut_vtable_stubs] += 1;
  1036     return;
  1037   }
  1039   frame caller = fr.profile_find_Java_sender_frame(thread);
  1041   if (caller.sp() != NULL && caller.pc() != NULL) {
  1042     record_tick_for_calling_frame(thread, caller);
  1043     return;
  1044   }
  1046   unknown_ticks_array[ut_running_frame] += 1;
  1047   FlatProfiler::unknown_ticks += 1;
  1048 }
  1050 void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
  1051   // The tick happened in VM code
  1052   interval_data_ref()->inc_native();
  1053   if (fr.is_interpreted_frame()) {
  1054     record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
  1055     return;
  1056   }
  1057   if (CodeCache::contains(fr.pc())) {
  1058     record_compiled_tick(thread, fr, tp_native);
  1059     return;
  1060   }
  1062   frame caller = fr.profile_find_Java_sender_frame(thread);
  1064   if (caller.sp() != NULL && caller.pc() != NULL) {
  1065     record_tick_for_calling_frame(thread, caller);
  1066     return;
  1067   }
  1069   unknown_ticks_array[ut_calling_frame] += 1;
  1070   FlatProfiler::unknown_ticks += 1;
  1071 }
  1073 void ThreadProfiler::record_tick(JavaThread* thread) {
  1074   FlatProfiler::all_ticks++;
  1075   thread_ticks += 1;
  1077   // Here's another way to track global state changes.
  1078   // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader
  1079   // and we check that here.
  1080   // This is more direct, and more than one thread can be in the class loader at a time,
  1081   // but it does mean the class loader has to know about the profiler.
  1082   if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
  1083     class_loader_ticks += 1;
  1084     FlatProfiler::class_loader_ticks += 1;
  1085     return;
  1086   } else if (region_flag[ThreadProfilerMark::extraRegion]) {
  1087     extra_ticks += 1;
  1088     FlatProfiler::extra_ticks += 1;
  1089     return;
  1090   }
  1091   // Note that the WatcherThread can now stop for safepoints
  1092   uint32_t debug_bits = 0;
  1093   if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
  1094       SuspendRetryDelay, &debug_bits)) {
  1095     unknown_ticks_array[ut_unknown_thread_state] += 1;
  1096     FlatProfiler::unknown_ticks += 1;
  1097     return;
  1098   }
  1100   frame fr;
  1102   switch (thread->thread_state()) {
  1103   case _thread_in_native:
  1104   case _thread_in_native_trans:
  1105   case _thread_in_vm:
  1106   case _thread_in_vm_trans:
  1107     if (thread->profile_last_Java_frame(&fr)) {
  1108       if (fr.is_runtime_frame()) {
  1109         RegisterMap map(thread, false);
  1110         fr = fr.sender(&map);
  1111       }
  1112       record_tick_for_calling_frame(thread, fr);
  1113     } else {
  1114       unknown_ticks_array[ut_no_last_Java_frame] += 1;
  1115       FlatProfiler::unknown_ticks += 1;
  1116     }
  1117     break;
  1118   // handle_special_runtime_exit_condition self-suspends threads in Java
  1119   case _thread_in_Java:
  1120   case _thread_in_Java_trans:
  1121     if (thread->profile_last_Java_frame(&fr)) {
  1122       if (fr.is_safepoint_blob_frame()) {
  1123         RegisterMap map(thread, false);
  1124         fr = fr.sender(&map);
  1125       }
  1126       record_tick_for_running_frame(thread, fr);
  1127     } else {
  1128       unknown_ticks_array[ut_no_last_Java_frame] += 1;
  1129       FlatProfiler::unknown_ticks += 1;
  1130     }
  1131     break;
  1132   case _thread_blocked:
  1133   case _thread_blocked_trans:
  1134     if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
  1135         if (thread->profile_last_Java_frame(&fr)) {
  1136           if (fr.is_safepoint_blob_frame()) {
  1137             RegisterMap map(thread, false);
  1138             fr = fr.sender(&map);
  1139             record_tick_for_running_frame(thread, fr);
  1140           } else {
  1141             record_tick_for_calling_frame(thread, fr);
  1142           }
  1143         } else {
  1144           unknown_ticks_array[ut_no_last_Java_frame] += 1;
  1145           FlatProfiler::unknown_ticks += 1;
  1146         }
  1147     } else {
  1148           blocked_ticks += 1;
  1149           FlatProfiler::blocked_ticks += 1;
  1150     }
  1151     break;
  1152   case _thread_uninitialized:
  1153   case _thread_new:
  1154   // not used, included for completeness
  1155   case _thread_new_trans:
  1156      unknown_ticks_array[ut_no_last_Java_frame] += 1;
  1157      FlatProfiler::unknown_ticks += 1;
  1158      break;
  1159   default:
  1160     unknown_ticks_array[ut_unknown_thread_state] += 1;
  1161     FlatProfiler::unknown_ticks += 1;
  1162     break;
  1163   }
  1164   return;
  1165 }
  1167 void ThreadProfiler::engage() {
  1168   engaged = true;
  1169   timer.start();
  1170 }
  1172 void ThreadProfiler::disengage() {
  1173   engaged = false;
  1174   timer.stop();
  1175 }
  1177 void ThreadProfiler::initialize() {
  1178   for (int index = 0; index < table_size; index++) {
  1179     table[index] = NULL;
  1180   }
  1181   thread_ticks = 0;
  1182   blocked_ticks = 0;
  1183   compiler_ticks = 0;
  1184   interpreter_ticks = 0;
  1185   for (int ut = 0; ut < ut_end; ut += 1) {
  1186     unknown_ticks_array[ut] = 0;
  1187   }
  1188   region_flag[ThreadProfilerMark::classLoaderRegion] = false;
  1189   class_loader_ticks = 0;
  1190   region_flag[ThreadProfilerMark::extraRegion] = false;
  1191   extra_ticks = 0;
  1192   timer.start();
  1193   interval_data_ref()->reset();
  1194 }
  1196 void ThreadProfiler::reset() {
  1197   timer.stop();
  1198   if (table != NULL) {
  1199     for (int index = 0; index < table_size; index++) {
  1200       ProfilerNode* n = table[index];
  1201       if (n != NULL) {
  1202         delete n;
  1203       }
  1204     }
  1205   }
  1206   initialize();
  1207 }
  1209 void FlatProfiler::allocate_table() {
  1210   { // Bytecode table
  1211     bytecode_ticks      = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
  1212     bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
  1213     for(int index = 0; index < Bytecodes::number_of_codes; index++) {
  1214       bytecode_ticks[index]      = 0;
  1215       bytecode_ticks_stub[index] = 0;
  1216     }
  1217   }
  1219   if (ProfilerRecordPC) PCRecorder::init();
  1221   interval_data         = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal);
  1222   FlatProfiler::interval_reset();
  1223 }
  1225 void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
  1226   full_profile_flag = fullProfile;
  1227   if (bytecode_ticks == NULL) {
  1228     allocate_table();
  1229   }
  1230   if(ProfileVM && (vm_thread_profiler == NULL)){
  1231     vm_thread_profiler = new ThreadProfiler();
  1232   }
  1233   if (task == NULL) {
  1234     task = new FlatProfilerTask(WatcherThread::delay_interval);
  1235     task->enroll();
  1236   }
  1237   timer.start();
  1238   if (mainThread != NULL) {
  1239     // When mainThread was created, it might not have a ThreadProfiler
  1240     ThreadProfiler* pp = mainThread->get_thread_profiler();
  1241     if (pp == NULL) {
  1242       mainThread->set_thread_profiler(new ThreadProfiler());
  1243     } else {
  1244       pp->reset();
  1245     }
  1246     mainThread->get_thread_profiler()->engage();
  1247   }
  1248   // This is where we would assign thread_profiler
  1249   // if we wanted only one thread_profiler for all threads.
  1250   thread_profiler = NULL;
  1251 }
  1253 void FlatProfiler::disengage() {
  1254   if (!task) {
  1255     return;
  1256   }
  1257   timer.stop();
  1258   task->disenroll();
  1259   delete task;
  1260   task = NULL;
  1261   if (thread_profiler != NULL) {
  1262     thread_profiler->disengage();
  1263   } else {
  1264     MutexLocker tl(Threads_lock);
  1265     for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
  1266       ThreadProfiler* pp = tp->get_thread_profiler();
  1267       if (pp != NULL) {
  1268         pp->disengage();
  1269       }
  1270     }
  1271   }
  1272 }
  1274 void FlatProfiler::reset() {
  1275   if (task) {
  1276     disengage();
  1277   }
  1279   class_loader_ticks = 0;
  1280   extra_ticks        = 0;
  1281   received_gc_ticks  = 0;
  1282   vm_operation_ticks = 0;
  1283   compiler_ticks     = 0;
  1284   deopt_ticks        = 0;
  1285   interpreter_ticks  = 0;
  1286   blocked_ticks      = 0;
  1287   unknown_ticks      = 0;
  1288   received_ticks     = 0;
  1289   delivered_ticks    = 0;
  1290   timer.stop();
  1291 }
  1293 bool FlatProfiler::is_active() {
  1294   return task != NULL;
  1295 }
  1297 void FlatProfiler::print_byte_code_statistics() {
  1298   GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
  1300   tty->print_cr(" Bytecode ticks:");
  1301   for (int index = 0; index < Bytecodes::number_of_codes; index++) {
  1302     if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
  1303       tty->print_cr("  %4d %4d = %s",
  1304         FlatProfiler::bytecode_ticks[index],
  1305         FlatProfiler::bytecode_ticks_stub[index],
  1306         Bytecodes::name( (Bytecodes::Code) index));
  1307     }
  1308   }
  1309   tty->cr();
  1310 }
  1312 void print_ticks(const char* title, int ticks, int total) {
  1313   if (ticks > 0) {
  1314     tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
  1315     tty->fill_to(col3);
  1316     tty->print("%s", title);
  1317     tty->cr();
  1318   }
  1319 }
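       // Per-thread report: ticks are grouped into interpreted, compiled, stub and runtime-stub
       // sections, each capped by the corresponding ProfilerNumberOf* flag, followed by the
       // thread-local bookkeeping ticks.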
  1321 void ThreadProfiler::print(const char* thread_name) {
  1322   ResourceMark rm;
  1323   MutexLocker ppl(ProfilePrint_lock);
  1324   int index = 0; // Declared outside for loops for portability
  1326   if (table == NULL) {
  1327     return;
  1328   }
  1330   if (thread_ticks <= 0) {
  1331     return;
  1332   }
  1334   const char* title = "too soon to tell";
  1335   double secs = timer.seconds();
  1337   GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
  1338   for(index = 0; index < table_size; index++) {
  1339     for(ProfilerNode* node = table[index]; node; node = node->next())
  1340       array->append(node);
  1341   }
  1343   array->sort(&ProfilerNode::compare);
  1345   // compute total (sanity check)
  1346   int active =
  1347     class_loader_ticks +
  1348     compiler_ticks +
  1349     interpreter_ticks +
  1350     unknown_ticks();
  1351   for (index = 0; index < array->length(); index++) {
  1352     active += array->at(index)->ticks.total();
  1353   }
  1354   int total = active + blocked_ticks;
  1356   tty->cr();
  1357   tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
  1358   if (total != thread_ticks) {
  1359     print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
  1360   }
  1361   tty->cr();
  1363   // print interpreted methods
  1364   tick_counter interpreted_ticks;
  1365   bool has_interpreted_ticks = false;
  1366   int print_count = 0;
  1367   for (index = 0; index < array->length(); index++) {
  1368     ProfilerNode* n = array->at(index);
  1369     if (n->is_interpreted()) {
  1370       interpreted_ticks.add(&n->ticks);
  1371       if (!has_interpreted_ticks) {
  1372         interpretedNode::print_title(tty);
  1373         has_interpreted_ticks = true;
  1374       }
  1375       if (print_count++ < ProfilerNumberOfInterpretedMethods) {
  1376         n->print(tty, active);
  1377       }
  1378     }
  1379   }
  1380   if (has_interpreted_ticks) {
  1381     if (print_count <= ProfilerNumberOfInterpretedMethods) {
  1382       title = "Total interpreted";
  1383     } else {
  1384       title = "Total interpreted (including elided)";
  1385     }
  1386     interpretedNode::print_total(tty, &interpreted_ticks, active, title);
  1387     tty->cr();
  1388   }
  1390   // print compiled methods
  1391   tick_counter compiled_ticks;
  1392   bool has_compiled_ticks = false;
  1393   print_count = 0;
  1394   for (index = 0; index < array->length(); index++) {
  1395     ProfilerNode* n = array->at(index);
  1396     if (n->is_compiled()) {
  1397       compiled_ticks.add(&n->ticks);
  1398       if (!has_compiled_ticks) {
  1399         compiledNode::print_title(tty);
  1400         has_compiled_ticks = true;
  1401       }
  1402       if (print_count++ < ProfilerNumberOfCompiledMethods) {
  1403         n->print(tty, active);
  1404       }
  1405     }
  1406   }
  1407   if (has_compiled_ticks) {
  1408     if (print_count <= ProfilerNumberOfCompiledMethods) {
  1409       title = "Total compiled";
  1410     } else {
  1411       title = "Total compiled (including elided)";
  1412     }
  1413     compiledNode::print_total(tty, &compiled_ticks, active, title);
  1414     tty->cr();
  1415   }
  1417   // print stub methods
  1418   tick_counter stub_ticks;
  1419   bool has_stub_ticks = false;
  1420   print_count = 0;
  1421   for (index = 0; index < array->length(); index++) {
  1422     ProfilerNode* n = array->at(index);
  1423     if (n->is_stub()) {
  1424       stub_ticks.add(&n->ticks);
  1425       if (!has_stub_ticks) {
  1426         stubNode::print_title(tty);
  1427         has_stub_ticks = true;
  1428       }
  1429       if (print_count++ < ProfilerNumberOfStubMethods) {
  1430         n->print(tty, active);
  1431       }
  1432     }
  1433   }
  1434   if (has_stub_ticks) {
  1435     if (print_count <= ProfilerNumberOfStubMethods) {
  1436       title = "Total stub";
  1437     } else {
  1438       title = "Total stub (including elided)";
  1439     }
  1440     stubNode::print_total(tty, &stub_ticks, active, title);
  1441     tty->cr();
  1442   }
  1444   // print runtime stubs
  1445   tick_counter runtime_stub_ticks;
  1446   bool has_runtime_stub_ticks = false;
  1447   print_count = 0;
  1448   for (index = 0; index < array->length(); index++) {
  1449     ProfilerNode* n = array->at(index);
  1450     if (n->is_runtime_stub()) {
  1451       runtime_stub_ticks.add(&n->ticks);
  1452       if (!has_runtime_stub_ticks) {
  1453         runtimeStubNode::print_title(tty);
  1454         has_runtime_stub_ticks = true;
  1455       }
  1456       if (print_count++ < ProfilerNumberOfRuntimeStubNodes) {
  1457         n->print(tty, active);
  1458       }
  1459     }
  1460   }
  1461   if (has_runtime_stub_ticks) {
  1462     if (print_count <= ProfilerNumberOfRuntimeStubNodes) {
  1463       title = "Total runtime stubs";
  1464     } else {
  1465       title = "Total runtime stubs (including elided)";
  1466     }
  1467     runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title);
  1468     tty->cr();
  1469   }
  1471   if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) {
  1472     tty->fill_to(col1);
  1473     tty->print_cr("Thread-local ticks:");
  1474     print_ticks("Blocked (of total)",  blocked_ticks,      total);
  1475     print_ticks("Class loader",        class_loader_ticks, active);
  1476     print_ticks("Extra",               extra_ticks,        active);
  1477     print_ticks("Interpreter",         interpreter_ticks,  active);
  1478     print_ticks("Compilation",         compiler_ticks,     active);
  1479     print_ticks("Unknown: vtable stubs",  unknown_ticks_array[ut_vtable_stubs],         active);
  1480     print_ticks("Unknown: null method",   unknown_ticks_array[ut_null_method],          active);
  1481     print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame],        active);
  1482     print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame],        active);
  1483     print_ticks("Unknown: no pc",         unknown_ticks_array[ut_no_pc],                active);
  1484     print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame],   active);
  1485     print_ticks("Unknown: thread_state",  unknown_ticks_array[ut_unknown_thread_state], active);
  1486     tty->cr();
  1487   }
  1489   if (WizardMode) {
  1490     tty->print_cr("Node area used: %dKb", (area_top - area_bottom) / 1024);
  1491   }
  1492   reset();
  1493 }
  1495 /*
  1496 ThreadProfiler::print_unknown(){
  1497   if (table == NULL) {
  1498     return;
  1499   }
  1501   if (thread_ticks <= 0) {
  1502     return;
  1503   }
  1504 } */
  1506 void FlatProfiler::print(int unused) {
  1507   ResourceMark rm;
  1508   if (thread_profiler != NULL) {
  1509     thread_profiler->print("All threads");
  1510   } else {
  1511     MutexLocker tl(Threads_lock);
  1512     for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
  1513       ThreadProfiler* pp = tp->get_thread_profiler();
  1514       if (pp != NULL) {
  1515         pp->print(tp->get_thread_name());
  1516       }
  1517     }
  1518   }
  1520   if (ProfilerPrintByteCodeStatistics) {
  1521     print_byte_code_statistics();
  1522   }
  1524   if (non_method_ticks() > 0) {
  1525     tty->cr();
  1526     tty->print_cr("Global summary of %3.2f seconds:", timer.seconds());
  1527     print_ticks("Received ticks",      received_ticks,     received_ticks);
  1528     print_ticks("Received GC ticks",   received_gc_ticks,  received_ticks);
  1529     print_ticks("Compilation",         compiler_ticks,     received_ticks);
  1530     print_ticks("Deoptimization",      deopt_ticks,        received_ticks);
  1531     print_ticks("Other VM operations", vm_operation_ticks, received_ticks);
  1532 #ifndef PRODUCT
  1533     print_ticks("Blocked ticks",       blocked_ticks,      received_ticks);
  1534     print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks);
  1535     print_ticks("Delivered ticks",     delivered_ticks,    received_ticks);
  1536     print_ticks("All ticks",           all_ticks,          received_ticks);
  1537 #endif
  1538     print_ticks("Class loader",        class_loader_ticks, received_ticks);
  1539     print_ticks("Extra       ",        extra_ticks,        received_ticks);
  1540     print_ticks("Interpreter",         interpreter_ticks,  received_ticks);
  1541     print_ticks("Unknown code",        unknown_ticks,      received_ticks);
  1542   }
  1544   PCRecorder::print();
  1546   if(ProfileVM){
  1547     tty->cr();
  1548     vm_thread_profiler->print("VM Thread");
  1549   }
  1550 }
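       // Interval output: each i/c/n/g column is one thread's interpreted/compiled/native/compiling
       // tick count for the just-finished interval.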
  1552 void IntervalData::print_header(outputStream* st) {
  1553   st->print("i/c/n/g");
  1554 }
  1556 void IntervalData::print_data(outputStream* st) {
  1557   st->print("%d/%d/%d/%d", interpreted(), compiled(), native(), compiling());
  1558 }
  1560 void FlatProfiler::interval_record_thread(ThreadProfiler* tp) {
  1561   IntervalData id = tp->interval_data();
  1562   int total = id.total();
  1563   tp->interval_data_ref()->reset();
  1565   // Insertion sort the data, if it's relevant.
  1566   for (int i = 0; i < interval_print_size; i += 1) {
  1567     if (total > interval_data[i].total()) {
  1568       for (int j = interval_print_size - 1; j > i; j -= 1) {
  1569         interval_data[j] = interval_data[j-1];
  1570       }
  1571       interval_data[i] = id;
  1572       break;
  1573     }
  1574   }
  1575 }
  1577 void FlatProfiler::interval_print() {
  1578   if ((interval_data[0].total() > 0)) {
  1579     tty->stamp();
  1580     tty->print("\t");
  1581     IntervalData::print_header(tty);
  1582     for (int i = 0; i < interval_print_size; i += 1) {
  1583       if (interval_data[i].total() > 0) {
  1584         tty->print("\t");
  1585         interval_data[i].print_data(tty);
  1586       }
  1587     }
  1588     tty->cr();
  1589   }
  1590 }
  1592 void FlatProfiler::interval_reset() {
  1593   for (int i = 0; i < interval_print_size; i += 1) {
  1594     interval_data[i].reset();
  1595   }
  1596 }
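       // GC support: visit the class-loader oops held by the profiler nodes so the methods they
       // reference stay alive and the stored oops are updated if the objects move.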
  1598 void ThreadProfiler::oops_do(OopClosure* f) {
  1599   if (table == NULL) return;
  1601   for(int index = 0; index < table_size; index++) {
  1602     for(ProfilerNode* node = table[index]; node; node = node->next())
  1603       node->oops_do(f);
  1604   }
  1605 }
  1607 void FlatProfiler::oops_do(OopClosure* f) {
  1608   if (thread_profiler != NULL) {
  1609     thread_profiler->oops_do(f);
  1610   } else {
  1611     for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
  1612       ThreadProfiler* pp = tp->get_thread_profiler();
  1613       if (pp != NULL) {
  1614         pp->oops_do(f);
  1615       }
  1616     }
  1617   }
  1618 }
