src/share/vm/prims/jvmtiImpl.cpp

author      dcubed
date        Thu, 28 Apr 2011 08:24:46 -0700
changeset   2836:0cddebc420d8
parent      2624:46a56fac55c7
child       3156:f08d439fab8c
permissions -rw-r--r--

7039447: 2/1 java profiling is broken in build 139 (garbage in function name)
Summary: The name in a deferred JVM/TI DynamicCodeGenerated event needs to be explicitly saved.
Reviewed-by: acorn, never, dsamersoff, dholmes
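As the summary says, the bug was that a deferred DynamicCodeGenerated event held on to the caller's name pointer, which could be freed or reused before the ServiceThread posted the event, so the agent saw garbage in the function name. The fix (see dynamic_code_generated_event() and post() near the end of the file) is to copy the name into storage the event owns and free it after posting. Below is a minimal standalone sketch of that ownership pattern; DeferredDynamicCodeEvent, copy_name, make_event and post_event are illustrative names for this sketch, not HotSpot types, and plain malloc/free stands in for os::strdup()/os::free().

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical stand-in for the dynamic_code_generated member of
    // JvmtiDeferredEvent; field names here are illustrative only.
    struct DeferredDynamicCodeEvent {
      const char* name;        // owned copy; may be NULL if the allocation failed
      const void* code_begin;
      const void* code_end;
    };

    // Rough equivalent of os::strdup(): make a heap copy the event can own.
    static const char* copy_name(const char* name) {
      size_t len = strlen(name) + 1;
      char* copy = static_cast<char*>(malloc(len));
      if (copy != NULL) memcpy(copy, name, len);
      return copy;
    }

    // The caller may free or reuse 'name' as soon as this returns, so the
    // event must not keep the caller's pointer.
    static DeferredDynamicCodeEvent make_event(const char* name,
                                               const void* begin,
                                               const void* end) {
      DeferredDynamicCodeEvent e;
      e.name       = copy_name(name);
      e.code_begin = begin;
      e.code_end   = end;
      return e;
    }

    // Posted later, on another thread: fall back to a default name if the
    // copy failed, then release the copy.
    static void post_event(DeferredDynamicCodeEvent& e) {
      const char* n = (e.name == NULL) ? "unknown_code" : e.name;
      printf("DynamicCodeGenerated %s [%p, %p)\n", n, e.code_begin, e.code_end);
      free(const_cast<char*>(e.name));
      e.name = NULL;
    }

    int main() {
      char buf[32];
      snprintf(buf, sizeof(buf), "I2C/C2I adapters");
      DeferredDynamicCodeEvent e = make_event(buf, (const void*)0x1000, (const void*)0x2000);
      buf[0] = '\0';         // the caller's buffer goes away; the event still has its copy
      post_event(e);
      return 0;
    }

The important property is that ownership of the copy travels with the event: whichever thread eventually posts it is also responsible for freeing it, which is what post() does with os::free() in the real code.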

     1 /*
     2  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/systemDictionary.hpp"
    27 #include "interpreter/interpreter.hpp"
    28 #include "jvmtifiles/jvmtiEnv.hpp"
    29 #include "memory/resourceArea.hpp"
    30 #include "oops/instanceKlass.hpp"
    31 #include "prims/jvmtiAgentThread.hpp"
    32 #include "prims/jvmtiEventController.inline.hpp"
    33 #include "prims/jvmtiImpl.hpp"
    34 #include "prims/jvmtiRedefineClasses.hpp"
    35 #include "runtime/atomic.hpp"
    36 #include "runtime/deoptimization.hpp"
    37 #include "runtime/handles.hpp"
    38 #include "runtime/handles.inline.hpp"
    39 #include "runtime/interfaceSupport.hpp"
    40 #include "runtime/javaCalls.hpp"
    41 #include "runtime/os.hpp"
    42 #include "runtime/serviceThread.hpp"
    43 #include "runtime/signature.hpp"
    44 #include "runtime/vframe.hpp"
    45 #include "runtime/vframe_hp.hpp"
    46 #include "runtime/vm_operations.hpp"
    47 #include "utilities/exceptions.hpp"
    48 #ifdef TARGET_OS_FAMILY_linux
    49 # include "thread_linux.inline.hpp"
    50 #endif
    51 #ifdef TARGET_OS_FAMILY_solaris
    52 # include "thread_solaris.inline.hpp"
    53 #endif
    54 #ifdef TARGET_OS_FAMILY_windows
    55 # include "thread_windows.inline.hpp"
    56 #endif
    58 //
    59 // class JvmtiAgentThread
    60 //
    61 // JavaThread used to wrap a thread started by an agent
    62 // using the JVMTI method RunAgentThread.
    63 //
    65 JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    66     : JavaThread(start_function_wrapper) {
    67     _env = env;
    68     _start_fn = start_fn;
    69     _start_arg = start_arg;
    70 }
    72 void
    73 JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
    74     // It is expected that any Agent threads will be created as
    75     // Java Threads.  If this is the case, notification of the creation
    76     // of the thread is given in JavaThread::thread_main().
    77     assert(thread->is_Java_thread(), "debugger thread should be a Java Thread");
    78     assert(thread == JavaThread::current(), "sanity check");
    80     JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
    81     dthread->call_start_function();
    82 }
    84 void
    85 JvmtiAgentThread::call_start_function() {
    86     ThreadToNativeFromVM transition(this);
    87     _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
    88 }
    91 //
    92 // class GrowableCache - private methods
    93 //
    95 void GrowableCache::recache() {
    96   int len = _elements->length();
    98   FREE_C_HEAP_ARRAY(address, _cache);
    99   _cache = NEW_C_HEAP_ARRAY(address,len+1);
   101   for (int i=0; i<len; i++) {
   102     _cache[i] = _elements->at(i)->getCacheValue();
   103     //
   104     // The cache entry has gone bad. Without a valid frame pointer
   105     // value, the entry is useless so we simply delete it in product
   106     // mode. The call to remove() will rebuild the cache again
   107     // without the bad entry.
   108     //
   109     if (_cache[i] == NULL) {
   110       assert(false, "cannot recache NULL elements");
   111       remove(i);
   112       return;
   113     }
   114   }
   115   _cache[len] = NULL;
   117   _listener_fun(_this_obj,_cache);
   118 }
   120 bool GrowableCache::equals(void* v, GrowableElement *e2) {
   121   GrowableElement *e1 = (GrowableElement *) v;
   122   assert(e1 != NULL, "e1 != NULL");
   123   assert(e2 != NULL, "e2 != NULL");
   125   return e1->equals(e2);
   126 }
   128 //
   129 // class GrowableCache - public methods
   130 //
   132 GrowableCache::GrowableCache() {
   133   _this_obj       = NULL;
   134   _listener_fun   = NULL;
   135   _elements       = NULL;
   136   _cache          = NULL;
   137 }
   139 GrowableCache::~GrowableCache() {
   140   clear();
   141   delete _elements;
   142   FREE_C_HEAP_ARRAY(address, _cache);
   143 }
   145 void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
   146   _this_obj       = this_obj;
   147   _listener_fun   = listener_fun;
   148   _elements       = new (ResourceObj::C_HEAP) GrowableArray<GrowableElement*>(5,true);
   149   recache();
   150 }
   152 // number of elements in the collection
   153 int GrowableCache::length() {
   154   return _elements->length();
   155 }
   157 // get the value of the index element in the collection
   158 GrowableElement* GrowableCache::at(int index) {
   159   GrowableElement *e = (GrowableElement *) _elements->at(index);
   160   assert(e != NULL, "e != NULL");
   161   return e;
   162 }
   164 int GrowableCache::find(GrowableElement* e) {
   165   return _elements->find(e, GrowableCache::equals);
   166 }
   168 // append a copy of the element to the end of the collection
   169 void GrowableCache::append(GrowableElement* e) {
   170   GrowableElement *new_e = e->clone();
   171   _elements->append(new_e);
   172   recache();
   173 }
   175 // insert a copy of the element using lessthan()
   176 void GrowableCache::insert(GrowableElement* e) {
   177   GrowableElement *new_e = e->clone();
   178   _elements->append(new_e);
   180   int n = length()-2;
   181   for (int i=n; i>=0; i--) {
   182     GrowableElement *e1 = _elements->at(i);
   183     GrowableElement *e2 = _elements->at(i+1);
   184     if (e2->lessThan(e1)) {
   185       _elements->at_put(i+1, e1);
   186       _elements->at_put(i,   e2);
   187     }
   188   }
   190   recache();
   191 }
   193 // remove the element at index
   194 void GrowableCache::remove (int index) {
   195   GrowableElement *e = _elements->at(index);
   196   assert(e != NULL, "e != NULL");
   197   _elements->remove(e);
   198   delete e;
   199   recache();
   200 }
   202 // clear out all elements, release all heap space and
   203 // let our listener know that things have changed.
   204 void GrowableCache::clear() {
   205   int len = _elements->length();
   206   for (int i=0; i<len; i++) {
   207     delete _elements->at(i);
   208   }
   209   _elements->clear();
   210   recache();
   211 }
   213 void GrowableCache::oops_do(OopClosure* f) {
   214   int len = _elements->length();
   215   for (int i=0; i<len; i++) {
   216     GrowableElement *e = _elements->at(i);
   217     e->oops_do(f);
   218   }
   219 }
   221 void GrowableCache::gc_epilogue() {
   222   int len = _elements->length();
   223   for (int i=0; i<len; i++) {
   224     _cache[i] = _elements->at(i)->getCacheValue();
   225   }
   226 }
   228 //
   229 // class JvmtiBreakpoint
   230 //
   232 JvmtiBreakpoint::JvmtiBreakpoint() {
   233   _method = NULL;
   234   _bci    = 0;
   235 #ifdef CHECK_UNHANDLED_OOPS
   236   // This one is always allocated with new, but check it just in case.
   237   Thread *thread = Thread::current();
   238   if (thread->is_in_stack((address)&_method)) {
   239     thread->allow_unhandled_oop((oop*)&_method);
   240   }
   241 #endif // CHECK_UNHANDLED_OOPS
   242 }
   244 JvmtiBreakpoint::JvmtiBreakpoint(methodOop m_method, jlocation location) {
   245   _method        = m_method;
   246   assert(_method != NULL, "_method != NULL");
   247   _bci           = (int) location;
   248 #ifdef CHECK_UNHANDLED_OOPS
   249   // Could be allocated with new and wouldn't be on the unhandled oop list.
   250   Thread *thread = Thread::current();
   251   if (thread->is_in_stack((address)&_method)) {
   252     thread->allow_unhandled_oop(&_method);
   253   }
   254 #endif // CHECK_UNHANDLED_OOPS
   256   assert(_bci >= 0, "_bci >= 0");
   257 }
   259 void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
   260   _method   = bp._method;
   261   _bci      = bp._bci;
   262 }
   264 bool JvmtiBreakpoint::lessThan(JvmtiBreakpoint& bp) {
   265   Unimplemented();
   266   return false;
   267 }
   269 bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
   270   return _method   == bp._method
   271     &&   _bci      == bp._bci;
   272 }
   274 bool JvmtiBreakpoint::is_valid() {
   275   return _method != NULL &&
   276          _bci >= 0;
   277 }
   279 address JvmtiBreakpoint::getBcp() {
   280   return _method->bcp_from(_bci);
   281 }
   283 void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
   284   ((methodOopDesc*)_method->*meth_act)(_bci);
   286   // add/remove breakpoint to/from versions of the method that
   287   // are EMCP. Directly or transitively obsolete methods are
   288   // not saved in the PreviousVersionInfo.
   289   Thread *thread = Thread::current();
   290   instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
   291   Symbol* m_name = _method->name();
   292   Symbol* m_signature = _method->signature();
   294   {
   295     ResourceMark rm(thread);
   296     // PreviousVersionInfo objects returned via PreviousVersionWalker
   297     // contain a GrowableArray of handles. We have to clean up the
   298     // GrowableArray _after_ the PreviousVersionWalker destructor
   299     // has destroyed the handles.
   300     {
   301       // search previous versions if they exist
   302       PreviousVersionWalker pvw((instanceKlass *)ikh()->klass_part());
   303       for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
   304            pv_info != NULL; pv_info = pvw.next_previous_version()) {
   305         GrowableArray<methodHandle>* methods =
   306           pv_info->prev_EMCP_method_handles();
   308         if (methods == NULL) {
   309           // We have run into a PreviousVersion generation where
   310           // all methods were made obsolete during that generation's
   311           // RedefineClasses() operation. At the time of that
   312           // operation, all EMCP methods were flushed so we don't
   313           // have to go back any further.
   314           //
   315           // A NULL methods array is different than an empty methods
   316           // array. We cannot infer any optimizations about older
   317           // generations from an empty methods array for the current
   318           // generation.
   319           break;
   320         }
   322         for (int i = methods->length() - 1; i >= 0; i--) {
   323           methodHandle method = methods->at(i);
   324           if (method->name() == m_name && method->signature() == m_signature) {
   325             RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
   326               meth_act == &methodOopDesc::set_breakpoint ? "sett" : "clear",
   327               method->name()->as_C_string(),
   328               method->signature()->as_C_string()));
   329             assert(!method->is_obsolete(), "only EMCP methods here");
   331             ((methodOopDesc*)method()->*meth_act)(_bci);
   332             break;
   333           }
   334         }
   335       }
   336     } // pvw is cleaned up
   337   } // rm is cleaned up
   338 }
   340 void JvmtiBreakpoint::set() {
   341   each_method_version_do(&methodOopDesc::set_breakpoint);
   342 }
   344 void JvmtiBreakpoint::clear() {
   345   each_method_version_do(&methodOopDesc::clear_breakpoint);
   346 }
   348 void JvmtiBreakpoint::print() {
   349 #ifndef PRODUCT
   350   const char *class_name  = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
   351   const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();
   353   tty->print("Breakpoint(%s,%s,%d,%p)",class_name, method_name, _bci, getBcp());
   354 #endif
   355 }
   358 //
   359 // class VM_ChangeBreakpoints
   360 //
   361 // Modify the Breakpoints data structure at a safepoint
   362 //
   364 void VM_ChangeBreakpoints::doit() {
   365   switch (_operation) {
   366   case SET_BREAKPOINT:
   367     _breakpoints->set_at_safepoint(*_bp);
   368     break;
   369   case CLEAR_BREAKPOINT:
   370     _breakpoints->clear_at_safepoint(*_bp);
   371     break;
   372   case CLEAR_ALL_BREAKPOINT:
   373     _breakpoints->clearall_at_safepoint();
   374     break;
   375   default:
   376     assert(false, "Unknown operation");
   377   }
   378 }
   380 void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
   381   // This operation keeps breakpoints alive
   382   if (_breakpoints != NULL) {
   383     _breakpoints->oops_do(f);
   384   }
   385   if (_bp != NULL) {
   386     _bp->oops_do(f);
   387   }
   388 }
   390 //
   391 // class JvmtiBreakpoints
   392 //
   393 // a JVMTI internal collection of JvmtiBreakpoint
   394 //
   396 JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
   397   _bps.initialize(this,listener_fun);
   398 }
   400 JvmtiBreakpoints:: ~JvmtiBreakpoints() {}
   402 void  JvmtiBreakpoints::oops_do(OopClosure* f) {
   403   _bps.oops_do(f);
   404 }
   406 void JvmtiBreakpoints::gc_epilogue() {
   407   _bps.gc_epilogue();
   408 }
   410 void  JvmtiBreakpoints::print() {
   411 #ifndef PRODUCT
   412   ResourceMark rm;
   414   int n = _bps.length();
   415   for (int i=0; i<n; i++) {
   416     JvmtiBreakpoint& bp = _bps.at(i);
   417     tty->print("%d: ", i);
   418     bp.print();
   419     tty->print_cr("");
   420   }
   421 #endif
   422 }
   425 void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
   426   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   428   int i = _bps.find(bp);
   429   if (i == -1) {
   430     _bps.append(bp);
   431     bp.set();
   432   }
   433 }
   435 void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
   436   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   438   int i = _bps.find(bp);
   439   if (i != -1) {
   440     _bps.remove(i);
   441     bp.clear();
   442   }
   443 }
   445 void JvmtiBreakpoints::clearall_at_safepoint() {
   446   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   448   int len = _bps.length();
   449   for (int i=0; i<len; i++) {
   450     _bps.at(i).clear();
   451   }
   452   _bps.clear();
   453 }
   455 int JvmtiBreakpoints::length() { return _bps.length(); }
   457 int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
   458   if ( _bps.find(bp) != -1) {
   459      return JVMTI_ERROR_DUPLICATE;
   460   }
   461   VM_ChangeBreakpoints set_breakpoint(this,VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
   462   VMThread::execute(&set_breakpoint);
   463   return JVMTI_ERROR_NONE;
   464 }
   466 int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
   467   if ( _bps.find(bp) == -1) {
   468      return JVMTI_ERROR_NOT_FOUND;
   469   }
   471   VM_ChangeBreakpoints clear_breakpoint(this,VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
   472   VMThread::execute(&clear_breakpoint);
   473   return JVMTI_ERROR_NONE;
   474 }
   476 void JvmtiBreakpoints::clearall_in_class_at_safepoint(klassOop klass) {
   477   bool changed = true;
   478   // We are going to run thru the list of bkpts
   479   // and delete some.  This deletion probably alters
   480   // the list in some implementation defined way such
   481   // that when we delete entry i, the next entry might
   482   // no longer be at i+1.  To be safe, each time we delete
   483   // an entry, we'll just start again from the beginning.
   484   // We'll stop when we make a pass thru the whole list without
   485   // deleting anything.
   486   while (changed) {
   487     int len = _bps.length();
   488     changed = false;
   489     for (int i = 0; i < len; i++) {
   490       JvmtiBreakpoint& bp = _bps.at(i);
   491       if (bp.method()->method_holder() == klass) {
   492         bp.clear();
   493         _bps.remove(i);
   494         // This changed 'i' so we have to start over.
   495         changed = true;
   496         break;
   497       }
   498     }
   499   }
   500 }
   502 void JvmtiBreakpoints::clearall() {
   503   VM_ChangeBreakpoints clearall_breakpoint(this,VM_ChangeBreakpoints::CLEAR_ALL_BREAKPOINT);
   504   VMThread::execute(&clearall_breakpoint);
   505 }
   507 //
   508 // class JvmtiCurrentBreakpoints
   509 //
   511 JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = NULL;
   512 address *         JvmtiCurrentBreakpoints::_breakpoint_list    = NULL;
   515 JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
   516   if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
   517   _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
   518   assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
   519   return (*_jvmti_breakpoints);
   520 }
   522 void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
   523   JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
   524   assert(this_jvmti != NULL, "this_jvmti != NULL");
   526   debug_only(int n = this_jvmti->length(););
   527   assert(cache[n] == NULL, "cache must be NULL terminated");
   529   set_breakpoint_list(cache);
   530 }
   533 void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   534   if (_jvmti_breakpoints != NULL) {
   535     _jvmti_breakpoints->oops_do(f);
   536   }
   537 }
   539 void JvmtiCurrentBreakpoints::gc_epilogue() {
   540   if (_jvmti_breakpoints != NULL) {
   541     _jvmti_breakpoints->gc_epilogue();
   542   }
   543 }
   545 ///////////////////////////////////////////////////////////////
   546 //
   547 // class VM_GetOrSetLocal
   548 //
   550 // Constructor for non-object getter
   551 VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type)
   552   : _thread(thread)
   553   , _calling_thread(NULL)
   554   , _depth(depth)
   555   , _index(index)
   556   , _type(type)
   557   , _set(false)
   558   , _jvf(NULL)
   559   , _result(JVMTI_ERROR_NONE)
   560 {
   561 }
   563 // Constructor for object or non-object setter
   564 VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type, jvalue value)
   565   : _thread(thread)
   566   , _calling_thread(NULL)
   567   , _depth(depth)
   568   , _index(index)
   569   , _type(type)
   570   , _value(value)
   571   , _set(true)
   572   , _jvf(NULL)
   573   , _result(JVMTI_ERROR_NONE)
   574 {
   575 }
   577 // Constructor for object getter
   578 VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
   579   : _thread(thread)
   580   , _calling_thread(calling_thread)
   581   , _depth(depth)
   582   , _index(index)
   583   , _type(T_OBJECT)
   584   , _set(false)
   585   , _jvf(NULL)
   586   , _result(JVMTI_ERROR_NONE)
   587 {
   588 }
   590 vframe *VM_GetOrSetLocal::get_vframe() {
   591   if (!_thread->has_last_Java_frame()) {
   592     return NULL;
   593   }
   594   RegisterMap reg_map(_thread);
   595   vframe *vf = _thread->last_java_vframe(&reg_map);
   596   int d = 0;
   597   while ((vf != NULL) && (d < _depth)) {
   598     vf = vf->java_sender();
   599     d++;
   600   }
   601   return vf;
   602 }
   604 javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
   605   vframe* vf = get_vframe();
   606   if (vf == NULL) {
   607     _result = JVMTI_ERROR_NO_MORE_FRAMES;
   608     return NULL;
   609   }
   610   javaVFrame *jvf = (javaVFrame*)vf;
   612   if (!vf->is_java_frame()) {
   613     _result = JVMTI_ERROR_OPAQUE_FRAME;
   614     return NULL;
   615   }
   616   return jvf;
   617 }
   619 // Check that the klass is assignable to a type with the given signature.
   620 // Another solution could be to use the function Klass::is_subtype_of(type).
   621 // But the type class can be forced to load/initialize eagerly in such a case.
   622 // This may cause unexpected consequences like CFLH or class-init JVMTI events.
   623 // It is better to avoid such a behavior.
   624 bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
   625   assert(ty_sign != NULL, "type signature must not be NULL");
   626   assert(thread != NULL, "thread must not be NULL");
   627   assert(klass != NULL, "klass must not be NULL");
   629   int len = (int) strlen(ty_sign);
   630   if (ty_sign[0] == 'L' && ty_sign[len-1] == ';') { // Need pure class/interface name
   631     ty_sign++;
   632     len -= 2;
   633   }
   634   TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len, thread);
   635   if (klass->name() == ty_sym) {
   636     return true;
   637   }
   638   // Compare primary supers
   639   int super_depth = klass->super_depth();
   640   int idx;
   641   for (idx = 0; idx < super_depth; idx++) {
   642     if (Klass::cast(klass->primary_super_of_depth(idx))->name() == ty_sym) {
   643       return true;
   644     }
   645   }
   646   // Compare secondary supers
   647   objArrayOop sec_supers = klass->secondary_supers();
   648   for (idx = 0; idx < sec_supers->length(); idx++) {
   649     if (Klass::cast((klassOop) sec_supers->obj_at(idx))->name() == ty_sym) {
   650       return true;
   651     }
   652   }
   653   return false;
   654 }
   656 // Checks error conditions:
   657 //   JVMTI_ERROR_INVALID_SLOT
   658 //   JVMTI_ERROR_TYPE_MISMATCH
    659 // Returns: 'true' - everything is Ok; 'false' - an error code has been set in _result
   661 bool VM_GetOrSetLocal::check_slot_type(javaVFrame* jvf) {
   662   methodOop method_oop = jvf->method();
   663   if (!method_oop->has_localvariable_table()) {
   664     // Just to check index boundaries
   665     jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
   666     if (_index < 0 || _index + extra_slot >= method_oop->max_locals()) {
   667       _result = JVMTI_ERROR_INVALID_SLOT;
   668       return false;
   669     }
   670     return true;
   671   }
   673   jint num_entries = method_oop->localvariable_table_length();
   674   if (num_entries == 0) {
   675     _result = JVMTI_ERROR_INVALID_SLOT;
   676     return false;       // There are no slots
   677   }
   678   int signature_idx = -1;
   679   int vf_bci = jvf->bci();
   680   LocalVariableTableElement* table = method_oop->localvariable_table_start();
   681   for (int i = 0; i < num_entries; i++) {
   682     int start_bci = table[i].start_bci;
   683     int end_bci = start_bci + table[i].length;
   685     // Here we assume that locations of LVT entries
   686     // with the same slot number cannot be overlapped
   687     if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
   688       signature_idx = (int) table[i].descriptor_cp_index;
   689       break;
   690     }
   691   }
   692   if (signature_idx == -1) {
   693     _result = JVMTI_ERROR_INVALID_SLOT;
   694     return false;       // Incorrect slot index
   695   }
   696   Symbol*   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
   697   const char* signature = (const char *) sign_sym->as_utf8();
   698   BasicType slot_type = char2type(signature[0]);
   700   switch (slot_type) {
   701   case T_BYTE:
   702   case T_SHORT:
   703   case T_CHAR:
   704   case T_BOOLEAN:
   705     slot_type = T_INT;
   706     break;
   707   case T_ARRAY:
   708     slot_type = T_OBJECT;
   709     break;
   710   };
   711   if (_type != slot_type) {
   712     _result = JVMTI_ERROR_TYPE_MISMATCH;
   713     return false;
   714   }
   716   jobject jobj = _value.l;
   717   if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
   718     // Check that the jobject class matches the return type signature.
   719     JavaThread* cur_thread = JavaThread::current();
   720     HandleMark hm(cur_thread);
   722     Handle obj = Handle(cur_thread, JNIHandles::resolve_external_guard(jobj));
   723     NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
   724     KlassHandle ob_kh = KlassHandle(cur_thread, obj->klass());
   725     NULL_CHECK(ob_kh, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
   727     if (!is_assignable(signature, Klass::cast(ob_kh()), cur_thread)) {
   728       _result = JVMTI_ERROR_TYPE_MISMATCH;
   729       return false;
   730     }
   731   }
   732   return true;
   733 }
   735 static bool can_be_deoptimized(vframe* vf) {
   736   return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
   737 }
   739 bool VM_GetOrSetLocal::doit_prologue() {
   740   _jvf = get_java_vframe();
   741   NULL_CHECK(_jvf, false);
   743   if (_jvf->method()->is_native()) {
   744     if (getting_receiver() && !_jvf->method()->is_static()) {
   745       return true;
   746     } else {
   747       _result = JVMTI_ERROR_OPAQUE_FRAME;
   748       return false;
   749     }
   750   }
   752   if (!check_slot_type(_jvf)) {
   753     return false;
   754   }
   755   return true;
   756 }
   758 void VM_GetOrSetLocal::doit() {
   759   if (_set) {
   760     // Force deoptimization of frame if compiled because it's
   761     // possible the compiler emitted some locals as constant values,
   762     // meaning they are not mutable.
   763     if (can_be_deoptimized(_jvf)) {
   765       // Schedule deoptimization so that eventually the local
   766       // update will be written to an interpreter frame.
   767       Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());
   769       // Now store a new value for the local which will be applied
   770       // once deoptimization occurs. Note however that while this
   771       // write is deferred until deoptimization actually happens
    772       // any vframe created after this point will have its locals
   773       // reflecting this update so as far as anyone can see the
   774       // write has already taken place.
   776       // If we are updating an oop then get the oop from the handle
   777       // since the handle will be long gone by the time the deopt
   778       // happens. The oop stored in the deferred local will be
   779       // gc'd on its own.
   780       if (_type == T_OBJECT) {
   781         _value.l = (jobject) (JNIHandles::resolve_external_guard(_value.l));
   782       }
   783       // Re-read the vframe so we can see that it is deoptimized
   784       // [ Only need because of assert in update_local() ]
   785       _jvf = get_java_vframe();
   786       ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
   787       return;
   788     }
   789     StackValueCollection *locals = _jvf->locals();
   790     HandleMark hm;
   792     switch (_type) {
   793       case T_INT:    locals->set_int_at   (_index, _value.i); break;
   794       case T_LONG:   locals->set_long_at  (_index, _value.j); break;
   795       case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
   796       case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
   797       case T_OBJECT: {
   798         Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
   799         locals->set_obj_at (_index, ob_h);
   800         break;
   801       }
   802       default: ShouldNotReachHere();
   803     }
   804     _jvf->set_locals(locals);
   805   } else {
   806     if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
   807       assert(getting_receiver(), "Can only get here when getting receiver");
   808       oop receiver = _jvf->fr().get_native_receiver();
   809       _value.l = JNIHandles::make_local(_calling_thread, receiver);
   810     } else {
   811       StackValueCollection *locals = _jvf->locals();
   813       if (locals->at(_index)->type() == T_CONFLICT) {
   814         memset(&_value, 0, sizeof(_value));
   815         _value.l = NULL;
   816         return;
   817       }
   819       switch (_type) {
   820         case T_INT:    _value.i = locals->int_at   (_index);   break;
   821         case T_LONG:   _value.j = locals->long_at  (_index);   break;
   822         case T_FLOAT:  _value.f = locals->float_at (_index);   break;
   823         case T_DOUBLE: _value.d = locals->double_at(_index);   break;
   824         case T_OBJECT: {
   825           // Wrap the oop to be returned in a local JNI handle since
   826           // oops_do() no longer applies after doit() is finished.
   827           oop obj = locals->obj_at(_index)();
   828           _value.l = JNIHandles::make_local(_calling_thread, obj);
   829           break;
   830         }
   831         default: ShouldNotReachHere();
   832       }
   833     }
   834   }
   835 }
   838 bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
   839   return true; // May need to deoptimize
   840 }
   843 VM_GetReceiver::VM_GetReceiver(
   844     JavaThread* thread, JavaThread* caller_thread, jint depth)
   845     : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}
   847 /////////////////////////////////////////////////////////////////////////////////////////
   849 //
   850 // class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
   851 //
   853 bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
   854   // external suspend should have caught suspending a thread twice
   856   // Immediate suspension required for JPDA back-end so JVMTI agent threads do
   857   // not deadlock due to later suspension on transitions while holding
   858   // raw monitors.  Passing true causes the immediate suspension.
   859   // java_suspend() will catch threads in the process of exiting
   860   // and will ignore them.
   861   java_thread->java_suspend();
    863   // It would be nice to have the following assertion enabled all the time,
   864   // but it is possible for a racing resume request to have resumed
   865   // this thread right after we suspended it. Temporarily enable this
   866   // assertion if you are chasing a different kind of bug.
   867   //
   868   // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
   869   //   java_thread->is_being_ext_suspended(), "thread is not suspended");
   871   if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
   872     // check again because we can get delayed in java_suspend():
   873     // the thread is in process of exiting.
   874     return false;
   875   }
   877   return true;
   878 }
   880 bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
   881   // external suspend should have caught resuming a thread twice
   882   assert(java_thread->is_being_ext_suspended(), "thread should be suspended");
   884   // resume thread
   885   {
   886     // must always grab Threads_lock, see JVM_SuspendThread
   887     MutexLocker ml(Threads_lock);
   888     java_thread->java_resume();
   889   }
   891   return true;
   892 }
   895 void JvmtiSuspendControl::print() {
   896 #ifndef PRODUCT
   897   MutexLocker mu(Threads_lock);
   898   ResourceMark rm;
   900   tty->print("Suspended Threads: [");
   901   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
   902 #if JVMTI_TRACE
   903     const char *name   = JvmtiTrace::safe_get_thread_name(thread);
   904 #else
   905     const char *name   = "";
   906 #endif /*JVMTI_TRACE */
   907     tty->print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
   908     if (!thread->has_last_Java_frame()) {
   909       tty->print("no stack");
   910     }
   911     tty->print(") ");
   912   }
   913   tty->print_cr("]");
   914 #endif
   915 }
   917 #ifndef KERNEL
   919 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
   920     nmethod* nm) {
   921   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
   922   event._event_data.compiled_method_load = nm;
   923   // Keep the nmethod alive until the ServiceThread can process
   924   // this deferred event.
   925   nmethodLocker::lock_nmethod(nm);
   926   return event;
   927 }
   929 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
   930     nmethod* nm, jmethodID id, const void* code) {
   931   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
   932   event._event_data.compiled_method_unload.nm = nm;
   933   event._event_data.compiled_method_unload.method_id = id;
   934   event._event_data.compiled_method_unload.code_begin = code;
   935   // Keep the nmethod alive until the ServiceThread can process
   936   // this deferred event. This will keep the memory for the
   937   // generated code from being reused too early. We pass
   938   // zombie_ok == true here so that our nmethod that was just
   939   // made into a zombie can be locked.
   940   nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
   941   return event;
   942 }
   944 JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
   945       const char* name, const void* code_begin, const void* code_end) {
   946   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
   947   // Need to make a copy of the name since we don't know how long
   948   // the event poster will keep it around after we enqueue the
   949   // deferred event and return. strdup() failure is handled in
   950   // the post() routine below.
   951   event._event_data.dynamic_code_generated.name = os::strdup(name);
   952   event._event_data.dynamic_code_generated.code_begin = code_begin;
   953   event._event_data.dynamic_code_generated.code_end = code_end;
   954   return event;
   955 }
   957 void JvmtiDeferredEvent::post() {
   958   assert(ServiceThread::is_service_thread(Thread::current()),
   959          "Service thread must post enqueued events");
   960   switch(_type) {
   961     case TYPE_COMPILED_METHOD_LOAD: {
   962       nmethod* nm = _event_data.compiled_method_load;
   963       JvmtiExport::post_compiled_method_load(nm);
   964       // done with the deferred event so unlock the nmethod
   965       nmethodLocker::unlock_nmethod(nm);
   966       break;
   967     }
   968     case TYPE_COMPILED_METHOD_UNLOAD: {
   969       nmethod* nm = _event_data.compiled_method_unload.nm;
   970       JvmtiExport::post_compiled_method_unload(
   971         _event_data.compiled_method_unload.method_id,
   972         _event_data.compiled_method_unload.code_begin);
   973       // done with the deferred event so unlock the nmethod
   974       nmethodLocker::unlock_nmethod(nm);
   975       break;
   976     }
   977     case TYPE_DYNAMIC_CODE_GENERATED: {
   978       JvmtiExport::post_dynamic_code_generated_internal(
   979         // if strdup failed give the event a default name
   980         (_event_data.dynamic_code_generated.name == NULL)
   981           ? "unknown_code" : _event_data.dynamic_code_generated.name,
   982         _event_data.dynamic_code_generated.code_begin,
   983         _event_data.dynamic_code_generated.code_end);
   984       if (_event_data.dynamic_code_generated.name != NULL) {
   985         // release our copy
   986         os::free((void *)_event_data.dynamic_code_generated.name);
   987       }
   988       break;
   989     }
   990     default:
   991       ShouldNotReachHere();
   992   }
   993 }
   995 JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL;
   996 JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL;
   998 volatile JvmtiDeferredEventQueue::QueueNode*
   999     JvmtiDeferredEventQueue::_pending_list = NULL;
  1001 bool JvmtiDeferredEventQueue::has_events() {
  1002   assert(Service_lock->owned_by_self(), "Must own Service_lock");
   1003   return _queue_head != NULL || _pending_list != NULL;
   1004 }
  1006 void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) {
  1007   assert(Service_lock->owned_by_self(), "Must own Service_lock");
  1009   process_pending_events();
  1011   // Events get added to the end of the queue (and are pulled off the front).
  1012   QueueNode* node = new QueueNode(event);
  1013   if (_queue_tail == NULL) {
  1014     _queue_tail = _queue_head = node;
  1015   } else {
  1016     assert(_queue_tail->next() == NULL, "Must be the last element in the list");
  1017     _queue_tail->set_next(node);
   1018     _queue_tail = node;
   1019   }
  1021   Service_lock->notify_all();
  1022   assert((_queue_head == NULL) == (_queue_tail == NULL),
  1023          "Inconsistent queue markers");
  1026 JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  1027   assert(Service_lock->owned_by_self(), "Must own Service_lock");
  1029   process_pending_events();
  1031   assert(_queue_head != NULL, "Nothing to dequeue");
  1033   if (_queue_head == NULL) {
  1034     // Just in case this happens in product; it shouldn't but let's not crash
   1035     return JvmtiDeferredEvent();
   1036   }
  1038   QueueNode* node = _queue_head;
  1039   _queue_head = _queue_head->next();
  1040   if (_queue_head == NULL) {
   1041     _queue_tail = NULL;
   1042   }
  1044   assert((_queue_head == NULL) == (_queue_tail == NULL),
  1045          "Inconsistent queue markers");
  1047   JvmtiDeferredEvent event = node->event();
  1048   delete node;
   1049   return event;
   1050 }
  1052 void JvmtiDeferredEventQueue::add_pending_event(
  1053     const JvmtiDeferredEvent& event) {
  1055   QueueNode* node = new QueueNode(event);
  1057   bool success = false;
  1058   QueueNode* prev_value = (QueueNode*)_pending_list;
  1059   do {
  1060     node->set_next(prev_value);
  1061     prev_value = (QueueNode*)Atomic::cmpxchg_ptr(
  1062         (void*)node, (volatile void*)&_pending_list, (void*)node->next());
   1063   } while (prev_value != node->next());
   1064 }
  1066 // This method transfers any events that were added by someone NOT holding
  1067 // the lock into the mainline queue.
  1068 void JvmtiDeferredEventQueue::process_pending_events() {
  1069   assert(Service_lock->owned_by_self(), "Must own Service_lock");
  1071   if (_pending_list != NULL) {
  1072     QueueNode* head =
  1073         (QueueNode*)Atomic::xchg_ptr(NULL, (volatile void*)&_pending_list);
  1075     assert((_queue_head == NULL) == (_queue_tail == NULL),
  1076            "Inconsistent queue markers");
  1078     if (head != NULL) {
  1079       // Since we've treated the pending list as a stack (with newer
  1080       // events at the beginning), we need to join the bottom of the stack
  1081       // with the 'tail' of the queue in order to get the events in the
  1082       // right order.  We do this by reversing the pending list and appending
  1083       // it to the queue.
  1085       QueueNode* new_tail = head;
  1086       QueueNode* new_head = NULL;
  1088       // This reverses the list
  1089       QueueNode* prev = new_tail;
  1090       QueueNode* node = new_tail->next();
  1091       new_tail->set_next(NULL);
  1092       while (node != NULL) {
  1093         QueueNode* next = node->next();
  1094         node->set_next(prev);
  1095         prev = node;
   1096         node = next;
   1097       }
  1098       new_head = prev;
  1100       // Now append the new list to the queue
  1101       if (_queue_tail != NULL) {
  1102         _queue_tail->set_next(new_head);
  1103       } else { // _queue_head == NULL
   1104         _queue_head = new_head;
   1105       }
   1106       _queue_tail = new_tail;
   1107     }
   1108   }
   1109 }
  1111 #endif // ndef KERNEL
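The deferred-event queue above relies on a small lock-free hand-off: producers that cannot take Service_lock push onto _pending_list with a compare-and-swap (add_pending_event), and the ServiceThread later swaps the whole list out and reverses it to restore FIFO order (process_pending_events). The following standalone sketch shows the same pattern under assumed, illustrative names (Node, add_pending, drain_pending) and uses std::atomic rather than HotSpot's Atomic::cmpxchg_ptr/xchg_ptr; it is a sketch of the technique, not the VM's code.

    #include <atomic>
    #include <cstdio>

    // Illustrative node type; the real code uses JvmtiDeferredEventQueue::QueueNode.
    struct Node {
      int   payload;
      Node* next;
    };

    static std::atomic<Node*> pending_head{nullptr};  // lock-free "pending" stack
    static Node* queue_head = nullptr;                // FIFO queue (lock-protected in HotSpot)
    static Node* queue_tail = nullptr;

    // Producer side, no lock held: CAS-push onto the pending stack.
    void add_pending(Node* n) {
      Node* old = pending_head.load(std::memory_order_relaxed);
      do {
        n->next = old;
      } while (!pending_head.compare_exchange_weak(old, n,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed));
    }

    // Consumer side, lock held in the real code: take the whole stack at once,
    // reverse it (the stack holds newest-first), then splice it onto the queue.
    void drain_pending() {
      Node* head = pending_head.exchange(nullptr, std::memory_order_acquire);
      if (head == nullptr) return;

      Node* new_tail = head;       // newest event ends up at the queue tail
      Node* prev = head;
      Node* node = head->next;
      new_tail->next = nullptr;
      while (node != nullptr) {    // reverse the list in place
        Node* next = node->next;
        node->next = prev;
        prev = node;
        node = next;
      }
      Node* new_head = prev;       // oldest event

      if (queue_tail != nullptr) {
        queue_tail->next = new_head;
      } else {
        queue_head = new_head;
      }
      queue_tail = new_tail;
    }

    int main() {
      Node a{1, nullptr}, b{2, nullptr}, c{3, nullptr};
      add_pending(&a);   // oldest
      add_pending(&b);
      add_pending(&c);   // newest
      drain_pending();
      for (Node* n = queue_head; n != nullptr; n = n->next) {
        printf("%d ", n->payload);   // prints "1 2 3": FIFO order is preserved
      }
      printf("\n");
      return 0;
    }

The reversal step matters because the CAS push builds a LIFO stack; without it, the deferred events would be posted out of the order in which they were generated.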
