src/share/vm/prims/jvmtiImpl.cpp

author:      kamg
date:        Sat, 26 Feb 2011 13:33:23 -0500
changeset:   2583 f91db74a6810
parent:      2511 bf8517f4e4d0
child:       2624 46a56fac55c7
permissions: -rw-r--r--

7017640: Fix for 6766644 deadlocks on some NSK tests when running with -Xcomp
Summary: Dynamic-code generated events should be deferred and processed by service thread
Reviewed-by: dsamersoff, dcubed

/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "interpreter/interpreter.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "prims/jvmtiAgentThread.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/exceptions.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
//
// class JvmtiAgentThread
//
// JavaThread used to wrap a thread started by an agent
// using the JVMTI method RunAgentThread.
//

JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    : JavaThread(start_function_wrapper) {
    _env = env;
    _start_fn = start_fn;
    _start_arg = start_arg;
}

void
JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
    // It is expected that any Agent threads will be created as
    // Java Threads.  If this is the case, notification of the creation
    // of the thread is given in JavaThread::thread_main().
    assert(thread->is_Java_thread(), "debugger thread should be a Java Thread");
    assert(thread == JavaThread::current(), "sanity check");

    JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
    dthread->call_start_function();
}

void
JvmtiAgentThread::call_start_function() {
    ThreadToNativeFromVM transition(this);
    _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
}


//
// class GrowableCache - private methods
//
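
// Rebuild the NULL-terminated array of cache values from the current
// elements and hand it to the registered listener so that dependent data
// structures (e.g. the breakpoint address list) stay in sync.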
void GrowableCache::recache() {
  int len = _elements->length();

  FREE_C_HEAP_ARRAY(address, _cache);
  _cache = NEW_C_HEAP_ARRAY(address,len+1);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == NULL) {
      assert(false, "cannot recache NULL elements");
      remove(i);
      return;
    }
  }
  _cache[len] = NULL;

  _listener_fun(_this_obj,_cache);
}
bool GrowableCache::equals(void* v, GrowableElement *e2) {
  GrowableElement *e1 = (GrowableElement *) v;
  assert(e1 != NULL, "e1 != NULL");
  assert(e2 != NULL, "e2 != NULL");

  return e1->equals(e2);
}

//
// class GrowableCache - public methods
//

GrowableCache::GrowableCache() {
  _this_obj       = NULL;
  _listener_fun   = NULL;
  _elements       = NULL;
  _cache          = NULL;
}

GrowableCache::~GrowableCache() {
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache);
}

void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
  _this_obj       = this_obj;
  _listener_fun   = listener_fun;
  _elements       = new (ResourceObj::C_HEAP) GrowableArray<GrowableElement*>(5,true);
  recache();
}

// number of elements in the collection
int GrowableCache::length() {
  return _elements->length();
}

// get the value of the index element in the collection
GrowableElement* GrowableCache::at(int index) {
  GrowableElement *e = (GrowableElement *) _elements->at(index);
  assert(e != NULL, "e != NULL");
  return e;
}

int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}

// append a copy of the element to the end of the collection
void GrowableCache::append(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);
  recache();
}

// insert a copy of the element using lessthan()
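// The clone is appended and then bubbled backwards one position at a time
// until the ordering defined by lessThan() is restored; a single backward
// pass is enough to keep an already-sorted collection sorted.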
void GrowableCache::insert(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);

  int n = length()-2;
  for (int i=n; i>=0; i--) {
    GrowableElement *e1 = _elements->at(i);
    GrowableElement *e2 = _elements->at(i+1);
    if (e2->lessThan(e1)) {
      _elements->at_put(i+1, e1);
      _elements->at_put(i,   e2);
    }
  }

  recache();
}

// remove the element at index
void GrowableCache::remove (int index) {
  GrowableElement *e = _elements->at(index);
  assert(e != NULL, "e != NULL");
  _elements->remove(e);
  delete e;
  recache();
}

// clear out all elements, release all heap space and
// let our listener know that things have changed.
void GrowableCache::clear() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    delete _elements->at(i);
  }
  _elements->clear();
  recache();
}

void GrowableCache::oops_do(OopClosure* f) {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    GrowableElement *e = _elements->at(i);
    e->oops_do(f);
  }
}

void GrowableCache::gc_epilogue() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
  }
}

//
// class JvmtiBreakpoint
//

JvmtiBreakpoint::JvmtiBreakpoint() {
  _method = NULL;
  _bci    = 0;
#ifdef CHECK_UNHANDLED_OOPS
  // This one is always allocated with new, but check it just in case.
  Thread *thread = Thread::current();
  if (thread->is_in_stack((address)&_method)) {
    thread->allow_unhandled_oop((oop*)&_method);
  }
#endif // CHECK_UNHANDLED_OOPS
}

JvmtiBreakpoint::JvmtiBreakpoint(methodOop m_method, jlocation location) {
  _method        = m_method;
  assert(_method != NULL, "_method != NULL");
  _bci           = (int) location;
#ifdef CHECK_UNHANDLED_OOPS
  // Could be allocated with new and wouldn't be on the unhandled oop list.
  Thread *thread = Thread::current();
  if (thread->is_in_stack((address)&_method)) {
    thread->allow_unhandled_oop(&_method);
  }
#endif // CHECK_UNHANDLED_OOPS

  assert(_bci >= 0, "_bci >= 0");
}

void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
  _method   = bp._method;
  _bci      = bp._bci;
}

bool JvmtiBreakpoint::lessThan(JvmtiBreakpoint& bp) {
  Unimplemented();
  return false;
}

bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
  return _method   == bp._method
    &&   _bci      == bp._bci;
}

bool JvmtiBreakpoint::is_valid() {
  return _method != NULL &&
         _bci >= 0;
}

address JvmtiBreakpoint::getBcp() {
  return _method->bcp_from(_bci);
}
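
// Apply the given set/clear action to this breakpoint's method and to any
// EMCP (equivalent-modulo-constant-pool) previous versions of the method
// that may still be live after a RedefineClasses() operation.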
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  ((methodOopDesc*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that
  // are EMCP. Directly or transitively obsolete methods are
  // not saved in the PreviousVersionInfo.
  Thread *thread = Thread::current();
  instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  {
    ResourceMark rm(thread);
    // PreviousVersionInfo objects returned via PreviousVersionWalker
    // contain a GrowableArray of handles. We have to clean up the
    // GrowableArray _after_ the PreviousVersionWalker destructor
    // has destroyed the handles.
    {
      // search previous versions if they exist
      PreviousVersionWalker pvw((instanceKlass *)ikh()->klass_part());
      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
           pv_info != NULL; pv_info = pvw.next_previous_version()) {
        GrowableArray<methodHandle>* methods =
          pv_info->prev_EMCP_method_handles();

        if (methods == NULL) {
          // We have run into a PreviousVersion generation where
          // all methods were made obsolete during that generation's
          // RedefineClasses() operation. At the time of that
          // operation, all EMCP methods were flushed so we don't
          // have to go back any further.
          //
          // A NULL methods array is different than an empty methods
          // array. We cannot infer any optimizations about older
          // generations from an empty methods array for the current
          // generation.
          break;
        }

        for (int i = methods->length() - 1; i >= 0; i--) {
          methodHandle method = methods->at(i);
          if (method->name() == m_name && method->signature() == m_signature) {
            RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
              meth_act == &methodOopDesc::set_breakpoint ? "sett" : "clear",
              method->name()->as_C_string(),
              method->signature()->as_C_string()));
            assert(!method->is_obsolete(), "only EMCP methods here");

            ((methodOopDesc*)method()->*meth_act)(_bci);
            break;
          }
        }
      }
    } // pvw is cleaned up
  } // rm is cleaned up
}

void JvmtiBreakpoint::set() {
  each_method_version_do(&methodOopDesc::set_breakpoint);
}

void JvmtiBreakpoint::clear() {
  each_method_version_do(&methodOopDesc::clear_breakpoint);
}

void JvmtiBreakpoint::print() {
#ifndef PRODUCT
  const char *class_name  = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
  const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();

  tty->print("Breakpoint(%s,%s,%d,%p)",class_name, method_name, _bci, getBcp());
#endif
}


//
// class VM_ChangeBreakpoints
//
// Modify the Breakpoints data structure at a safepoint
//

void VM_ChangeBreakpoints::doit() {
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  case CLEAR_ALL_BREAKPOINT:
    _breakpoints->clearall_at_safepoint();
    break;
  default:
    assert(false, "Unknown operation");
  }
}

void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
  // This operation keeps breakpoints alive
  if (_breakpoints != NULL) {
    _breakpoints->oops_do(f);
  }
  if (_bp != NULL) {
    _bp->oops_do(f);
  }
}

//
// class JvmtiBreakpoints
//
// a JVMTI internal collection of JvmtiBreakpoint
//
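// All modifications of the collection happen at a safepoint via
// VM_ChangeBreakpoints; the set()/clear()/clearall() entry points below
// only enqueue the corresponding VM operation.
//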
JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
  _bps.initialize(this,listener_fun);
}

JvmtiBreakpoints:: ~JvmtiBreakpoints() {}

void  JvmtiBreakpoints::oops_do(OopClosure* f) {
  _bps.oops_do(f);
}

void JvmtiBreakpoints::gc_epilogue() {
  _bps.gc_epilogue();
}

void  JvmtiBreakpoints::print() {
#ifndef PRODUCT
  ResourceMark rm;

  int n = _bps.length();
  for (int i=0; i<n; i++) {
    JvmtiBreakpoint& bp = _bps.at(i);
    tty->print("%d: ", i);
    bp.print();
    tty->print_cr("");
  }
#endif
}


void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i == -1) {
    _bps.append(bp);
    bp.set();
  }
}

void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i != -1) {
    _bps.remove(i);
    bp.clear();
  }
}

void JvmtiBreakpoints::clearall_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int len = _bps.length();
  for (int i=0; i<len; i++) {
    _bps.at(i).clear();
  }
  _bps.clear();
}

int JvmtiBreakpoints::length() { return _bps.length(); }

int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) != -1) {
     return JVMTI_ERROR_DUPLICATE;
  }
  VM_ChangeBreakpoints set_breakpoint(this,VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  VMThread::execute(&set_breakpoint);
  return JVMTI_ERROR_NONE;
}

int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) == -1) {
     return JVMTI_ERROR_NOT_FOUND;
  }

  VM_ChangeBreakpoints clear_breakpoint(this,VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  VMThread::execute(&clear_breakpoint);
  return JVMTI_ERROR_NONE;
}

void JvmtiBreakpoints::clearall_in_class_at_safepoint(klassOop klass) {
  bool changed = true;
  // We are going to run thru the list of bkpts
  // and delete some.  This deletion probably alters
  // the list in some implementation defined way such
  // that when we delete entry i, the next entry might
  // no longer be at i+1.  To be safe, each time we delete
  // an entry, we'll just start again from the beginning.
  // We'll stop when we make a pass thru the whole list without
  // deleting anything.
  while (changed) {
    int len = _bps.length();
    changed = false;
    for (int i = 0; i < len; i++) {
      JvmtiBreakpoint& bp = _bps.at(i);
      if (bp.method()->method_holder() == klass) {
        bp.clear();
        _bps.remove(i);
        // This changed 'i' so we have to start over.
        changed = true;
        break;
      }
    }
  }
}

void JvmtiBreakpoints::clearall() {
  VM_ChangeBreakpoints clearall_breakpoint(this,VM_ChangeBreakpoints::CLEAR_ALL_BREAKPOINT);
  VMThread::execute(&clearall_breakpoint);
}

//
// class JvmtiCurrentBreakpoints
//
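// Holds the single, lazily created JvmtiBreakpoints instance plus a
// NULL-terminated cache of breakpoint addresses that listener_fun()
// refreshes whenever the underlying collection changes.
//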
JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = NULL;
address *         JvmtiCurrentBreakpoints::_breakpoint_list    = NULL;


JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
  _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
  assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
  return (*_jvmti_breakpoints);
}

void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
  assert(this_jvmti != NULL, "this_jvmti != NULL");

  debug_only(int n = this_jvmti->length(););
  assert(cache[n] == NULL, "cache must be NULL terminated");

  set_breakpoint_list(cache);
}


void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
  if (_jvmti_breakpoints != NULL) {
    _jvmti_breakpoints->oops_do(f);
  }
}

void JvmtiCurrentBreakpoints::gc_epilogue() {
  if (_jvmti_breakpoints != NULL) {
    _jvmti_breakpoints->gc_epilogue();
  }
}

///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
//

// Constructor for non-object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _set(false)
  , _jvf(NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object or non-object setter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type, jvalue value)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _set(true)
  , _jvf(NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
  : _thread(thread)
  , _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(T_OBJECT)
  , _set(false)
  , _jvf(NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

vframe *VM_GetOrSetLocal::get_vframe() {
  if (!_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(_thread);
  vframe *vf = _thread->last_java_vframe(&reg_map);
  int d = 0;
  while ((vf != NULL) && (d < _depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}

javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
  vframe* vf = get_vframe();
  if (vf == NULL) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return NULL;
  }
  javaVFrame *jvf = (javaVFrame*)vf;

  if (!vf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return NULL;
  }
  return jvf;
}
// Check that the klass is assignable to a type with the given signature.
// Another solution could be to use the function Klass::is_subtype_of(type).
// But the type class can be forced to load/initialize eagerly in such a case.
// This may cause unexpected consequences like CFLH or class-init JVMTI events.
// It is better to avoid such a behavior.
bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
  assert(ty_sign != NULL, "type signature must not be NULL");
  assert(thread != NULL, "thread must not be NULL");
  assert(klass != NULL, "klass must not be NULL");

  int len = (int) strlen(ty_sign);
  if (ty_sign[0] == 'L' && ty_sign[len-1] == ';') { // Need pure class/interface name
    ty_sign++;
    len -= 2;
  }
  TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len, thread);
  if (klass->name() == ty_sym) {
    return true;
  }
  // Compare primary supers
  int super_depth = klass->super_depth();
  int idx;
  for (idx = 0; idx < super_depth; idx++) {
    if (Klass::cast(klass->primary_super_of_depth(idx))->name() == ty_sym) {
      return true;
    }
  }
  // Compare secondary supers
  objArrayOop sec_supers = klass->secondary_supers();
  for (idx = 0; idx < sec_supers->length(); idx++) {
    if (Klass::cast((klassOop) sec_supers->obj_at(idx))->name() == ty_sym) {
      return true;
    }
  }
  return false;
}

// Checks error conditions:
//   JVMTI_ERROR_INVALID_SLOT
//   JVMTI_ERROR_TYPE_MISMATCH
// Returns: 'true' - everything is Ok, 'false' - an error occurred
// (_result holds the error code)

bool VM_GetOrSetLocal::check_slot_type(javaVFrame* jvf) {
  methodOop method_oop = jvf->method();
  if (!method_oop->has_localvariable_table()) {
    // Just to check index boundaries
    jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
    if (_index < 0 || _index + extra_slot >= method_oop->max_locals()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
    return true;
  }

  jint num_entries = method_oop->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // There are no slots
  }
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method_oop->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot be overlapped
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;       // Incorrect slot index
  }
  Symbol*   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
  const char* signature = (const char *) sign_sym->as_utf8();
  BasicType slot_type = char2type(signature[0]);

  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
    // Check that the jobject class matches the return type signature.
    JavaThread* cur_thread = JavaThread::current();
    HandleMark hm(cur_thread);

    Handle obj = Handle(cur_thread, JNIHandles::resolve_external_guard(jobj));
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    KlassHandle ob_kh = KlassHandle(cur_thread, obj->klass());
    NULL_CHECK(ob_kh, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    if (!is_assignable(signature, Klass::cast(ob_kh()), cur_thread)) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}

static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}
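
// doit_prologue() runs in the thread that requested the VM operation, before
// the operation itself executes, so frame and slot validation errors can be
// reported without reaching a safepoint.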
bool VM_GetOrSetLocal::doit_prologue() {
  _jvf = get_java_vframe();
  NULL_CHECK(_jvf, false);

  if (_jvf->method()->is_native()) {
    if (getting_receiver() && !_jvf->method()->is_static()) {
      return true;
    } else {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return false;
    }
  }

  if (!check_slot_type(_jvf)) {
    return false;
  }
  return true;
}

void VM_GetOrSetLocal::doit() {
  if (_set) {
    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens,
      // any vframe created after this point will have its locals
      // reflecting this update, so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        _value.l = (jobject) (JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only need because of assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    StackValueCollection *locals = _jvf->locals();
    HandleMark hm;

    switch (_type) {
      case T_INT:    locals->set_int_at   (_index, _value.i); break;
      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
      case T_OBJECT: {
        Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
        locals->set_obj_at (_index, ob_h);
        break;
      }
      default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      if (locals->at(_index)->type() == T_CONFLICT) {
        memset(&_value, 0, sizeof(_value));
        _value.l = NULL;
        return;
      }

      switch (_type) {
        case T_INT:    _value.i = locals->int_at   (_index);   break;
        case T_LONG:   _value.j = locals->long_at  (_index);   break;
        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
        case T_OBJECT: {
          // Wrap the oop to be returned in a local JNI handle since
          // oops_do() no longer applies after doit() is finished.
          oop obj = locals->obj_at(_index)();
          _value.l = JNIHandles::make_local(_calling_thread, obj);
          break;
        }
        default: ShouldNotReachHere();
      }
    }
  }
}
bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}


VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}

/////////////////////////////////////////////////////////////////////////////////////////

//
// class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
//

bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
  // external suspend should have caught suspending a thread twice

  // Immediate suspension required for JPDA back-end so JVMTI agent threads do
  // not deadlock due to later suspension on transitions while holding
  // raw monitors.  Passing true causes the immediate suspension.
  // java_suspend() will catch threads in the process of exiting
  // and will ignore them.
  java_thread->java_suspend();

  // It would be nice to have the following assertion in all the time,
  // but it is possible for a racing resume request to have resumed
  // this thread right after we suspended it. Temporarily enable this
  // assertion if you are chasing a different kind of bug.
  //
  // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
  //   java_thread->is_being_ext_suspended(), "thread is not suspended");

  if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
    // check again because we can get delayed in java_suspend():
    // the thread is in process of exiting.
    return false;
  }

  return true;
}

bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
  // external suspend should have caught resuming a thread twice
  assert(java_thread->is_being_ext_suspended(), "thread should be suspended");

  // resume thread
  {
    // must always grab Threads_lock, see JVM_SuspendThread
    MutexLocker ml(Threads_lock);
    java_thread->java_resume();
  }

  return true;
}


void JvmtiSuspendControl::print() {
#ifndef PRODUCT
  MutexLocker mu(Threads_lock);
  ResourceMark rm;

  tty->print("Suspended Threads: [");
  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
#if JVMTI_TRACE
    const char *name   = JvmtiTrace::safe_get_thread_name(thread);
#else
    const char *name   = "";
#endif /*JVMTI_TRACE */
    tty->print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
    if (!thread->has_last_Java_frame()) {
      tty->print("no stack");
    }
    tty->print(") ");
  }
  tty->print_cr("]");
#endif
}

#ifndef KERNEL
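
// Deferred JVMTI event support (see 7017640). Events that cannot safely be
// posted from the thread that generates them (for example, dynamic code
// generated events raised while holding internal VM locks) are queued here
// and later posted by the service thread. The queue itself is protected by
// Service_lock; threads that must not take that lock instead push onto the
// lock-free _pending_list, which is folded into the queue the next time the
// queue is touched under the lock.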
JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
    nmethod* nm) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
  event._event_data.compiled_method_load = nm;
  nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
    jmethodID id, const void* code) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
  event._event_data.compiled_method_unload.method_id = id;
  event._event_data.compiled_method_unload.code_begin = code;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
      const char* name, const void* code_begin, const void* code_end) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
  event._event_data.dynamic_code_generated.name = name;
  event._event_data.dynamic_code_generated.code_begin = code_begin;
  event._event_data.dynamic_code_generated.code_end = code_end;
  return event;
}
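
// Posts a deferred event. Must run on the service thread; for
// compiled-method-load events the nmethod was locked when the event was
// created and is unlocked here once the event has been posted.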
void JvmtiDeferredEvent::post() {
  assert(ServiceThread::is_service_thread(Thread::current()),
         "Service thread must post enqueued events");
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      nmethodLocker::unlock_nmethod(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD:
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    case TYPE_DYNAMIC_CODE_GENERATED:
      JvmtiExport::post_dynamic_code_generated_internal(
        _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      break;
    default:
      ShouldNotReachHere();
  }
}

JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL;
JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL;

volatile JvmtiDeferredEventQueue::QueueNode*
    JvmtiDeferredEventQueue::_pending_list = NULL;

bool JvmtiDeferredEventQueue::has_events() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");
  return _queue_head != NULL || _pending_list != NULL;
}

void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  process_pending_events();

  // Events get added to the end of the queue (and are pulled off the front).
  QueueNode* node = new QueueNode(event);
  if (_queue_tail == NULL) {
    _queue_tail = _queue_head = node;
  } else {
    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
    _queue_tail->set_next(node);
    _queue_tail = node;
  }

  Service_lock->notify_all();
  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");
}

JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  process_pending_events();

  assert(_queue_head != NULL, "Nothing to dequeue");

  if (_queue_head == NULL) {
    // Just in case this happens in product; it shouldn't but let's not crash
    return JvmtiDeferredEvent();
  }

  QueueNode* node = _queue_head;
  _queue_head = _queue_head->next();
  if (_queue_head == NULL) {
    _queue_tail = NULL;
  }
  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");

  JvmtiDeferredEvent event = node->event();
  delete node;
  return event;
}
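
// Adds an event to the pending list without taking Service_lock. This is the
// path used by threads that cannot safely acquire the lock; the node is
// pushed onto the head of the list with a compare-and-swap loop and picked
// up later by process_pending_events().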
void JvmtiDeferredEventQueue::add_pending_event(
    const JvmtiDeferredEvent& event) {

  QueueNode* node = new QueueNode(event);

  bool success = false;
  QueueNode* prev_value = (QueueNode*)_pending_list;
  do {
    node->set_next(prev_value);
    prev_value = (QueueNode*)Atomic::cmpxchg_ptr(
        (void*)node, (volatile void*)&_pending_list, (void*)node->next());
  } while (prev_value != node->next());
}

// This method transfers any events that were added by someone NOT holding
// the lock into the mainline queue.
void JvmtiDeferredEventQueue::process_pending_events() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  if (_pending_list != NULL) {
    QueueNode* head =
        (QueueNode*)Atomic::xchg_ptr(NULL, (volatile void*)&_pending_list);

    assert((_queue_head == NULL) == (_queue_tail == NULL),
           "Inconsistent queue markers");

    if (head != NULL) {
      // Since we've treated the pending list as a stack (with newer
      // events at the beginning), we need to join the bottom of the stack
      // with the 'tail' of the queue in order to get the events in the
      // right order.  We do this by reversing the pending list and appending
      // it to the queue.

      QueueNode* new_tail = head;
      QueueNode* new_head = NULL;

      // This reverses the list
      QueueNode* prev = new_tail;
      QueueNode* node = new_tail->next();
      new_tail->set_next(NULL);
      while (node != NULL) {
        QueueNode* next = node->next();
        node->set_next(prev);
        prev = node;
        node = next;
      }
      new_head = prev;

      // Now append the new list to the queue
      if (_queue_tail != NULL) {
        _queue_tail->set_next(new_head);
      } else { // _queue_head == NULL
        _queue_head = new_head;
      }
      _queue_tail = new_tail;
    }
  }
}

#endif // ndef KERNEL
