7024970: 2/3 assert(ServiceThread::is_service_thread(Thread::current())) failed: Service thread must post enqueue


author     dcubed
date       Tue, 15 Mar 2011 06:37:31 -0700
changeset  2624:46a56fac55c7
parent     2623:216d916d5c12
child      2625:65f880e2869b

7024970: 2/3 assert(ServiceThread::is_service_thread(Thread::current())) failed: Service thread must post enqueue
Summary: Change nmethod_lock() to also prevent zombification of the nmethod. CompiledMethodUnload events also need to lock the nmethod. Clean ups in nmethod::make_not_entrant_or_zombie()
Reviewed-by: dholmes, kamg, never, dsamersoff, ysr, coleenp, acorn
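
For readers skimming the diff below: the toy model that follows is not HotSpot
code. Nmethod here is a hypothetical stand-in, the real can_not_entrant_be_converted()
additionally checks the sweeper's stack-traversal mark, and the real lock counter is
a plain jint bumped with Atomic::inc under a guarantee(). The sketch only illustrates
the rule this patch introduces: a non-zero lock count now also blocks zombification,
and zombie_ok == true lets an already-zombified nmethod be locked one last time for
the deferred CompiledMethodUnload event.

    // Simplified model of the locking rule (illustrative only, not HotSpot code).
    #include <atomic>
    #include <cassert>
    #include <cstdio>

    class Nmethod {
      std::atomic<int> _lock_count{0};  // "flushing lock": non-zero blocks removal
      bool _is_zombie{false};

     public:
      bool is_zombie() const       { return _is_zombie; }
      bool is_locked_by_vm() const { return _lock_count.load() > 0; }

      // After the patch, a locked nmethod can neither be flushed nor be made
      // into a zombie (the real code also checks the sweeper traversal mark).
      bool can_not_entrant_be_converted() const { return !is_locked_by_vm(); }

      void make_zombie() {
        assert(!is_locked_by_vm() && "must not zombify a locked nmethod");
        _is_zombie = true;
      }

      // zombie_ok == true is reserved for the deferred CompiledMethodUnload
      // path: the nmethod is already a zombie, but must stay around until
      // the ServiceThread has posted the event.
      static void lock_nmethod(Nmethod* nm, bool zombie_ok = false) {
        if (nm == nullptr) return;
        nm->_lock_count.fetch_add(1);
        assert((zombie_ok || !nm->is_zombie()) && "cannot lock a zombie method");
      }

      static void unlock_nmethod(Nmethod* nm) {
        if (nm == nullptr) return;
        nm->_lock_count.fetch_sub(1);
      }
    };

    int main() {
      Nmethod nm;
      Nmethod::lock_nmethod(&nm);                      // e.g. a deferred-event enqueue
      std::printf("convertible while locked? %d\n",
                  nm.can_not_entrant_be_converted());  // prints 0
      Nmethod::unlock_nmethod(&nm);

      nm.make_zombie();
      Nmethod::lock_nmethod(&nm, /*zombie_ok=*/true);  // CompiledMethodUnload path only
      Nmethod::unlock_nmethod(&nm);
      return 0;
    }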

src/share/vm/code/nmethod.cpp
src/share/vm/code/nmethod.hpp
src/share/vm/prims/jvmtiImpl.cpp
src/share/vm/prims/jvmtiImpl.hpp
     1.1 --- a/src/share/vm/code/nmethod.cpp	Tue Mar 15 06:35:10 2011 -0700
     1.2 +++ b/src/share/vm/code/nmethod.cpp	Tue Mar 15 06:37:31 2011 -0700
     1.3 @@ -1180,14 +1180,17 @@
     1.4    set_stack_traversal_mark(NMethodSweeper::traversal_count());
     1.5  }
     1.6  
     1.7 -// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
     1.8 +// Tell if a non-entrant method can be converted to a zombie (i.e.,
     1.9 +// there are no activations on the stack, not in use by the VM,
    1.10 +// and not in use by the ServiceThread)
    1.11  bool nmethod::can_not_entrant_be_converted() {
    1.12    assert(is_not_entrant(), "must be a non-entrant method");
    1.13  
    1.14    // Since the nmethod sweeper only does partial sweep the sweeper's traversal
    1.15    // count can be greater than the stack traversal count before it hits the
    1.16    // nmethod for the second time.
    1.17 -  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
    1.18 +  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
    1.19 +         !is_locked_by_vm();
    1.20  }
    1.21  
    1.22  void nmethod::inc_decompile_count() {
    1.23 @@ -1294,6 +1297,7 @@
    1.24  // Common functionality for both make_not_entrant and make_zombie
    1.25  bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
    1.26    assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
    1.27 +  assert(!is_zombie(), "should not already be a zombie");
    1.28  
    1.29    // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
    1.30    nmethodLocker nml(this);
    1.31 @@ -1301,11 +1305,6 @@
    1.32    No_Safepoint_Verifier nsv;
    1.33  
    1.34    {
    1.35 -    // If the method is already zombie there is nothing to do
    1.36 -    if (is_zombie()) {
    1.37 -      return false;
    1.38 -    }
    1.39 -
    1.40      // invalidate osr nmethod before acquiring the patching lock since
    1.41      // they both acquire leaf locks and we don't want a deadlock.
    1.42      // This logic is equivalent to the logic below for patching the
    1.43 @@ -1375,13 +1374,12 @@
    1.44        flush_dependencies(NULL);
    1.45      }
    1.46  
    1.47 -    {
    1.48 -      // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
    1.49 -      // and it hasn't already been reported for this nmethod then report it now.
    1.50 -      // (the event may have been reported earilier if the GC marked it for unloading).
    1.51 -      Pause_No_Safepoint_Verifier pnsv(&nsv);
    1.52 -      post_compiled_method_unload();
    1.53 -    }
    1.54 +    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
    1.55 +    // event and it hasn't already been reported for this nmethod then
     1.56 +    // report it now. The event may have been reported earlier if the GC
     1.57 +    // marked it for unloading. JvmtiDeferredEventQueue support means
    1.58 +    // we no longer go to a safepoint here.
    1.59 +    post_compiled_method_unload();
    1.60  
    1.61  #ifdef ASSERT
    1.62      // It's no longer safe to access the oops section since zombie
    1.63 @@ -1566,7 +1564,7 @@
    1.64    if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
    1.65      assert(!unload_reported(), "already unloaded");
    1.66      JvmtiDeferredEvent event =
    1.67 -      JvmtiDeferredEvent::compiled_method_unload_event(
    1.68 +      JvmtiDeferredEvent::compiled_method_unload_event(this,
    1.69            _jmethod_id, insts_begin());
    1.70      if (SafepointSynchronize::is_at_safepoint()) {
    1.71        // Don't want to take the queueing lock. Add it as pending and
    1.72 @@ -2171,10 +2169,12 @@
    1.73    lock_nmethod(_nm);
    1.74  }
    1.75  
    1.76 -void nmethodLocker::lock_nmethod(nmethod* nm) {
    1.77 +// Only JvmtiDeferredEvent::compiled_method_unload_event()
    1.78 +// should pass zombie_ok == true.
    1.79 +void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
    1.80    if (nm == NULL)  return;
    1.81    Atomic::inc(&nm->_lock_count);
    1.82 -  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
    1.83 +  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
    1.84  }
    1.85  
    1.86  void nmethodLocker::unlock_nmethod(nmethod* nm) {
     2.1 --- a/src/share/vm/code/nmethod.hpp	Tue Mar 15 06:35:10 2011 -0700
     2.2 +++ b/src/share/vm/code/nmethod.hpp	Tue Mar 15 06:37:31 2011 -0700
     2.3 @@ -194,7 +194,10 @@
     2.4  
     2.5    NOT_PRODUCT(bool _has_debug_info; )
     2.6  
     2.7 -  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
     2.8 +  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
     2.9 +  // and is not made into a zombie. However, once the nmethod is made into
    2.10 +  // a zombie, it will be locked one final time if CompiledMethodUnload
    2.11 +  // event processing needs to be done.
    2.12    jint  _lock_count;
    2.13  
    2.14    // not_entrant method removal. Each mark_sweep pass will update
    2.15 @@ -522,8 +525,9 @@
    2.16    void flush();
    2.17  
    2.18   public:
    2.19 -  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
    2.20 -  // nmethod, since the VM might have a reference to it. Should only be called from a  safepoint.
    2.21 +  // When true is returned, it is unsafe to remove this nmethod even if
    2.22 +  // it is a zombie, since the VM or the ServiceThread might still be
    2.23 +  // using it.
    2.24    bool is_locked_by_vm() const                    { return _lock_count >0; }
    2.25  
    2.26    // See comment at definition of _last_seen_on_stack
    2.27 @@ -689,13 +693,20 @@
    2.28  
    2.29  };
    2.30  
    2.31 -// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
    2.32 +// Locks an nmethod so its code will not get removed and it will not
    2.33 +// be made into a zombie, even if it is a not_entrant method. After the
    2.34 +// nmethod becomes a zombie, if CompiledMethodUnload event processing
    2.35 +// needs to be done, then lock_nmethod() is used directly to keep the
    2.36 +// generated code from being reused too early.
    2.37  class nmethodLocker : public StackObj {
    2.38    nmethod* _nm;
    2.39  
    2.40   public:
    2.41  
    2.42 -  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
    2.43 +  // note: nm can be NULL
    2.44 +  // Only JvmtiDeferredEvent::compiled_method_unload_event()
    2.45 +  // should pass zombie_ok == true.
    2.46 +  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
    2.47    static void unlock_nmethod(nmethod* nm); // (ditto)
    2.48  
    2.49    nmethodLocker(address pc); // derive nm from pc
     3.1 --- a/src/share/vm/prims/jvmtiImpl.cpp	Tue Mar 15 06:35:10 2011 -0700
     3.2 +++ b/src/share/vm/prims/jvmtiImpl.cpp	Tue Mar 15 06:37:31 2011 -0700
     3.3 @@ -919,15 +919,24 @@
     3.4      nmethod* nm) {
     3.5    JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
     3.6    event._event_data.compiled_method_load = nm;
     3.7 -  nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
     3.8 +  // Keep the nmethod alive until the ServiceThread can process
     3.9 +  // this deferred event.
    3.10 +  nmethodLocker::lock_nmethod(nm);
    3.11    return event;
    3.12  }
    3.13  
    3.14  JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
    3.15 -    jmethodID id, const void* code) {
    3.16 +    nmethod* nm, jmethodID id, const void* code) {
    3.17    JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
    3.18 +  event._event_data.compiled_method_unload.nm = nm;
    3.19    event._event_data.compiled_method_unload.method_id = id;
    3.20    event._event_data.compiled_method_unload.code_begin = code;
    3.21 +  // Keep the nmethod alive until the ServiceThread can process
    3.22 +  // this deferred event. This will keep the memory for the
    3.23 +  // generated code from being reused too early. We pass
    3.24 +  // zombie_ok == true here so that our nmethod that was just
    3.25 +  // made into a zombie can be locked.
    3.26 +  nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
    3.27    return event;
    3.28  }
    3.29  JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
    3.30 @@ -946,14 +955,19 @@
    3.31      case TYPE_COMPILED_METHOD_LOAD: {
    3.32        nmethod* nm = _event_data.compiled_method_load;
    3.33        JvmtiExport::post_compiled_method_load(nm);
    3.34 +      // done with the deferred event so unlock the nmethod
    3.35        nmethodLocker::unlock_nmethod(nm);
    3.36        break;
    3.37      }
    3.38 -    case TYPE_COMPILED_METHOD_UNLOAD:
    3.39 +    case TYPE_COMPILED_METHOD_UNLOAD: {
    3.40 +      nmethod* nm = _event_data.compiled_method_unload.nm;
    3.41        JvmtiExport::post_compiled_method_unload(
    3.42          _event_data.compiled_method_unload.method_id,
    3.43          _event_data.compiled_method_unload.code_begin);
    3.44 +      // done with the deferred event so unlock the nmethod
    3.45 +      nmethodLocker::unlock_nmethod(nm);
    3.46        break;
    3.47 +    }
    3.48      case TYPE_DYNAMIC_CODE_GENERATED:
    3.49        JvmtiExport::post_dynamic_code_generated_internal(
    3.50          _event_data.dynamic_code_generated.name,
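
The jvmtiImpl.cpp hunks above add the new nm field to the unload event and lock the
nmethod inside the factory method, so the generated code cannot be reused before the
ServiceThread has posted the event. The toy model below is not HotSpot code (the real
event stores a jmethodID inside a union in JvmtiDeferredEvent and goes through
JvmtiDeferredEventQueue); it only shows that lifecycle: the factory locks with
zombie_ok == true, and post(), standing in for the ServiceThread draining the queue,
unlocks once the event has been delivered.

    // Simplified model of the deferred CompiledMethodUnload hand-off (illustrative only).
    #include <cstdio>

    struct Nmethod {            // stand-in for the real nmethod
      int  lock_count = 0;
      bool zombie     = true;   // already zombified when the event is created
    };

    static void lock_nmethod(Nmethod* nm, bool zombie_ok = false) {
      if (nm == nullptr) return;
      nm->lock_count++;         // keeps the generated code from being reused
      if (!zombie_ok && nm->zombie) std::printf("error: locked a zombie\n");
    }

    static void unlock_nmethod(Nmethod* nm) {
      if (nm != nullptr) nm->lock_count--;
    }

    struct DeferredUnloadEvent {
      Nmethod*    nm;           // new field added by this patch
      const void* method_id;    // a jmethodID in the real code
      const void* code_begin;

      // factory: runs on the thread that zombifies the nmethod
      static DeferredUnloadEvent make(Nmethod* n, const void* id, const void* code) {
        lock_nmethod(n, /*zombie_ok=*/true);   // keep it alive for the ServiceThread
        return DeferredUnloadEvent{n, id, code};
      }

      // post: runs later, on the ServiceThread
      void post() {
        std::printf("CompiledMethodUnload for code at %p\n", code_begin);
        unlock_nmethod(nm);     // done with the deferred event
      }
    };

    int main() {
      Nmethod nm;
      DeferredUnloadEvent e = DeferredUnloadEvent::make(&nm, nullptr, &nm);
      std::printf("lock_count while queued: %d\n", nm.lock_count);  // 1
      e.post();
      std::printf("lock_count after post:   %d\n", nm.lock_count);  // 0
      return 0;
    }
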
     4.1 --- a/src/share/vm/prims/jvmtiImpl.hpp	Tue Mar 15 06:35:10 2011 -0700
     4.2 +++ b/src/share/vm/prims/jvmtiImpl.hpp	Tue Mar 15 06:37:31 2011 -0700
     4.3 @@ -458,6 +458,7 @@
     4.4    union {
     4.5      nmethod* compiled_method_load;
     4.6      struct {
     4.7 +      nmethod* nm;
     4.8        jmethodID method_id;
     4.9        const void* code_begin;
    4.10      } compiled_method_unload;
    4.11 @@ -477,7 +478,7 @@
    4.12    // Factory methods
    4.13    static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
    4.14      KERNEL_RETURN_(JvmtiDeferredEvent());
    4.15 -  static JvmtiDeferredEvent compiled_method_unload_event(
    4.16 +  static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
    4.17        jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
    4.18    static JvmtiDeferredEvent dynamic_code_generated_event(
    4.19        const char* name, const void* begin, const void* end)
