src/share/vm/code/nmethod.cpp

changeset 2642:fc5ebbb2d1a8
parent 2635:1c0cf339481b
parent 2625:65f880e2869b
child 2657:d673ef06fe96
     1.1 --- a/src/share/vm/code/nmethod.cpp	Thu Mar 17 18:29:18 2011 -0700
     1.2 +++ b/src/share/vm/code/nmethod.cpp	Fri Mar 18 01:44:15 2011 -0700
     1.3 @@ -170,7 +170,7 @@
     1.4    int pc_desc_resets;   // number of resets (= number of caches)
     1.5    int pc_desc_queries;  // queries to nmethod::find_pc_desc
     1.6    int pc_desc_approx;   // number of those which have approximate true
     1.7 -  int pc_desc_repeats;  // number of _last_pc_desc hits
     1.8 +  int pc_desc_repeats;  // number of _pc_descs[0] hits
     1.9    int pc_desc_hits;     // number of LRU cache hits
    1.10    int pc_desc_tests;    // total number of PcDesc examinations
    1.11    int pc_desc_searches; // total number of quasi-binary search steps
    1.12 @@ -278,40 +278,44 @@
    1.13  
    1.14  void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
    1.15    if (initial_pc_desc == NULL) {
    1.16 -    _last_pc_desc = NULL;  // native method
    1.17 +    _pc_descs[0] = NULL; // native method; no PcDescs at all
    1.18      return;
    1.19    }
    1.20    NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
    1.21    // reset the cache by filling it with benign (non-null) values
    1.22    assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
    1.23 -  _last_pc_desc = initial_pc_desc + 1;  // first valid one is after sentinel
    1.24    for (int i = 0; i < cache_size; i++)
    1.25      _pc_descs[i] = initial_pc_desc;
    1.26  }
    1.27  
    1.28  PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
    1.29    NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
    1.30 -  NOT_PRODUCT(if (approximate)  ++nmethod_stats.pc_desc_approx);
    1.31 +  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
    1.32 +
    1.33 +  // Note: one might think that caching the most recently
    1.34 +  // read value separately would be a win, but one would be
    1.35 +  // wrong.  When many threads are updating it, the cache
    1.36 +  // line it's in would bounce between caches, negating
    1.37 +  // any benefit.
    1.38  
    1.39    // In order to prevent race conditions do not load cache elements
    1.40    // repeatedly, but use a local copy:
    1.41    PcDesc* res;
    1.42  
    1.43 -  // Step one:  Check the most recently returned value.
    1.44 -  res = _last_pc_desc;
    1.45 -  if (res == NULL)  return NULL;  // native method; no PcDescs at all
    1.46 +  // Step one:  Check the most recently added value.
    1.47 +  res = _pc_descs[0];
    1.48 +  if (res == NULL) return NULL;  // native method; no PcDescs at all
    1.49    if (match_desc(res, pc_offset, approximate)) {
    1.50      NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    1.51      return res;
    1.52    }
    1.53  
    1.54 -  // Step two:  Check the LRU cache.
    1.55 -  for (int i = 0; i < cache_size; i++) {
    1.56 +  // Step two:  Check the rest of the LRU cache.
    1.57 +  for (int i = 1; i < cache_size; ++i) {
    1.58      res = _pc_descs[i];
    1.59 -    if (res->pc_offset() < 0)  break;  // optimization: skip empty cache
    1.60 +    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    1.61      if (match_desc(res, pc_offset, approximate)) {
    1.62        NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
    1.63 -      _last_pc_desc = res;  // record this cache hit in case of repeat
    1.64        return res;
    1.65      }
    1.66    }
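The hunk above folds the old _last_pc_desc "front" cache into slot 0 of the small LRU array, so the repeat check and the LRU scan now walk the same storage (and, per the new comment, stop bouncing a separately updated cache line between CPUs). A minimal standalone sketch of the resulting two-step lookup, using simplified stand-in types rather than HotSpot's PcDesc/PcDescCache and an exact-offset match in place of match_desc():

// Standalone sketch; SketchPcDesc/SketchPcDescCache are simplified
// stand-ins, not HotSpot's actual classes.
#include <cstddef>

struct SketchPcDesc { int pc_offset; };           // stand-in for PcDesc

struct SketchPcDescCache {
  static const int cache_size = 4;                // illustrative size only
  SketchPcDesc* _pc_descs[cache_size];

  SketchPcDesc* find(int pc_offset) {
    // Step one: the most recently added entry lives in slot 0, which now
    // doubles as the old _last_pc_desc "repeat" check.
    SketchPcDesc* res = _pc_descs[0];
    if (res == NULL) return NULL;                 // native method; no PcDescs at all
    if (res->pc_offset == pc_offset) return res;  // repeat hit

    // Step two: scan the rest of the LRU cache.
    for (int i = 1; i < cache_size; ++i) {
      res = _pc_descs[i];
      if (res->pc_offset < 0) break;              // benign sentinel fill; rest is empty
      if (res->pc_offset == pc_offset) return res;
    }
    return NULL;                                  // caller falls back to the full search
  }
};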
    1.67 @@ -322,24 +326,23 @@
    1.68  
    1.69  void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
    1.70    NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
    1.71 -  // Update the LRU cache by shifting pc_desc forward:
    1.72 +  // Update the LRU cache by shifting pc_desc forward.
    1.73    for (int i = 0; i < cache_size; i++)  {
    1.74      PcDesc* next = _pc_descs[i];
    1.75      _pc_descs[i] = pc_desc;
    1.76      pc_desc = next;
    1.77    }
    1.78 -  // Note:  Do not update _last_pc_desc.  It fronts for the LRU cache.
    1.79  }
    1.80  
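For reference, the shift-insert in add_pc_desc above puts the new entry in slot 0 and ripples everything else one slot toward the tail, discarding the oldest entry. A tiny standalone illustration with plain ints instead of PcDesc pointers:

// Standalone illustration of the shift-insert; ints stand in for PcDesc*.
static void add_to_cache(int* slots, int cache_size, int new_entry) {
  for (int i = 0; i < cache_size; i++) {
    int next = slots[i];      // remember the current occupant
    slots[i] = new_entry;     // write the incoming entry into this slot
    new_entry = next;         // push the old occupant toward the tail
  }                           // whatever was in the last slot is dropped
}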
    1.81  // adjust pcs_size so that it is a multiple of both oopSize and
    1.82  // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
    1.83  // of oopSize, then 2*sizeof(PcDesc) is)
    1.84 -static int  adjust_pcs_size(int pcs_size) {
    1.85 +static int adjust_pcs_size(int pcs_size) {
    1.86    int nsize = round_to(pcs_size,   oopSize);
    1.87    if ((nsize % sizeof(PcDesc)) != 0) {
    1.88      nsize = pcs_size + sizeof(PcDesc);
    1.89    }
    1.90 -  assert((nsize %  oopSize) == 0, "correct alignment");
    1.91 +  assert((nsize % oopSize) == 0, "correct alignment");
    1.92    return nsize;
    1.93  }
    1.94  
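A worked example of adjust_pcs_size with illustrative sizes (oopSize == 8, sizeof(PcDesc) == 12; the real values are platform-dependent and not taken from this changeset): three PcDescs give pcs_size == 36, round_to(36, 8) == 40 is not a multiple of 12, so the result is 36 + 12 == 48, a multiple of both 8 and 12.

// Standalone sketch of adjust_pcs_size with explicit, illustrative sizes.
#include <cassert>

static int round_to_sketch(int x, int align) {        // simplified round_to
  return (x + align - 1) / align * align;
}

static int adjust_pcs_size_sketch(int pcs_size, int oop_size, int pc_desc_size) {
  int nsize = round_to_sketch(pcs_size, oop_size);
  if ((nsize % pc_desc_size) != 0) {
    nsize = pcs_size + pc_desc_size;                   // room for one more PcDesc
  }
  assert((nsize % oop_size) == 0 && "correct alignment");
  return nsize;
}

int main() {
  assert(adjust_pcs_size_sketch(36, 8, 12) == 48);     // 36 -> 40 -> 48
  assert(adjust_pcs_size_sketch(48, 8, 12) == 48);     // already a multiple of both
  return 0;
}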
    1.95 @@ -1180,14 +1183,17 @@
    1.96    set_stack_traversal_mark(NMethodSweeper::traversal_count());
    1.97  }
    1.98  
    1.99 -// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
   1.100 +// Tell if a non-entrant method can be converted to a zombie (i.e.,
   1.101 +// there are no activations on the stack, not in use by the VM,
   1.102 +// and not in use by the ServiceThread)
   1.103  bool nmethod::can_not_entrant_be_converted() {
   1.104    assert(is_not_entrant(), "must be a non-entrant method");
   1.105  
   1.106    // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   1.107    // count can be greater than the stack traversal count before it hits the
   1.108    // nmethod for the second time.
   1.109 -  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
   1.110 +  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
   1.111 +         !is_locked_by_vm();
   1.112  }
   1.113  
   1.114  void nmethod::inc_decompile_count() {
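The new !is_locked_by_vm() condition ties the sweeper to the lock count that nmethodLocker maintains (see the lock_nmethod hunk below). A hypothetical, simplified sketch of that relationship; the real accessor lives in nmethod.hpp and is not shown in this changeset:

// Hypothetical sketch only; not the actual nmethod class.
struct SketchNMethod {
  volatile int _lock_count;                                   // bumped by nmethodLocker
  bool is_locked_by_vm() const { return _lock_count > 0; }    // assumed shape of the accessor
};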
   1.115 @@ -1294,6 +1300,7 @@
   1.116  // Common functionality for both make_not_entrant and make_zombie
   1.117  bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   1.118    assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
   1.119 +  assert(!is_zombie(), "should not already be a zombie");
   1.120  
   1.121    // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   1.122    nmethodLocker nml(this);
   1.123 @@ -1301,11 +1308,6 @@
   1.124    No_Safepoint_Verifier nsv;
   1.125  
   1.126    {
   1.127 -    // If the method is already zombie there is nothing to do
   1.128 -    if (is_zombie()) {
   1.129 -      return false;
   1.130 -    }
   1.131 -
   1.132      // invalidate osr nmethod before acquiring the patching lock since
   1.133      // they both acquire leaf locks and we don't want a deadlock.
   1.134      // This logic is equivalent to the logic below for patching the
   1.135 @@ -1375,13 +1377,12 @@
   1.136        flush_dependencies(NULL);
   1.137      }
   1.138  
   1.139 -    {
   1.140 -      // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
   1.141 -      // and it hasn't already been reported for this nmethod then report it now.
   1.142 -      // (the event may have been reported earilier if the GC marked it for unloading).
   1.143 -      Pause_No_Safepoint_Verifier pnsv(&nsv);
   1.144 -      post_compiled_method_unload();
   1.145 -    }
   1.146 +    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
   1.147 +    // event and it hasn't already been reported for this nmethod then
    1.148 +    // report it now. The event may have been reported earlier if the GC
    1.149 +    // marked it for unloading. JvmtiDeferredEventQueue support means
   1.150 +    // we no longer go to a safepoint here.
   1.151 +    post_compiled_method_unload();
   1.152  
   1.153  #ifdef ASSERT
   1.154      // It's no longer safe to access the oops section since zombie
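The rewritten comment above leans on the JvmtiDeferredEventQueue: rather than pausing the No_Safepoint_Verifier and posting the JVMTI event inline, the event is queued and posted later from the ServiceThread. A generic, standalone sketch of that defer-and-post pattern (hypothetical names, not HotSpot's queue implementation):

// Generic defer-and-post sketch; SketchEvent and SketchEventQueue are
// hypothetical and only illustrate the pattern.
#include <deque>
#include <mutex>

struct SketchEvent { const void* method_id; const void* code_begin; };

class SketchEventQueue {
  std::deque<SketchEvent> _events;
  std::mutex _lock;
 public:
  // Producer (e.g. the sweeper thread): only enqueue, never post inline,
  // so the caller needs no safepoint.
  void enqueue(const SketchEvent& e) {
    std::lock_guard<std::mutex> g(_lock);
    _events.push_back(e);
  }
  // Consumer (e.g. a service thread): drain and post from a context
  // where posting the event is safe.
  bool dequeue(SketchEvent* out) {
    std::lock_guard<std::mutex> g(_lock);
    if (_events.empty()) return false;
    *out = _events.front();
    _events.pop_front();
    return true;
  }
};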
   1.155 @@ -1566,7 +1567,7 @@
   1.156    if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
   1.157      assert(!unload_reported(), "already unloaded");
   1.158      JvmtiDeferredEvent event =
   1.159 -      JvmtiDeferredEvent::compiled_method_unload_event(
   1.160 +      JvmtiDeferredEvent::compiled_method_unload_event(this,
   1.161            _jmethod_id, insts_begin());
   1.162      if (SafepointSynchronize::is_at_safepoint()) {
   1.163        // Don't want to take the queueing lock. Add it as pending and
   1.164 @@ -2171,10 +2172,12 @@
   1.165    lock_nmethod(_nm);
   1.166  }
   1.167  
   1.168 -void nmethodLocker::lock_nmethod(nmethod* nm) {
   1.169 +// Only JvmtiDeferredEvent::compiled_method_unload_event()
   1.170 +// should pass zombie_ok == true.
   1.171 +void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
   1.172    if (nm == NULL)  return;
   1.173    Atomic::inc(&nm->_lock_count);
   1.174 -  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
   1.175 +  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
   1.176  }
   1.177  
   1.178  void nmethodLocker::unlock_nmethod(nmethod* nm) {
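The zombie_ok parameter exists so that the unload event, which now carries the nmethod itself (see the compiled_method_unload_event hunk above), can keep even a zombie nmethod pinned until the ServiceThread has posted the event. A standalone sketch of the guarantee's semantics, with a bool standing in for the zombie state and an int for the lock count:

// Standalone sketch of the zombie_ok guarantee; SketchLocker is a
// stand-in for nmethodLocker, not HotSpot code.
#include <cassert>

struct SketchLocker {
  static void lock(int* lock_count, bool is_zombie, bool zombie_ok = false) {
    ++*lock_count;                                            // Atomic::inc in the real code
    assert((zombie_ok || !is_zombie) && "cannot lock a zombie method");
  }
};

int main() {
  int lock_count = 0;
  SketchLocker::lock(&lock_count, /*is_zombie=*/false);       // ordinary caller
  SketchLocker::lock(&lock_count, /*is_zombie=*/true, true);  // unload-event path only
  return 0;
}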
