src/share/vm/prims/jvmtiImpl.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/prims/jvmtiImpl.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,1095 @@
     1.4 +/*
     1.5 + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "classfile/systemDictionary.hpp"
    1.30 +#include "interpreter/interpreter.hpp"
    1.31 +#include "jvmtifiles/jvmtiEnv.hpp"
    1.32 +#include "memory/resourceArea.hpp"
    1.33 +#include "oops/instanceKlass.hpp"
    1.34 +#include "prims/jvmtiAgentThread.hpp"
    1.35 +#include "prims/jvmtiEventController.inline.hpp"
    1.36 +#include "prims/jvmtiImpl.hpp"
    1.37 +#include "prims/jvmtiRedefineClasses.hpp"
    1.38 +#include "runtime/atomic.hpp"
    1.39 +#include "runtime/deoptimization.hpp"
    1.40 +#include "runtime/handles.hpp"
    1.41 +#include "runtime/handles.inline.hpp"
    1.42 +#include "runtime/interfaceSupport.hpp"
    1.43 +#include "runtime/javaCalls.hpp"
    1.44 +#include "runtime/os.hpp"
    1.45 +#include "runtime/serviceThread.hpp"
    1.46 +#include "runtime/signature.hpp"
    1.47 +#include "runtime/thread.inline.hpp"
    1.48 +#include "runtime/vframe.hpp"
    1.49 +#include "runtime/vframe_hp.hpp"
    1.50 +#include "runtime/vm_operations.hpp"
    1.51 +#include "utilities/exceptions.hpp"
    1.52 +
    1.53 +//
    1.54 +// class JvmtiAgentThread
    1.55 +//
    1.56 +// JavaThread used to wrap a thread started by an agent
    1.57 +// using the JVMTI method RunAgentThread.
    1.58 +//
    1.59 +
    1.60 +JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    1.61 +    : JavaThread(start_function_wrapper) {
    1.62 +    _env = env;
    1.63 +    _start_fn = start_fn;
    1.64 +    _start_arg = start_arg;
    1.65 +}
    1.66 +
    1.67 +void
    1.68 +JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
    1.69 +    // It is expected that any Agent threads will be created as
    1.70 +    // Java Threads.  If this is the case, notification of the creation
    1.71 +    // of the thread is given in JavaThread::thread_main().
    1.72 +    assert(thread->is_Java_thread(), "debugger thread should be a Java Thread");
    1.73 +    assert(thread == JavaThread::current(), "sanity check");
    1.74 +
    1.75 +    JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
    1.76 +    dthread->call_start_function();
    1.77 +}
    1.78 +
    1.79 +void
    1.80 +JvmtiAgentThread::call_start_function() {
    1.81 +    ThreadToNativeFromVM transition(this);
    1.82 +    _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
    1.83 +}
    1.84 +
    1.85 +
    1.86 +//
    1.87 +// class GrowableCache - private methods
    1.88 +//
    1.89 +
    1.90 +void GrowableCache::recache() {
    1.91 +  int len = _elements->length();
    1.92 +
    1.93 +  FREE_C_HEAP_ARRAY(address, _cache, mtInternal);
    1.94 +  _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal);
    1.95 +
    1.96 +  for (int i=0; i<len; i++) {
    1.97 +    _cache[i] = _elements->at(i)->getCacheValue();
    1.98 +    //
    1.99 +    // The cache entry has gone bad. Without a valid frame pointer
   1.100 +    // value, the entry is useless so we simply delete it in product
   1.101 +    // mode. The call to remove() will rebuild the cache again
   1.102 +    // without the bad entry.
   1.103 +    //
   1.104 +    if (_cache[i] == NULL) {
   1.105 +      assert(false, "cannot recache NULL elements");
   1.106 +      remove(i);
   1.107 +      return;
   1.108 +    }
   1.109 +  }
   1.110 +  _cache[len] = NULL;
   1.111 +
   1.112 +  _listener_fun(_this_obj,_cache);
   1.113 +}
   1.114 +
   1.115 +bool GrowableCache::equals(void* v, GrowableElement *e2) {
   1.116 +  GrowableElement *e1 = (GrowableElement *) v;
   1.117 +  assert(e1 != NULL, "e1 != NULL");
   1.118 +  assert(e2 != NULL, "e2 != NULL");
   1.119 +
   1.120 +  return e1->equals(e2);
   1.121 +}
   1.122 +
   1.123 +//
   1.124 +// class GrowableCache - public methods
   1.125 +//
   1.126 +
   1.127 +GrowableCache::GrowableCache() {
   1.128 +  _this_obj       = NULL;
   1.129 +  _listener_fun   = NULL;
   1.130 +  _elements       = NULL;
   1.131 +  _cache          = NULL;
   1.132 +}
   1.133 +
   1.134 +GrowableCache::~GrowableCache() {
   1.135 +  clear();
   1.136 +  delete _elements;
   1.137 +  FREE_C_HEAP_ARRAY(address, _cache, mtInternal);
   1.138 +}
   1.139 +
   1.140 +void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
   1.141 +  _this_obj       = this_obj;
   1.142 +  _listener_fun   = listener_fun;
   1.143 +  _elements       = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<GrowableElement*>(5,true);
   1.144 +  recache();
   1.145 +}
   1.146 +
   1.147 +// number of elements in the collection
   1.148 +int GrowableCache::length() {
   1.149 +  return _elements->length();
   1.150 +}
   1.151 +
   1.152 +// get the value of the index element in the collection
   1.153 +GrowableElement* GrowableCache::at(int index) {
   1.154 +  GrowableElement *e = (GrowableElement *) _elements->at(index);
   1.155 +  assert(e != NULL, "e != NULL");
   1.156 +  return e;
   1.157 +}
   1.158 +
   1.159 +int GrowableCache::find(GrowableElement* e) {
   1.160 +  return _elements->find(e, GrowableCache::equals);
   1.161 +}
   1.162 +
   1.163 +// append a copy of the element to the end of the collection
   1.164 +void GrowableCache::append(GrowableElement* e) {
   1.165 +  GrowableElement *new_e = e->clone();
   1.166 +  _elements->append(new_e);
   1.167 +  recache();
   1.168 +}
   1.169 +
   1.170 +// insert a copy of the element using lessthan()
   1.171 +void GrowableCache::insert(GrowableElement* e) {
   1.172 +  GrowableElement *new_e = e->clone();
   1.173 +  _elements->append(new_e);
   1.174 +
   1.175 +  int n = length()-2;
   1.176 +  for (int i=n; i>=0; i--) {
   1.177 +    GrowableElement *e1 = _elements->at(i);
   1.178 +    GrowableElement *e2 = _elements->at(i+1);
   1.179 +    if (e2->lessThan(e1)) {
   1.180 +      _elements->at_put(i+1, e1);
   1.181 +      _elements->at_put(i,   e2);
   1.182 +    }
   1.183 +  }
   1.184 +
   1.185 +  recache();
   1.186 +}
   1.187 +
   1.188 +// remove the element at index
   1.189 +void GrowableCache::remove (int index) {
   1.190 +  GrowableElement *e = _elements->at(index);
   1.191 +  assert(e != NULL, "e != NULL");
   1.192 +  _elements->remove(e);
   1.193 +  delete e;
   1.194 +  recache();
   1.195 +}
   1.196 +
   1.197 +// clear out all elements, release all heap space and
   1.198 +// let our listener know that things have changed.
   1.199 +void GrowableCache::clear() {
   1.200 +  int len = _elements->length();
   1.201 +  for (int i=0; i<len; i++) {
   1.202 +    delete _elements->at(i);
   1.203 +  }
   1.204 +  _elements->clear();
   1.205 +  recache();
   1.206 +}
   1.207 +
   1.208 +void GrowableCache::oops_do(OopClosure* f) {
   1.209 +  int len = _elements->length();
   1.210 +  for (int i=0; i<len; i++) {
   1.211 +    GrowableElement *e = _elements->at(i);
   1.212 +    e->oops_do(f);
   1.213 +  }
   1.214 +}
   1.215 +
   1.216 +void GrowableCache::metadata_do(void f(Metadata*)) {
   1.217 +  int len = _elements->length();
   1.218 +  for (int i=0; i<len; i++) {
   1.219 +    GrowableElement *e = _elements->at(i);
   1.220 +    e->metadata_do(f);
   1.221 +  }
   1.222 +}
   1.223 +
   1.224 +void GrowableCache::gc_epilogue() {
   1.225 +  int len = _elements->length();
   1.226 +  for (int i=0; i<len; i++) {
   1.227 +    _cache[i] = _elements->at(i)->getCacheValue();
   1.228 +  }
   1.229 +}
   1.230 +
   1.231 +//
   1.232 +// class JvmtiBreakpoint
   1.233 +//
   1.234 +
   1.235 +JvmtiBreakpoint::JvmtiBreakpoint() {
   1.236 +  _method = NULL;
   1.237 +  _bci    = 0;
   1.238 +  _class_holder = NULL;
   1.239 +}
   1.240 +
   1.241 +JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location) {
   1.242 +  _method        = m_method;
   1.243 +  _class_holder  = _method->method_holder()->klass_holder();
   1.244 +#ifdef CHECK_UNHANDLED_OOPS
   1.245 +  // _class_holder can't be wrapped in a Handle, because JvmtiBreakpoints are
   1.246 +  // sometimes allocated on the heap.
   1.247 +  //
   1.248 +  // The code handling JvmtiBreakpoints allocated on the stack can't be
   1.249 +  // interrupted by a GC until _class_holder is reachable by the GC via the
   1.250 +  // oops_do method.
   1.251 +  Thread::current()->allow_unhandled_oop(&_class_holder);
   1.252 +#endif // CHECK_UNHANDLED_OOPS
   1.253 +  assert(_method != NULL, "_method != NULL");
   1.254 +  _bci           = (int) location;
   1.255 +  assert(_bci >= 0, "_bci >= 0");
   1.256 +}
   1.257 +
   1.258 +void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
   1.259 +  _method   = bp._method;
   1.260 +  _bci      = bp._bci;
   1.261 +  _class_holder = bp._class_holder;
   1.262 +}
   1.263 +
   1.264 +bool JvmtiBreakpoint::lessThan(JvmtiBreakpoint& bp) {
   1.265 +  Unimplemented();
   1.266 +  return false;
   1.267 +}
   1.268 +
   1.269 +bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
   1.270 +  return _method   == bp._method
   1.271 +    &&   _bci      == bp._bci;
   1.272 +}
   1.273 +
   1.274 +bool JvmtiBreakpoint::is_valid() {
   1.275 +  // class loader can be NULL
   1.276 +  return _method != NULL &&
   1.277 +         _bci >= 0;
   1.278 +}
   1.279 +
   1.280 +address JvmtiBreakpoint::getBcp() {
   1.281 +  return _method->bcp_from(_bci);
   1.282 +}
   1.283 +
   1.284 +void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
   1.285 +  ((Method*)_method->*meth_act)(_bci);
   1.286 +
   1.287 +  // add/remove breakpoint to/from versions of the method that
   1.288 +  // are EMCP. Directly or transitively obsolete methods are
   1.289 +  // not saved in the PreviousVersionNodes.
   1.290 +  Thread *thread = Thread::current();
   1.291 +  instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
   1.292 +  Symbol* m_name = _method->name();
   1.293 +  Symbol* m_signature = _method->signature();
   1.294 +
   1.295 +  // search previous versions if they exist
   1.296 +  PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
   1.297 +  for (PreviousVersionNode * pv_node = pvw.next_previous_version();
   1.298 +       pv_node != NULL; pv_node = pvw.next_previous_version()) {
   1.299 +    GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();
   1.300 +
   1.301 +    if (methods == NULL) {
   1.302 +      // We have run into a PreviousVersion generation where
   1.303 +      // all methods were made obsolete during that generation's
   1.304 +      // RedefineClasses() operation. At the time of that
   1.305 +      // operation, all EMCP methods were flushed so we don't
   1.306 +      // have to go back any further.
   1.307 +      //
   1.308 +      // A NULL methods array is different than an empty methods
   1.309 +      // array. We cannot infer any optimizations about older
   1.310 +      // generations from an empty methods array for the current
   1.311 +      // generation.
   1.312 +      break;
   1.313 +    }
   1.314 +
   1.315 +    for (int i = methods->length() - 1; i >= 0; i--) {
   1.316 +      Method* method = methods->at(i);
   1.317 +      // obsolete methods that are running are not deleted from
   1.318 +      // previous version array, but they are skipped here.
   1.319 +      if (!method->is_obsolete() &&
   1.320 +          method->name() == m_name &&
   1.321 +          method->signature() == m_signature) {
   1.322 +        RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
   1.323 +          meth_act == &Method::set_breakpoint ? "sett" : "clear",
   1.324 +          method->name()->as_C_string(),
   1.325 +          method->signature()->as_C_string()));
   1.326 +
   1.327 +        (method->*meth_act)(_bci);
   1.328 +        break;
   1.329 +      }
   1.330 +    }
   1.331 +  }
   1.332 +}
   1.333 +
   1.334 +void JvmtiBreakpoint::set() {
   1.335 +  each_method_version_do(&Method::set_breakpoint);
   1.336 +}
   1.337 +
   1.338 +void JvmtiBreakpoint::clear() {
   1.339 +  each_method_version_do(&Method::clear_breakpoint);
   1.340 +}
   1.341 +
   1.342 +void JvmtiBreakpoint::print() {
   1.343 +#ifndef PRODUCT
   1.344 +  const char *class_name  = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
   1.345 +  const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();
   1.346 +
   1.347 +  tty->print("Breakpoint(%s,%s,%d,%p)",class_name, method_name, _bci, getBcp());
   1.348 +#endif
   1.349 +}
   1.350 +
   1.351 +
   1.352 +//
   1.353 +// class VM_ChangeBreakpoints
   1.354 +//
   1.355 +// Modify the Breakpoints data structure at a safepoint
   1.356 +//
   1.357 +
   1.358 +void VM_ChangeBreakpoints::doit() {
   1.359 +  switch (_operation) {
   1.360 +  case SET_BREAKPOINT:
   1.361 +    _breakpoints->set_at_safepoint(*_bp);
   1.362 +    break;
   1.363 +  case CLEAR_BREAKPOINT:
   1.364 +    _breakpoints->clear_at_safepoint(*_bp);
   1.365 +    break;
   1.366 +  default:
   1.367 +    assert(false, "Unknown operation");
   1.368 +  }
   1.369 +}
   1.370 +
   1.371 +void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
   1.372 +  // The JvmtiBreakpoints in _breakpoints will be visited via
   1.373 +  // JvmtiExport::oops_do.
   1.374 +  if (_bp != NULL) {
   1.375 +    _bp->oops_do(f);
   1.376 +  }
   1.377 +}
   1.378 +
   1.379 +void VM_ChangeBreakpoints::metadata_do(void f(Metadata*)) {
   1.380 +  // Walk metadata in breakpoints to keep from being deallocated with RedefineClasses
   1.381 +  if (_bp != NULL) {
   1.382 +    _bp->metadata_do(f);
   1.383 +  }
   1.384 +}
   1.385 +
   1.386 +//
   1.387 +// class JvmtiBreakpoints
   1.388 +//
   1.389 +// a JVMTI internal collection of JvmtiBreakpoint
   1.390 +//
   1.391 +
   1.392 +JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
   1.393 +  _bps.initialize(this,listener_fun);
   1.394 +}
   1.395 +
   1.396 +JvmtiBreakpoints:: ~JvmtiBreakpoints() {}
   1.397 +
   1.398 +void  JvmtiBreakpoints::oops_do(OopClosure* f) {
   1.399 +  _bps.oops_do(f);
   1.400 +}
   1.401 +
   1.402 +void  JvmtiBreakpoints::metadata_do(void f(Metadata*)) {
   1.403 +  _bps.metadata_do(f);
   1.404 +}
   1.405 +
   1.406 +void JvmtiBreakpoints::gc_epilogue() {
   1.407 +  _bps.gc_epilogue();
   1.408 +}
   1.409 +
   1.410 +void  JvmtiBreakpoints::print() {
   1.411 +#ifndef PRODUCT
   1.412 +  ResourceMark rm;
   1.413 +
   1.414 +  int n = _bps.length();
   1.415 +  for (int i=0; i<n; i++) {
   1.416 +    JvmtiBreakpoint& bp = _bps.at(i);
   1.417 +    tty->print("%d: ", i);
   1.418 +    bp.print();
   1.419 +    tty->cr();
   1.420 +  }
   1.421 +#endif
   1.422 +}
   1.423 +
   1.424 +
   1.425 +void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
   1.426 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   1.427 +
   1.428 +  int i = _bps.find(bp);
   1.429 +  if (i == -1) {
   1.430 +    _bps.append(bp);
   1.431 +    bp.set();
   1.432 +  }
   1.433 +}
   1.434 +
   1.435 +void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
   1.436 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   1.437 +
   1.438 +  int i = _bps.find(bp);
   1.439 +  if (i != -1) {
   1.440 +    _bps.remove(i);
   1.441 +    bp.clear();
   1.442 +  }
   1.443 +}
   1.444 +
   1.445 +int JvmtiBreakpoints::length() { return _bps.length(); }
   1.446 +
   1.447 +int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
   1.448 +  if ( _bps.find(bp) != -1) {
   1.449 +     return JVMTI_ERROR_DUPLICATE;
   1.450 +  }
   1.451 +  VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
   1.452 +  VMThread::execute(&set_breakpoint);
   1.453 +  return JVMTI_ERROR_NONE;
   1.454 +}
   1.455 +
   1.456 +int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
   1.457 +  if ( _bps.find(bp) == -1) {
   1.458 +     return JVMTI_ERROR_NOT_FOUND;
   1.459 +  }
   1.460 +
   1.461 +  VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
   1.462 +  VMThread::execute(&clear_breakpoint);
   1.463 +  return JVMTI_ERROR_NONE;
   1.464 +}
   1.465 +
   1.466 +void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
   1.467 +  bool changed = true;
    1.468 +  // We are going to run through the list of breakpoints
   1.469 +  // and delete some.  This deletion probably alters
   1.470 +  // the list in some implementation defined way such
   1.471 +  // that when we delete entry i, the next entry might
   1.472 +  // no longer be at i+1.  To be safe, each time we delete
   1.473 +  // an entry, we'll just start again from the beginning.
    1.474 +  // We'll stop when we make a pass through the whole list without
   1.475 +  // deleting anything.
   1.476 +  while (changed) {
   1.477 +    int len = _bps.length();
   1.478 +    changed = false;
   1.479 +    for (int i = 0; i < len; i++) {
   1.480 +      JvmtiBreakpoint& bp = _bps.at(i);
   1.481 +      if (bp.method()->method_holder() == klass) {
   1.482 +        bp.clear();
   1.483 +        _bps.remove(i);
   1.484 +        // This changed 'i' so we have to start over.
   1.485 +        changed = true;
   1.486 +        break;
   1.487 +      }
   1.488 +    }
   1.489 +  }
   1.490 +}
   1.491 +
   1.492 +//
   1.493 +// class JvmtiCurrentBreakpoints
   1.494 +//
   1.495 +
   1.496 +JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints  = NULL;
   1.497 +address *         JvmtiCurrentBreakpoints::_breakpoint_list    = NULL;
   1.498 +
   1.499 +
   1.500 +JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
   1.501 +  if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
   1.502 +  _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
   1.503 +  assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
   1.504 +  return (*_jvmti_breakpoints);
   1.505 +}
   1.506 +
   1.507 +void  JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
   1.508 +  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
   1.509 +  assert(this_jvmti != NULL, "this_jvmti != NULL");
   1.510 +
   1.511 +  debug_only(int n = this_jvmti->length(););
   1.512 +  assert(cache[n] == NULL, "cache must be NULL terminated");
   1.513 +
   1.514 +  set_breakpoint_list(cache);
   1.515 +}
   1.516 +
   1.517 +
   1.518 +void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   1.519 +  if (_jvmti_breakpoints != NULL) {
   1.520 +    _jvmti_breakpoints->oops_do(f);
   1.521 +  }
   1.522 +}
   1.523 +
   1.524 +void JvmtiCurrentBreakpoints::metadata_do(void f(Metadata*)) {
   1.525 +  if (_jvmti_breakpoints != NULL) {
   1.526 +    _jvmti_breakpoints->metadata_do(f);
   1.527 +  }
   1.528 +}
   1.529 +
   1.530 +void JvmtiCurrentBreakpoints::gc_epilogue() {
   1.531 +  if (_jvmti_breakpoints != NULL) {
   1.532 +    _jvmti_breakpoints->gc_epilogue();
   1.533 +  }
   1.534 +}
   1.535 +
   1.536 +///////////////////////////////////////////////////////////////
   1.537 +//
   1.538 +// class VM_GetOrSetLocal
   1.539 +//
   1.540 +
   1.541 +// Constructor for non-object getter
   1.542 +VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type)
   1.543 +  : _thread(thread)
   1.544 +  , _calling_thread(NULL)
   1.545 +  , _depth(depth)
   1.546 +  , _index(index)
   1.547 +  , _type(type)
   1.548 +  , _set(false)
   1.549 +  , _jvf(NULL)
   1.550 +  , _result(JVMTI_ERROR_NONE)
   1.551 +{
   1.552 +}
   1.553 +
   1.554 +// Constructor for object or non-object setter
   1.555 +VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, int index, BasicType type, jvalue value)
   1.556 +  : _thread(thread)
   1.557 +  , _calling_thread(NULL)
   1.558 +  , _depth(depth)
   1.559 +  , _index(index)
   1.560 +  , _type(type)
   1.561 +  , _value(value)
   1.562 +  , _set(true)
   1.563 +  , _jvf(NULL)
   1.564 +  , _result(JVMTI_ERROR_NONE)
   1.565 +{
   1.566 +}
   1.567 +
   1.568 +// Constructor for object getter
   1.569 +VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
   1.570 +  : _thread(thread)
   1.571 +  , _calling_thread(calling_thread)
   1.572 +  , _depth(depth)
   1.573 +  , _index(index)
   1.574 +  , _type(T_OBJECT)
   1.575 +  , _set(false)
   1.576 +  , _jvf(NULL)
   1.577 +  , _result(JVMTI_ERROR_NONE)
   1.578 +{
   1.579 +}
   1.580 +
   1.581 +vframe *VM_GetOrSetLocal::get_vframe() {
   1.582 +  if (!_thread->has_last_Java_frame()) {
   1.583 +    return NULL;
   1.584 +  }
   1.585 +  RegisterMap reg_map(_thread);
   1.586 +  vframe *vf = _thread->last_java_vframe(&reg_map);
   1.587 +  int d = 0;
   1.588 +  while ((vf != NULL) && (d < _depth)) {
   1.589 +    vf = vf->java_sender();
   1.590 +    d++;
   1.591 +  }
   1.592 +  return vf;
   1.593 +}
   1.594 +
   1.595 +javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
   1.596 +  vframe* vf = get_vframe();
   1.597 +  if (vf == NULL) {
   1.598 +    _result = JVMTI_ERROR_NO_MORE_FRAMES;
   1.599 +    return NULL;
   1.600 +  }
   1.601 +  javaVFrame *jvf = (javaVFrame*)vf;
   1.602 +
   1.603 +  if (!vf->is_java_frame()) {
   1.604 +    _result = JVMTI_ERROR_OPAQUE_FRAME;
   1.605 +    return NULL;
   1.606 +  }
   1.607 +  return jvf;
   1.608 +}
   1.609 +
   1.610 +// Check that the klass is assignable to a type with the given signature.
   1.611 +// Another solution could be to use the function Klass::is_subtype_of(type).
   1.612 +// But the type class can be forced to load/initialize eagerly in such a case.
   1.613 +// This may cause unexpected consequences like CFLH or class-init JVMTI events.
   1.614 +// It is better to avoid such a behavior.
   1.615 +bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
   1.616 +  assert(ty_sign != NULL, "type signature must not be NULL");
   1.617 +  assert(thread != NULL, "thread must not be NULL");
   1.618 +  assert(klass != NULL, "klass must not be NULL");
   1.619 +
   1.620 +  int len = (int) strlen(ty_sign);
   1.621 +  if (ty_sign[0] == 'L' && ty_sign[len-1] == ';') { // Need pure class/interface name
   1.622 +    ty_sign++;
   1.623 +    len -= 2;
   1.624 +  }
   1.625 +  TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len, thread);
   1.626 +  if (klass->name() == ty_sym) {
   1.627 +    return true;
   1.628 +  }
   1.629 +  // Compare primary supers
   1.630 +  int super_depth = klass->super_depth();
   1.631 +  int idx;
   1.632 +  for (idx = 0; idx < super_depth; idx++) {
   1.633 +    if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
   1.634 +      return true;
   1.635 +    }
   1.636 +  }
   1.637 +  // Compare secondary supers
   1.638 +  Array<Klass*>* sec_supers = klass->secondary_supers();
   1.639 +  for (idx = 0; idx < sec_supers->length(); idx++) {
   1.640 +    if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
   1.641 +      return true;
   1.642 +    }
   1.643 +  }
   1.644 +  return false;
   1.645 +}
   1.646 +
   1.647 +// Checks error conditions:
   1.648 +//   JVMTI_ERROR_INVALID_SLOT
   1.649 +//   JVMTI_ERROR_TYPE_MISMATCH
   1.650 +// Returns: 'true' - everything is Ok, 'false' - error code
   1.651 +
   1.652 +bool VM_GetOrSetLocal::check_slot_type(javaVFrame* jvf) {
   1.653 +  Method* method_oop = jvf->method();
   1.654 +  if (!method_oop->has_localvariable_table()) {
   1.655 +    // Just to check index boundaries
   1.656 +    jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;
   1.657 +    if (_index < 0 || _index + extra_slot >= method_oop->max_locals()) {
   1.658 +      _result = JVMTI_ERROR_INVALID_SLOT;
   1.659 +      return false;
   1.660 +    }
   1.661 +    return true;
   1.662 +  }
   1.663 +
   1.664 +  jint num_entries = method_oop->localvariable_table_length();
   1.665 +  if (num_entries == 0) {
   1.666 +    _result = JVMTI_ERROR_INVALID_SLOT;
   1.667 +    return false;       // There are no slots
   1.668 +  }
   1.669 +  int signature_idx = -1;
   1.670 +  int vf_bci = jvf->bci();
   1.671 +  LocalVariableTableElement* table = method_oop->localvariable_table_start();
   1.672 +  for (int i = 0; i < num_entries; i++) {
   1.673 +    int start_bci = table[i].start_bci;
   1.674 +    int end_bci = start_bci + table[i].length;
   1.675 +
   1.676 +    // Here we assume that locations of LVT entries
   1.677 +    // with the same slot number cannot be overlapped
   1.678 +    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
   1.679 +      signature_idx = (int) table[i].descriptor_cp_index;
   1.680 +      break;
   1.681 +    }
   1.682 +  }
   1.683 +  if (signature_idx == -1) {
   1.684 +    _result = JVMTI_ERROR_INVALID_SLOT;
   1.685 +    return false;       // Incorrect slot index
   1.686 +  }
   1.687 +  Symbol*   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
   1.688 +  const char* signature = (const char *) sign_sym->as_utf8();
   1.689 +  BasicType slot_type = char2type(signature[0]);
   1.690 +
   1.691 +  switch (slot_type) {
   1.692 +  case T_BYTE:
   1.693 +  case T_SHORT:
   1.694 +  case T_CHAR:
   1.695 +  case T_BOOLEAN:
   1.696 +    slot_type = T_INT;
   1.697 +    break;
   1.698 +  case T_ARRAY:
   1.699 +    slot_type = T_OBJECT;
   1.700 +    break;
   1.701 +  };
   1.702 +  if (_type != slot_type) {
   1.703 +    _result = JVMTI_ERROR_TYPE_MISMATCH;
   1.704 +    return false;
   1.705 +  }
   1.706 +
   1.707 +  jobject jobj = _value.l;
   1.708 +  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
   1.709 +    // Check that the jobject class matches the return type signature.
   1.710 +    JavaThread* cur_thread = JavaThread::current();
   1.711 +    HandleMark hm(cur_thread);
   1.712 +
   1.713 +    Handle obj = Handle(cur_thread, JNIHandles::resolve_external_guard(jobj));
   1.714 +    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
   1.715 +    KlassHandle ob_kh = KlassHandle(cur_thread, obj->klass());
   1.716 +    NULL_CHECK(ob_kh, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
   1.717 +
   1.718 +    if (!is_assignable(signature, ob_kh(), cur_thread)) {
   1.719 +      _result = JVMTI_ERROR_TYPE_MISMATCH;
   1.720 +      return false;
   1.721 +    }
   1.722 +  }
   1.723 +  return true;
   1.724 +}
   1.725 +
   1.726 +static bool can_be_deoptimized(vframe* vf) {
   1.727 +  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
   1.728 +}
   1.729 +
   1.730 +bool VM_GetOrSetLocal::doit_prologue() {
   1.731 +  _jvf = get_java_vframe();
   1.732 +  NULL_CHECK(_jvf, false);
   1.733 +
   1.734 +  if (_jvf->method()->is_native()) {
   1.735 +    if (getting_receiver() && !_jvf->method()->is_static()) {
   1.736 +      return true;
   1.737 +    } else {
   1.738 +      _result = JVMTI_ERROR_OPAQUE_FRAME;
   1.739 +      return false;
   1.740 +    }
   1.741 +  }
   1.742 +
   1.743 +  if (!check_slot_type(_jvf)) {
   1.744 +    return false;
   1.745 +  }
   1.746 +  return true;
   1.747 +}
   1.748 +
   1.749 +void VM_GetOrSetLocal::doit() {
   1.750 +  if (_set) {
   1.751 +    // Force deoptimization of frame if compiled because it's
   1.752 +    // possible the compiler emitted some locals as constant values,
   1.753 +    // meaning they are not mutable.
   1.754 +    if (can_be_deoptimized(_jvf)) {
   1.755 +
   1.756 +      // Schedule deoptimization so that eventually the local
   1.757 +      // update will be written to an interpreter frame.
   1.758 +      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());
   1.759 +
   1.760 +      // Now store a new value for the local which will be applied
   1.761 +      // once deoptimization occurs. Note however that while this
   1.762 +      // write is deferred until deoptimization actually happens
   1.763 +      // can vframe created after this point will have its locals
   1.764 +      // reflecting this update so as far as anyone can see the
   1.765 +      // write has already taken place.
   1.766 +
   1.767 +      // If we are updating an oop then get the oop from the handle
   1.768 +      // since the handle will be long gone by the time the deopt
   1.769 +      // happens. The oop stored in the deferred local will be
   1.770 +      // gc'd on its own.
   1.771 +      if (_type == T_OBJECT) {
   1.772 +        _value.l = (jobject) (JNIHandles::resolve_external_guard(_value.l));
   1.773 +      }
   1.774 +      // Re-read the vframe so we can see that it is deoptimized
   1.775 +      // [ Only need because of assert in update_local() ]
   1.776 +      _jvf = get_java_vframe();
   1.777 +      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
   1.778 +      return;
   1.779 +    }
   1.780 +    StackValueCollection *locals = _jvf->locals();
   1.781 +    HandleMark hm;
   1.782 +
   1.783 +    switch (_type) {
   1.784 +      case T_INT:    locals->set_int_at   (_index, _value.i); break;
   1.785 +      case T_LONG:   locals->set_long_at  (_index, _value.j); break;
   1.786 +      case T_FLOAT:  locals->set_float_at (_index, _value.f); break;
   1.787 +      case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
   1.788 +      case T_OBJECT: {
   1.789 +        Handle ob_h(JNIHandles::resolve_external_guard(_value.l));
   1.790 +        locals->set_obj_at (_index, ob_h);
   1.791 +        break;
   1.792 +      }
   1.793 +      default: ShouldNotReachHere();
   1.794 +    }
   1.795 +    _jvf->set_locals(locals);
   1.796 +  } else {
   1.797 +    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
   1.798 +      assert(getting_receiver(), "Can only get here when getting receiver");
   1.799 +      oop receiver = _jvf->fr().get_native_receiver();
   1.800 +      _value.l = JNIHandles::make_local(_calling_thread, receiver);
   1.801 +    } else {
   1.802 +      StackValueCollection *locals = _jvf->locals();
   1.803 +
   1.804 +      if (locals->at(_index)->type() == T_CONFLICT) {
   1.805 +        memset(&_value, 0, sizeof(_value));
   1.806 +        _value.l = NULL;
   1.807 +        return;
   1.808 +      }
   1.809 +
   1.810 +      switch (_type) {
   1.811 +        case T_INT:    _value.i = locals->int_at   (_index);   break;
   1.812 +        case T_LONG:   _value.j = locals->long_at  (_index);   break;
   1.813 +        case T_FLOAT:  _value.f = locals->float_at (_index);   break;
   1.814 +        case T_DOUBLE: _value.d = locals->double_at(_index);   break;
   1.815 +        case T_OBJECT: {
   1.816 +          // Wrap the oop to be returned in a local JNI handle since
   1.817 +          // oops_do() no longer applies after doit() is finished.
   1.818 +          oop obj = locals->obj_at(_index)();
   1.819 +          _value.l = JNIHandles::make_local(_calling_thread, obj);
   1.820 +          break;
   1.821 +        }
   1.822 +        default: ShouldNotReachHere();
   1.823 +      }
   1.824 +    }
   1.825 +  }
   1.826 +}
   1.827 +
   1.828 +
   1.829 +bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
   1.830 +  return true; // May need to deoptimize
   1.831 +}
   1.832 +
   1.833 +
// VM_GetReceiver reads the receiver of the frame at 'depth' by reusing
// the VM_GetOrSetLocal machinery with local slot index 0.
VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}
   1.837 +
   1.838 +/////////////////////////////////////////////////////////////////////////////////////////
   1.839 +
   1.840 +//
   1.841 +// class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
   1.842 +//
   1.843 +
   1.844 +bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
   1.845 +  // external suspend should have caught suspending a thread twice
   1.846 +
   1.847 +  // Immediate suspension required for JPDA back-end so JVMTI agent threads do
   1.848 +  // not deadlock due to later suspension on transitions while holding
   1.849 +  // raw monitors.  Passing true causes the immediate suspension.
   1.850 +  // java_suspend() will catch threads in the process of exiting
   1.851 +  // and will ignore them.
   1.852 +  java_thread->java_suspend();
   1.853 +
   1.854 +  // It would be nice to have the following assertion in all the time,
   1.855 +  // but it is possible for a racing resume request to have resumed
   1.856 +  // this thread right after we suspended it. Temporarily enable this
   1.857 +  // assertion if you are chasing a different kind of bug.
   1.858 +  //
   1.859 +  // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
   1.860 +  //   java_thread->is_being_ext_suspended(), "thread is not suspended");
   1.861 +
   1.862 +  if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
   1.863 +    // check again because we can get delayed in java_suspend():
   1.864 +    // the thread is in process of exiting.
   1.865 +    return false;
   1.866 +  }
   1.867 +
   1.868 +  return true;
   1.869 +}
   1.870 +
   1.871 +bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
   1.872 +  // external suspend should have caught resuming a thread twice
   1.873 +  assert(java_thread->is_being_ext_suspended(), "thread should be suspended");
   1.874 +
   1.875 +  // resume thread
   1.876 +  {
   1.877 +    // must always grab Threads_lock, see JVM_SuspendThread
   1.878 +    MutexLocker ml(Threads_lock);
   1.879 +    java_thread->java_resume();
   1.880 +  }
   1.881 +
   1.882 +  return true;
   1.883 +}
   1.884 +
   1.885 +
   1.886 +void JvmtiSuspendControl::print() {
   1.887 +#ifndef PRODUCT
   1.888 +  MutexLocker mu(Threads_lock);
   1.889 +  ResourceMark rm;
   1.890 +
   1.891 +  tty->print("Suspended Threads: [");
   1.892 +  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
   1.893 +#ifdef JVMTI_TRACE
   1.894 +    const char *name   = JvmtiTrace::safe_get_thread_name(thread);
   1.895 +#else
   1.896 +    const char *name   = "";
   1.897 +#endif /*JVMTI_TRACE */
   1.898 +    tty->print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
   1.899 +    if (!thread->has_last_Java_frame()) {
   1.900 +      tty->print("no stack");
   1.901 +    }
   1.902 +    tty->print(") ");
   1.903 +  }
   1.904 +  tty->print_cr("]");
   1.905 +#endif
   1.906 +}
   1.907 +
   1.908 +JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
   1.909 +    nmethod* nm) {
   1.910 +  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
   1.911 +  event._event_data.compiled_method_load = nm;
   1.912 +  // Keep the nmethod alive until the ServiceThread can process
   1.913 +  // this deferred event.
   1.914 +  nmethodLocker::lock_nmethod(nm);
   1.915 +  return event;
   1.916 +}
   1.917 +
   1.918 +JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
   1.919 +    nmethod* nm, jmethodID id, const void* code) {
   1.920 +  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
   1.921 +  event._event_data.compiled_method_unload.nm = nm;
   1.922 +  event._event_data.compiled_method_unload.method_id = id;
   1.923 +  event._event_data.compiled_method_unload.code_begin = code;
   1.924 +  // Keep the nmethod alive until the ServiceThread can process
   1.925 +  // this deferred event. This will keep the memory for the
   1.926 +  // generated code from being reused too early. We pass
   1.927 +  // zombie_ok == true here so that our nmethod that was just
   1.928 +  // made into a zombie can be locked.
   1.929 +  nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
   1.930 +  return event;
   1.931 +}
   1.932 +
   1.933 +JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
   1.934 +      const char* name, const void* code_begin, const void* code_end) {
   1.935 +  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
   1.936 +  // Need to make a copy of the name since we don't know how long
   1.937 +  // the event poster will keep it around after we enqueue the
   1.938 +  // deferred event and return. strdup() failure is handled in
   1.939 +  // the post() routine below.
   1.940 +  event._event_data.dynamic_code_generated.name = os::strdup(name);
   1.941 +  event._event_data.dynamic_code_generated.code_begin = code_begin;
   1.942 +  event._event_data.dynamic_code_generated.code_end = code_end;
   1.943 +  return event;
   1.944 +}
   1.945 +
   1.946 +void JvmtiDeferredEvent::post() {
   1.947 +  assert(ServiceThread::is_service_thread(Thread::current()),
   1.948 +         "Service thread must post enqueued events");
   1.949 +  switch(_type) {
   1.950 +    case TYPE_COMPILED_METHOD_LOAD: {
   1.951 +      nmethod* nm = _event_data.compiled_method_load;
   1.952 +      JvmtiExport::post_compiled_method_load(nm);
   1.953 +      // done with the deferred event so unlock the nmethod
   1.954 +      nmethodLocker::unlock_nmethod(nm);
   1.955 +      break;
   1.956 +    }
   1.957 +    case TYPE_COMPILED_METHOD_UNLOAD: {
   1.958 +      nmethod* nm = _event_data.compiled_method_unload.nm;
   1.959 +      JvmtiExport::post_compiled_method_unload(
   1.960 +        _event_data.compiled_method_unload.method_id,
   1.961 +        _event_data.compiled_method_unload.code_begin);
   1.962 +      // done with the deferred event so unlock the nmethod
   1.963 +      nmethodLocker::unlock_nmethod(nm);
   1.964 +      break;
   1.965 +    }
   1.966 +    case TYPE_DYNAMIC_CODE_GENERATED: {
   1.967 +      JvmtiExport::post_dynamic_code_generated_internal(
   1.968 +        // if strdup failed give the event a default name
   1.969 +        (_event_data.dynamic_code_generated.name == NULL)
   1.970 +          ? "unknown_code" : _event_data.dynamic_code_generated.name,
   1.971 +        _event_data.dynamic_code_generated.code_begin,
   1.972 +        _event_data.dynamic_code_generated.code_end);
   1.973 +      if (_event_data.dynamic_code_generated.name != NULL) {
   1.974 +        // release our copy
   1.975 +        os::free((void *)_event_data.dynamic_code_generated.name);
   1.976 +      }
   1.977 +      break;
   1.978 +    }
   1.979 +    default:
   1.980 +      ShouldNotReachHere();
   1.981 +  }
   1.982 +}
   1.983 +
// Mainline FIFO queue; all access is guarded by Service_lock (asserted
// in has_events()/enqueue()/dequeue()).
JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL;
JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL;

// Lock-free LIFO side list for producers that post without holding
// Service_lock (see add_pending_event()); drained into the mainline
// queue by process_pending_events().
volatile JvmtiDeferredEventQueue::QueueNode*
    JvmtiDeferredEventQueue::_pending_list = NULL;
   1.989 +
   1.990 +bool JvmtiDeferredEventQueue::has_events() {
   1.991 +  assert(Service_lock->owned_by_self(), "Must own Service_lock");
   1.992 +  return _queue_head != NULL || _pending_list != NULL;
   1.993 +}
   1.994 +
   1.995 +void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) {
   1.996 +  assert(Service_lock->owned_by_self(), "Must own Service_lock");
   1.997 +
   1.998 +  process_pending_events();
   1.999 +
  1.1000 +  // Events get added to the end of the queue (and are pulled off the front).
  1.1001 +  QueueNode* node = new QueueNode(event);
  1.1002 +  if (_queue_tail == NULL) {
  1.1003 +    _queue_tail = _queue_head = node;
  1.1004 +  } else {
  1.1005 +    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
  1.1006 +    _queue_tail->set_next(node);
  1.1007 +    _queue_tail = node;
  1.1008 +  }
  1.1009 +
  1.1010 +  Service_lock->notify_all();
  1.1011 +  assert((_queue_head == NULL) == (_queue_tail == NULL),
  1.1012 +         "Inconsistent queue markers");
  1.1013 +}
  1.1014 +
  1.1015 +JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  1.1016 +  assert(Service_lock->owned_by_self(), "Must own Service_lock");
  1.1017 +
  1.1018 +  process_pending_events();
  1.1019 +
  1.1020 +  assert(_queue_head != NULL, "Nothing to dequeue");
  1.1021 +
  1.1022 +  if (_queue_head == NULL) {
  1.1023 +    // Just in case this happens in product; it shouldn't but let's not crash
  1.1024 +    return JvmtiDeferredEvent();
  1.1025 +  }
  1.1026 +
  1.1027 +  QueueNode* node = _queue_head;
  1.1028 +  _queue_head = _queue_head->next();
  1.1029 +  if (_queue_head == NULL) {
  1.1030 +    _queue_tail = NULL;
  1.1031 +  }
  1.1032 +
  1.1033 +  assert((_queue_head == NULL) == (_queue_tail == NULL),
  1.1034 +         "Inconsistent queue markers");
  1.1035 +
  1.1036 +  JvmtiDeferredEvent event = node->event();
  1.1037 +  delete node;
  1.1038 +  return event;
  1.1039 +}
  1.1040 +
  1.1041 +void JvmtiDeferredEventQueue::add_pending_event(
  1.1042 +    const JvmtiDeferredEvent& event) {
  1.1043 +
  1.1044 +  QueueNode* node = new QueueNode(event);
  1.1045 +
  1.1046 +  bool success = false;
  1.1047 +  QueueNode* prev_value = (QueueNode*)_pending_list;
  1.1048 +  do {
  1.1049 +    node->set_next(prev_value);
  1.1050 +    prev_value = (QueueNode*)Atomic::cmpxchg_ptr(
  1.1051 +        (void*)node, (volatile void*)&_pending_list, (void*)node->next());
  1.1052 +  } while (prev_value != node->next());
  1.1053 +}
  1.1054 +
// This method transfers any events that were added by someone NOT holding
// the lock into the mainline queue. Caller must hold Service_lock.
void JvmtiDeferredEventQueue::process_pending_events() {
  assert(Service_lock->owned_by_self(), "Must own Service_lock");

  if (_pending_list != NULL) {
    // Atomically detach the entire pending list; concurrent producers
    // will simply start building a fresh one.
    QueueNode* head =
        (QueueNode*)Atomic::xchg_ptr(NULL, (volatile void*)&_pending_list);

    assert((_queue_head == NULL) == (_queue_tail == NULL),
           "Inconsistent queue markers");

    // NOTE(review): head should be non-NULL here since only this
    // (lock-holding) path ever removes from _pending_list after the
    // check above; the test looks defensive.
    if (head != NULL) {
      // Since we've treated the pending list as a stack (with newer
      // events at the beginning), we need to join the bottom of the stack
      // with the 'tail' of the queue in order to get the events in the
      // right order.  We do this by reversing the pending list and appending
      // it to the queue.

      QueueNode* new_tail = head;
      QueueNode* new_head = NULL;

      // This reverses the list in place: walk it once, flipping each
      // node's next pointer back to its predecessor.
      QueueNode* prev = new_tail;
      QueueNode* node = new_tail->next();
      new_tail->set_next(NULL);
      while (node != NULL) {
        QueueNode* next = node->next();
        node->set_next(prev);
        prev = node;
        node = next;
      }
      new_head = prev;

      // Now append the new (oldest-first) list to the queue.
      if (_queue_tail != NULL) {
        _queue_tail->set_next(new_head);
      } else { // _queue_head == NULL
        _queue_head = new_head;
      }
      _queue_tail = new_tail;
    }
  }
}

mercurial