--- a/src/share/vm/prims/jvmtiImpl.cpp	Wed Feb 02 18:38:40 2011 -0500
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Wed Feb 02 14:38:01 2011 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,13 @@
 #include "prims/jvmtiEventController.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/serviceThread.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframe_hp.hpp"
@@ -910,3 +912,207 @@
   tty->print_cr("]");
 #endif
 }
+
+#ifndef KERNEL
+
+JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
+    nmethod* nm) {
+  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
+  event.set_compiled_method_load(nm);
+  nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
+  return event;
+}
+
+JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
+    jmethodID id, const void* code) {
+  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
+  event.set_compiled_method_unload(id, code);
+  return event;
+}
+
+void JvmtiDeferredEvent::post() {
+  switch(_type) {
+    case TYPE_COMPILED_METHOD_LOAD:
+      JvmtiExport::post_compiled_method_load(compiled_method_load());
+      nmethodLocker::unlock_nmethod(compiled_method_load());
+      break;
+    case TYPE_COMPILED_METHOD_UNLOAD:
+      JvmtiExport::post_compiled_method_unload(
+        compiled_method_unload_method_id(),
+        compiled_method_unload_code_begin());
+      break;
+    case TYPE_FLUSH:
+      JvmtiDeferredEventQueue::flush_complete(flush_state_addr());
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+}
+
+JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL;
+JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL;
+
+volatile JvmtiDeferredEventQueue::QueueNode*
+    JvmtiDeferredEventQueue::_pending_list = NULL;
+
+bool JvmtiDeferredEventQueue::has_events() {
+  assert(Service_lock->owned_by_self(), "Must own Service_lock");
+  return _queue_head != NULL || _pending_list != NULL;
+}
+
+void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) {
+  assert(Service_lock->owned_by_self(), "Must own Service_lock");
+
+  process_pending_events();
+
+  // Events get added to the end of the queue (and are pulled off the front).
+  QueueNode* node = new QueueNode(event);
+  if (_queue_tail == NULL) {
+    _queue_tail = _queue_head = node;
+  } else {
+    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
+    _queue_tail->set_next(node);
+    _queue_tail = node;
+  }
+
+  Service_lock->notify_all();
+  assert((_queue_head == NULL) == (_queue_tail == NULL),
+         "Inconsistent queue markers");
+}
+
+JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
+  assert(Service_lock->owned_by_self(), "Must own Service_lock");
+
+  process_pending_events();
+
+  assert(_queue_head != NULL, "Nothing to dequeue");
+
+  if (_queue_head == NULL) {
+    // Just in case this happens in product; it shouldn't but let's not crash
+    return JvmtiDeferredEvent();
+  }
+
+  QueueNode* node = _queue_head;
+  _queue_head = _queue_head->next();
+  if (_queue_head == NULL) {
+    _queue_tail = NULL;
+  }
+
+  assert((_queue_head == NULL) == (_queue_tail == NULL),
+         "Inconsistent queue markers");
+
+  JvmtiDeferredEvent event = node->event();
+  delete node;
+  return event;
+}
+
+void JvmtiDeferredEventQueue::add_pending_event(
+    const JvmtiDeferredEvent& event) {
+
+  QueueNode* node = new QueueNode(event);
+
+  bool success = false;
+  QueueNode* prev_value = (QueueNode*)_pending_list;
+  do {
+    node->set_next(prev_value);
+    prev_value = (QueueNode*)Atomic::cmpxchg_ptr(
+      (void*)node, (volatile void*)&_pending_list, (void*)node->next());
+  } while (prev_value != node->next());
+}
+
+// This method transfers any events that were added by someone NOT holding
+// the lock into the mainline queue.
+void JvmtiDeferredEventQueue::process_pending_events() {
+  assert(Service_lock->owned_by_self(), "Must own Service_lock");
+
+  if (_pending_list != NULL) {
+    QueueNode* head =
+      (QueueNode*)Atomic::xchg_ptr(NULL, (volatile void*)&_pending_list);
+
+    assert((_queue_head == NULL) == (_queue_tail == NULL),
+           "Inconsistent queue markers");
+
+    if (head != NULL) {
+      // Since we've treated the pending list as a stack (with newer
+      // events at the beginning), we need to join the bottom of the stack
+      // with the 'tail' of the queue in order to get the events in the
+      // right order.  We do this by reversing the pending list and appending
+      // it to the queue.
+
+      QueueNode* new_tail = head;
+      QueueNode* new_head = NULL;
+
+      // This reverses the list
+      QueueNode* prev = new_tail;
+      QueueNode* node = new_tail->next();
+      new_tail->set_next(NULL);
+      while (node != NULL) {
+        QueueNode* next = node->next();
+        node->set_next(prev);
+        prev = node;
+        node = next;
+      }
+      new_head = prev;
+
+      // Now append the new list to the queue
+      if (_queue_tail != NULL) {
+        _queue_tail->set_next(new_head);
+      } else { // _queue_head == NULL
+        _queue_head = new_head;
+      }
+      _queue_tail = new_tail;
+    }
+  }
+}
+
+enum {
+  // Random - used for debugging
+  FLUSHING = 0x50403020,
+  FLUSHED = 0x09080706
+};
+
+void JvmtiDeferredEventQueue::flush_queue(Thread* thread) {
+
+  volatile int flush_state = FLUSHING;
+
+  JvmtiDeferredEvent flush(JvmtiDeferredEvent::TYPE_FLUSH);
+  flush.set_flush_state_addr((int*)&flush_state);
+
+  if (ServiceThread::is_service_thread(thread)) {
+    // If we are the service thread we have to post all preceding events
+    // Use the flush event as a token to indicate when we can stop
+    JvmtiDeferredEvent event;
+    {
+      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+      enqueue(flush);
+      event = dequeue();
+    }
+    while (!event.is_flush_event() ||
+           event.flush_state_addr() != &flush_state) {
+      event.post();
+      {
+        MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+        event = dequeue();
+      }
+    }
+  } else {
+    // Wake up the service thread so it will process events.  When it gets
+    // to the flush event it will set 'flush_complete' and notify us.
+    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    enqueue(flush);
+    while (flush_state != FLUSHED) {
+      assert(flush_state == FLUSHING || flush_state == FLUSHED,
+             "only valid values for this");
+      Service_lock->wait(Mutex::_no_safepoint_check_flag);
+    }
+  }
+}
+
+void JvmtiDeferredEventQueue::flush_complete(int* state_addr) {
+  assert(state_addr != NULL && *state_addr == FLUSHING, "must be");
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  *state_addr = FLUSHED;
+  Service_lock->notify_all();
+}
+
+#endif // ndef KERNEL
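
A note for reviewers on the _pending_list scheme in add_pending_event() and
process_pending_events(): producers that cannot take Service_lock push nodes
onto a lock-free LIFO list with a CAS retry loop, and the consumer later
claims the whole list with a single atomic exchange and reverses it to
restore FIFO order. The following is a minimal standalone sketch of that same
pattern, not part of the changeset: the PendingStack/Node names and the
drain_in_fifo_order() helper are invented for illustration, and std::atomic
stands in for HotSpot's Atomic::cmpxchg_ptr/xchg_ptr.

// Illustrative sketch only -- NOT part of the patch above.
// Compile with: g++ -std=c++11 sketch.cpp
#include <atomic>
#include <cstdio>

struct Node {
  int   payload;   // stands in for JvmtiDeferredEvent
  Node* next;
};

class PendingStack {
  std::atomic<Node*> _head{nullptr};
 public:
  // Lock-free push, as in add_pending_event(): splice our node onto the
  // current head, retrying the CAS until no other producer races us.
  void push(Node* node) {
    Node* old_head = _head.load();
    do {
      node->next = old_head;
      // On failure, compare_exchange_weak reloads old_head for the retry.
    } while (!_head.compare_exchange_weak(old_head, node));
  }

  // Consumer side, as in process_pending_events(): atomically claim the
  // entire list, then reverse it so the oldest event comes out first.
  Node* drain_in_fifo_order() {
    Node* node = _head.exchange(nullptr);
    Node* reversed = nullptr;
    while (node != nullptr) {
      Node* next = node->next;
      node->next = reversed;
      reversed = node;
      node = next;
    }
    return reversed;
  }
};

int main() {
  PendingStack stack;
  for (int i = 1; i <= 3; i++) {
    stack.push(new Node{i, nullptr});
  }
  // Prints "1 2 3": FIFO order despite the LIFO push.
  for (Node* n = stack.drain_in_fifo_order(); n != nullptr; ) {
    std::printf("%d ", n->payload);
    Node* next = n->next;
    delete n;
    n = next;
  }
  std::printf("\n");
  return 0;
}

Reversing on the consumer side keeps the producer path down to one CAS per
event, which matters here because add_pending_event() is called from contexts
where taking Service_lock is not permitted; the consumer already holds the
lock and can afford the O(n) reversal.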