/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP stefank@2314: #define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP stefank@2314: stefank@2314: #include "memory/gcLocker.hpp" stefank@2314: #include "runtime/handles.inline.hpp" stefank@2314: #include "runtime/mutexLocker.hpp" stefank@2314: #include "runtime/orderAccess.hpp" stefank@2314: #include "runtime/os.hpp" stefank@2314: #include "runtime/safepoint.hpp" stefank@2314: #include "runtime/vmThread.hpp" stefank@2314: #include "utilities/globalDefinitions.hpp" stefank@2314: #include "utilities/preserveException.hpp" stefank@2314: #include "utilities/top.hpp" stefank@2314: #ifdef TARGET_OS_FAMILY_linux stefank@2314: # include "thread_linux.inline.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_solaris stefank@2314: # include "thread_solaris.inline.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_windows stefank@2314: # include "thread_windows.inline.hpp" stefank@2314: #endif stefank@2314: duke@435: // Wrapper for all entry points to the virtual machine. duke@435: // The HandleMarkCleaner is a faster version of HandleMark. duke@435: // It relies on the fact that there is a HandleMark further duke@435: // down the stack (in JavaCalls::call_helper), and just resets duke@435: // to the saved values in that HandleMark. duke@435: duke@435: class HandleMarkCleaner: public StackObj { duke@435: private: duke@435: Thread* _thread; duke@435: public: duke@435: HandleMarkCleaner(Thread* thread) { duke@435: _thread = thread; duke@435: _thread->last_handle_mark()->push(); duke@435: } duke@435: ~HandleMarkCleaner() { duke@435: _thread->last_handle_mark()->pop_and_restore(); duke@435: } duke@435: duke@435: private: duke@435: inline void* operator new(size_t size, void* ptr) { duke@435: return ptr; duke@435: } duke@435: }; duke@435: duke@435: // InterfaceSupport provides functionality used by the __LEAF and __ENTRY duke@435: // macros. 
These macros are used to guard entry points into the VM and duke@435: // perform checks upon leave of the VM. duke@435: duke@435: duke@435: class InterfaceSupport: AllStatic { duke@435: # ifdef ASSERT duke@435: public: duke@435: static long _scavenge_alot_counter; duke@435: static long _fullgc_alot_counter; duke@435: static long _number_of_calls; duke@435: static long _fullgc_alot_invocation; duke@435: duke@435: // tracing duke@435: static void trace(const char* result_type, const char* header); duke@435: duke@435: // Helper methods used to implement +ScavengeALot and +FullGCALot duke@435: static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); } duke@435: static void gc_alot(); duke@435: duke@435: static void walk_stack_from(vframe* start_vf); duke@435: static void walk_stack(); duke@435: duke@435: # ifdef ENABLE_ZAP_DEAD_LOCALS duke@435: static void zap_dead_locals_old(); duke@435: # endif duke@435: duke@435: static void zombieAll(); duke@435: static void deoptimizeAll(); duke@435: static void stress_derived_pointers(); duke@435: static void verify_stack(); duke@435: static void verify_last_frame(); duke@435: # endif duke@435: duke@435: public: duke@435: // OS dependent stuff stefank@2314: #ifdef TARGET_OS_FAMILY_linux stefank@2314: # include "interfaceSupport_linux.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_solaris stefank@2314: # include "interfaceSupport_solaris.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_windows stefank@2314: # include "interfaceSupport_windows.hpp" stefank@2314: #endif stefank@2314: duke@435: }; duke@435: duke@435: duke@435: // Basic class for all thread transition classes. 
duke@435: duke@435: class ThreadStateTransition : public StackObj { duke@435: protected: duke@435: JavaThread* _thread; duke@435: public: duke@435: ThreadStateTransition(JavaThread *thread) { duke@435: _thread = thread; duke@435: assert(thread != NULL && thread->is_Java_thread(), "must be Java thread"); duke@435: } duke@435: duke@435: // Change threadstate in a manner, so safepoint can detect changes. duke@435: // Time-critical: called on exit from every runtime routine duke@435: static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) { duke@435: assert(from != _thread_in_Java, "use transition_from_java"); duke@435: assert(from != _thread_in_native, "use transition_from_native"); duke@435: assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states"); duke@435: assert(thread->thread_state() == from, "coming from wrong thread state"); duke@435: // Change to transition state (assumes total store ordering! -Urs) duke@435: thread->set_thread_state((JavaThreadState)(from + 1)); duke@435: duke@435: // Make sure new state is seen by VM thread duke@435: if (os::is_MP()) { duke@435: if (UseMembar) { duke@435: // Force a fence between the write above and read below duke@435: OrderAccess::fence(); duke@435: } else { duke@435: // store to serialize page so VM thread can do pseudo remote membar duke@435: os::write_memory_serialize_page(thread); duke@435: } duke@435: } duke@435: duke@435: if (SafepointSynchronize::do_call_back()) { duke@435: SafepointSynchronize::block(thread); duke@435: } duke@435: thread->set_thread_state(to); duke@435: duke@435: CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();) duke@435: } duke@435: duke@435: // transition_and_fence must be used on any thread state transition duke@435: // where there might not be a Java call stub on the stack, in duke@435: // particular on Windows where the Structured Exception Handler is duke@435: // set up in the call stub. 
os::write_memory_serialize_page() can duke@435: // fault and we can't recover from it on Windows without a SEH in duke@435: // place. duke@435: static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) { duke@435: assert(thread->thread_state() == from, "coming from wrong thread state"); duke@435: assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states"); duke@435: // Change to transition state (assumes total store ordering! -Urs) duke@435: thread->set_thread_state((JavaThreadState)(from + 1)); duke@435: duke@435: // Make sure new state is seen by VM thread duke@435: if (os::is_MP()) { duke@435: if (UseMembar) { duke@435: // Force a fence between the write above and read below duke@435: OrderAccess::fence(); duke@435: } else { duke@435: // Must use this rather than serialization page in particular on Windows duke@435: InterfaceSupport::serialize_memory(thread); duke@435: } duke@435: } duke@435: duke@435: if (SafepointSynchronize::do_call_back()) { duke@435: SafepointSynchronize::block(thread); duke@435: } duke@435: thread->set_thread_state(to); duke@435: duke@435: CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();) duke@435: } duke@435: duke@435: // Same as above, but assumes from = _thread_in_Java. This is simpler, since we duke@435: // never block on entry to the VM. This will break the code, since e.g. preserve arguments duke@435: // have not been setup. 
duke@435: static inline void transition_from_java(JavaThread *thread, JavaThreadState to) { duke@435: assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state"); duke@435: thread->set_thread_state(to); duke@435: } duke@435: duke@435: static inline void transition_from_native(JavaThread *thread, JavaThreadState to) { duke@435: assert((to & 1) == 0, "odd numbers are transitions states"); duke@435: assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state"); duke@435: // Change to transition state (assumes total store ordering! -Urs) duke@435: thread->set_thread_state(_thread_in_native_trans); duke@435: duke@435: // Make sure new state is seen by GC thread duke@435: if (os::is_MP()) { duke@435: if (UseMembar) { duke@435: // Force a fence between the write above and read below duke@435: OrderAccess::fence(); duke@435: } else { duke@435: // Must use this rather than serialization page in particular on Windows duke@435: InterfaceSupport::serialize_memory(thread); duke@435: } duke@435: } duke@435: duke@435: // We never install asynchronous exceptions when coming (back) in duke@435: // to the runtime from native code because the runtime is not set duke@435: // up to handle exceptions floating around at arbitrary points. duke@435: if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) { duke@435: JavaThread::check_safepoint_and_suspend_for_native_trans(thread); duke@435: duke@435: // Clear unhandled oops anywhere where we could block, even if we don't. 
duke@435: CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();) duke@435: } duke@435: duke@435: thread->set_thread_state(to); duke@435: } duke@435: protected: duke@435: void trans(JavaThreadState from, JavaThreadState to) { transition(_thread, from, to); } duke@435: void trans_from_java(JavaThreadState to) { transition_from_java(_thread, to); } duke@435: void trans_from_native(JavaThreadState to) { transition_from_native(_thread, to); } duke@435: void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); } duke@435: }; duke@435: duke@435: duke@435: class ThreadInVMfromJava : public ThreadStateTransition { duke@435: public: duke@435: ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) { duke@435: trans_from_java(_thread_in_vm); duke@435: } duke@435: ~ThreadInVMfromJava() { duke@435: trans(_thread_in_vm, _thread_in_Java); duke@435: // Check for pending. async. exceptions or suspends. duke@435: if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(); duke@435: } duke@435: }; duke@435: duke@435: duke@435: class ThreadInVMfromUnknown { duke@435: private: duke@435: JavaThread* _thread; duke@435: public: duke@435: ThreadInVMfromUnknown() : _thread(NULL) { duke@435: Thread* t = Thread::current(); duke@435: if (t->is_Java_thread()) { duke@435: JavaThread* t2 = (JavaThread*) t; duke@435: if (t2->thread_state() == _thread_in_native) { duke@435: _thread = t2; duke@435: ThreadStateTransition::transition_from_native(t2, _thread_in_vm); duke@435: // Used to have a HandleMarkCleaner but that is dangerous as duke@435: // it could free a handle in our (indirect, nested) caller. duke@435: // We expect any handles will be short lived and figure we duke@435: // don't need an actual HandleMark. 
duke@435: } duke@435: } duke@435: } duke@435: ~ThreadInVMfromUnknown() { duke@435: if (_thread) { duke@435: ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native); duke@435: } duke@435: } duke@435: }; duke@435: duke@435: duke@435: class ThreadInVMfromNative : public ThreadStateTransition { duke@435: public: duke@435: ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) { duke@435: trans_from_native(_thread_in_vm); duke@435: } duke@435: ~ThreadInVMfromNative() { duke@435: trans_and_fence(_thread_in_vm, _thread_in_native); duke@435: } duke@435: }; duke@435: duke@435: duke@435: class ThreadToNativeFromVM : public ThreadStateTransition { duke@435: public: duke@435: ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) { duke@435: // We are leaving the VM at this point and going directly to native code. duke@435: // Block, if we are in the middle of a safepoint synchronization. duke@435: assert(!thread->owns_locks(), "must release all locks when leaving VM"); duke@435: thread->frame_anchor()->make_walkable(thread); duke@435: trans_and_fence(_thread_in_vm, _thread_in_native); duke@435: // Check for pending. async. exceptions or suspends. 
duke@435: if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false); duke@435: } duke@435: duke@435: ~ThreadToNativeFromVM() { duke@435: trans_from_native(_thread_in_vm); duke@435: // We don't need to clear_walkable because it will happen automagically when we return to java duke@435: } duke@435: }; duke@435: duke@435: duke@435: class ThreadBlockInVM : public ThreadStateTransition { duke@435: public: duke@435: ThreadBlockInVM(JavaThread *thread) duke@435: : ThreadStateTransition(thread) { duke@435: // Once we are blocked vm expects stack to be walkable duke@435: thread->frame_anchor()->make_walkable(thread); duke@435: trans_and_fence(_thread_in_vm, _thread_blocked); duke@435: } duke@435: ~ThreadBlockInVM() { duke@435: trans_and_fence(_thread_blocked, _thread_in_vm); duke@435: // We don't need to clear_walkable because it will happen automagically when we return to java duke@435: } duke@435: }; duke@435: duke@435: duke@435: // This special transition class is only used to prevent asynchronous exceptions duke@435: // from being installed on vm exit in situations where we can't tolerate them. duke@435: // See bugs: 4324348, 4854693, 4998314, 5040492, 5050705. duke@435: class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition { duke@435: public: duke@435: ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) { duke@435: trans_from_java(_thread_in_vm); duke@435: } duke@435: ~ThreadInVMfromJavaNoAsyncException() { duke@435: trans(_thread_in_vm, _thread_in_Java); duke@435: // NOTE: We do not check for pending. async. exceptions. duke@435: // If we did and moved the pending async exception over into the duke@435: // pending exception field, we would need to deopt (currently C2 duke@435: // only). However, to do so would require that we transition back duke@435: // to the _thread_in_vm state. Instead we postpone the handling of duke@435: // the async exception. 
duke@435: duke@435: // Check for pending. suspends only. duke@435: if (_thread->has_special_runtime_exit_condition()) duke@435: _thread->handle_special_runtime_exit_condition(false); duke@435: } duke@435: }; duke@435: duke@435: // Debug class instantiated in JRT_ENTRY and ITR_ENTRY macro. duke@435: // Can be used to verify properties on enter/exit of the VM. duke@435: duke@435: #ifdef ASSERT duke@435: class VMEntryWrapper { duke@435: public: duke@435: VMEntryWrapper() { duke@435: if (VerifyLastFrame) { duke@435: InterfaceSupport::verify_last_frame(); duke@435: } duke@435: } duke@435: duke@435: ~VMEntryWrapper() { duke@435: InterfaceSupport::check_gc_alot(); duke@435: if (WalkStackALot) { duke@435: InterfaceSupport::walk_stack(); duke@435: } duke@435: #ifdef ENABLE_ZAP_DEAD_LOCALS duke@435: if (ZapDeadLocalsOld) { duke@435: InterfaceSupport::zap_dead_locals_old(); duke@435: } duke@435: #endif duke@435: #ifdef COMPILER2 duke@435: // This option is not used by Compiler 1 duke@435: if (StressDerivedPointers) { duke@435: InterfaceSupport::stress_derived_pointers(); duke@435: } duke@435: #endif duke@435: if (DeoptimizeALot || DeoptimizeRandom) { duke@435: InterfaceSupport::deoptimizeAll(); duke@435: } duke@435: if (ZombieALot) { duke@435: InterfaceSupport::zombieAll(); duke@435: } duke@435: // do verification AFTER potential deoptimization duke@435: if (VerifyStack) { duke@435: InterfaceSupport::verify_stack(); duke@435: } duke@435: duke@435: } duke@435: }; duke@435: duke@435: duke@435: class VMNativeEntryWrapper { duke@435: public: duke@435: VMNativeEntryWrapper() { duke@435: if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot(); duke@435: } duke@435: duke@435: ~VMNativeEntryWrapper() { duke@435: if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot(); duke@435: } duke@435: }; duke@435: duke@435: #endif duke@435: duke@435: duke@435: // VM-internal runtime interface support duke@435: duke@435: #ifdef ASSERT duke@435: duke@435: class RuntimeHistogramElement : 
public HistogramElement { duke@435: public: duke@435: RuntimeHistogramElement(const char* name); duke@435: }; duke@435: duke@435: #define TRACE_CALL(result_type, header) \ duke@435: InterfaceSupport::_number_of_calls++; \ duke@435: if (TraceRuntimeCalls) \ duke@435: InterfaceSupport::trace(#result_type, #header); \ duke@435: if (CountRuntimeCalls) { \ duke@435: static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \ duke@435: if (e != NULL) e->increment_count(); \ duke@435: } duke@435: #else duke@435: #define TRACE_CALL(result_type, header) \ duke@435: /* do nothing */ duke@435: #endif duke@435: duke@435: duke@435: // LEAF routines do not lock, GC or throw exceptions duke@435: duke@435: #define __LEAF(result_type, header) \ duke@435: TRACE_CALL(result_type, header) \ duke@435: debug_only(NoHandleMark __hm;) \ duke@435: /* begin of body */ duke@435: duke@435: duke@435: // ENTRY routines may lock, GC and throw exceptions duke@435: duke@435: #define __ENTRY(result_type, header, thread) \ duke@435: TRACE_CALL(result_type, header) \ duke@435: HandleMarkCleaner __hm(thread); \ duke@435: Thread* THREAD = thread; \ duke@435: /* begin of body */ duke@435: duke@435: duke@435: // QUICK_ENTRY routines behave like ENTRY but without a handle mark duke@435: duke@435: #define __QUICK_ENTRY(result_type, header, thread) \ duke@435: TRACE_CALL(result_type, header) \ duke@435: debug_only(NoHandleMark __hm;) \ duke@435: Thread* THREAD = thread; \ duke@435: /* begin of body */ duke@435: duke@435: duke@435: // Definitions for IRT (Interpreter Runtime) duke@435: // (thread is an argument passed in to all these routines) duke@435: duke@435: #define IRT_ENTRY(result_type, header) \ duke@435: result_type header { \ duke@435: ThreadInVMfromJava __tiv(thread); \ duke@435: __ENTRY(result_type, header, thread) \ duke@435: debug_only(VMEntryWrapper __vew;) duke@435: duke@435: duke@435: #define IRT_LEAF(result_type, header) \ duke@435: result_type header { \ duke@435: 
__LEAF(result_type, header) \ duke@435: debug_only(No_Safepoint_Verifier __nspv(true);) duke@435: duke@435: duke@435: #define IRT_ENTRY_NO_ASYNC(result_type, header) \ duke@435: result_type header { \ duke@435: ThreadInVMfromJavaNoAsyncException __tiv(thread); \ duke@435: __ENTRY(result_type, header, thread) \ duke@435: debug_only(VMEntryWrapper __vew;) duke@435: duke@435: // Another special case for nmethod_entry_point so the nmethod that the duke@435: // interpreter is about to branch to doesn't get flushed before as we duke@435: // branch to it's interpreter_entry_point. Skip stress testing here too. duke@435: // Also we don't allow async exceptions because it is just too painful. duke@435: #define IRT_ENTRY_FOR_NMETHOD(result_type, header) \ duke@435: result_type header { \ duke@435: nmethodLocker _nmlock(nm); \ duke@435: ThreadInVMfromJavaNoAsyncException __tiv(thread); \ duke@435: __ENTRY(result_type, header, thread) duke@435: duke@435: #define IRT_END } duke@435: duke@435: duke@435: // Definitions for JRT (Java (Compiler/Shared) Runtime) duke@435: duke@435: #define JRT_ENTRY(result_type, header) \ duke@435: result_type header { \ duke@435: ThreadInVMfromJava __tiv(thread); \ duke@435: __ENTRY(result_type, header, thread) \ duke@435: debug_only(VMEntryWrapper __vew;) duke@435: duke@435: duke@435: #define JRT_LEAF(result_type, header) \ duke@435: result_type header { \ duke@435: __LEAF(result_type, header) \ duke@435: debug_only(JRT_Leaf_Verifier __jlv;) duke@435: duke@435: duke@435: #define JRT_ENTRY_NO_ASYNC(result_type, header) \ duke@435: result_type header { \ duke@435: ThreadInVMfromJavaNoAsyncException __tiv(thread); \ duke@435: __ENTRY(result_type, header, thread) \ duke@435: debug_only(VMEntryWrapper __vew;) duke@435: duke@435: // Same as JRT Entry but allows for return value after the safepoint duke@435: // to get back into Java from the VM duke@435: #define JRT_BLOCK_ENTRY(result_type, header) \ duke@435: result_type header { \ duke@435: 
TRACE_CALL(result_type, header) \ duke@435: HandleMarkCleaner __hm(thread); duke@435: duke@435: #define JRT_BLOCK \ duke@435: { \ duke@435: ThreadInVMfromJava __tiv(thread); \ duke@435: Thread* THREAD = thread; \ duke@435: debug_only(VMEntryWrapper __vew;) duke@435: duke@435: #define JRT_BLOCK_END } duke@435: duke@435: #define JRT_END } duke@435: duke@435: // Definitions for JNI duke@435: duke@435: #define JNI_ENTRY(result_type, header) \ duke@435: JNI_ENTRY_NO_PRESERVE(result_type, header) \ duke@435: WeakPreserveExceptionMark __wem(thread); duke@435: duke@435: #define JNI_ENTRY_NO_PRESERVE(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ duke@435: assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \ duke@435: ThreadInVMfromNative __tiv(thread); \ duke@435: debug_only(VMNativeEntryWrapper __vew;) \ duke@435: __ENTRY(result_type, header, thread) duke@435: duke@435: duke@435: // Ensure that the VMNativeEntryWrapper constructor, which can cause duke@435: // a GC, is called outside the NoHandleMark (set via __QUICK_ENTRY). 
duke@435: #define JNI_QUICK_ENTRY(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ duke@435: assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \ duke@435: ThreadInVMfromNative __tiv(thread); \ duke@435: debug_only(VMNativeEntryWrapper __vew;) \ duke@435: __QUICK_ENTRY(result_type, header, thread) duke@435: duke@435: duke@435: #define JNI_LEAF(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ duke@435: assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \ duke@435: __LEAF(result_type, header) duke@435: duke@435: duke@435: // Close the routine and the extern "C" duke@435: #define JNI_END } } duke@435: duke@435: duke@435: duke@435: // Definitions for JVM duke@435: duke@435: #define JVM_ENTRY(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ duke@435: ThreadInVMfromNative __tiv(thread); \ duke@435: debug_only(VMNativeEntryWrapper __vew;) \ duke@435: __ENTRY(result_type, header, thread) duke@435: duke@435: duke@435: #define JVM_ENTRY_NO_ENV(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); \ duke@435: ThreadInVMfromNative __tiv(thread); \ duke@435: debug_only(VMNativeEntryWrapper __vew;) \ duke@435: __ENTRY(result_type, header, thread) duke@435: duke@435: duke@435: #define JVM_QUICK_ENTRY(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: JavaThread* thread=JavaThread::thread_from_jni_environment(env); \ duke@435: ThreadInVMfromNative __tiv(thread); \ duke@435: 
debug_only(VMNativeEntryWrapper __vew;) \ duke@435: __QUICK_ENTRY(result_type, header, thread) duke@435: duke@435: duke@435: #define JVM_LEAF(result_type, header) \ duke@435: extern "C" { \ duke@435: result_type JNICALL header { \ duke@435: VM_Exit::block_if_vm_exited(); \ duke@435: __LEAF(result_type, header) duke@435: duke@435: duke@435: #define JVM_END } } stefank@2314: stefank@2314: #endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP