/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "memory/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/top.hpp"

// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) throw() {
    return ptr;
  }
};
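
// Illustrative nesting (sketch only, not part of this header): the cleaner
// piggybacks on the HandleMark that JavaCalls::call_helper leaves further
// down the stack, so a VM entry point only needs to push and pop within
// that existing mark.
//
//   JavaCalls::call_helper(...) {
//     HandleMark hm(thread);                   // the "real" HandleMark
//     // ... call into Java, which re-enters the VM through an entry point ...
//     //   { HandleMarkCleaner __hm(thread);   // push() on that HandleMark
//     //     ... allocate handles in the VM ...
//     //   }                                   // pop_and_restore() on scope exit
//   }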

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // tracing
  static void trace(const char* result_type, const char* header);

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_locals_old();
# endif

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "interfaceSupport_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "interfaceSupport_bsd.hpp"
#endif

};


// Base class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code to
  // detect the change.
  // Time-critical: called on exit from every runtime routine.
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }
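
  // Worked example (illustrative, derived from the code above): a wrapper
  // performing transition(thread, _thread_in_vm, _thread_blocked) executes
  // roughly the following sequence:
  //
  //   thread->set_thread_state((JavaThreadState)(_thread_in_vm + 1)); // odd "*_trans" state
  //   <fence or serialization-page store>                             // publish to the VM thread
  //   if (SafepointSynchronize::do_call_back())
  //     SafepointSynchronize::block(thread);                          // may park at a safepoint
  //   thread->set_thread_state(_thread_blocked);                      // final (even) state
  //
  // Even-numbered states are stable; the intermediate odd state marks the
  // thread as being in transition (see the asserts above).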

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since
  // we never block on entry to the VM here: blocking at this point would break
  // the code, since e.g. preserved arguments have not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transitions states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure new state is seen by GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere where we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }

 protected:
  void trans(JavaThreadState from, JavaThreadState to)            { transition(_thread, from, to); }
  void trans_from_java(JavaThreadState to)                        { transition_from_java(_thread, to); }
  void trans_from_native(JavaThreadState to)                      { transition_from_native(_thread, to); }
  void trans_and_fence(JavaThreadState from, JavaThreadState to)  { transition_and_fence(_thread, from, to); }
};
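
// Illustrative use of the transition wrappers below (sketch; the helper name
// is hypothetical and not part of this header). Each wrapper pairs a state
// transition in its constructor with the inverse transition in its
// destructor, so it must be used as a scoped stack object:
//
//   void example_runtime_helper(JavaThread* thread) {
//     ThreadInVMfromJava __tiv(thread);  // _thread_in_Java -> _thread_in_vm
//     // ... VM work: may lock, GC or throw ...
//   }                                    // destructor: _thread_in_vm -> _thread_in_Java,
//                                        //             then checks async exceptions / suspends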


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};
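
// Illustrative contrast (sketch; the callback below is hypothetical and not
// part of this header): ThreadInVMfromNative requires a JavaThread known to
// be in _thread_in_native, while ThreadInVMfromUnknown may be used on an
// arbitrary thread and only performs the transition when the current thread
// is a JavaThread currently in native state.
//
//   static void example_external_callback() {  // may run on any thread
//     ThreadInVMfromUnknown __tiv;             // transitions only if applicable
//     // ... VM work ...
//   }                                          // inverse transition if one was made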


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked vm expects stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to java
  }
};


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException() {
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions here.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.

    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};
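
// Illustrative use of ThreadBlockInVM (sketch; the helper and the blocking
// primitive are hypothetical): code that may block while in the VM wraps the
// blocking region so the stack is walkable and safepoints can proceed while
// the thread is blocked.
//
//   void example_block_in_vm(JavaThread* thread) {
//     ThreadBlockInVM __tbivm(thread);   // _thread_in_vm -> _thread_blocked
//     // ... wait on some OS or VM synchronization primitive ...
//   }                                    // _thread_blocked -> _thread_in_vm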

// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (ZapDeadLocalsOld) {
      InterfaceSupport::zap_dead_locals_old();
    }
#endif
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }
  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
 public:
  RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                                       \
  InterfaceSupport::_number_of_calls++;                                       \
  if (TraceRuntimeCalls)                                                      \
    InterfaceSupport::trace(#result_type, #header);                           \
  if (CountRuntimeCalls) {                                                    \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                                      \
  }
#else
#define TRACE_CALL(result_type, header)                                       \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */
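
// Rough expansion (illustrative only; SomeRuntime::example is a hypothetical
// entry point) of what the base macros above contribute to a guarded body:
//
//   // VM_ENTRY_BASE(void, SomeRuntime::example(JavaThread* thread), thread)
//   TRACE_CALL(void, SomeRuntime::example(JavaThread* thread))
//   HandleMarkCleaner __hm(thread);   // handles allowed, cleaned up on exit
//   Thread* THREAD = thread;          // enables the THREAD/CHECK exception macros
//   os::verify_stack_alignment();
//   /* begin of body */
//
// VM_LEAF_BASE instead installs a NoHandleMark (debug builds only) and does
// not set up THREAD, since leaf routines must not lock, GC or throw.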


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(No_Safepoint_Verifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }


// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(JRT_Leaf_Verifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but allows the return value to be computed after the
// safepoint, on the way back into Java from the VM
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)
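
// Illustrative use (sketch; jni_ExampleFunction is hypothetical and not part
// of this header): a JNI implementation written with JNI_ENTRY/JNI_END
// expands to an extern "C" function that locates the JavaThread from the
// JNIEnv, enters the VM via ThreadInVMfromNative and sets up VM_ENTRY_BASE:
//
//   JNI_ENTRY(jint, jni_ExampleFunction(JNIEnv *env, jobject obj))
//     // body runs in _thread_in_vm with a WeakPreserveExceptionMark,
//     // a HandleMarkCleaner and THREAD available
//     return 0;
//   JNI_END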

// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }


// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP