src/share/vm/runtime/interfaceSupport.hpp

author:      trims
date:        Tue, 05 Apr 2011 14:12:31 -0700
changeset:   2708:1d1603768966
parent:      2497:3582bf76420e
child:       3156:f08d439fab8c
permissions: -rw-r--r--

7010070: Update all 2010 Oracle-changed OpenJDK files to have the proper copyright dates - second pass
Summary: Update the copyright to be 2010 on all changed files in OpenJDK
Reviewed-by: ohair

     1 /*
     2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
    26 #define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
    28 #include "memory/gcLocker.hpp"
    29 #include "runtime/handles.inline.hpp"
    30 #include "runtime/mutexLocker.hpp"
    31 #include "runtime/orderAccess.hpp"
    32 #include "runtime/os.hpp"
    33 #include "runtime/safepoint.hpp"
    34 #include "runtime/vmThread.hpp"
    35 #include "utilities/globalDefinitions.hpp"
    36 #include "utilities/preserveException.hpp"
    37 #include "utilities/top.hpp"
    38 #ifdef TARGET_OS_FAMILY_linux
    39 # include "thread_linux.inline.hpp"
    40 #endif
    41 #ifdef TARGET_OS_FAMILY_solaris
    42 # include "thread_solaris.inline.hpp"
    43 #endif
    44 #ifdef TARGET_OS_FAMILY_windows
    45 # include "thread_windows.inline.hpp"
    46 #endif
    48 // Wrapper for all entry points to the virtual machine.
    49 // The HandleMarkCleaner is a faster version of HandleMark.
    50 // It relies on the fact that there is a HandleMark further
    51 // down the stack (in JavaCalls::call_helper), and just resets
    52 // to the saved values in that HandleMark.
    54 class HandleMarkCleaner: public StackObj {
    55  private:
    56   Thread* _thread;
    57  public:
    58   HandleMarkCleaner(Thread* thread) {
    59     _thread = thread;
    60     _thread->last_handle_mark()->push();
    61   }
    62   ~HandleMarkCleaner() {
    63     _thread->last_handle_mark()->pop_and_restore();
    64   }
    66  private:
    67   inline void* operator new(size_t size, void* ptr) {
    68     return ptr;
    69   }
    70 };
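// Example (illustrative sketch only; 'example_vm_entry' and 'some_oop' are
// hypothetical names): the cleaner is normally introduced by the __ENTRY
// macro further below, and on destruction it releases any handles allocated
// in the entry body back to the HandleMark that JavaCalls::call_helper set up
// further down the stack.
//
//   void example_vm_entry(JavaThread* thread) {
//     HandleMarkCleaner __hm(thread);   // piggybacks on the existing HandleMark
//     Handle h(thread, some_oop);       // released again when __hm is destroyed
//   }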
    72 // InterfaceSupport provides functionality used by the __LEAF and __ENTRY
    73 // macros. These macros are used to guard entry points into the VM and
    74 // perform checks upon leaving the VM.
    77 class InterfaceSupport: AllStatic {
    78 # ifdef ASSERT
    79  public:
    80   static long _scavenge_alot_counter;
    81   static long _fullgc_alot_counter;
    82   static long _number_of_calls;
    83   static long _fullgc_alot_invocation;
    85   // tracing
    86   static void trace(const char* result_type, const char* header);
    88   // Helper methods used to implement +ScavengeALot and +FullGCALot
    89   static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
    90   static void gc_alot();
    92   static void walk_stack_from(vframe* start_vf);
    93   static void walk_stack();
    95 # ifdef ENABLE_ZAP_DEAD_LOCALS
    96   static void zap_dead_locals_old();
    97 # endif
    99   static void zombieAll();
   100   static void unlinkSymbols();
   101   static void deoptimizeAll();
   102   static void stress_derived_pointers();
   103   static void verify_stack();
   104   static void verify_last_frame();
   105 # endif
   107  public:
   108   // OS dependent stuff
   109 #ifdef TARGET_OS_FAMILY_linux
   110 # include "interfaceSupport_linux.hpp"
   111 #endif
   112 #ifdef TARGET_OS_FAMILY_solaris
   113 # include "interfaceSupport_solaris.hpp"
   114 #endif
   115 #ifdef TARGET_OS_FAMILY_windows
   116 # include "interfaceSupport_windows.hpp"
   117 #endif
   119 };
   122 // Base class for all thread transition classes.
   124 class ThreadStateTransition : public StackObj {
   125  protected:
   126   JavaThread* _thread;
   127  public:
   128   ThreadStateTransition(JavaThread *thread) {
   129     _thread = thread;
   130     assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
   131   }
   133   // Change the thread state in a manner so that safepoints can detect the change.
   134   // Time-critical: called on exit from every runtime routine
   135   static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
   136     assert(from != _thread_in_Java, "use transition_from_java");
   137     assert(from != _thread_in_native, "use transition_from_native");
   138     assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
   139     assert(thread->thread_state() == from, "coming from wrong thread state");
   140     // Change to transition state (assumes total store ordering!  -Urs)
   141     thread->set_thread_state((JavaThreadState)(from + 1));
   143     // Make sure new state is seen by VM thread
   144     if (os::is_MP()) {
   145       if (UseMembar) {
   146         // Force a fence between the write above and read below
   147         OrderAccess::fence();
   148       } else {
   149         // store to serialize page so VM thread can do pseudo remote membar
   150         os::write_memory_serialize_page(thread);
   151       }
   152     }
   154     if (SafepointSynchronize::do_call_back()) {
   155       SafepointSynchronize::block(thread);
   156     }
   157     thread->set_thread_state(to);
   159     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   160   }
   162   // transition_and_fence must be used on any thread state transition
   163   // where there might not be a Java call stub on the stack, in
   164   // particular on Windows where the Structured Exception Handler is
   165   // set up in the call stub. os::write_memory_serialize_page() can
   166   // fault and we can't recover from it on Windows without a SEH in
   167   // place.
   168   static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
   169     assert(thread->thread_state() == from, "coming from wrong thread state");
   170     assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
   171     // Change to transition state (assumes total store ordering!  -Urs)
   172     thread->set_thread_state((JavaThreadState)(from + 1));
   174     // Make sure new state is seen by VM thread
   175     if (os::is_MP()) {
   176       if (UseMembar) {
   177         // Force a fence between the write above and read below
   178         OrderAccess::fence();
   179       } else {
   180         // Must use this rather than serialization page in particular on Windows
   181         InterfaceSupport::serialize_memory(thread);
   182       }
   183     }
   185     if (SafepointSynchronize::do_call_back()) {
   186       SafepointSynchronize::block(thread);
   187     }
   188     thread->set_thread_state(to);
   190     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   191   }
   193   // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
   194   // never block on entry to the VM; blocking here would break the code, since e.g.
   195   // preserved arguments have not been set up.
   196   static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
   197     assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
   198     thread->set_thread_state(to);
   199   }
   201   static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
   202     assert((to & 1) == 0, "odd numbers are transition states");
   203     assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
   204     // Change to transition state (assumes total store ordering!  -Urs)
   205     thread->set_thread_state(_thread_in_native_trans);
   207     // Make sure new state is seen by GC thread
   208     if (os::is_MP()) {
   209       if (UseMembar) {
   210         // Force a fence between the write above and read below
   211         OrderAccess::fence();
   212       } else {
   213         // Must use this rather than serialization page in particular on Windows
   214         InterfaceSupport::serialize_memory(thread);
   215       }
   216     }
   218     // We never install asynchronous exceptions when coming (back) in
   219     // to the runtime from native code because the runtime is not set
   220     // up to handle exceptions floating around at arbitrary points.
   221     if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
   222       JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
   224       // Clear unhandled oops anywhere where we could block, even if we don't.
   225       CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   226     }
   228     thread->set_thread_state(to);
   229   }
   230  protected:
   231    void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   232    void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   233    void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   234    void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
   235 };
   238 class ThreadInVMfromJava : public ThreadStateTransition {
   239  public:
   240   ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
   241     trans_from_java(_thread_in_vm);
   242   }
   243   ~ThreadInVMfromJava()  {
   244     trans(_thread_in_vm, _thread_in_Java);
   245     // Check for pending async exceptions or suspends.
   246     if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
   247   }
   248 };
   251 class ThreadInVMfromUnknown {
   252  private:
   253   JavaThread* _thread;
   254  public:
   255   ThreadInVMfromUnknown() : _thread(NULL) {
   256     Thread* t = Thread::current();
   257     if (t->is_Java_thread()) {
   258       JavaThread* t2 = (JavaThread*) t;
   259       if (t2->thread_state() == _thread_in_native) {
   260         _thread = t2;
   261         ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
   262         // Used to have a HandleMarkCleaner but that is dangerous as
   263         // it could free a handle in our (indirect, nested) caller.
   264         // We expect any handles will be short lived and figure we
   265         // don't need an actual HandleMark.
   266       }
   267     }
   268   }
   269   ~ThreadInVMfromUnknown()  {
   270     if (_thread) {
   271       ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
   272     }
   273   }
   274 };
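// Example (illustrative sketch only; 'example_event_callback' is a
// hypothetical function): ThreadInVMfromUnknown is intended for code that can
// be reached both from a JavaThread running native code and from non-Java
// threads; it performs a transition only when one is actually needed.
//
//   void example_event_callback() {
//     ThreadInVMfromUnknown __tiv;   // to _thread_in_vm only if we are a JavaThread in native
//     // ... touch VM-internal data structures ...
//   }                                // and back to _thread_in_native on scope exit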
   277 class ThreadInVMfromNative : public ThreadStateTransition {
   278  public:
   279   ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
   280     trans_from_native(_thread_in_vm);
   281   }
   282   ~ThreadInVMfromNative() {
   283     trans_and_fence(_thread_in_vm, _thread_in_native);
   284   }
   285 };
   288 class ThreadToNativeFromVM : public ThreadStateTransition {
   289  public:
   290   ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
   291     // We are leaving the VM at this point and going directly to native code.
   292     // Block, if we are in the middle of a safepoint synchronization.
   293     assert(!thread->owns_locks(), "must release all locks when leaving VM");
   294     thread->frame_anchor()->make_walkable(thread);
   295     trans_and_fence(_thread_in_vm, _thread_in_native);
   296     // Check for pending async exceptions or suspends.
   297     if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
   298   }
   300   ~ThreadToNativeFromVM() {
   301     trans_from_native(_thread_in_vm);
   302     // We don't need to clear_walkable because it will happen automagically when we return to java
   303   }
   304 };
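// Example (illustrative sketch only; 'example_call_out' and
// 'some_native_library_call' are hypothetical): ThreadToNativeFromVM brackets
// a call out of the VM, e.g. into an OS or JNI library routine, so the thread
// does not hold up safepoints while the foreign code runs. The caller must
// have released all VM locks, as the constructor asserts.
//
//   void example_call_out(JavaThread* thread) {
//     ThreadToNativeFromVM ttn(thread);   // _thread_in_vm -> _thread_in_native
//     some_native_library_call();         // safepoints can proceed meanwhile
//   }                                     // back to _thread_in_vm on scope exit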
   307 class ThreadBlockInVM : public ThreadStateTransition {
   308  public:
   309   ThreadBlockInVM(JavaThread *thread)
   310   : ThreadStateTransition(thread) {
   311     // Once we are blocked, the VM expects the stack to be walkable
   312     thread->frame_anchor()->make_walkable(thread);
   313     trans_and_fence(_thread_in_vm, _thread_blocked);
   314   }
   315   ~ThreadBlockInVM() {
   316     trans_and_fence(_thread_blocked, _thread_in_vm);
   317     // We don't need to clear_walkable because it will happen automagically when we return to java
   318   }
   319 };
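// Example (illustrative sketch only; 'example_blocking_wait' and
// 'wait_for_external_event' are hypothetical): ThreadBlockInVM brackets a
// potentially long blocking operation performed while in the VM, so the
// safepoint code sees the thread as blocked with a walkable stack instead of
// waiting for it.
//
//   void example_blocking_wait(JavaThread* thread) {
//     ThreadBlockInVM tbivm(thread);   // _thread_in_vm -> _thread_blocked
//     wait_for_external_event();       // safepoints may complete while we block
//   }                                  // _thread_blocked -> _thread_in_vm on exit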
   322 // This special transition class is only used to prevent asynchronous exceptions
   323 // from being installed on vm exit in situations where we can't tolerate them.
   324 // See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
   325 class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
   326  public:
   327   ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
   328     trans_from_java(_thread_in_vm);
   329   }
   330   ~ThreadInVMfromJavaNoAsyncException()  {
   331     trans(_thread_in_vm, _thread_in_Java);
   332     // NOTE: We do not check for pending async exceptions.
   333     // If we did and moved the pending async exception over into the
   334     // pending exception field, we would need to deopt (currently C2
   335     // only). However, to do so would require that we transition back
   336     // to the _thread_in_vm state. Instead we postpone the handling of
   337     // the async exception.
   339     // Check for pending suspends only.
   340     if (_thread->has_special_runtime_exit_condition())
   341       _thread->handle_special_runtime_exit_condition(false);
   342   }
   343 };
   345 // Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
   346 // Can be used to verify properties on enter/exit of the VM.
   348 #ifdef ASSERT
   349 class VMEntryWrapper {
   350  public:
   351   VMEntryWrapper() {
   352     if (VerifyLastFrame) {
   353       InterfaceSupport::verify_last_frame();
   354     }
   355   }
   357   ~VMEntryWrapper() {
   358     InterfaceSupport::check_gc_alot();
   359     if (WalkStackALot) {
   360       InterfaceSupport::walk_stack();
   361     }
   362 #ifdef ENABLE_ZAP_DEAD_LOCALS
   363     if (ZapDeadLocalsOld) {
   364       InterfaceSupport::zap_dead_locals_old();
   365     }
   366 #endif
   367 #ifdef COMPILER2
   368     // This option is not used by Compiler 1
   369     if (StressDerivedPointers) {
   370       InterfaceSupport::stress_derived_pointers();
   371     }
   372 #endif
   373     if (DeoptimizeALot || DeoptimizeRandom) {
   374       InterfaceSupport::deoptimizeAll();
   375     }
   376     if (ZombieALot) {
   377       InterfaceSupport::zombieAll();
   378     }
   379     if (UnlinkSymbolsALot) {
   380       InterfaceSupport::unlinkSymbols();
   381     }
   382     // do verification AFTER potential deoptimization
   383     if (VerifyStack) {
   384       InterfaceSupport::verify_stack();
   385     }
   387   }
   388 };
   391 class VMNativeEntryWrapper {
   392  public:
   393   VMNativeEntryWrapper() {
   394     if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
   395   }
   397   ~VMNativeEntryWrapper() {
   398     if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
   399   }
   400 };
   402 #endif
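// For illustration (a hedged example, assuming a debug build of the VM): the
// checks above only run in ASSERT builds, and most are driven by develop
// flags such as ScavengeALot, WalkStackALot, DeoptimizeALot and VerifyStack,
// e.g.
//
//   java -XX:+ScavengeALot -XX:+DeoptimizeALot -XX:+VerifyStack ...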
   405 // VM-internal runtime interface support
   407 #ifdef ASSERT
   409 class RuntimeHistogramElement : public HistogramElement {
   410   public:
   411    RuntimeHistogramElement(const char* name);
   412 };
   414 #define TRACE_CALL(result_type, header)                            \
   415   InterfaceSupport::_number_of_calls++;                            \
   416   if (TraceRuntimeCalls)                                           \
   417     InterfaceSupport::trace(#result_type, #header);                \
   418   if (CountRuntimeCalls) {                                         \
   419     static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
   420     if (e != NULL) e->increment_count();                           \
   421   }
   422 #else
   423 #define TRACE_CALL(result_type, header)                            \
   424   /* do nothing */
   425 #endif
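// For illustration, the ASSERT-build expansion of
// TRACE_CALL(void, example_entry(JavaThread* thread)) is roughly the
// following ('example_entry' is a hypothetical signature):
//
//   InterfaceSupport::_number_of_calls++;
//   if (TraceRuntimeCalls)
//     InterfaceSupport::trace("void", "example_entry(JavaThread* thread)");
//   if (CountRuntimeCalls) {
//     static RuntimeHistogramElement* e = new RuntimeHistogramElement("example_entry(JavaThread* thread)");
//     if (e != NULL) e->increment_count();
//   }
//
// i.e. each call site that enables CountRuntimeCalls gets one lazily
// allocated histogram element keyed by the stringized entry signature.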
   428 // LEAF routines do not lock, GC or throw exceptions
   430 #define __LEAF(result_type, header)                                  \
   431   TRACE_CALL(result_type, header)                                    \
   432   debug_only(NoHandleMark __hm;)                                     \
   433   /* begin of body */
   436 // ENTRY routines may lock, GC and throw exceptions
   438 #define __ENTRY(result_type, header, thread)                         \
   439   TRACE_CALL(result_type, header)                                    \
   440   HandleMarkCleaner __hm(thread);                                    \
   441   Thread* THREAD = thread;                                           \
   442   /* begin of body */
   445 // QUICK_ENTRY routines behave like ENTRY but without a handle mark
   447 #define __QUICK_ENTRY(result_type, header, thread)                   \
   448   TRACE_CALL(result_type, header)                                    \
   449   debug_only(NoHandleMark __hm;)                                     \
   450   Thread* THREAD = thread;                                           \
   451   /* begin of body */
   454 // Definitions for IRT (Interpreter Runtime)
   455 // (thread is an argument passed in to all these routines)
   457 #define IRT_ENTRY(result_type, header)                               \
   458   result_type header {                                               \
   459     ThreadInVMfromJava __tiv(thread);                                \
   460     __ENTRY(result_type, header, thread)                             \
   461     debug_only(VMEntryWrapper __vew;)
   464 #define IRT_LEAF(result_type, header)                                \
   465   result_type header {                                               \
   466     __LEAF(result_type, header)                                      \
   467     debug_only(No_Safepoint_Verifier __nspv(true);)
   470 #define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
   471   result_type header {                                               \
   472     ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   473     __ENTRY(result_type, header, thread)                             \
   474     debug_only(VMEntryWrapper __vew;)
   476 // Another special case for nmethod_entry_point so the nmethod that the
   477 // interpreter is about to branch to doesn't get flushed before we
   478 // branch to its interpreter_entry_point.  Skip stress testing here too.
   479 // Also we don't allow async exceptions because it is just too painful.
   480 #define IRT_ENTRY_FOR_NMETHOD(result_type, header)                   \
   481   result_type header {                                               \
   482     nmethodLocker _nmlock(nm);                                       \
   483     ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   484     __ENTRY(result_type, header, thread)
   486 #define IRT_END }
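// Example (illustrative sketch only; the method and its arguments are
// hypothetical): an interpreter runtime entry written with these macros looks
// roughly like the following; note that the macros assume the JavaThread*
// parameter is named 'thread'.
//
//   IRT_ENTRY(void, InterpreterRuntime::example_entry(JavaThread* thread, oopDesc* obj))
//     Handle h(THREAD, obj);   // THREAD is bound by __ENTRY
//     // the body may lock, trigger GC, and throw exceptions
//   IRT_END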
   489 // Definitions for JRT (Java (Compiler/Shared) Runtime)
   491 #define JRT_ENTRY(result_type, header)                               \
   492   result_type header {                                               \
   493     ThreadInVMfromJava __tiv(thread);                                \
   494     __ENTRY(result_type, header, thread)                             \
   495     debug_only(VMEntryWrapper __vew;)
   498 #define JRT_LEAF(result_type, header)                                \
   499   result_type header {                                               \
   500   __LEAF(result_type, header)                                        \
   501   debug_only(JRT_Leaf_Verifier __jlv;)
   504 #define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
   505   result_type header {                                               \
   506     ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   507     __ENTRY(result_type, header, thread)                             \
   508     debug_only(VMEntryWrapper __vew;)
   510 // Same as JRT_ENTRY but allows for a return value after the safepoint
   511 // to get back into Java from the VM
   512 #define JRT_BLOCK_ENTRY(result_type, header)                         \
   513   result_type header {                                               \
   514     TRACE_CALL(result_type, header)                                  \
   515     HandleMarkCleaner __hm(thread);
   517 #define JRT_BLOCK                                                    \
   518     {                                                                \
   519     ThreadInVMfromJava __tiv(thread);                                \
   520     Thread* THREAD = thread;                                         \
   521     debug_only(VMEntryWrapper __vew;)
   523 #define JRT_BLOCK_END }
   525 #define JRT_END }
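// Example (illustrative sketch only; the entry and helper names are
// hypothetical): JRT_BLOCK_ENTRY defers the Java->VM transition so that a
// result can be returned after the transition back out of the VM:
//
//   JRT_BLOCK_ENTRY(address, SharedRuntime::example_resolve(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK
//       // runs in _thread_in_vm; may lock, trigger GC, and throw exceptions
//       result = example_resolve_helper(thread);
//     JRT_BLOCK_END
//     return result;   // executed after the transition back toward Java
//   JRT_END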
   527 // Definitions for JNI
   529 #define JNI_ENTRY(result_type, header)                               \
   530     JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
   531     WeakPreserveExceptionMark __wem(thread);
   533 #define JNI_ENTRY_NO_PRESERVE(result_type, header)             \
   534 extern "C" {                                                         \
   535   result_type JNICALL header {                                \
   536     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   537     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   538     ThreadInVMfromNative __tiv(thread);                              \
   539     debug_only(VMNativeEntryWrapper __vew;)                          \
   540     __ENTRY(result_type, header, thread)
   543 // Ensure that the VMNativeEntryWrapper constructor, which can cause
   544 // a GC, is called outside the NoHandleMark (set via __QUICK_ENTRY).
   545 #define JNI_QUICK_ENTRY(result_type, header)                         \
   546 extern "C" {                                                         \
   547   result_type JNICALL header {                                \
   548     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   549     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   550     ThreadInVMfromNative __tiv(thread);                              \
   551     debug_only(VMNativeEntryWrapper __vew;)                          \
   552     __QUICK_ENTRY(result_type, header, thread)
   555 #define JNI_LEAF(result_type, header)                                \
   556 extern "C" {                                                         \
   557   result_type JNICALL header {                                \
   558     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   559     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   560     __LEAF(result_type, header)
   563 // Close the routine and the extern "C"
   564 #define JNI_END } }
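// Example (illustrative sketch only; 'jni_ExampleFunction' is hypothetical):
// a JNI entry receives the JNIEnv* as 'env', from which the macros recover
// the JavaThread:
//
//   JNI_ENTRY(jobject, jni_ExampleFunction(JNIEnv* env, jobject obj))
//     oop o = JNIHandles::resolve(obj);        // body may lock, GC, and throw
//     return JNIHandles::make_local(env, o);
//   JNI_END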
   568 // Definitions for JVM
   570 #define JVM_ENTRY(result_type, header)                               \
   571 extern "C" {                                                         \
   572   result_type JNICALL header {                                       \
   573     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   574     ThreadInVMfromNative __tiv(thread);                              \
   575     debug_only(VMNativeEntryWrapper __vew;)                          \
   576     __ENTRY(result_type, header, thread)
   579 #define JVM_ENTRY_NO_ENV(result_type, header)                        \
   580 extern "C" {                                                         \
   581   result_type JNICALL header {                                       \
   582     JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
   583     ThreadInVMfromNative __tiv(thread);                              \
   584     debug_only(VMNativeEntryWrapper __vew;)                          \
   585     __ENTRY(result_type, header, thread)
   588 #define JVM_QUICK_ENTRY(result_type, header)                         \
   589 extern "C" {                                                         \
   590   result_type JNICALL header {                                       \
   591     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   592     ThreadInVMfromNative __tiv(thread);                              \
   593     debug_only(VMNativeEntryWrapper __vew;)                          \
   594     __QUICK_ENTRY(result_type, header, thread)
   597 #define JVM_LEAF(result_type, header)                                \
   598 extern "C" {                                                         \
   599   result_type JNICALL header {                                       \
   600     VM_Exit::block_if_vm_exited();                                   \
   601     __LEAF(result_type, header)
   604 #define JVM_END } }
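// Example (illustrative sketch only; 'JVM_ExampleCounter' is hypothetical):
// JVM_* entries follow the same pattern, with the JNIEnv* parameter first:
//
//   JVM_ENTRY(jlong, JVM_ExampleCounter(JNIEnv* env, jclass ignored))
//     return os::javaTimeMillis();   // body runs in _thread_in_vm
//   JVM_END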
   606 #endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
