src/share/vm/runtime/interfaceSupport.hpp

author       prr
date         Tue, 11 May 2010 14:35:43 -0700
changeset    1840 fb57d4cf76c2
parent       435 a61af66fc99e
child        1907 c18cbe5936b8
permissions  -rw-r--r--

6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed

     1 /*
     2  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // Wrapper for all entry points to the virtual machine.
    26 // The HandleMarkCleaner is a faster version of HandleMark.
    27 // It relies on the fact that there is a HandleMark further
    28 // down the stack (in JavaCalls::call_helper), and just resets
    29 // to the saved values in that HandleMark.
    31 class HandleMarkCleaner: public StackObj {
    32  private:
    33   Thread* _thread;
    34  public:
    35   HandleMarkCleaner(Thread* thread) {
    36     _thread = thread;
    37     _thread->last_handle_mark()->push();
    38   }
    39   ~HandleMarkCleaner() {
    40     _thread->last_handle_mark()->pop_and_restore();
    41   }
    43  private:
    44   inline void* operator new(size_t size, void* ptr) {
    45     return ptr;
    46   }
    47 };
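// Illustrative sketch (not part of the original source): the __ENTRY macro
// further down in this file places a HandleMarkCleaner at the top of each VM
// entry point; 'some_oop' below is a hypothetical placeholder value.
//
//   HandleMarkCleaner __hm(thread);   // remember the enclosing HandleMark's position
//   Handle h(thread, some_oop);       // handles allocated by the entry point body
//   // ~HandleMarkCleaner pops back to the remembered position, releasing them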
    49 // InterfaceSupport provides functionality used by the __LEAF and __ENTRY
    50 // macros. These macros are used to guard entry points into the VM and
    51 // perform checks when leaving the VM.
    54 class InterfaceSupport: AllStatic {
    55 # ifdef ASSERT
    56  public:
    57   static long _scavenge_alot_counter;
    58   static long _fullgc_alot_counter;
    59   static long _number_of_calls;
    60   static long _fullgc_alot_invocation;
    62   // tracing
    63   static void trace(const char* result_type, const char* header);
    65   // Helper methods used to implement +ScavengeALot and +FullGCALot
    66   static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
    67   static void gc_alot();
    69   static void walk_stack_from(vframe* start_vf);
    70   static void walk_stack();
    72 # ifdef ENABLE_ZAP_DEAD_LOCALS
    73   static void zap_dead_locals_old();
    74 # endif
    76   static void zombieAll();
    77   static void deoptimizeAll();
    78   static void stress_derived_pointers();
    79   static void verify_stack();
    80   static void verify_last_frame();
    81 # endif
    83  public:
    84   // OS dependent stuff
    85   #include "incls/_interfaceSupport_pd.hpp.incl"
    86 };
    89 // Basic class for all thread transition classes.
    91 class ThreadStateTransition : public StackObj {
    92  protected:
    93   JavaThread* _thread;
    94  public:
    95   ThreadStateTransition(JavaThread *thread) {
    96     _thread = thread;
    97     assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
    98   }
   100   // Change the thread state in a manner that allows the safepoint code to detect the change.
   101   // Time-critical: called on exit from every runtime routine
   102   static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
   103     assert(from != _thread_in_Java, "use transition_from_java");
   104     assert(from != _thread_in_native, "use transition_from_native");
   105     assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
   106     assert(thread->thread_state() == from, "coming from wrong thread state");
   107     // Change to transition state (assumes total store ordering!  -Urs)
   108     thread->set_thread_state((JavaThreadState)(from + 1));
   110     // Make sure new state is seen by VM thread
   111     if (os::is_MP()) {
   112       if (UseMembar) {
   113         // Force a fence between the write above and read below
   114         OrderAccess::fence();
   115       } else {
   116         // store to serialize page so VM thread can do pseudo remote membar
   117         os::write_memory_serialize_page(thread);
   118       }
   119     }
   121     if (SafepointSynchronize::do_call_back()) {
   122       SafepointSynchronize::block(thread);
   123     }
   124     thread->set_thread_state(to);
   126     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   127   }
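  // Illustrative note (not in the original source): the even/odd encoding the
  // asserts above rely on comes from JavaThreadState in globalDefinitions.hpp,
  // where each stable state is even and its "_trans" state is that value plus
  // one. For example (values assumed from that file):
  //
  //   transition(thread, _thread_in_vm /* 6 */, _thread_in_Java /* 8 */);
  //
  // first publishes _thread_in_vm_trans (7); the VM thread treats the odd
  // intermediate state as unsafe until the final even state has been stored.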
   129   // transition_and_fence must be used on any thread state transition
   130   // where there might not be a Java call stub on the stack, in
   131   // particular on Windows where the Structured Exception Handler is
   132   // set up in the call stub. os::write_memory_serialize_page() can
   133   // fault and we can't recover from it on Windows without a SEH in
   134   // place.
   135   static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
   136     assert(thread->thread_state() == from, "coming from wrong thread state");
   137     assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
   138     // Change to transition state (assumes total store ordering!  -Urs)
   139     thread->set_thread_state((JavaThreadState)(from + 1));
   141     // Make sure new state is seen by VM thread
   142     if (os::is_MP()) {
   143       if (UseMembar) {
   144         // Force a fence between the write above and read below
   145         OrderAccess::fence();
   146       } else {
   147         // Must use this rather than serialization page in particular on Windows
   148         InterfaceSupport::serialize_memory(thread);
   149       }
   150     }
   152     if (SafepointSynchronize::do_call_back()) {
   153       SafepointSynchronize::block(thread);
   154     }
   155     thread->set_thread_state(to);
   157     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   158   }
   160   // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
   161   // never block on entry to the VM. Blocking here would break the code, since
   162   // e.g. preserve arguments have not been set up yet.
   163   static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
   164     assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
   165     thread->set_thread_state(to);
   166   }
   168   static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
   169     assert((to & 1) == 0, "odd numbers are transition states");
   170     assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
   171     // Change to transition state (assumes total store ordering!  -Urs)
   172     thread->set_thread_state(_thread_in_native_trans);
   174     // Make sure new state is seen by GC thread
   175     if (os::is_MP()) {
   176       if (UseMembar) {
   177         // Force a fence between the write above and read below
   178         OrderAccess::fence();
   179       } else {
   180         // Must use this rather than serialization page in particular on Windows
   181         InterfaceSupport::serialize_memory(thread);
   182       }
   183     }
   185     // We never install asynchronous exceptions when coming (back) in
   186     // to the runtime from native code because the runtime is not set
   187     // up to handle exceptions floating around at arbitrary points.
   188     if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
   189       JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
   191       // Clear unhandled oops anywhere where we could block, even if we don't.
   192       CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   193     }
   195     thread->set_thread_state(to);
   196   }
   197  protected:
   198    void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   199    void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   200    void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   201    void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
   202 };
   205 class ThreadInVMfromJava : public ThreadStateTransition {
   206  public:
   207   ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
   208     trans_from_java(_thread_in_vm);
   209   }
   210   ~ThreadInVMfromJava()  {
   211     trans(_thread_in_vm, _thread_in_Java);
   212     // Check for pending async exceptions or suspends.
   213     if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
   214   }
   215 };
   218 class ThreadInVMfromUnknown {
   219  private:
   220   JavaThread* _thread;
   221  public:
   222   ThreadInVMfromUnknown() : _thread(NULL) {
   223     Thread* t = Thread::current();
   224     if (t->is_Java_thread()) {
   225       JavaThread* t2 = (JavaThread*) t;
   226       if (t2->thread_state() == _thread_in_native) {
   227         _thread = t2;
   228         ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
   229         // Used to have a HandleMarkCleaner but that is dangerous as
   230         // it could free a handle in our (indirect, nested) caller.
   231         // We expect any handles will be short lived and figure we
   232         // don't need an actual HandleMark.
   233       }
   234     }
   235   }
   236   ~ThreadInVMfromUnknown()  {
   237     if (_thread) {
   238       ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
   239     }
   240   }
   241 };
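// Illustrative sketch (not in the original source) of how this wrapper is
// typically used: a helper that may be reached either from VM code or from
// native code simply declares one and gets a transition only when needed.
// 'report_from_anywhere' is a hypothetical example function.
//
//   void report_from_anywhere(const char* msg) {
//     ThreadInVMfromUnknown __tiv;     // enters _thread_in_vm only if the
//                                      // current thread was in native
//     tty->print_cr("%s", msg);        // work that expects to run inside the VM
//   }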
   244 class ThreadInVMfromNative : public ThreadStateTransition {
   245  public:
   246   ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
   247     trans_from_native(_thread_in_vm);
   248   }
   249   ~ThreadInVMfromNative() {
   250     trans_and_fence(_thread_in_vm, _thread_in_native);
   251   }
   252 };
   255 class ThreadToNativeFromVM : public ThreadStateTransition {
   256  public:
   257   ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
   258     // We are leaving the VM at this point and going directly to native code.
   259     // Block, if we are in the middle of a safepoint synchronization.
   260     assert(!thread->owns_locks(), "must release all locks when leaving VM");
   261     thread->frame_anchor()->make_walkable(thread);
   262     trans_and_fence(_thread_in_vm, _thread_in_native);
   263     // Check for pending async exceptions or suspends.
   264     if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
   265   }
   267   ~ThreadToNativeFromVM() {
   268     trans_from_native(_thread_in_vm);
   269     // We don't need to clear_walkable because it will happen automagically when we return to java
   270   }
   271 };
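// Illustrative sketch (not in the original source): used when VM code has to
// call out to native code that may block or run for a long time.
// 'call_some_native_library' is a hypothetical placeholder for a real call.
//
//   void call_out(JavaThread* thread) {
//     ThreadToNativeFromVM ttnfv(thread);   // _thread_in_vm -> _thread_in_native
//     call_some_native_library();           // safepoints need not wait for us
//   }                                       // destructor: back to _thread_in_vm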
   274 class ThreadBlockInVM : public ThreadStateTransition {
   275  public:
   276   ThreadBlockInVM(JavaThread *thread)
   277   : ThreadStateTransition(thread) {
   278     // Once we are blocked, the VM expects the stack to be walkable.
   279     thread->frame_anchor()->make_walkable(thread);
   280     trans_and_fence(_thread_in_vm, _thread_blocked);
   281   }
   282   ~ThreadBlockInVM() {
   283     trans_and_fence(_thread_blocked, _thread_in_vm);
   284     // We don't need to clear_walkable because it will happen automagically when we return to java
   285   }
   286 };
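// Illustrative sketch (not in the original source): wrapping a blocking
// operation so that safepoints can proceed while this thread waits.
// 'wait_on_os_event' is a hypothetical placeholder for a real blocking call.
//
//   void block_for_event(JavaThread* jt) {
//     ThreadBlockInVM tbivm(jt);       // _thread_in_vm -> _thread_blocked
//     wait_on_os_event();              // safepoints may complete while we wait
//   }                                  // destructor: back to _thread_in_vm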
   289 // This special transition class is only used to prevent asynchronous exceptions
   290 // from being installed on vm exit in situations where we can't tolerate them.
   291 // See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
   292 class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
   293  public:
   294   ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
   295     trans_from_java(_thread_in_vm);
   296   }
   297   ~ThreadInVMfromJavaNoAsyncException()  {
   298     trans(_thread_in_vm, _thread_in_Java);
   299     // NOTE: We do not check for pending async exceptions.
   300     // If we did and moved the pending async exception over into the
   301     // pending exception field, we would need to deopt (currently C2
   302     // only). However, to do so would require that we transition back
   303     // to the _thread_in_vm state. Instead we postpone the handling of
   304     // the async exception.
   306     // Check for pending suspends only.
   307     if (_thread->has_special_runtime_exit_condition())
   308       _thread->handle_special_runtime_exit_condition(false);
   309   }
   310 };
   312 // Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
   313 // Can be used to verify properties on enter/exit of the VM.
   315 #ifdef ASSERT
   316 class VMEntryWrapper {
   317  public:
   318   VMEntryWrapper() {
   319     if (VerifyLastFrame) {
   320       InterfaceSupport::verify_last_frame();
   321     }
   322   }
   324   ~VMEntryWrapper() {
   325     InterfaceSupport::check_gc_alot();
   326     if (WalkStackALot) {
   327       InterfaceSupport::walk_stack();
   328     }
   329 #ifdef ENABLE_ZAP_DEAD_LOCALS
   330     if (ZapDeadLocalsOld) {
   331       InterfaceSupport::zap_dead_locals_old();
   332     }
   333 #endif
   334 #ifdef COMPILER2
   335     // This option is not used by Compiler 1
   336     if (StressDerivedPointers) {
   337       InterfaceSupport::stress_derived_pointers();
   338     }
   339 #endif
   340     if (DeoptimizeALot || DeoptimizeRandom) {
   341       InterfaceSupport::deoptimizeAll();
   342     }
   343     if (ZombieALot) {
   344       InterfaceSupport::zombieAll();
   345     }
   346     // do verification AFTER potential deoptimization
   347     if (VerifyStack) {
   348       InterfaceSupport::verify_stack();
   349     }
   351   }
   352 };
   355 class VMNativeEntryWrapper {
   356  public:
   357   VMNativeEntryWrapper() {
   358     if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
   359   }
   361   ~VMNativeEntryWrapper() {
   362     if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
   363   }
   364 };
   366 #endif
   369 // VM-internal runtime interface support
   371 #ifdef ASSERT
   373 class RuntimeHistogramElement : public HistogramElement {
   374   public:
   375    RuntimeHistogramElement(const char* name);
   376 };
   378 #define TRACE_CALL(result_type, header)                            \
   379   InterfaceSupport::_number_of_calls++;                            \
   380   if (TraceRuntimeCalls)                                           \
   381     InterfaceSupport::trace(#result_type, #header);                \
   382   if (CountRuntimeCalls) {                                         \
   383     static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
   384     if (e != NULL) e->increment_count();                           \
   385   }
   386 #else
   387 #define TRACE_CALL(result_type, header)                            \
   388   /* do nothing */
   389 #endif
   392 // LEAF routines do not lock, GC or throw exceptions
   394 #define __LEAF(result_type, header)                                  \
   395   TRACE_CALL(result_type, header)                                    \
   396   debug_only(NoHandleMark __hm;)                                     \
   397   /* begin of body */
   400 // ENTRY routines may lock, GC and throw exceptions
   402 #define __ENTRY(result_type, header, thread)                         \
   403   TRACE_CALL(result_type, header)                                    \
   404   HandleMarkCleaner __hm(thread);                                    \
   405   Thread* THREAD = thread;                                           \
   406   /* begin of body */
   409 // QUICK_ENTRY routines behave like ENTRY but without a handle mark
   411 #define __QUICK_ENTRY(result_type, header, thread)                   \
   412   TRACE_CALL(result_type, header)                                    \
   413   debug_only(NoHandleMark __hm;)                                     \
   414   Thread* THREAD = thread;                                           \
   415   /* begin of body */
   418 // Definitions for IRT (Interpreter Runtime)
   419 // (thread is an argument passed in to all these routines)
   421 #define IRT_ENTRY(result_type, header)                               \
   422   result_type header {                                               \
   423     ThreadInVMfromJava __tiv(thread);                                \
   424     __ENTRY(result_type, header, thread)                             \
   425     debug_only(VMEntryWrapper __vew;)
   428 #define IRT_LEAF(result_type, header)                                \
   429   result_type header {                                               \
   430     __LEAF(result_type, header)                                      \
   431     debug_only(No_Safepoint_Verifier __nspv(true);)
   434 #define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
   435   result_type header {                                               \
   436     ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   437     __ENTRY(result_type, header, thread)                             \
   438     debug_only(VMEntryWrapper __vew;)
   440 // Another special case, for nmethod_entry_point, so that the nmethod the
   441 // interpreter is about to branch to doesn't get flushed before we
   442 // branch to its interpreter_entry_point.  Skip stress testing here too.
   443 // Also we don't allow async exceptions because it is just too painful.
   444 #define IRT_ENTRY_FOR_NMETHOD(result_type, header)                   \
   445   result_type header {                                               \
   446     nmethodLocker _nmlock(nm);                                       \
   447     ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   448     __ENTRY(result_type, header, thread)
   450 #define IRT_END }
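// Illustrative sketch (not in the original source) of an interpreter runtime
// entry written with these macros; 'InterpreterRuntime::example_entry' is a
// hypothetical function, but the shape matches real IRT entries, which take
// the current JavaThread as a parameter literally named 'thread'.
//
//   IRT_ENTRY(void, InterpreterRuntime::example_entry(JavaThread* thread, oopDesc* obj))
//     Handle h(thread, obj);           // safe: handle area is managed by __ENTRY
//     // body runs in _thread_in_vm and may lock, GC and throw exceptions
//   IRT_END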
   453 // Definitions for JRT (Java (Compiler/Shared) Runtime)
   455 #define JRT_ENTRY(result_type, header)                               \
   456   result_type header {                                               \
   457     ThreadInVMfromJava __tiv(thread);                                \
   458     __ENTRY(result_type, header, thread)                             \
   459     debug_only(VMEntryWrapper __vew;)
   462 #define JRT_LEAF(result_type, header)                                \
   463   result_type header {                                               \
   464   __LEAF(result_type, header)                                        \
   465   debug_only(JRT_Leaf_Verifier __jlv;)
   468 #define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
   469   result_type header {                                               \
   470     ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   471     __ENTRY(result_type, header, thread)                             \
   472     debug_only(VMEntryWrapper __vew;)
   474 // Same as JRT_ENTRY but allows for a return value after the safepoint
   475 // taken on the way back into Java from the VM.
   476 #define JRT_BLOCK_ENTRY(result_type, header)                         \
   477   result_type header {                                               \
   478     TRACE_CALL(result_type, header)                                  \
   479     HandleMarkCleaner __hm(thread);
   481 #define JRT_BLOCK                                                    \
   482     {                                                                \
   483     ThreadInVMfromJava __tiv(thread);                                \
   484     Thread* THREAD = thread;                                         \
   485     debug_only(VMEntryWrapper __vew;)
   487 #define JRT_BLOCK_END }
   489 #define JRT_END }
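// Illustrative sketch (not in the original source) of the JRT_BLOCK_ENTRY /
// JRT_BLOCK pattern; 'SampleRuntime::compute' is a hypothetical function.
//
//   JRT_BLOCK_ENTRY(jint, SampleRuntime::compute(JavaThread* thread, jint x))
//     jint result = 0;
//     JRT_BLOCK
//       // inside the block we are _thread_in_vm and may lock, GC and throw
//       result = x + 1;
//     JRT_BLOCK_END
//     // after JRT_BLOCK_END we are back in the Java-visible state; the return
//     // value can be produced here, after any safepoint taken on block exit
//     return result;
//   JRT_END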
   491 // Definitions for JNI
   493 #define JNI_ENTRY(result_type, header)                               \
   494     JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
   495     WeakPreserveExceptionMark __wem(thread);
   497 #define JNI_ENTRY_NO_PRESERVE(result_type, header)             \
   498 extern "C" {                                                         \
   499   result_type JNICALL header {                                \
   500     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   501     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   502     ThreadInVMfromNative __tiv(thread);                              \
   503     debug_only(VMNativeEntryWrapper __vew;)                          \
   504     __ENTRY(result_type, header, thread)
   507 // Ensure that the VMNativeEntryWrapper constructor, which can cause
   508 // a GC, is called outside the NoHandleMark (set via __QUICK_ENTRY).
   509 #define JNI_QUICK_ENTRY(result_type, header)                         \
   510 extern "C" {                                                         \
   511   result_type JNICALL header {                                \
   512     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   513     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   514     ThreadInVMfromNative __tiv(thread);                              \
   515     debug_only(VMNativeEntryWrapper __vew;)                          \
   516     __QUICK_ENTRY(result_type, header, thread)
   519 #define JNI_LEAF(result_type, header)                                \
   520 extern "C" {                                                         \
   521   result_type JNICALL header {                                \
   522     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   523     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   524     __LEAF(result_type, header)
   527 // Close the routine and the extern "C"
   528 #define JNI_END } }
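// Illustrative sketch (not in the original source) of a JNI implementation
// function written with these macros; 'jni_ExampleGetVersion' is hypothetical.
// The parameter must be named 'env' so the macro can recover the JavaThread.
//
//   JNI_ENTRY(jint, jni_ExampleGetVersion(JNIEnv* env))
//     // runs in _thread_in_vm with a WeakPreserveExceptionMark in place
//     return 0x00010006;
//   JNI_END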
   532 // Definitions for JVM
   534 #define JVM_ENTRY(result_type, header)                               \
   535 extern "C" {                                                         \
   536   result_type JNICALL header {                                       \
   537     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   538     ThreadInVMfromNative __tiv(thread);                              \
   539     debug_only(VMNativeEntryWrapper __vew;)                          \
   540     __ENTRY(result_type, header, thread)
   543 #define JVM_ENTRY_NO_ENV(result_type, header)                        \
   544 extern "C" {                                                         \
   545   result_type JNICALL header {                                       \
   546     JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
   547     ThreadInVMfromNative __tiv(thread);                              \
   548     debug_only(VMNativeEntryWrapper __vew;)                          \
   549     __ENTRY(result_type, header, thread)
   552 #define JVM_QUICK_ENTRY(result_type, header)                         \
   553 extern "C" {                                                         \
   554   result_type JNICALL header {                                       \
   555     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   556     ThreadInVMfromNative __tiv(thread);                              \
   557     debug_only(VMNativeEntryWrapper __vew;)                          \
   558     __QUICK_ENTRY(result_type, header, thread)
   561 #define JVM_LEAF(result_type, header)                                \
   562 extern "C" {                                                         \
   563   result_type JNICALL header {                                       \
   564     VM_Exit::block_if_vm_exited();                                   \
   565     __LEAF(result_type, header)
   568 #define JVM_END } }
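// Illustrative sketch (not in the original source) of a JVM_* native entry
// written with these macros; 'JVM_ExampleCounter' is a hypothetical function.
//
//   JVM_ENTRY(jint, JVM_ExampleCounter(JNIEnv* env, jclass ignored))
//     // runs in _thread_in_vm; THREAD and a HandleMarkCleaner are set up
//     return 42;
//   JVM_END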
