src/share/vm/runtime/interfaceSupport.hpp

changeset 0:f90c822e73f8 (child 6876:710a3c8b516e)
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/interfaceSupport.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,597 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
    1.29 +#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
    1.30 +
    1.31 +#include "memory/gcLocker.hpp"
    1.32 +#include "runtime/handles.inline.hpp"
    1.33 +#include "runtime/mutexLocker.hpp"
    1.34 +#include "runtime/orderAccess.hpp"
    1.35 +#include "runtime/os.hpp"
    1.36 +#include "runtime/safepoint.hpp"
    1.37 +#include "runtime/thread.inline.hpp"
    1.38 +#include "runtime/vmThread.hpp"
    1.39 +#include "utilities/globalDefinitions.hpp"
    1.40 +#include "utilities/preserveException.hpp"
    1.41 +#include "utilities/top.hpp"
    1.42 +
    1.43 +// Wrapper for all entry points to the virtual machine.
    1.44 +// The HandleMarkCleaner is a faster version of HandleMark.
    1.45 +// It relies on the fact that there is a HandleMark further
    1.46 +// down the stack (in JavaCalls::call_helper), and just resets
    1.47 +// to the saved values in that HandleMark.
    1.48 +
    1.49 +class HandleMarkCleaner: public StackObj {
    1.50 + private:
    1.51 +  Thread* _thread;
    1.52 + public:
    1.53 +  HandleMarkCleaner(Thread* thread) {
    1.54 +    _thread = thread;
    1.55 +    _thread->last_handle_mark()->push();
    1.56 +  }
    1.57 +  ~HandleMarkCleaner() {
    1.58 +    _thread->last_handle_mark()->pop_and_restore();
    1.59 +  }
    1.60 +
    1.61 + private:
    1.62 +  inline void* operator new(size_t size, void* ptr) throw() {
    1.63 +    return ptr;
    1.64 +  }
    1.65 +};
    1.66 +
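// Usage sketch (illustrative, derived from the macros later in this file):
// HandleMarkCleaner is not instantiated by hand. VM_ENTRY_BASE below plants
// one at the top of every entry point, roughly as if each entry began with:
//
//   HandleMarkCleaner __hm(thread);   // pops back to the last HandleMark on scope exit
//   Thread* THREAD = thread;          // lets the body use the CHECK/THROW exception macros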
    1.67 +// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
    1.68 +// VM_ENTRY_BASE macros. These macros are used to guard entry points into
     1.69 +// the VM and to perform checks when leaving the VM.
    1.70 +
    1.71 +
    1.72 +class InterfaceSupport: AllStatic {
    1.73 +# ifdef ASSERT
    1.74 + public:
    1.75 +  static long _scavenge_alot_counter;
    1.76 +  static long _fullgc_alot_counter;
    1.77 +  static long _number_of_calls;
    1.78 +  static long _fullgc_alot_invocation;
    1.79 +
    1.80 +  // tracing
    1.81 +  static void trace(const char* result_type, const char* header);
    1.82 +
    1.83 +  // Helper methods used to implement +ScavengeALot and +FullGCALot
    1.84 +  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
    1.85 +  static void gc_alot();
    1.86 +
    1.87 +  static void walk_stack_from(vframe* start_vf);
    1.88 +  static void walk_stack();
    1.89 +
    1.90 +# ifdef ENABLE_ZAP_DEAD_LOCALS
    1.91 +  static void zap_dead_locals_old();
    1.92 +# endif
    1.93 +
    1.94 +  static void zombieAll();
    1.95 +  static void unlinkSymbols();
    1.96 +  static void deoptimizeAll();
    1.97 +  static void stress_derived_pointers();
    1.98 +  static void verify_stack();
    1.99 +  static void verify_last_frame();
   1.100 +# endif
   1.101 +
   1.102 + public:
   1.103 +  // OS dependent stuff
   1.104 +#ifdef TARGET_OS_FAMILY_linux
   1.105 +# include "interfaceSupport_linux.hpp"
   1.106 +#endif
   1.107 +#ifdef TARGET_OS_FAMILY_solaris
   1.108 +# include "interfaceSupport_solaris.hpp"
   1.109 +#endif
   1.110 +#ifdef TARGET_OS_FAMILY_windows
   1.111 +# include "interfaceSupport_windows.hpp"
   1.112 +#endif
   1.113 +#ifdef TARGET_OS_FAMILY_aix
   1.114 +# include "interfaceSupport_aix.hpp"
   1.115 +#endif
   1.116 +#ifdef TARGET_OS_FAMILY_bsd
   1.117 +# include "interfaceSupport_bsd.hpp"
   1.118 +#endif
   1.119 +
   1.120 +};
   1.121 +
   1.122 +
    1.123 +// Base class for all thread state transition classes.
   1.124 +
   1.125 +class ThreadStateTransition : public StackObj {
   1.126 + protected:
   1.127 +  JavaThread* _thread;
   1.128 + public:
   1.129 +  ThreadStateTransition(JavaThread *thread) {
   1.130 +    _thread = thread;
   1.131 +    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
   1.132 +  }
   1.133 +
    1.134 +  // Change the thread state in a manner that lets the safepoint code detect the change.
    1.135 +  // Time-critical: called on exit from every runtime routine.
   1.136 +  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
   1.137 +    assert(from != _thread_in_Java, "use transition_from_java");
   1.138 +    assert(from != _thread_in_native, "use transition_from_native");
    1.139 +    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
   1.140 +    assert(thread->thread_state() == from, "coming from wrong thread state");
   1.141 +    // Change to transition state (assumes total store ordering!  -Urs)
   1.142 +    thread->set_thread_state((JavaThreadState)(from + 1));
   1.143 +
   1.144 +    // Make sure new state is seen by VM thread
   1.145 +    if (os::is_MP()) {
   1.146 +      if (UseMembar) {
   1.147 +        // Force a fence between the write above and read below
   1.148 +        OrderAccess::fence();
   1.149 +      } else {
   1.150 +        // store to serialize page so VM thread can do pseudo remote membar
   1.151 +        os::write_memory_serialize_page(thread);
   1.152 +      }
   1.153 +    }
   1.154 +
   1.155 +    if (SafepointSynchronize::do_call_back()) {
   1.156 +      SafepointSynchronize::block(thread);
   1.157 +    }
   1.158 +    thread->set_thread_state(to);
   1.159 +
   1.160 +    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   1.161 +  }
   1.162 +
   1.163 +  // transition_and_fence must be used on any thread state transition
   1.164 +  // where there might not be a Java call stub on the stack, in
   1.165 +  // particular on Windows where the Structured Exception Handler is
   1.166 +  // set up in the call stub. os::write_memory_serialize_page() can
   1.167 +  // fault and we can't recover from it on Windows without a SEH in
   1.168 +  // place.
   1.169 +  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
   1.170 +    assert(thread->thread_state() == from, "coming from wrong thread state");
    1.171 +    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
   1.172 +    // Change to transition state (assumes total store ordering!  -Urs)
   1.173 +    thread->set_thread_state((JavaThreadState)(from + 1));
   1.174 +
   1.175 +    // Make sure new state is seen by VM thread
   1.176 +    if (os::is_MP()) {
   1.177 +      if (UseMembar) {
   1.178 +        // Force a fence between the write above and read below
   1.179 +        OrderAccess::fence();
   1.180 +      } else {
   1.181 +        // Must use this rather than serialization page in particular on Windows
   1.182 +        InterfaceSupport::serialize_memory(thread);
   1.183 +      }
   1.184 +    }
   1.185 +
   1.186 +    if (SafepointSynchronize::do_call_back()) {
   1.187 +      SafepointSynchronize::block(thread);
   1.188 +    }
   1.189 +    thread->set_thread_state(to);
   1.190 +
   1.191 +    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   1.192 +  }
   1.193 +
    1.194 +  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
    1.195 +  // never block on entry to the VM: blocking here would break the code, since e.g. the
    1.196 +  // arguments to be preserved have not been set up yet.
   1.197 +  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
   1.198 +    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
   1.199 +    thread->set_thread_state(to);
   1.200 +  }
   1.201 +
   1.202 +  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    1.203 +    assert((to & 1) == 0, "odd numbers are transition states");
   1.204 +    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
   1.205 +    // Change to transition state (assumes total store ordering!  -Urs)
   1.206 +    thread->set_thread_state(_thread_in_native_trans);
   1.207 +
   1.208 +    // Make sure new state is seen by GC thread
   1.209 +    if (os::is_MP()) {
   1.210 +      if (UseMembar) {
   1.211 +        // Force a fence between the write above and read below
   1.212 +        OrderAccess::fence();
   1.213 +      } else {
   1.214 +        // Must use this rather than serialization page in particular on Windows
   1.215 +        InterfaceSupport::serialize_memory(thread);
   1.216 +      }
   1.217 +    }
   1.218 +
   1.219 +    // We never install asynchronous exceptions when coming (back) in
   1.220 +    // to the runtime from native code because the runtime is not set
   1.221 +    // up to handle exceptions floating around at arbitrary points.
   1.222 +    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
   1.223 +      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
   1.224 +
   1.225 +      // Clear unhandled oops anywhere where we could block, even if we don't.
   1.226 +      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
   1.227 +    }
   1.228 +
   1.229 +    thread->set_thread_state(to);
   1.230 +  }
   1.231 + protected:
   1.232 +   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   1.233 +   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   1.234 +   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   1.235 +   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
   1.236 +};
   1.237 +
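// Illustrative sketch of the state encoding that transition() relies on. The
// values below are quoted from memory of JavaThreadState in
// globalDefinitions.hpp and should be treated as an assumption for this
// example: each stable state is even and is followed by its odd "_trans"
// state, so storing from + 1 marks the thread as in transition (and therefore
// unsafe) until the final store of the destination state:
//
//   _thread_in_native = 4,   _thread_in_native_trans = 5,
//   _thread_in_vm     = 6,   _thread_in_vm_trans     = 7,
//   _thread_in_Java   = 8,   _thread_in_Java_trans   = 9,
//   _thread_blocked   = 10,  _thread_blocked_trans   = 11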
   1.238 +
   1.239 +class ThreadInVMfromJava : public ThreadStateTransition {
   1.240 + public:
   1.241 +  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
   1.242 +    trans_from_java(_thread_in_vm);
   1.243 +  }
   1.244 +  ~ThreadInVMfromJava()  {
   1.245 +    trans(_thread_in_vm, _thread_in_Java);
    1.246 +    // Check for pending asynchronous exceptions or suspends.
   1.247 +    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
   1.248 +  }
   1.249 +};
   1.250 +
   1.251 +
   1.252 +class ThreadInVMfromUnknown {
   1.253 + private:
   1.254 +  JavaThread* _thread;
   1.255 + public:
   1.256 +  ThreadInVMfromUnknown() : _thread(NULL) {
   1.257 +    Thread* t = Thread::current();
   1.258 +    if (t->is_Java_thread()) {
   1.259 +      JavaThread* t2 = (JavaThread*) t;
   1.260 +      if (t2->thread_state() == _thread_in_native) {
   1.261 +        _thread = t2;
   1.262 +        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
   1.263 +        // Used to have a HandleMarkCleaner but that is dangerous as
   1.264 +        // it could free a handle in our (indirect, nested) caller.
   1.265 +        // We expect any handles will be short lived and figure we
   1.266 +        // don't need an actual HandleMark.
   1.267 +      }
   1.268 +    }
   1.269 +  }
   1.270 +  ~ThreadInVMfromUnknown()  {
   1.271 +    if (_thread) {
   1.272 +      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
   1.273 +    }
   1.274 +  }
   1.275 +};
   1.276 +
   1.277 +
   1.278 +class ThreadInVMfromNative : public ThreadStateTransition {
   1.279 + public:
   1.280 +  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
   1.281 +    trans_from_native(_thread_in_vm);
   1.282 +  }
   1.283 +  ~ThreadInVMfromNative() {
   1.284 +    trans_and_fence(_thread_in_vm, _thread_in_native);
   1.285 +  }
   1.286 +};
   1.287 +
   1.288 +
   1.289 +class ThreadToNativeFromVM : public ThreadStateTransition {
   1.290 + public:
   1.291 +  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
   1.292 +    // We are leaving the VM at this point and going directly to native code.
   1.293 +    // Block, if we are in the middle of a safepoint synchronization.
   1.294 +    assert(!thread->owns_locks(), "must release all locks when leaving VM");
   1.295 +    thread->frame_anchor()->make_walkable(thread);
   1.296 +    trans_and_fence(_thread_in_vm, _thread_in_native);
    1.297 +    // Check for pending asynchronous exceptions or suspends.
   1.298 +    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
   1.299 +  }
   1.300 +
   1.301 +  ~ThreadToNativeFromVM() {
   1.302 +    trans_from_native(_thread_in_vm);
    1.303 +    // We don't need to clear_walkable because it will happen automagically when we return to Java
   1.304 +  }
   1.305 +};
   1.306 +
   1.307 +
   1.308 +class ThreadBlockInVM : public ThreadStateTransition {
   1.309 + public:
   1.310 +  ThreadBlockInVM(JavaThread *thread)
   1.311 +  : ThreadStateTransition(thread) {
    1.312 +    // Once we are blocked, the VM expects the stack to be walkable.
   1.313 +    thread->frame_anchor()->make_walkable(thread);
   1.314 +    trans_and_fence(_thread_in_vm, _thread_blocked);
   1.315 +  }
   1.316 +  ~ThreadBlockInVM() {
   1.317 +    trans_and_fence(_thread_blocked, _thread_in_vm);
    1.318 +    // We don't need to clear_walkable because it will happen automagically when we return to Java
   1.319 +  }
   1.320 +};
   1.321 +
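// Usage sketch (hypothetical caller, for illustration only): VM code that is
// about to block on an OS primitive brackets the blocking call so that it
// does not hold up a safepoint:
//
//   {
//     ThreadBlockInVM tbivm(thread);   // _thread_in_vm -> _thread_blocked
//     // ... wait on a condition variable or other OS primitive ...
//   }                                  // _thread_blocked -> _thread_in_vm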
   1.322 +
   1.323 +// This special transition class is only used to prevent asynchronous exceptions
   1.324 +// from being installed on vm exit in situations where we can't tolerate them.
   1.325 +// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
   1.326 +class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
   1.327 + public:
   1.328 +  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
   1.329 +    trans_from_java(_thread_in_vm);
   1.330 +  }
   1.331 +  ~ThreadInVMfromJavaNoAsyncException()  {
   1.332 +    trans(_thread_in_vm, _thread_in_Java);
    1.333 +    // NOTE: We do not check for pending asynchronous exceptions.
   1.334 +    // If we did and moved the pending async exception over into the
   1.335 +    // pending exception field, we would need to deopt (currently C2
   1.336 +    // only). However, to do so would require that we transition back
   1.337 +    // to the _thread_in_vm state. Instead we postpone the handling of
   1.338 +    // the async exception.
   1.339 +
    1.340 +    // Check for pending suspends only.
   1.341 +    if (_thread->has_special_runtime_exit_condition())
   1.342 +      _thread->handle_special_runtime_exit_condition(false);
   1.343 +  }
   1.344 +};
   1.345 +
    1.346 +// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
   1.347 +// Can be used to verify properties on enter/exit of the VM.
   1.348 +
   1.349 +#ifdef ASSERT
   1.350 +class VMEntryWrapper {
   1.351 + public:
   1.352 +  VMEntryWrapper() {
   1.353 +    if (VerifyLastFrame) {
   1.354 +      InterfaceSupport::verify_last_frame();
   1.355 +    }
   1.356 +  }
   1.357 +
   1.358 +  ~VMEntryWrapper() {
   1.359 +    InterfaceSupport::check_gc_alot();
   1.360 +    if (WalkStackALot) {
   1.361 +      InterfaceSupport::walk_stack();
   1.362 +    }
   1.363 +#ifdef ENABLE_ZAP_DEAD_LOCALS
   1.364 +    if (ZapDeadLocalsOld) {
   1.365 +      InterfaceSupport::zap_dead_locals_old();
   1.366 +    }
   1.367 +#endif
   1.368 +#ifdef COMPILER2
   1.369 +    // This option is not used by Compiler 1
   1.370 +    if (StressDerivedPointers) {
   1.371 +      InterfaceSupport::stress_derived_pointers();
   1.372 +    }
   1.373 +#endif
   1.374 +    if (DeoptimizeALot || DeoptimizeRandom) {
   1.375 +      InterfaceSupport::deoptimizeAll();
   1.376 +    }
   1.377 +    if (ZombieALot) {
   1.378 +      InterfaceSupport::zombieAll();
   1.379 +    }
   1.380 +    if (UnlinkSymbolsALot) {
   1.381 +      InterfaceSupport::unlinkSymbols();
   1.382 +    }
   1.383 +    // do verification AFTER potential deoptimization
   1.384 +    if (VerifyStack) {
   1.385 +      InterfaceSupport::verify_stack();
   1.386 +    }
   1.387 +
   1.388 +  }
   1.389 +};
   1.390 +
   1.391 +
   1.392 +class VMNativeEntryWrapper {
   1.393 + public:
   1.394 +  VMNativeEntryWrapper() {
   1.395 +    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
   1.396 +  }
   1.397 +
   1.398 +  ~VMNativeEntryWrapper() {
   1.399 +    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
   1.400 +  }
   1.401 +};
   1.402 +
   1.403 +#endif
   1.404 +
   1.405 +
   1.406 +// VM-internal runtime interface support
   1.407 +
   1.408 +#ifdef ASSERT
   1.409 +
   1.410 +class RuntimeHistogramElement : public HistogramElement {
   1.411 +  public:
   1.412 +   RuntimeHistogramElement(const char* name);
   1.413 +};
   1.414 +
   1.415 +#define TRACE_CALL(result_type, header)                            \
   1.416 +  InterfaceSupport::_number_of_calls++;                            \
   1.417 +  if (TraceRuntimeCalls)                                           \
   1.418 +    InterfaceSupport::trace(#result_type, #header);                \
   1.419 +  if (CountRuntimeCalls) {                                         \
   1.420 +    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
   1.421 +    if (e != NULL) e->increment_count();                           \
   1.422 +  }
   1.423 +#else
   1.424 +#define TRACE_CALL(result_type, header)                            \
   1.425 +  /* do nothing */
   1.426 +#endif
   1.427 +
   1.428 +
   1.429 +// LEAF routines do not lock, GC or throw exceptions
   1.430 +
   1.431 +#define VM_LEAF_BASE(result_type, header)                            \
   1.432 +  TRACE_CALL(result_type, header)                                    \
   1.433 +  debug_only(NoHandleMark __hm;)                                     \
   1.434 +  os::verify_stack_alignment();                                      \
   1.435 +  /* begin of body */
   1.436 +
   1.437 +
   1.438 +// ENTRY routines may lock, GC and throw exceptions
   1.439 +
   1.440 +#define VM_ENTRY_BASE(result_type, header, thread)                   \
   1.441 +  TRACE_CALL(result_type, header)                                    \
   1.442 +  HandleMarkCleaner __hm(thread);                                    \
   1.443 +  Thread* THREAD = thread;                                           \
   1.444 +  os::verify_stack_alignment();                                      \
   1.445 +  /* begin of body */
   1.446 +
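// Illustrative note: the THREAD local introduced by VM_ENTRY_BASE is what lets
// the exception macros from utilities/exceptions.hpp (THROW, CHECK,
// HAS_PENDING_EXCEPTION, ...) be used inside an entry-point body, e.g.
// (the callee name below is made up):
//
//   some_allocating_helper(klass, CHECK);  // CHECK passes THREAD and returns early on a pending exception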
   1.447 +
   1.448 +// QUICK_ENTRY routines behave like ENTRY but without a handle mark
   1.449 +
   1.450 +#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
   1.451 +  TRACE_CALL(result_type, header)                                    \
   1.452 +  debug_only(NoHandleMark __hm;)                                     \
   1.453 +  Thread* THREAD = thread;                                           \
   1.454 +  os::verify_stack_alignment();                                      \
   1.455 +  /* begin of body */
   1.456 +
   1.457 +
   1.458 +// Definitions for IRT (Interpreter Runtime)
   1.459 +// (thread is an argument passed in to all these routines)
   1.460 +
   1.461 +#define IRT_ENTRY(result_type, header)                               \
   1.462 +  result_type header {                                               \
   1.463 +    ThreadInVMfromJava __tiv(thread);                                \
   1.464 +    VM_ENTRY_BASE(result_type, header, thread)                       \
   1.465 +    debug_only(VMEntryWrapper __vew;)
   1.466 +
   1.467 +
   1.468 +#define IRT_LEAF(result_type, header)                                \
   1.469 +  result_type header {                                               \
   1.470 +    VM_LEAF_BASE(result_type, header)                                \
   1.471 +    debug_only(No_Safepoint_Verifier __nspv(true);)
   1.472 +
   1.473 +
   1.474 +#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
   1.475 +  result_type header {                                               \
   1.476 +    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   1.477 +    VM_ENTRY_BASE(result_type, header, thread)                       \
   1.478 +    debug_only(VMEntryWrapper __vew;)
   1.479 +
   1.480 +#define IRT_END }
   1.481 +
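// Usage sketch (the method name below is hypothetical): interpreter runtime
// entry points are written as
//
//   IRT_ENTRY(void, InterpreterRuntime::example(JavaThread* thread, oopDesc* obj))
//     // body runs in _thread_in_vm and may lock, GC and throw
//   IRT_END
//
// The JavaThread parameter must be named 'thread', since the macro body refers to it.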
   1.482 +
   1.483 +// Definitions for JRT (Java (Compiler/Shared) Runtime)
   1.484 +
   1.485 +#define JRT_ENTRY(result_type, header)                               \
   1.486 +  result_type header {                                               \
   1.487 +    ThreadInVMfromJava __tiv(thread);                                \
   1.488 +    VM_ENTRY_BASE(result_type, header, thread)                       \
   1.489 +    debug_only(VMEntryWrapper __vew;)
   1.490 +
   1.491 +
   1.492 +#define JRT_LEAF(result_type, header)                                \
   1.493 +  result_type header {                                               \
   1.494 +  VM_LEAF_BASE(result_type, header)                                  \
   1.495 +  debug_only(JRT_Leaf_Verifier __jlv;)
   1.496 +
   1.497 +
   1.498 +#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
   1.499 +  result_type header {                                               \
   1.500 +    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
   1.501 +    VM_ENTRY_BASE(result_type, header, thread)                       \
   1.502 +    debug_only(VMEntryWrapper __vew;)
   1.503 +
    1.504 +// Same as JRT_ENTRY, but allows the return value to be computed after the
    1.505 +// safepoint transition back into Java from the VM.
   1.506 +#define JRT_BLOCK_ENTRY(result_type, header)                         \
   1.507 +  result_type header {                                               \
   1.508 +    TRACE_CALL(result_type, header)                                  \
   1.509 +    HandleMarkCleaner __hm(thread);
   1.510 +
   1.511 +#define JRT_BLOCK                                                    \
   1.512 +    {                                                                \
   1.513 +    ThreadInVMfromJava __tiv(thread);                                \
   1.514 +    Thread* THREAD = thread;                                         \
   1.515 +    debug_only(VMEntryWrapper __vew;)
   1.516 +
   1.517 +#define JRT_BLOCK_END }
   1.518 +
   1.519 +#define JRT_END }
   1.520 +
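// Usage sketch (hypothetical signatures, for illustration):
//
//   JRT_ENTRY(void, SharedRuntime::example_entry(JavaThread* thread))
//     // body runs in _thread_in_vm and may lock, GC and throw
//   JRT_END
//
//   JRT_BLOCK_ENTRY(address, SharedRuntime::example_blocked(JavaThread* thread))
//     // still in _thread_in_Java here: no state transition has happened yet
//     JRT_BLOCK
//       // in _thread_in_vm here: may safepoint, lock, GC and throw
//     JRT_BLOCK_END
//     return some_precomputed_stub;   // computed after returning to _thread_in_Java
//   JRT_END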
   1.521 +// Definitions for JNI
   1.522 +
   1.523 +#define JNI_ENTRY(result_type, header)                               \
   1.524 +    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
   1.525 +    WeakPreserveExceptionMark __wem(thread);
   1.526 +
   1.527 +#define JNI_ENTRY_NO_PRESERVE(result_type, header)             \
   1.528 +extern "C" {                                                         \
   1.529 +  result_type JNICALL header {                                \
   1.530 +    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   1.531 +    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   1.532 +    ThreadInVMfromNative __tiv(thread);                              \
   1.533 +    debug_only(VMNativeEntryWrapper __vew;)                          \
   1.534 +    VM_ENTRY_BASE(result_type, header, thread)
   1.535 +
   1.536 +
   1.537 +// Ensure that the VMNativeEntryWrapper constructor, which can cause
   1.538 +// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
   1.539 +#define JNI_QUICK_ENTRY(result_type, header)                         \
   1.540 +extern "C" {                                                         \
   1.541 +  result_type JNICALL header {                                \
   1.542 +    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   1.543 +    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   1.544 +    ThreadInVMfromNative __tiv(thread);                              \
   1.545 +    debug_only(VMNativeEntryWrapper __vew;)                          \
   1.546 +    VM_QUICK_ENTRY_BASE(result_type, header, thread)
   1.547 +
   1.548 +
   1.549 +#define JNI_LEAF(result_type, header)                                \
   1.550 +extern "C" {                                                         \
   1.551 +  result_type JNICALL header {                                \
   1.552 +    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   1.553 +    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
   1.554 +    VM_LEAF_BASE(result_type, header)
   1.555 +
   1.556 +
   1.557 +// Close the routine and the extern "C"
   1.558 +#define JNI_END } }
   1.559 +
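// Usage sketch (illustrative; the function name is made up, but the shape
// mirrors how jni.cpp defines the JNI implementation functions):
//
//   JNI_ENTRY(jobject, jni_ExampleFunction(JNIEnv *env, jobject obj))
//     // 'env' must be a parameter: the macro derives 'thread' from it
//     ...
//   JNI_END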
   1.560 +
   1.561 +
   1.562 +// Definitions for JVM
   1.563 +
   1.564 +#define JVM_ENTRY(result_type, header)                               \
   1.565 +extern "C" {                                                         \
   1.566 +  result_type JNICALL header {                                       \
   1.567 +    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   1.568 +    ThreadInVMfromNative __tiv(thread);                              \
   1.569 +    debug_only(VMNativeEntryWrapper __vew;)                          \
   1.570 +    VM_ENTRY_BASE(result_type, header, thread)
   1.571 +
   1.572 +
   1.573 +#define JVM_ENTRY_NO_ENV(result_type, header)                        \
   1.574 +extern "C" {                                                         \
   1.575 +  result_type JNICALL header {                                       \
   1.576 +    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
   1.577 +    ThreadInVMfromNative __tiv(thread);                              \
   1.578 +    debug_only(VMNativeEntryWrapper __vew;)                          \
   1.579 +    VM_ENTRY_BASE(result_type, header, thread)
   1.580 +
   1.581 +
   1.582 +#define JVM_QUICK_ENTRY(result_type, header)                         \
   1.583 +extern "C" {                                                         \
   1.584 +  result_type JNICALL header {                                       \
   1.585 +    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   1.586 +    ThreadInVMfromNative __tiv(thread);                              \
   1.587 +    debug_only(VMNativeEntryWrapper __vew;)                          \
   1.588 +    VM_QUICK_ENTRY_BASE(result_type, header, thread)
   1.589 +
   1.590 +
   1.591 +#define JVM_LEAF(result_type, header)                                \
   1.592 +extern "C" {                                                         \
   1.593 +  result_type JNICALL header {                                       \
   1.594 +    VM_Exit::block_if_vm_exited();                                   \
   1.595 +    VM_LEAF_BASE(result_type, header)
   1.596 +
   1.597 +
   1.598 +#define JVM_END } }
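// Usage sketch (illustrative; JVM_ExampleFunction is a made-up name):
//
//   JVM_ENTRY(jobject, JVM_ExampleFunction(JNIEnv *env, jobject obj))
//     // transitions from native to _thread_in_vm; 'env' is required by the macro
//     ...
//   JVM_END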
   1.599 +
   1.600 +#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
