src/share/vm/runtime/interfaceSupport.hpp

author       mikael
date         Tue, 09 Oct 2012 10:09:34 -0700
changeset    4153:b9a9ed0f8eeb
parent       3606:da4be62fb889
child        4299:f34d701e952e
permissions  -rw-r--r--

7197424: update copyright year to match last edit in jdk8 hotspot repository
Summary: Update copyright year to 2012 for relevant files
Reviewed-by: dholmes, coleenp

duke@435 1 /*
mikael@4153 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
stefank@2314 26 #define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
stefank@2314 27
stefank@2314 28 #include "memory/gcLocker.hpp"
stefank@2314 29 #include "runtime/handles.inline.hpp"
stefank@2314 30 #include "runtime/mutexLocker.hpp"
stefank@2314 31 #include "runtime/orderAccess.hpp"
stefank@2314 32 #include "runtime/os.hpp"
stefank@2314 33 #include "runtime/safepoint.hpp"
stefank@2314 34 #include "runtime/vmThread.hpp"
stefank@2314 35 #include "utilities/globalDefinitions.hpp"
stefank@2314 36 #include "utilities/preserveException.hpp"
stefank@2314 37 #include "utilities/top.hpp"
stefank@2314 38 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 39 # include "thread_linux.inline.hpp"
stefank@2314 40 #endif
stefank@2314 41 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 42 # include "thread_solaris.inline.hpp"
stefank@2314 43 #endif
stefank@2314 44 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 45 # include "thread_windows.inline.hpp"
stefank@2314 46 #endif
never@3156 47 #ifdef TARGET_OS_FAMILY_bsd
never@3156 48 # include "thread_bsd.inline.hpp"
never@3156 49 #endif
stefank@2314 50
duke@435 51 // Wrapper for all entry points to the virtual machine.
duke@435 52 // The HandleMarkCleaner is a faster version of HandleMark.
duke@435 53 // It relies on the fact that there is a HandleMark further
duke@435 54 // down the stack (in JavaCalls::call_helper), and just resets
duke@435 55 // to the saved values in that HandleMark.
duke@435 56
duke@435 57 class HandleMarkCleaner: public StackObj {
duke@435 58 private:
duke@435 59 Thread* _thread;
duke@435 60 public:
duke@435 61 HandleMarkCleaner(Thread* thread) {
duke@435 62 _thread = thread;
duke@435 63 _thread->last_handle_mark()->push();
duke@435 64 }
duke@435 65 ~HandleMarkCleaner() {
duke@435 66 _thread->last_handle_mark()->pop_and_restore();
duke@435 67 }
duke@435 68
duke@435 69 private:
duke@435 70 inline void* operator new(size_t size, void* ptr) {
duke@435 71 return ptr;
duke@435 72 }
duke@435 73 };
duke@435 74
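For orientation, here is a minimal sketch of how the cleaner is meant to be used; the real instantiation is done by the VM_ENTRY_BASE macro defined further down in this file, and the function name below is hypothetical.

    // Minimal sketch, assuming a HandleMark has already been pushed further down
    // the stack by JavaCalls::call_helper; example_vm_entry is a made-up name.
    void example_vm_entry(JavaThread* thread) {
      HandleMarkCleaner __hm(thread);  // records the state of that outer HandleMark
      // ... body may allocate temporary Handles ...
    }                                  // destructor pops back to the saved state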
never@3241 75 // InterfaceSupport provides functionality used by the VM_LEAF_BASE and
never@3241 76 // VM_ENTRY_BASE macros. These macros are used to guard entry points into
never@3241 77 // the VM and perform checks when leaving the VM.
duke@435 78
duke@435 79
duke@435 80 class InterfaceSupport: AllStatic {
duke@435 81 # ifdef ASSERT
duke@435 82 public:
duke@435 83 static long _scavenge_alot_counter;
duke@435 84 static long _fullgc_alot_counter;
duke@435 85 static long _number_of_calls;
duke@435 86 static long _fullgc_alot_invocation;
duke@435 87
duke@435 88 // tracing
duke@435 89 static void trace(const char* result_type, const char* header);
duke@435 90
duke@435 91 // Helper methods used to implement +ScavengeALot and +FullGCALot
duke@435 92 static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
duke@435 93 static void gc_alot();
duke@435 94
duke@435 95 static void walk_stack_from(vframe* start_vf);
duke@435 96 static void walk_stack();
duke@435 97
duke@435 98 # ifdef ENABLE_ZAP_DEAD_LOCALS
duke@435 99 static void zap_dead_locals_old();
duke@435 100 # endif
duke@435 101
duke@435 102 static void zombieAll();
coleenp@2497 103 static void unlinkSymbols();
duke@435 104 static void deoptimizeAll();
duke@435 105 static void stress_derived_pointers();
duke@435 106 static void verify_stack();
duke@435 107 static void verify_last_frame();
duke@435 108 # endif
duke@435 109
duke@435 110 public:
duke@435 111 // OS dependent stuff
stefank@2314 112 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 113 # include "interfaceSupport_linux.hpp"
stefank@2314 114 #endif
stefank@2314 115 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 116 # include "interfaceSupport_solaris.hpp"
stefank@2314 117 #endif
stefank@2314 118 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 119 # include "interfaceSupport_windows.hpp"
stefank@2314 120 #endif
never@3156 121 #ifdef TARGET_OS_FAMILY_bsd
never@3156 122 # include "interfaceSupport_bsd.hpp"
never@3156 123 #endif
stefank@2314 124
duke@435 125 };
duke@435 126
duke@435 127
duke@435 128 // Base class for all thread state transition classes.
duke@435 129
duke@435 130 class ThreadStateTransition : public StackObj {
duke@435 131 protected:
duke@435 132 JavaThread* _thread;
duke@435 133 public:
duke@435 134 ThreadStateTransition(JavaThread *thread) {
duke@435 135 _thread = thread;
duke@435 136 assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
duke@435 137 }
duke@435 138
duke@435 139 // Change thread state in a manner that the safepoint code can detect.
duke@435 140 // Time-critical: called on exit from every runtime routine
duke@435 141 static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
duke@435 142 assert(from != _thread_in_Java, "use transition_from_java");
duke@435 143 assert(from != _thread_in_native, "use transition_from_native");
duke@435 144 assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
duke@435 145 assert(thread->thread_state() == from, "coming from wrong thread state");
duke@435 146 // Change to transition state (assumes total store ordering! -Urs)
duke@435 147 thread->set_thread_state((JavaThreadState)(from + 1));
duke@435 148
duke@435 149 // Make sure new state is seen by VM thread
duke@435 150 if (os::is_MP()) {
duke@435 151 if (UseMembar) {
duke@435 152 // Force a fence between the write above and read below
duke@435 153 OrderAccess::fence();
duke@435 154 } else {
duke@435 155 // store to serialize page so VM thread can do pseudo remote membar
duke@435 156 os::write_memory_serialize_page(thread);
duke@435 157 }
duke@435 158 }
duke@435 159
duke@435 160 if (SafepointSynchronize::do_call_back()) {
duke@435 161 SafepointSynchronize::block(thread);
duke@435 162 }
duke@435 163 thread->set_thread_state(to);
duke@435 164
duke@435 165 CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
duke@435 166 }
duke@435 167
duke@435 168 // transition_and_fence must be used on any thread state transition
duke@435 169 // where there might not be a Java call stub on the stack, in
duke@435 170 // particular on Windows where the Structured Exception Handler is
duke@435 171 // set up in the call stub. os::write_memory_serialize_page() can
duke@435 172 // fault and we can't recover from it on Windows without a SEH in
duke@435 173 // place.
duke@435 174 static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
duke@435 175 assert(thread->thread_state() == from, "coming from wrong thread state");
duke@435 176 assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
duke@435 177 // Change to transition state (assumes total store ordering! -Urs)
duke@435 178 thread->set_thread_state((JavaThreadState)(from + 1));
duke@435 179
duke@435 180 // Make sure new state is seen by VM thread
duke@435 181 if (os::is_MP()) {
duke@435 182 if (UseMembar) {
duke@435 183 // Force a fence between the write above and read below
duke@435 184 OrderAccess::fence();
duke@435 185 } else {
duke@435 186 // Must use this rather than serialization page in particular on Windows
duke@435 187 InterfaceSupport::serialize_memory(thread);
duke@435 188 }
duke@435 189 }
duke@435 190
duke@435 191 if (SafepointSynchronize::do_call_back()) {
duke@435 192 SafepointSynchronize::block(thread);
duke@435 193 }
duke@435 194 thread->set_thread_state(to);
duke@435 195
duke@435 196 CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
duke@435 197 }
duke@435 198
duke@435 199 // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
duke@435 200 // never block on entry to the VM: blocking here would break the code, since
duke@435 201 // e.g. the preserved arguments have not been set up yet.
duke@435 202 static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
duke@435 203 assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
duke@435 204 thread->set_thread_state(to);
duke@435 205 }
duke@435 206
duke@435 207 static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
duke@435 208 assert((to & 1) == 0, "odd numbers are transition states");
duke@435 209 assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
duke@435 210 // Change to transition state (assumes total store ordering! -Urs)
duke@435 211 thread->set_thread_state(_thread_in_native_trans);
duke@435 212
duke@435 213 // Make sure new state is seen by GC thread
duke@435 214 if (os::is_MP()) {
duke@435 215 if (UseMembar) {
duke@435 216 // Force a fence between the write above and read below
duke@435 217 OrderAccess::fence();
duke@435 218 } else {
duke@435 219 // Must use this rather than serialization page in particular on Windows
duke@435 220 InterfaceSupport::serialize_memory(thread);
duke@435 221 }
duke@435 222 }
duke@435 223
duke@435 224 // We never install asynchronous exceptions when coming (back) into
duke@435 225 // the runtime from native code because the runtime is not set
duke@435 226 // up to handle exceptions floating around at arbitrary points.
duke@435 227 if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
duke@435 228 JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
duke@435 229
duke@435 230 // Clear unhandled oops anywhere we could block, even if we don't.
duke@435 231 CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
duke@435 232 }
duke@435 233
duke@435 234 thread->set_thread_state(to);
duke@435 235 }
duke@435 236 protected:
duke@435 237 void trans(JavaThreadState from, JavaThreadState to) { transition(_thread, from, to); }
duke@435 238 void trans_from_java(JavaThreadState to) { transition_from_java(_thread, to); }
duke@435 239 void trans_from_native(JavaThreadState to) { transition_from_native(_thread, to); }
duke@435 240 void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
duke@435 241 };
duke@435 242
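The assertions in ThreadStateTransition rely on the JavaThreadState numbering convention: each stable state has an even value and its matching transition state is that value plus one. A minimal sketch of the pattern, with illustrative values only (the authoritative enum lives in utilities/globalDefinitions.hpp):

    // Illustration of the even/odd convention behind "(from & 1) == 0" and
    // "from + 1" above; the names and values here are examples, not the real enum.
    enum ExampleThreadState {
      example_in_native       = 4,  // stable state: even
      example_in_native_trans = 5,  // its transition state: stable + 1, odd
      example_in_vm           = 6,
      example_in_vm_trans     = 7
    };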
duke@435 243
duke@435 244 class ThreadInVMfromJava : public ThreadStateTransition {
duke@435 245 public:
duke@435 246 ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
duke@435 247 trans_from_java(_thread_in_vm);
duke@435 248 }
duke@435 249 ~ThreadInVMfromJava() {
duke@435 250 trans(_thread_in_vm, _thread_in_Java);
duke@435 251 // Check for pending async exceptions or suspends.
duke@435 252 if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
duke@435 253 }
duke@435 254 };
duke@435 255
duke@435 256
duke@435 257 class ThreadInVMfromUnknown {
duke@435 258 private:
duke@435 259 JavaThread* _thread;
duke@435 260 public:
duke@435 261 ThreadInVMfromUnknown() : _thread(NULL) {
duke@435 262 Thread* t = Thread::current();
duke@435 263 if (t->is_Java_thread()) {
duke@435 264 JavaThread* t2 = (JavaThread*) t;
duke@435 265 if (t2->thread_state() == _thread_in_native) {
duke@435 266 _thread = t2;
duke@435 267 ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
duke@435 268 // Used to have a HandleMarkCleaner but that is dangerous as
duke@435 269 // it could free a handle in our (indirect, nested) caller.
duke@435 270 // We expect any handles will be short lived and figure we
duke@435 271 // don't need an actual HandleMark.
duke@435 272 }
duke@435 273 }
duke@435 274 }
duke@435 275 ~ThreadInVMfromUnknown() {
duke@435 276 if (_thread) {
duke@435 277 ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
duke@435 278 }
duke@435 279 }
duke@435 280 };
duke@435 281
duke@435 282
duke@435 283 class ThreadInVMfromNative : public ThreadStateTransition {
duke@435 284 public:
duke@435 285 ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
duke@435 286 trans_from_native(_thread_in_vm);
duke@435 287 }
duke@435 288 ~ThreadInVMfromNative() {
duke@435 289 trans_and_fence(_thread_in_vm, _thread_in_native);
duke@435 290 }
duke@435 291 };
duke@435 292
duke@435 293
duke@435 294 class ThreadToNativeFromVM : public ThreadStateTransition {
duke@435 295 public:
duke@435 296 ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
duke@435 297 // We are leaving the VM at this point and going directly to native code.
duke@435 298 // Block if we are in the middle of a safepoint synchronization.
duke@435 299 assert(!thread->owns_locks(), "must release all locks when leaving VM");
duke@435 300 thread->frame_anchor()->make_walkable(thread);
duke@435 301 trans_and_fence(_thread_in_vm, _thread_in_native);
duke@435 302 // Check for pending async exceptions or suspends.
duke@435 303 if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
duke@435 304 }
duke@435 305
duke@435 306 ~ThreadToNativeFromVM() {
duke@435 307 trans_from_native(_thread_in_vm);
duke@435 308 // We don't need to clear_walkable because it will happen automagically when we return to Java
duke@435 309 }
duke@435 310 };
duke@435 311
duke@435 312
duke@435 313 class ThreadBlockInVM : public ThreadStateTransition {
duke@435 314 public:
duke@435 315 ThreadBlockInVM(JavaThread *thread)
duke@435 316 : ThreadStateTransition(thread) {
duke@435 317 // Once we are blocked the VM expects the stack to be walkable
duke@435 318 thread->frame_anchor()->make_walkable(thread);
duke@435 319 trans_and_fence(_thread_in_vm, _thread_blocked);
duke@435 320 }
duke@435 321 ~ThreadBlockInVM() {
duke@435 322 trans_and_fence(_thread_blocked, _thread_in_vm);
duke@435 323 // We don't need to clear_walkable because it will happen automagically when we return to Java
duke@435 324 }
duke@435 325 };
duke@435 326
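A hedged sketch of typical use: ThreadBlockInVM brackets an operation that may block for a long time, so that safepoints are not held up by this thread. The function and the blocking call below are hypothetical stand-ins, not APIs from this file.

    // Hypothetical illustration only.
    extern void blocking_os_call();    // assumed stand-in for any blocking primitive
    void wait_for_event(JavaThread* thread) {
      ThreadBlockInVM tbivm(thread);   // _thread_in_vm -> _thread_blocked, fenced
      blocking_os_call();              // safepoints can complete while we block here
    }                                  // destructor: _thread_blocked -> _thread_in_vm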
duke@435 327
duke@435 328 // This special transition class is only used to prevent asynchronous exceptions
duke@435 329 // from being installed on vm exit in situations where we can't tolerate them.
duke@435 330 // See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
duke@435 331 class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
duke@435 332 public:
duke@435 333 ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
duke@435 334 trans_from_java(_thread_in_vm);
duke@435 335 }
duke@435 336 ~ThreadInVMfromJavaNoAsyncException() {
duke@435 337 trans(_thread_in_vm, _thread_in_Java);
duke@435 338 // NOTE: We do not check for pending async exceptions.
duke@435 339 // If we did and moved the pending async exception over into the
duke@435 340 // pending exception field, we would need to deopt (currently C2
duke@435 341 // only). However, to do so would require that we transition back
duke@435 342 // to the _thread_in_vm state. Instead we postpone the handling of
duke@435 343 // the async exception.
duke@435 344
duke@435 345 // Check for pending suspends only.
duke@435 346 if (_thread->has_special_runtime_exit_condition())
duke@435 347 _thread->handle_special_runtime_exit_condition(false);
duke@435 348 }
duke@435 349 };
duke@435 350
duke@435 351 // Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
duke@435 352 // Can be used to verify properties on enter/exit of the VM.
duke@435 353
duke@435 354 #ifdef ASSERT
duke@435 355 class VMEntryWrapper {
duke@435 356 public:
duke@435 357 VMEntryWrapper() {
duke@435 358 if (VerifyLastFrame) {
duke@435 359 InterfaceSupport::verify_last_frame();
duke@435 360 }
duke@435 361 }
duke@435 362
duke@435 363 ~VMEntryWrapper() {
duke@435 364 InterfaceSupport::check_gc_alot();
duke@435 365 if (WalkStackALot) {
duke@435 366 InterfaceSupport::walk_stack();
duke@435 367 }
duke@435 368 #ifdef ENABLE_ZAP_DEAD_LOCALS
duke@435 369 if (ZapDeadLocalsOld) {
duke@435 370 InterfaceSupport::zap_dead_locals_old();
duke@435 371 }
duke@435 372 #endif
duke@435 373 #ifdef COMPILER2
duke@435 374 // This option is not used by Compiler 1
duke@435 375 if (StressDerivedPointers) {
duke@435 376 InterfaceSupport::stress_derived_pointers();
duke@435 377 }
duke@435 378 #endif
duke@435 379 if (DeoptimizeALot || DeoptimizeRandom) {
duke@435 380 InterfaceSupport::deoptimizeAll();
duke@435 381 }
duke@435 382 if (ZombieALot) {
duke@435 383 InterfaceSupport::zombieAll();
duke@435 384 }
coleenp@2497 385 if (UnlinkSymbolsALot) {
coleenp@2497 386 InterfaceSupport::unlinkSymbols();
coleenp@2497 387 }
duke@435 388 // do verification AFTER potential deoptimization
duke@435 389 if (VerifyStack) {
duke@435 390 InterfaceSupport::verify_stack();
duke@435 391 }
duke@435 392
duke@435 393 }
duke@435 394 };
duke@435 395
duke@435 396
duke@435 397 class VMNativeEntryWrapper {
duke@435 398 public:
duke@435 399 VMNativeEntryWrapper() {
duke@435 400 if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
duke@435 401 }
duke@435 402
duke@435 403 ~VMNativeEntryWrapper() {
duke@435 404 if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
duke@435 405 }
duke@435 406 };
duke@435 407
duke@435 408 #endif
duke@435 409
duke@435 410
duke@435 411 // VM-internal runtime interface support
duke@435 412
duke@435 413 #ifdef ASSERT
duke@435 414
duke@435 415 class RuntimeHistogramElement : public HistogramElement {
duke@435 416 public:
duke@435 417 RuntimeHistogramElement(const char* name);
duke@435 418 };
duke@435 419
duke@435 420 #define TRACE_CALL(result_type, header) \
duke@435 421 InterfaceSupport::_number_of_calls++; \
duke@435 422 if (TraceRuntimeCalls) \
duke@435 423 InterfaceSupport::trace(#result_type, #header); \
duke@435 424 if (CountRuntimeCalls) { \
duke@435 425 static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
duke@435 426 if (e != NULL) e->increment_count(); \
duke@435 427 }
duke@435 428 #else
duke@435 429 #define TRACE_CALL(result_type, header) \
duke@435 430 /* do nothing */
duke@435 431 #endif
duke@435 432
duke@435 433
duke@435 434 // LEAF routines do not lock, GC or throw exceptions
duke@435 435
never@3241 436 #define VM_LEAF_BASE(result_type, header) \
duke@435 437 TRACE_CALL(result_type, header) \
duke@435 438 debug_only(NoHandleMark __hm;) \
roland@3606 439 os::verify_stack_alignment(); \
duke@435 440 /* begin of body */
duke@435 441
duke@435 442
duke@435 443 // ENTRY routines may lock, GC and throw exceptions
duke@435 444
never@3241 445 #define VM_ENTRY_BASE(result_type, header, thread) \
duke@435 446 TRACE_CALL(result_type, header) \
duke@435 447 HandleMarkCleaner __hm(thread); \
duke@435 448 Thread* THREAD = thread; \
roland@3606 449 os::verify_stack_alignment(); \
duke@435 450 /* begin of body */
duke@435 451
duke@435 452
duke@435 453 // QUICK_ENTRY routines behave like ENTRY but without a handle mark
duke@435 454
never@3241 455 #define VM_QUICK_ENTRY_BASE(result_type, header, thread) \
duke@435 456 TRACE_CALL(result_type, header) \
duke@435 457 debug_only(NoHandleMark __hm;) \
duke@435 458 Thread* THREAD = thread; \
roland@3606 459 os::verify_stack_alignment(); \
duke@435 460 /* begin of body */
duke@435 461
duke@435 462
duke@435 463 // Definitions for IRT (Interpreter Runtime)
duke@435 464 // (thread is an argument passed in to all these routines)
duke@435 465
duke@435 466 #define IRT_ENTRY(result_type, header) \
duke@435 467 result_type header { \
duke@435 468 ThreadInVMfromJava __tiv(thread); \
never@3241 469 VM_ENTRY_BASE(result_type, header, thread) \
duke@435 470 debug_only(VMEntryWrapper __vew;)
duke@435 471
duke@435 472
duke@435 473 #define IRT_LEAF(result_type, header) \
duke@435 474 result_type header { \
never@3241 475 VM_LEAF_BASE(result_type, header) \
duke@435 476 debug_only(No_Safepoint_Verifier __nspv(true);)
duke@435 477
duke@435 478
duke@435 479 #define IRT_ENTRY_NO_ASYNC(result_type, header) \
duke@435 480 result_type header { \
duke@435 481 ThreadInVMfromJavaNoAsyncException __tiv(thread); \
never@3241 482 VM_ENTRY_BASE(result_type, header, thread) \
duke@435 483 debug_only(VMEntryWrapper __vew;)
duke@435 484
duke@435 485 // Another special case for nmethod_entry_point so that the nmethod the
duke@435 486 // interpreter is about to branch to doesn't get flushed before we
duke@435 487 // branch to its interpreter_entry_point. Skip stress testing here too.
duke@435 488 // Also we don't allow async exceptions because it is just too painful.
duke@435 489 #define IRT_ENTRY_FOR_NMETHOD(result_type, header) \
duke@435 490 result_type header { \
duke@435 491 nmethodLocker _nmlock(nm); \
duke@435 492 ThreadInVMfromJavaNoAsyncException __tiv(thread); \
never@3241 493 VM_ENTRY_BASE(result_type, header, thread)
duke@435 494
duke@435 495 #define IRT_END }
duke@435 496
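A hedged usage sketch: the macros assume the JavaThread* parameter is named thread. The routine below is a free function invented for illustration; real entry points of this form are members of InterpreterRuntime in interpreterRuntime.cpp.

    // Hypothetical interpreter runtime entry.
    IRT_ENTRY(void, example_interpreter_entry(JavaThread* thread, int index))
      // Body runs in _thread_in_vm; handles, locks, GC and exceptions are allowed,
      // and a VMEntryWrapper adds extra checking in debug builds.
    IRT_END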
duke@435 497
duke@435 498 // Definitions for JRT (Java (Compiler/Shared) Runtime)
duke@435 499
duke@435 500 #define JRT_ENTRY(result_type, header) \
duke@435 501 result_type header { \
duke@435 502 ThreadInVMfromJava __tiv(thread); \
never@3241 503 VM_ENTRY_BASE(result_type, header, thread) \
duke@435 504 debug_only(VMEntryWrapper __vew;)
duke@435 505
duke@435 506
duke@435 507 #define JRT_LEAF(result_type, header) \
duke@435 508 result_type header { \
never@3241 509 VM_LEAF_BASE(result_type, header) \
duke@435 510 debug_only(JRT_Leaf_Verifier __jlv;)
duke@435 511
duke@435 512
duke@435 513 #define JRT_ENTRY_NO_ASYNC(result_type, header) \
duke@435 514 result_type header { \
duke@435 515 ThreadInVMfromJavaNoAsyncException __tiv(thread); \
never@3241 516 VM_ENTRY_BASE(result_type, header, thread) \
duke@435 517 debug_only(VMEntryWrapper __vew;)
duke@435 518
duke@435 519 // Same as JRT_ENTRY but allows for a return value to be computed after the
duke@435 520 // safepoint taken on the way back into Java from the VM
duke@435 521 #define JRT_BLOCK_ENTRY(result_type, header) \
duke@435 522 result_type header { \
duke@435 523 TRACE_CALL(result_type, header) \
duke@435 524 HandleMarkCleaner __hm(thread);
duke@435 525
duke@435 526 #define JRT_BLOCK \
duke@435 527 { \
duke@435 528 ThreadInVMfromJava __tiv(thread); \
duke@435 529 Thread* THREAD = thread; \
duke@435 530 debug_only(VMEntryWrapper __vew;)
duke@435 531
duke@435 532 #define JRT_BLOCK_END }
duke@435 533
duke@435 534 #define JRT_END }
duke@435 535
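A hedged sketch of how JRT_BLOCK_ENTRY pairs with JRT_BLOCK / JRT_BLOCK_END so the return value can be produced after leaving _thread_in_vm (and after any safepoint taken on the way out). The function and helper below are hypothetical; the pattern follows the SharedRuntime entry points.

    // Hypothetical compiler runtime entry.
    address compute_target(JavaThread* thread);   // assumed helper, declared only for the sketch
    JRT_BLOCK_ENTRY(address, example_blocked_entry(JavaThread* thread))
      address result = NULL;
      JRT_BLOCK
        // In _thread_in_vm here; may lock, GC or throw.
        result = compute_target(thread);
      JRT_BLOCK_END
      return result;                              // returned after leaving _thread_in_vm
    JRT_END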
duke@435 536 // Definitions for JNI
duke@435 537
duke@435 538 #define JNI_ENTRY(result_type, header) \
duke@435 539 JNI_ENTRY_NO_PRESERVE(result_type, header) \
duke@435 540 WeakPreserveExceptionMark __wem(thread);
duke@435 541
duke@435 542 #define JNI_ENTRY_NO_PRESERVE(result_type, header) \
duke@435 543 extern "C" { \
duke@435 544 result_type JNICALL header { \
duke@435 545 JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
duke@435 546 assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
duke@435 547 ThreadInVMfromNative __tiv(thread); \
duke@435 548 debug_only(VMNativeEntryWrapper __vew;) \
never@3241 549 VM_ENTRY_BASE(result_type, header, thread)
duke@435 550
duke@435 551
duke@435 552 // Ensure that the VMNativeEntryWrapper constructor, which can cause
never@3241 553 // a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
duke@435 554 #define JNI_QUICK_ENTRY(result_type, header) \
duke@435 555 extern "C" { \
duke@435 556 result_type JNICALL header { \
duke@435 557 JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
duke@435 558 assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
duke@435 559 ThreadInVMfromNative __tiv(thread); \
duke@435 560 debug_only(VMNativeEntryWrapper __vew;) \
never@3241 561 VM_QUICK_ENTRY_BASE(result_type, header, thread)
duke@435 562
duke@435 563
duke@435 564 #define JNI_LEAF(result_type, header) \
duke@435 565 extern "C" { \
duke@435 566 result_type JNICALL header { \
duke@435 567 JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
duke@435 568 assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
never@3241 569 VM_LEAF_BASE(result_type, header)
duke@435 570
duke@435 571
duke@435 572 // Close the routine and the extern "C"
duke@435 573 #define JNI_END } }
duke@435 574
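A hedged sketch of a JNI entry built with these macros; the function is hypothetical, and the real jni_* entry points live in jni.cpp. The macros derive thread from the parameter named env.

    // Hypothetical JNI entry.
    JNI_ENTRY(jobject, jni_ExampleNewObject(JNIEnv* env, jclass clazz))
      // Runs in _thread_in_vm with a WeakPreserveExceptionMark and handle cleanup.
      return NULL;   // illustrative body only
    JNI_END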
duke@435 575
duke@435 576
duke@435 577 // Definitions for JVM
duke@435 578
duke@435 579 #define JVM_ENTRY(result_type, header) \
duke@435 580 extern "C" { \
duke@435 581 result_type JNICALL header { \
duke@435 582 JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
duke@435 583 ThreadInVMfromNative __tiv(thread); \
duke@435 584 debug_only(VMNativeEntryWrapper __vew;) \
never@3241 585 VM_ENTRY_BASE(result_type, header, thread)
duke@435 586
duke@435 587
duke@435 588 #define JVM_ENTRY_NO_ENV(result_type, header) \
duke@435 589 extern "C" { \
duke@435 590 result_type JNICALL header { \
duke@435 591 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); \
duke@435 592 ThreadInVMfromNative __tiv(thread); \
duke@435 593 debug_only(VMNativeEntryWrapper __vew;) \
never@3241 594 VM_ENTRY_BASE(result_type, header, thread)
duke@435 595
duke@435 596
duke@435 597 #define JVM_QUICK_ENTRY(result_type, header) \
duke@435 598 extern "C" { \
duke@435 599 result_type JNICALL header { \
duke@435 600 JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
duke@435 601 ThreadInVMfromNative __tiv(thread); \
duke@435 602 debug_only(VMNativeEntryWrapper __vew;) \
never@3241 603 VM_QUICK_ENTRY_BASE(result_type, header, thread)
duke@435 604
duke@435 605
duke@435 606 #define JVM_LEAF(result_type, header) \
duke@435 607 extern "C" { \
duke@435 608 result_type JNICALL header { \
duke@435 609 VM_Exit::block_if_vm_exited(); \
never@3241 610 VM_LEAF_BASE(result_type, header)
duke@435 611
duke@435 612
duke@435 613 #define JVM_END } }
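
Finally, a hedged sketch of a JVM leaf entry (hypothetical name; the real users are in jvm.cpp). Leaf routines never transition thread state, so they must not lock, GC or throw.

    // Hypothetical JVM leaf entry.
    JVM_LEAF(jlong, JVM_ExampleNanoTime(JNIEnv* env, jclass ignored))
      return os::javaTimeNanos();   // a leaf-safe call, shown for illustration
    JVM_END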
stefank@2314 614
stefank@2314 615 #endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
