src/share/vm/runtime/thread.hpp

changeset 435
a61af66fc99e
child 777
37f87013dfd8
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/thread.hpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,1757 @@
     1.4 +/*
     1.5 + * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +class ThreadSafepointState;
    1.29 +class ThreadProfiler;
    1.30 +
    1.31 +class JvmtiThreadState;
    1.32 +class JvmtiGetLoadedClassesClosure;
    1.33 +class ThreadStatistics;
    1.34 +class ConcurrentLocksDump;
    1.35 +class ParkEvent ;
    1.36 +
    1.37 +class ciEnv;
    1.38 +class CompileThread;
    1.39 +class CompileLog;
    1.40 +class CompileTask;
    1.41 +class CompileQueue;
    1.42 +class CompilerCounters;
    1.43 +class vframeArray;
    1.44 +
    1.45 +class DeoptResourceMark;
    1.46 +class jvmtiDeferredLocalVariableSet;
    1.47 +
    1.48 +class GCTaskQueue;
    1.49 +class ThreadClosure;
    1.50 +class IdealGraphPrinter;
    1.51 +
    1.52 +// Class hierarchy
    1.53 +// - Thread
    1.54 +//   - VMThread
    1.55 +//   - JavaThread
    1.56 +//   - WatcherThread
    1.57 +
    1.58 +class Thread: public ThreadShadow {
    1.59 +  friend class VMStructs;
    1.60 + private:
    1.61 +  // Exception handling
    1.62 +  // (Note: _pending_exception and friends are in ThreadShadow)
    1.63 +  //oop       _pending_exception;                // pending exception for current thread
    1.64 +  // const char* _exception_file;                   // file information for exception (debugging only)
    1.65 +  // int         _exception_line;                   // line information for exception (debugging only)
    1.66 +
    1.67 +  // Support for forcing alignment of thread objects for biased locking
    1.68 +  void*       _real_malloc_address;
    1.69 + public:
    1.70 +  void* operator new(size_t size);
    1.71 +  void  operator delete(void* p);
    1.72 + private:
    1.73 +
    1.74 +  // ***************************************************************
    1.75 +  // Suspend and resume support
    1.76 +  // ***************************************************************
    1.77 +  //
    1.78 +  // VM suspend/resume no longer exists - it was once used for various
    1.79 +  // things including safepoints but was deprecated and finally removed
    1.80 +  // in Java 7. Because VM suspension was considered "internal" Java-level
    1.81 +  // suspension was considered "external", and this legacy naming scheme
    1.82 +  // remains.
    1.83 +  //
    1.84 +  // External suspend/resume requests come from JVM_SuspendThread,
    1.85 +  // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
    1.86 +  // ResumeThread. External
    1.87 +  // suspend requests cause _external_suspend to be set and external
    1.88 +  // resume requests cause _external_suspend to be cleared.
    1.89 +  // External suspend requests do not nest on top of other external
    1.90 +  // suspend requests. The higher level APIs reject suspend requests
    1.91 +  // for already suspended threads.
    1.92 +  //
    1.93 +  // The external_suspend
    1.94 +  // flag is checked by has_special_runtime_exit_condition() and java thread
    1.95 +  // will self-suspend when handle_special_runtime_exit_condition() is
    1.96 +  // called. Most uses of the _thread_blocked state in JavaThreads are
    1.97 +  // considered the same as being externally suspended; if the blocking
    1.98 +  // condition lifts, the JavaThread will self-suspend. Other places
    1.99 +  // where VM checks for external_suspend include:
   1.100 +  //   + mutex granting (do not enter monitors when thread is suspended)
   1.101 +  //   + state transitions from _thread_in_native
   1.102 +  //
   1.103 +  // In general, java_suspend() does not wait for an external suspend
   1.104 +  // request to complete. When it returns, the only guarantee is that
   1.105 +  // the _external_suspend field is true.
   1.106 +  //
   1.107 +  // wait_for_ext_suspend_completion() is used to wait for an external
   1.108 +  // suspend request to complete. External suspend requests are usually
   1.109 +  // followed by some other interface call that requires the thread to
   1.110 +  // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
   1.111 +  // the interface that requires quiescence, we give the JavaThread a
   1.112 +  // chance to self-suspend before we need it to be quiescent. This
   1.113 +  // improves overall suspend/query performance.
   1.114 +  //
   1.115 +  // _suspend_flags controls the behavior of java_suspend()/java_resume().
   1.116 +  // It must be set under the protection of SR_lock. Read from the flag is
   1.117 +  // OK without SR_lock as long as the value is only used as a hint.
   1.118 +  // (e.g., check _external_suspend first without lock and then recheck
   1.119 +  // inside SR_lock and finish the suspension)
   1.120 +  //
   1.121 +  // _suspend_flags is also overloaded for other "special conditions" so
   1.122 +  // that a single check indicates whether any special action is needed
   1.123 +  // eg. for async exceptions.
   1.124 +  // -------------------------------------------------------------------
   1.125 +  // Notes:
   1.126 +  // 1. The suspend/resume logic no longer uses ThreadState in OSThread
   1.127 +  // but we still update its value to keep other part of the system (mainly
   1.128 +  // JVMTI) happy. ThreadState is legacy code (see notes in
   1.129 +  // osThread.hpp).
   1.130 +  //
   1.131 +  // 2. It would be more natural if set_external_suspend() is private and
   1.132 +  // part of java_suspend(), but that probably would affect the suspend/query
   1.133 +  // performance. Need more investigation on this.
   1.134 +  //
   1.135 +
   1.136 +  // suspend/resume lock: used for self-suspend
   1.137 +  Monitor*    _SR_lock;
   1.138 +
   1.139 + protected:
   1.140 +  enum SuspendFlags {
   1.141 +    // NOTE: avoid using the sign-bit as cc generates different test code
   1.142 +    //       when the sign-bit is used, and sometimes incorrectly - see CR 6398077
   1.143 +
   1.144 +    _external_suspend       = 0x20000000U, // thread is asked to self suspend
   1.145 +    _ext_suspended          = 0x40000000U, // thread has self-suspended
   1.146 +    _deopt_suspend          = 0x10000000U, // thread needs to self suspend for deopt
   1.147 +
   1.148 +    _has_async_exception    = 0x00000001U  // there is a pending async exception
   1.149 +  };
   1.150 +
   1.151 +  // various suspension related flags - atomically updated
   1.152 +  // overloaded for async exception checking in check_special_condition_for_native_trans.
   1.153 +  volatile uint32_t _suspend_flags;
   1.154 +
   1.155 + private:
   1.156 +  int _num_nested_signal;
   1.157 +
   1.158 + public:
   1.159 +  void enter_signal_handler() { _num_nested_signal++; }
   1.160 +  void leave_signal_handler() { _num_nested_signal--; }
   1.161 +  bool is_inside_signal_handler() const  { return _num_nested_signal > 0; }
   1.162 +
   1.163 + private:
   1.164 +  // Debug tracing
   1.165 +  static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN;
   1.166 +
   1.167 +  // Active_handles points to a block of handles
   1.168 +  JNIHandleBlock* _active_handles;
   1.169 +
   1.170 +  // One-element thread local free list
   1.171 +  JNIHandleBlock* _free_handle_block;
   1.172 +
   1.173 +  // Point to the last handle mark
   1.174 +  HandleMark* _last_handle_mark;
   1.175 +
   1.176 +  // The parity of the last strong_roots iteration in which this thread was
   1.177 +  // claimed as a task.
   1.178 +  jint _oops_do_parity;
   1.179 +
   1.180 +  public:
   1.181 +   void set_last_handle_mark(HandleMark* mark)   { _last_handle_mark = mark; }
   1.182 +    HandleMark* last_handle_mark() const          { return _last_handle_mark; }
   1.183 +  private:
   1.184 +
   1.185 +  // debug support for checking if code does allow safepoints or not
   1.186 +  // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
   1.187 +  // mutex, or blocking on an object synchronizer (Java locking).
   1.188 +  // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
   1.189 +  // If !allow_allocation(), then an assertion failure will happen during allocation
   1.190 +  // (Hence, !allow_safepoint() => !allow_allocation()).
   1.191 +  //
   1.192 +  // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
   1.193 +  //
   1.194 +  NOT_PRODUCT(int _allow_safepoint_count;)       // If 0, thread allow a safepoint to happen
   1.195 +  debug_only (int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
   1.196 +
   1.197 +  // Record when GC is locked out via the GC_locker mechanism
   1.198 +  CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
   1.199 +
   1.200 +  friend class No_Alloc_Verifier;
   1.201 +  friend class No_Safepoint_Verifier;
   1.202 +  friend class Pause_No_Safepoint_Verifier;
   1.203 +  friend class ThreadLocalStorage;
   1.204 +  friend class GC_locker;
   1.205 +
   1.206 +  // In order for all threads to be able to use fast locking, we need to know the highest stack
   1.207 +  // address of where a lock is on the stack (stacks normally grow towards lower addresses). This
   1.208 +  // variable is initially set to NULL, indicating no locks are used by the thread. During the thread's
   1.209 +  // execution, it will be set whenever locking can happen, i.e., when we call out to Java code or use
   1.210 +  // an ObjectLocker. The value is never decreased, hence, it will over the lifetime of a thread
   1.211 +  // approximate the real stackbase.
   1.212 +  address _highest_lock;                         // Highest stack address where a JavaLock exist
   1.213 +
   1.214 +  ThreadLocalAllocBuffer _tlab;                  // Thread-local eden
   1.215 +
   1.216 +  int   _vm_operation_started_count;             // VM_Operation support
   1.217 +  int   _vm_operation_completed_count;           // VM_Operation support
   1.218 +
   1.219 +  ObjectMonitor* _current_pending_monitor;       // ObjectMonitor this thread
   1.220 +                                                 // is waiting to lock
   1.221 +  bool _current_pending_monitor_is_from_java;    // locking is from Java code
   1.222 +
   1.223 +  // ObjectMonitor on which this thread called Object.wait()
   1.224 +  ObjectMonitor* _current_waiting_monitor;
   1.225 +
   1.226 +  // Private thread-local objectmonitor list - a simple cache organized as a SLL.
   1.227 + public:
   1.228 +  ObjectMonitor * omFreeList ;
   1.229 +  int omFreeCount ;                             // length of omFreeList
   1.230 +  int omFreeProvision ;                         // reload chunk size
   1.231 +
   1.232 + public:
   1.233 +  enum {
   1.234 +    is_definitely_current_thread = true
   1.235 +  };
   1.236 +
   1.237 +  // Constructor
   1.238 +  Thread();
   1.239 +  virtual ~Thread();
   1.240 +
   1.241 +  // initialization
   1.242 +  void initialize_thread_local_storage();
   1.243 +
   1.244 +  // thread entry point
   1.245 +  virtual void run();
   1.246 +
   1.247 +  // Testers
   1.248 +  virtual bool is_VM_thread()       const            { return false; }
   1.249 +  virtual bool is_Java_thread()     const            { return false; }
   1.250 +  // Remove this ifdef when C1 is ported to the compiler interface.
   1.251 +  virtual bool is_Compiler_thread() const            { return false; }
   1.252 +  virtual bool is_hidden_from_external_view() const  { return false; }
   1.253 +  virtual bool is_jvmti_agent_thread() const         { return false; }
   1.254 +  // True iff the thread can perform GC operations at a safepoint.
   1.255 +  // Generally will be true only of VM thread and parallel GC WorkGang
   1.256 +  // threads.
   1.257 +  virtual bool is_GC_task_thread() const             { return false; }
   1.258 +  virtual bool is_Watcher_thread() const             { return false; }
   1.259 +  virtual bool is_ConcurrentGC_thread() const        { return false; }
   1.260 +
   1.261 +  virtual char* name() const { return (char*)"Unknown thread"; }
   1.262 +
   1.263 +  // Returns the current thread
   1.264 +  static inline Thread* current();
   1.265 +
   1.266 +  // Common thread operations
   1.267 +  static void set_priority(Thread* thread, ThreadPriority priority);
   1.268 +  static ThreadPriority get_priority(const Thread* const thread);
   1.269 +  static void start(Thread* thread);
   1.270 +  static void interrupt(Thread* thr);
   1.271 +  static bool is_interrupted(Thread* thr, bool clear_interrupted);
   1.272 +
   1.273 +  Monitor* SR_lock() const                       { return _SR_lock; }
   1.274 +
   1.275 +  bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
   1.276 +
   1.277 +  void set_suspend_flag(SuspendFlags f) {
   1.278 +    assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
   1.279 +    uint32_t flags;
   1.280 +    do {
   1.281 +      flags = _suspend_flags;
   1.282 +    }
   1.283 +    while (Atomic::cmpxchg((jint)(flags | f),
   1.284 +                           (volatile jint*)&_suspend_flags,
   1.285 +                           (jint)flags) != (jint)flags);
   1.286 +  }
   1.287 +  void clear_suspend_flag(SuspendFlags f) {
   1.288 +    assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
   1.289 +    uint32_t flags;
   1.290 +    do {
   1.291 +      flags = _suspend_flags;
   1.292 +    }
   1.293 +    while (Atomic::cmpxchg((jint)(flags & ~f),
   1.294 +                           (volatile jint*)&_suspend_flags,
   1.295 +                           (jint)flags) != (jint)flags);
   1.296 +  }
   1.297 +
   1.298 +  void set_has_async_exception() {
   1.299 +    set_suspend_flag(_has_async_exception);
   1.300 +  }
   1.301 +  void clear_has_async_exception() {
   1.302 +    clear_suspend_flag(_has_async_exception);
   1.303 +  }
   1.304 +
   1.305 +  // Support for Unhandled Oop detection
   1.306 +#ifdef CHECK_UNHANDLED_OOPS
   1.307 + private:
   1.308 +  UnhandledOops *_unhandled_oops;
   1.309 + public:
   1.310 +  UnhandledOops* unhandled_oops()               { return _unhandled_oops; }
   1.311 +  // Mark oop safe for gc.  It may be stack allocated but won't move.
   1.312 +  void allow_unhandled_oop(oop *op)              {
   1.313 +    if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
   1.314 +  }
   1.315 +  // Clear oops at safepoint so crashes point to unhandled oop violator
   1.316 +  void clear_unhandled_oops()                   {
   1.317 +    if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
   1.318 +  }
   1.319 +  bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
   1.320 +#endif // CHECK_UNHANDLED_OOPS
   1.321 +
   1.322 + public:
   1.323 +  // Installs a pending exception to be inserted later
   1.324 +  static void send_async_exception(oop thread_oop, oop java_throwable);
   1.325 +
   1.326 +  // Resource area
   1.327 +  ResourceArea* resource_area() const            { return _resource_area; }
   1.328 +  void set_resource_area(ResourceArea* area)     { _resource_area = area; }
   1.329 +
   1.330 +  OSThread* osthread() const                     { return _osthread;   }
   1.331 +  void set_osthread(OSThread* thread)            { _osthread = thread; }
   1.332 +
   1.333 +  // JNI handle support
   1.334 +  JNIHandleBlock* active_handles() const         { return _active_handles; }
   1.335 +  void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
   1.336 +  JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
   1.337 +  void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
   1.338 +
   1.339 +  // Internal handle support
   1.340 +  HandleArea* handle_area() const                { return _handle_area; }
   1.341 +  void set_handle_area(HandleArea* area)         { _handle_area = area; }
   1.342 +
   1.343 +  // Thread-Local Allocation Buffer (TLAB) support
   1.344 +  ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
   1.345 +  void initialize_tlab() {
   1.346 +    if (UseTLAB) {
   1.347 +      tlab().initialize();
   1.348 +    }
   1.349 +  }
   1.350 +
   1.351 +  // VM operation support
   1.352 +  int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
   1.353 +  int vm_operation_completed_count()             { return _vm_operation_completed_count; }
   1.354 +  void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
   1.355 +
   1.356 +  // For tracking the heavyweight monitor the thread is pending on.
   1.357 +  ObjectMonitor* current_pending_monitor() {
   1.358 +    return _current_pending_monitor;
   1.359 +  }
   1.360 +  void set_current_pending_monitor(ObjectMonitor* monitor) {
   1.361 +    _current_pending_monitor = monitor;
   1.362 +  }
   1.363 +  void set_current_pending_monitor_is_from_java(bool from_java) {
   1.364 +    _current_pending_monitor_is_from_java = from_java;
   1.365 +  }
   1.366 +  bool current_pending_monitor_is_from_java() {
   1.367 +    return _current_pending_monitor_is_from_java;
   1.368 +  }
   1.369 +
   1.370 +  // For tracking the ObjectMonitor on which this thread called Object.wait()
   1.371 +  ObjectMonitor* current_waiting_monitor() {
   1.372 +    return _current_waiting_monitor;
   1.373 +  }
   1.374 +  void set_current_waiting_monitor(ObjectMonitor* monitor) {
   1.375 +    _current_waiting_monitor = monitor;
   1.376 +  }
   1.377 +
   1.378 +  // GC support
   1.379 +  // Apply "f->do_oop" to all root oops in "this".
   1.380 +  void oops_do(OopClosure* f);
   1.381 +
   1.382 +  // Handles the parallel case for the method below.
   1.383 +private:
   1.384 +  bool claim_oops_do_par_case(int collection_parity);
   1.385 +public:
   1.386 +  // Requires that "collection_parity" is that of the current strong roots
   1.387 +  // iteration.  If "is_par" is false, sets the parity of "this" to
   1.388 +  // "collection_parity", and returns "true".  If "is_par" is true,
   1.389 +  // uses an atomic instruction to set the current threads parity to
   1.390 +  // "collection_parity", if it is not already.  Returns "true" iff the
   1.391 +  // calling thread does the update, this indicates that the calling thread
   1.392 +  // has claimed the thread's stack as a root group in the current
   1.393 +  // collection.
   1.394 +  bool claim_oops_do(bool is_par, int collection_parity) {
   1.395 +    if (!is_par) {
   1.396 +      _oops_do_parity = collection_parity;
   1.397 +      return true;
   1.398 +    } else {
   1.399 +      return claim_oops_do_par_case(collection_parity);
   1.400 +    }
   1.401 +  }
   1.402 +
   1.403 +  // Sweeper support
   1.404 +  void nmethods_do();
   1.405 +
   1.406 +  // Fast-locking support
   1.407 +  address highest_lock() const                   { return _highest_lock; }
   1.408 +  void update_highest_lock(address base)         { if (base > _highest_lock) _highest_lock = base; }
   1.409 +
   1.410 +  // Tells if adr belong to this thread. This is used
   1.411 +  // for checking if a lock is owned by the running thread.
   1.412 +  // Warning: the method can only be used on the running thread
   1.413 +  // Fast lock support uses these methods
   1.414 +  virtual bool lock_is_in_stack(address adr) const;
   1.415 +  virtual bool is_lock_owned(address adr) const;
   1.416 +
   1.417 +  // Check if address is in the stack of the thread (not just for locks).
   1.418 +  bool is_in_stack(address adr) const;
   1.419 +
   1.420 +  // Sets this thread as starting thread. Returns failure if thread
   1.421 +  // creation fails due to lack of memory, too many threads etc.
   1.422 +  bool set_as_starting_thread();
   1.423 +
   1.424 + protected:
   1.425 +  // OS data associated with the thread
   1.426 +  OSThread* _osthread;  // Platform-specific thread information
   1.427 +
   1.428 +  // Thread local resource area for temporary allocation within the VM
   1.429 +  ResourceArea* _resource_area;
   1.430 +
   1.431 +  // Thread local handle area for allocation of handles within the VM
   1.432 +  HandleArea* _handle_area;
   1.433 +
   1.434 +  // Support for stack overflow handling, get_thread, etc.
   1.435 +  address          _stack_base;
   1.436 +  size_t           _stack_size;
   1.437 +  uintptr_t        _self_raw_id;      // used by get_thread (mutable)
   1.438 +  int              _lgrp_id;
   1.439 +
   1.440 + public:
   1.441 +  // Stack overflow support
   1.442 +  address stack_base() const           { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
   1.443 +
   1.444 +  void    set_stack_base(address base) { _stack_base = base; }
   1.445 +  size_t  stack_size() const           { return _stack_size; }
   1.446 +  void    set_stack_size(size_t size)  { _stack_size = size; }
   1.447 +  void    record_stack_base_and_size();
   1.448 +
   1.449 +  int     lgrp_id() const                 { return _lgrp_id; }
   1.450 +  void    set_lgrp_id(int value)          { _lgrp_id = value; }
   1.451 +
   1.452 +  // Printing
   1.453 +  void print_on(outputStream* st) const;
   1.454 +  void print() const { print_on(tty); }
   1.455 +  virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
   1.456 +
   1.457 +  // Debug-only code
   1.458 +
   1.459 +#ifdef ASSERT
   1.460 + private:
   1.461 +  // Deadlock detection support for Mutex locks. List of locks own by thread.
   1.462 +  Monitor *_owned_locks;
   1.463 +  // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
   1.464 +  // thus the friendship
   1.465 +  friend class Mutex;
   1.466 +  friend class Monitor;
   1.467 +
   1.468 + public:
   1.469 +  void print_owned_locks_on(outputStream* st) const;
   1.470 +  void print_owned_locks() const                 { print_owned_locks_on(tty);    }
   1.471 +  Monitor * owned_locks() const                  { return _owned_locks;          }
   1.472 +  bool owns_locks() const                        { return owned_locks() != NULL; }
   1.473 +  bool owns_locks_but_compiled_lock() const;
   1.474 +
   1.475 +  // Deadlock detection
   1.476 +  bool allow_allocation()                        { return _allow_allocation_count == 0; }
   1.477 +#endif
   1.478 +
   1.479 +  void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
   1.480 +
   1.481 + private:
   1.482 +  volatile int _jvmti_env_iteration_count;
   1.483 +
   1.484 + public:
   1.485 +  void entering_jvmti_env_iteration()            { ++_jvmti_env_iteration_count; }
   1.486 +  void leaving_jvmti_env_iteration()             { --_jvmti_env_iteration_count; }
   1.487 +  bool is_inside_jvmti_env_iteration()           { return _jvmti_env_iteration_count > 0; }
   1.488 +
   1.489 +  // Code generation
   1.490 +  static ByteSize exception_file_offset()        { return byte_offset_of(Thread, _exception_file   ); }
   1.491 +  static ByteSize exception_line_offset()        { return byte_offset_of(Thread, _exception_line   ); }
   1.492 +  static ByteSize active_handles_offset()        { return byte_offset_of(Thread, _active_handles   ); }
   1.493 +
   1.494 +  static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base ); }
   1.495 +  static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size ); }
   1.496 +  static ByteSize omFreeList_offset()            { return byte_offset_of(Thread, omFreeList); }
   1.497 +
   1.498 +#define TLAB_FIELD_OFFSET(name) \
   1.499 +  static ByteSize tlab_##name##_offset()            { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
   1.500 +
   1.501 +  TLAB_FIELD_OFFSET(start)
   1.502 +  TLAB_FIELD_OFFSET(end)
   1.503 +  TLAB_FIELD_OFFSET(top)
   1.504 +  TLAB_FIELD_OFFSET(pf_top)
   1.505 +  TLAB_FIELD_OFFSET(size)                   // desired_size
   1.506 +  TLAB_FIELD_OFFSET(refill_waste_limit)
   1.507 +  TLAB_FIELD_OFFSET(number_of_refills)
   1.508 +  TLAB_FIELD_OFFSET(fast_refill_waste)
   1.509 +  TLAB_FIELD_OFFSET(slow_allocations)
   1.510 +
   1.511 +#undef TLAB_FIELD_OFFSET
   1.512 +
   1.513 + public:
   1.514 +  volatile intptr_t _Stalled ;
   1.515 +  volatile int _TypeTag ;
   1.516 +  ParkEvent * _ParkEvent ;                     // for synchronized()
   1.517 +  ParkEvent * _SleepEvent ;                    // for Thread.sleep
   1.518 +  ParkEvent * _MutexEvent ;                    // for native internal Mutex/Monitor
   1.519 +  ParkEvent * _MuxEvent ;                      // for low-level muxAcquire-muxRelease
   1.520 +  int NativeSyncRecursion ;                    // diagnostic
   1.521 +
   1.522 +  volatile int _OnTrap ;                       // Resume-at IP delta
   1.523 +  jint _hashStateW ;                           // Marsaglia Shift-XOR thread-local RNG
   1.524 +  jint _hashStateX ;                           // thread-specific hashCode generator state
   1.525 +  jint _hashStateY ;
   1.526 +  jint _hashStateZ ;
   1.527 +  void * _schedctl ;
   1.528 +
   1.529 +  intptr_t _ScratchA, _ScratchB ;              // Scratch locations for fast-path sync code
   1.530 +  static ByteSize ScratchA_offset()            { return byte_offset_of(Thread, _ScratchA ); }
   1.531 +  static ByteSize ScratchB_offset()            { return byte_offset_of(Thread, _ScratchB ); }
   1.532 +
   1.533 +  volatile jint rng [4] ;                      // RNG for spin loop
   1.534 +
   1.535 +  // Low-level leaf-lock primitives used to implement synchronization
   1.536 +  // and native monitor-mutex infrastructure.
   1.537 +  // Not for general synchronization use.
   1.538 +  static void SpinAcquire (volatile int * Lock, const char * Name) ;
   1.539 +  static void SpinRelease (volatile int * Lock) ;
   1.540 +  static void muxAcquire  (volatile intptr_t * Lock, const char * Name) ;
   1.541 +  static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
   1.542 +  static void muxRelease  (volatile intptr_t * Lock) ;
   1.543 +
   1.544 +};
   1.545 +
   1.546 +// Inline implementation of Thread::current()
   1.547 +// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
   1.548 +// startup.
   1.549 +// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
   1.550 +// period.   This is inlined in thread_<os_family>.inline.hpp.
   1.551 +
   1.552 +inline Thread* Thread::current() {
   1.553 +#ifdef ASSERT
   1.554 +// This function is very high traffic. Define PARANOID to enable expensive
   1.555 +// asserts.
   1.556 +#ifdef PARANOID
   1.557 +  // Signal handler should call ThreadLocalStorage::get_thread_slow()
   1.558 +  Thread* t = ThreadLocalStorage::get_thread_slow();
   1.559 +  assert(t != NULL && !t->is_inside_signal_handler(),
   1.560 +         "Don't use Thread::current() inside signal handler");
   1.561 +#endif
   1.562 +#endif
   1.563 +  Thread* thread = ThreadLocalStorage::thread();
   1.564 +  assert(thread != NULL, "just checking");
   1.565 +  return thread;
   1.566 +}
   1.567 +
   1.568 +// Name support for threads.  non-JavaThread subclasses with multiple
   1.569 +// uniquely named instances should derive from this.
   1.570 +class NamedThread: public Thread {
   1.571 +  friend class VMStructs;
   1.572 +  enum {
   1.573 +    max_name_len = 64
   1.574 +  };
   1.575 + private:
   1.576 +  char* _name;
   1.577 + public:
   1.578 +  NamedThread();
   1.579 +  ~NamedThread();
   1.580 +  // May only be called once per thread.
   1.581 +  void set_name(const char* format, ...);
   1.582 +  virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
   1.583 +};
   1.584 +
   1.585 +// Worker threads are named and have an id of an assigned work.
   1.586 +class WorkerThread: public NamedThread {
   1.587 +private:
   1.588 +  uint _id;
   1.589 +public:
   1.590 +  WorkerThread() : _id(0) { }
   1.591 +  void set_id(uint work_id) { _id = work_id; }
   1.592 +  uint id() const { return _id; }
   1.593 +};
   1.594 +
   1.595 +// A single WatcherThread is used for simulating timer interrupts.
   1.596 +class WatcherThread: public Thread {
   1.597 +  friend class VMStructs;
   1.598 + public:
   1.599 +  virtual void run();
   1.600 +
   1.601 + private:
   1.602 +  static WatcherThread* _watcher_thread;
   1.603 +
   1.604 +  static bool _should_terminate;
   1.605 + public:
   1.606 +  enum SomeConstants {
   1.607 +    delay_interval = 10                          // interrupt delay in milliseconds
   1.608 +  };
   1.609 +
   1.610 +  // Constructor
   1.611 +  WatcherThread();
   1.612 +
   1.613 +  // Tester
   1.614 +  bool is_Watcher_thread() const                 { return true; }
   1.615 +
   1.616 +  // Printing
   1.617 +  char* name() const { return (char*)"VM Periodic Task Thread"; }
   1.618 +  void print_on(outputStream* st) const;
   1.619 +  void print() const { print_on(tty); }
   1.620 +
   1.621 +  // Returns the single instance of WatcherThread
   1.622 +  static WatcherThread* watcher_thread()         { return _watcher_thread; }
   1.623 +
   1.624 +  // Create and start the single instance of WatcherThread, or stop it on shutdown
   1.625 +  static void start();
   1.626 +  static void stop();
   1.627 +};
   1.628 +
   1.629 +
   1.630 +class CompilerThread;
   1.631 +
   1.632 +typedef void (*ThreadFunction)(JavaThread*, TRAPS);
   1.633 +
   1.634 +class JavaThread: public Thread {
   1.635 +  friend class VMStructs;
   1.636 + private:
   1.637 +  JavaThread*    _next;                          // The next thread in the Threads list
   1.638 +  oop            _threadObj;                     // The Java level thread object
   1.639 +
   1.640 +#ifdef ASSERT
   1.641 + private:
   1.642 +  int _java_call_counter;
   1.643 +
   1.644 + public:
   1.645 +  int  java_call_counter()                       { return _java_call_counter; }
   1.646 +  void inc_java_call_counter()                   { _java_call_counter++; }
   1.647 +  void dec_java_call_counter() {
   1.648 +    assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
   1.649 +    _java_call_counter--;
   1.650 +  }
   1.651 + private:  // restore original namespace restriction
   1.652 +#endif  // ifdef ASSERT
   1.653 +
   1.654 +#ifndef PRODUCT
   1.655 + public:
   1.656 +  enum {
   1.657 +    jump_ring_buffer_size = 16
   1.658 +  };
   1.659 + private:  // restore original namespace restriction
   1.660 +#endif
   1.661 +
   1.662 +  JavaFrameAnchor _anchor;                       // Encapsulation of current java frame and it state
   1.663 +
   1.664 +  ThreadFunction _entry_point;
   1.665 +
   1.666 +  JNIEnv        _jni_environment;
   1.667 +
   1.668 +  // Deopt support
   1.669 +  DeoptResourceMark*  _deopt_mark;               // Holds special ResourceMark for deoptimization
   1.670 +
   1.671 +  intptr_t*      _must_deopt_id;                 // id of frame that needs to be deopted once we
   1.672 +                                                 // transition out of native
   1.673 +
   1.674 +  vframeArray*  _vframe_array_head;              // Holds the heap of the active vframeArrays
   1.675 +  vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
   1.676 +  // Because deoptimization is lazy we must save jvmti requests to set locals
   1.677 +  // in compiled frames until we deoptimize and we have an interpreter frame.
   1.678 +  // This holds the pointer to array (yeah like there might be more than one) of
   1.679 +  // description of compiled vframes that have locals that need to be updated.
   1.680 +  GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;
   1.681 +
   1.682 +  // Handshake value for fixing 6243940. We need a place for the i2c
   1.683 +  // adapter to store the callee methodOop. This value is NEVER live
   1.684 +  // across a gc point so it does NOT have to be gc'd
   1.685 +  // The handshake is open ended since we can't be certain that it will
   1.686 +  // be NULLed. This is because we rarely ever see the race and end up
   1.687 +  // in handle_wrong_method which is the backend of the handshake. See
   1.688 +  // code in i2c adapters and handle_wrong_method.
   1.689 +
   1.690 +  methodOop     _callee_target;
   1.691 +
   1.692 +  // Oop results of VM runtime calls
   1.693 +  oop           _vm_result;                      // Used to pass back an oop result into Java code, GC-preserved
   1.694 +  oop           _vm_result_2;                    // Used to pass back an oop result into Java code, GC-preserved
   1.695 +
   1.696 +  MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors
   1.697 +                                                 // allocated during deoptimization
   1.698 +                                                 // and by JNI_MonitorEnter/Exit
   1.699 +
   1.700 +  // Async. requests support
   1.701 +  enum AsyncRequests {
   1.702 +    _no_async_condition = 0,
   1.703 +    _async_exception,
   1.704 +    _async_unsafe_access_error
   1.705 +  };
   1.706 +  AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
   1.707 +  oop           _pending_async_exception;
   1.708 +
   1.709 +  // Safepoint support
   1.710 + public:                                         // Expose _thread_state for SafeFetchInt()
   1.711 +  volatile JavaThreadState _thread_state;
   1.712 + private:
   1.713 +  ThreadSafepointState *_safepoint_state;        // Holds information about a thread during a safepoint
   1.714 +  address               _saved_exception_pc;     // Saved pc of instruction where last implicit exception happened
   1.715 +
   1.716 +  // JavaThread termination support
   1.717 +  enum TerminatedTypes {
   1.718 +    _not_terminated = 0xDEAD - 2,
   1.719 +    _thread_exiting,                             // JavaThread::exit() has been called for this thread
   1.720 +    _thread_terminated,                          // JavaThread is removed from thread list
   1.721 +    _vm_exited                                   // JavaThread is still executing native code, but VM is terminated
   1.722 +                                                 // only VM_Exit can set _vm_exited
   1.723 +  };
   1.724 +
   1.725 +  // In general a JavaThread's _terminated field transitions as follows:
   1.726 +  //
   1.727 +  //   _not_terminated => _thread_exiting => _thread_terminated
   1.728 +  //
   1.729 +  // _vm_exited is a special value to cover the case of a JavaThread
   1.730 +  // executing native code after the VM itself is terminated.
   1.731 +  TerminatedTypes       _terminated;
   1.732 +  // suspend/resume support
   1.733 +  volatile bool         _suspend_equivalent;     // Suspend equivalent condition
   1.734 +  jint                  _in_deopt_handler;       // count of deoptimization
   1.735 +                                                 // handlers thread is in
   1.736 +  volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
   1.737 +  bool                  _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
   1.738 +                                                 // never locked) when throwing an exception. Used by interpreter only.
   1.739 +
   1.740 +  //  Flag to mark a JNI thread in the process of attaching - See CR 6404306
   1.741 +  //  This flag is never set true other than at construction, and in that case
   1.742 +  //  is shortly thereafter set false
   1.743 +  volatile bool _is_attaching;
   1.744 +
   1.745 + public:
   1.746 +  // State of the stack guard pages for this thread.
   1.747 +  enum StackGuardState {
   1.748 +    stack_guard_unused,         // not needed
   1.749 +    stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
   1.750 +    stack_guard_enabled         // enabled
   1.751 +  };
   1.752 +
   1.753 + private:
   1.754 +
   1.755 +  StackGuardState        _stack_guard_state;
   1.756 +
   1.757 +  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
    1.758 +  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
   1.759 +  // code)
   1.760 +  volatile oop     _exception_oop;               // Exception thrown in compiled code
   1.761 +  volatile address _exception_pc;                // PC where exception happened
   1.762 +  volatile address _exception_handler_pc;        // PC for handler of exception
   1.763 +  volatile int     _exception_stack_size;        // Size of frame where exception happened
   1.764 +
   1.765 +  // support for compilation
    1.766 +  bool    _is_compiling;                         // is true if a compilation is active in this thread (one compilation per thread possible)
   1.767 +
   1.768 +  // support for JNI critical regions
   1.769 +  jint    _jni_active_critical;                  // count of entries into JNI critical region
   1.770 +
   1.771 +  // For deadlock detection.
   1.772 +  int _depth_first_number;
   1.773 +
   1.774 +  // JVMTI PopFrame support
   1.775 +  // This is set to popframe_pending to signal that top Java frame should be popped immediately
   1.776 +  int _popframe_condition;
   1.777 +
   1.778 +#ifndef PRODUCT
   1.779 +  int _jmp_ring_index;
   1.780 +  struct {
   1.781 +      // We use intptr_t instead of address so debugger doesn't try and display strings
   1.782 +      intptr_t _target;
   1.783 +      intptr_t _instruction;
   1.784 +      const char*  _file;
   1.785 +      int _line;
   1.786 +  }   _jmp_ring[ jump_ring_buffer_size ];
   1.787 +#endif /* PRODUCT */
   1.788 +
   1.789 +  friend class VMThread;
   1.790 +  friend class ThreadWaitTransition;
   1.791 +  friend class VM_Exit;
   1.792 +
    1.793 +  void initialize();                             // Initializes the instance variables
   1.794 +
   1.795 + public:
   1.796 +  // Constructor
   1.797 +  JavaThread(bool is_attaching = false); // for main thread and JNI attached threads
   1.798 +  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
   1.799 +  ~JavaThread();
   1.800 +
   1.801 +#ifdef ASSERT
    1.802 +  // verify this JavaThread hasn't been published in the Threads::list yet
   1.803 +  void verify_not_published();
   1.804 +#endif
   1.805 +
    1.806 +  // JNI function table getter/setter for the JVMTI JNI function table interception API.
   1.807 +  void set_jni_functions(struct JNINativeInterface_* functionTable) {
   1.808 +    _jni_environment.functions = functionTable;
   1.809 +  }
   1.810 +  struct JNINativeInterface_* get_jni_functions() {
   1.811 +    return (struct JNINativeInterface_ *)_jni_environment.functions;
   1.812 +  }
   1.813 +
   1.814 +  // Executes Shutdown.shutdown()
   1.815 +  void invoke_shutdown_hooks();
   1.816 +
   1.817 +  // Cleanup on thread exit
   1.818 +  enum ExitType {
   1.819 +    normal_exit,
   1.820 +    jni_detach
   1.821 +  };
   1.822 +  void exit(bool destroy_vm, ExitType exit_type = normal_exit);
   1.823 +
   1.824 +  void cleanup_failed_attach_current_thread();
   1.825 +
   1.826 +  // Testers
   1.827 +  virtual bool is_Java_thread() const            { return true;  }
   1.828 +
   1.829 +  // compilation
   1.830 +  void set_is_compiling(bool f)                  { _is_compiling = f; }
   1.831 +  bool is_compiling() const                      { return _is_compiling; }
   1.832 +
   1.833 +  // Thread chain operations
   1.834 +  JavaThread* next() const                       { return _next; }
   1.835 +  void set_next(JavaThread* p)                   { _next = p; }
   1.836 +
   1.837 +  // Thread oop. threadObj() can be NULL for initial JavaThread
   1.838 +  // (or for threads attached via JNI)
   1.839 +  oop threadObj() const                          { return _threadObj; }
   1.840 +  void set_threadObj(oop p)                      { _threadObj = p; }
   1.841 +
   1.842 +  ThreadPriority java_priority() const;          // Read from threadObj()
   1.843 +
   1.844 +  // Prepare thread and add to priority queue.  If a priority is
   1.845 +  // not specified, use the priority of the thread object. Threads_lock
   1.846 +  // must be held while this function is called.
   1.847 +  void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
   1.848 +
   1.849 +  void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
   1.850 +  address saved_exception_pc()                   { return _saved_exception_pc; }
   1.851 +
   1.852 +
   1.853 +  ThreadFunction entry_point() const             { return _entry_point; }
   1.854 +
   1.855 +  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
   1.856 +  void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);
   1.857 +
   1.858 +  // Last frame anchor routines
   1.859 +
   1.860 +  JavaFrameAnchor* frame_anchor(void)                { return &_anchor; }
   1.861 +
   1.862 +  // last_Java_sp
   1.863 +  bool has_last_Java_frame() const                   { return _anchor.has_last_Java_frame(); }
   1.864 +  intptr_t* last_Java_sp() const                     { return _anchor.last_Java_sp(); }
   1.865 +
   1.866 +  // last_Java_pc
   1.867 +
   1.868 +  address last_Java_pc(void)                         { return _anchor.last_Java_pc(); }
   1.869 +
   1.870 +  // Safepoint support
   1.871 +  JavaThreadState thread_state() const           { return _thread_state; }
   1.872 +  void set_thread_state(JavaThreadState s)       { _thread_state=s;      }
   1.873 +  ThreadSafepointState *safepoint_state() const  { return _safepoint_state;  }
   1.874 +  void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
   1.875 +  bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }
   1.876 +
   1.877 +  // thread has called JavaThread::exit() or is terminated
   1.878 +  bool is_exiting()                              { return _terminated == _thread_exiting || is_terminated(); }
   1.879 +  // thread is terminated (no longer on the threads list); we compare
   1.880 +  // against the two non-terminated values so that a freed JavaThread
   1.881 +  // will also be considered terminated.
   1.882 +  bool is_terminated()                           { return _terminated != _not_terminated && _terminated != _thread_exiting; }
   1.883 +  void set_terminated(TerminatedTypes t)         { _terminated = t; }
   1.884 +  // special for Threads::remove() which is static:
   1.885 +  void set_terminated_value()                    { _terminated = _thread_terminated; }
   1.886 +  void block_if_vm_exited();
   1.887 +
   1.888 +  bool doing_unsafe_access()                     { return _doing_unsafe_access; }
   1.889 +  void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
   1.890 +
   1.891 +  bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
   1.892 +  void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
   1.893 +
   1.894 +
   1.895 +  // Suspend/resume support for JavaThread
   1.896 +
   1.897 + private:
   1.898 +  void set_ext_suspended()       { set_suspend_flag (_ext_suspended);  }
   1.899 +  void clear_ext_suspended()     { clear_suspend_flag(_ext_suspended); }
   1.900 +
   1.901 + public:
   1.902 +  void java_suspend();
   1.903 +  void java_resume();
   1.904 +  int  java_suspend_self();
   1.905 +
    1.906 +  void check_and_wait_while_suspended() {      // caller must be this thread (asserted below)
    1.907 +    assert(JavaThread::current() == this, "sanity check");
    1.908 +
    1.909 +    bool do_self_suspend;
    1.910 +    do {
    1.911 +      // were we externally suspended while we were waiting?
    1.912 +      do_self_suspend = handle_special_suspend_equivalent_condition();
    1.913 +      if (do_self_suspend) {
    1.914 +        // don't surprise the thread that suspended us by returning
    1.915 +        java_suspend_self();
    1.916 +        set_suspend_equivalent();            // re-arm so the next loop iteration can re-check
    1.917 +      }
    1.918 +    } while (do_self_suspend);               // loop: we may have been re-suspended while blocked
    1.919 +  }
   1.920 +  static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
   1.921 +  // Check for async exception in addition to safepoint and suspend request.
   1.922 +  static void check_special_condition_for_native_trans(JavaThread *thread);
   1.923 +
   1.924 +  bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
    1.925 +  bool is_ext_suspend_completed_with_lock(uint32_t *bits) {  // SR_lock-holding wrapper around is_ext_suspend_completed()
    1.926 +    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    1.927 +    // Warning: is_ext_suspend_completed() may temporarily drop the
    1.928 +    // SR_lock to allow the thread to reach a stable thread state if
    1.929 +    // it is currently in a transient thread state.
    1.930 +    return is_ext_suspend_completed(false /*!called_by_wait */,
    1.931 +                                    SuspendRetryDelay, bits);
    1.932 +  }
   1.933 +
   1.934 +  // We cannot allow wait_for_ext_suspend_completion() to run forever or
   1.935 +  // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
   1.936 +  // passed as the count and delay parameters. Experiments with specific
   1.937 +  // calls to wait_for_ext_suspend_completion() can be done by passing
   1.938 +  // other values in the code. Experiments with all calls can be done
   1.939 +  // via the appropriate -XX options.
   1.940 +  bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
   1.941 +
   1.942 +  void set_external_suspend()     { set_suspend_flag  (_external_suspend); }
   1.943 +  void clear_external_suspend()   { clear_suspend_flag(_external_suspend); }
   1.944 +
   1.945 +  void set_deopt_suspend()        { set_suspend_flag  (_deopt_suspend); }
   1.946 +  void clear_deopt_suspend()      { clear_suspend_flag(_deopt_suspend); }
   1.947 +  bool is_deopt_suspend()         { return (_suspend_flags & _deopt_suspend) != 0; }
   1.948 +
   1.949 +  bool is_external_suspend() const {
   1.950 +    return (_suspend_flags & _external_suspend) != 0;
   1.951 +  }
   1.952 +  // Whenever a thread transitions from native to vm/java it must suspend
   1.953 +  // if external|deopt suspend is present.
   1.954 +  bool is_suspend_after_native() const {
   1.955 +    return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
   1.956 +  }
   1.957 +
   1.958 +  // external suspend request is completed
   1.959 +  bool is_ext_suspended() const {
   1.960 +    return (_suspend_flags & _ext_suspended) != 0;
   1.961 +  }
   1.962 +
   1.963 +  // legacy method that checked for either external suspension or vm suspension
   1.964 +  bool is_any_suspended() const {
   1.965 +    return is_ext_suspended();
   1.966 +  }
   1.967 +
   1.968 +  bool is_external_suspend_with_lock() const {
   1.969 +    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
   1.970 +    return is_external_suspend();
   1.971 +  }
   1.972 +
   1.973 +  // Special method to handle a pending external suspend request
   1.974 +  // when a suspend equivalent condition lifts.
    1.975 +  bool handle_special_suspend_equivalent_condition() {
    1.976 +    assert(is_suspend_equivalent(),
    1.977 +      "should only be called in a suspend equivalence condition");
    1.978 +    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    1.979 +    bool ret = is_external_suspend();
    1.980 +    if (!ret) {
    1.981 +      // not about to self-suspend so clear suspend equivalence
    1.982 +      clear_suspend_equivalent();
    1.983 +    }
    1.984 +    // implied else:
    1.985 +    // We have a pending external suspend request so we leave the
    1.986 +    // suspend_equivalent flag set until java_suspend_self() sets
    1.987 +    // the ext_suspended flag and clears the suspend_equivalent
    1.988 +    // flag. This ensures that wait_for_ext_suspend_completion()
    1.989 +    // will return consistent values.
    1.990 +    return ret;
    1.991 +  }
   1.992 +
   1.993 +  bool is_any_suspended_with_lock() const {
   1.994 +    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
   1.995 +    return is_any_suspended();
   1.996 +  }
   1.997 +  // utility methods to see if we are doing some kind of suspension
   1.998 +  bool is_being_ext_suspended() const            {
   1.999 +    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
  1.1000 +    return is_ext_suspended() || is_external_suspend();
  1.1001 +  }
  1.1002 +
  1.1003 +  bool is_suspend_equivalent() const             { return _suspend_equivalent; }
  1.1004 +
  1.1005 +  void set_suspend_equivalent()                  { _suspend_equivalent = true; };
  1.1006 +  void clear_suspend_equivalent()                { _suspend_equivalent = false; };
  1.1007 +
  1.1008 +  // Thread.stop support
  1.1009 +  void send_thread_stop(oop throwable);
   1.1010 +  AsyncRequests clear_special_runtime_exit_condition() {  // fetch-and-clear the pending async request
   1.1011 +    AsyncRequests x = _special_runtime_exit_condition;      // snapshot the current condition
   1.1012 +    _special_runtime_exit_condition = _no_async_condition;  // reset to "no condition pending"
   1.1013 +    return x;
   1.1014 +  }
  1.1015 +
  1.1016 +  // Are any async conditions present?
  1.1017 +  bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }
  1.1018 +
  1.1019 +  void check_and_handle_async_exceptions(bool check_unsafe_error = true);
  1.1020 +
  1.1021 +  // these next two are also used for self-suspension and async exception support
  1.1022 +  void handle_special_runtime_exit_condition(bool check_asyncs = true);
  1.1023 +
  1.1024 +  // Return true if JavaThread has an asynchronous condition or
  1.1025 +  // if external suspension is requested.
  1.1026 +  bool has_special_runtime_exit_condition() {
  1.1027 +    // We call is_external_suspend() last since external suspend should
  1.1028 +    // be less common. Because we don't use is_external_suspend_with_lock
  1.1029 +    // it is possible that we won't see an asynchronous external suspend
  1.1030 +    // request that has just gotten started, i.e., SR_lock grabbed but
  1.1031 +    // _external_suspend field change either not made yet or not visible
  1.1032 +    // yet. However, this is okay because the request is asynchronous and
  1.1033 +    // we will see the new flag value the next time through. It's also
  1.1034 +    // possible that the external suspend request is dropped after
  1.1035 +    // we have checked is_external_suspend(), we will recheck its value
  1.1036 +    // under SR_lock in java_suspend_self().
  1.1037 +    return (_special_runtime_exit_condition != _no_async_condition) ||
  1.1038 +            is_external_suspend() || is_deopt_suspend();
  1.1039 +  }
  1.1040 +
  1.1041 +  void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
  1.1042 +
  1.1043 +  void set_pending_async_exception(oop e) {
  1.1044 +    _pending_async_exception = e;
  1.1045 +    _special_runtime_exit_condition = _async_exception;
  1.1046 +    set_has_async_exception();
  1.1047 +  }
  1.1048 +
  1.1049 +  // Fast-locking support
  1.1050 +  bool is_lock_owned(address adr) const;
  1.1051 +
  1.1052 +  // Accessors for vframe array top
  1.1053 +  // The linked list of vframe arrays are sorted on sp. This means when we
  1.1054 +  // unpack the head must contain the vframe array to unpack.
  1.1055 +  void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
  1.1056 +  vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
  1.1057 +
   1.1058 +  // Side structure for deferring update of java frame locals until deopt occurs
  1.1059 +  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
  1.1060 +  void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
  1.1061 +
  1.1062 +  // These only really exist to make debugging deopt problems simpler
  1.1063 +
  1.1064 +  void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
  1.1065 +  vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
  1.1066 +
  1.1067 +  // The special resourceMark used during deoptimization
  1.1068 +
  1.1069 +  void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
  1.1070 +  DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }
  1.1071 +
  1.1072 +  intptr_t* must_deopt_id()                      { return _must_deopt_id; }
  1.1073 +  void     set_must_deopt_id(intptr_t* id)       { _must_deopt_id = id; }
  1.1074 +  void     clear_must_deopt_id()                 { _must_deopt_id = NULL; }
  1.1075 +
  1.1076 +  methodOop  callee_target() const               { return _callee_target; }
  1.1077 +  void set_callee_target  (methodOop x)          { _callee_target   = x; }
  1.1078 +
  1.1079 +  // Oop results of vm runtime calls
  1.1080 +  oop  vm_result() const                         { return _vm_result; }
  1.1081 +  void set_vm_result  (oop x)                    { _vm_result   = x; }
  1.1082 +
  1.1083 +  oop  vm_result_2() const                       { return _vm_result_2; }
  1.1084 +  void set_vm_result_2  (oop x)                  { _vm_result_2   = x; }
  1.1085 +
  1.1086 +  // Exception handling for compiled methods
  1.1087 +  oop      exception_oop() const                 { return _exception_oop; }
  1.1088 +  int      exception_stack_size() const          { return _exception_stack_size; }
  1.1089 +  address  exception_pc() const                  { return _exception_pc; }
  1.1090 +  address  exception_handler_pc() const          { return _exception_handler_pc; }
  1.1091 +
  1.1092 +  void set_exception_oop(oop o)                  { _exception_oop = o; }
  1.1093 +  void set_exception_pc(address a)               { _exception_pc = a; }
  1.1094 +  void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  1.1095 +  void set_exception_stack_size(int size)        { _exception_stack_size = size; }
  1.1096 +
  1.1097 +  // Stack overflow support
  1.1098 +  inline size_t stack_available(address cur_sp);
  1.1099 +  address stack_yellow_zone_base()
  1.1100 +    { return (address)(stack_base() - (stack_size() - (stack_red_zone_size() + stack_yellow_zone_size()))); }
  1.1101 +  size_t  stack_yellow_zone_size()
  1.1102 +    { return StackYellowPages * os::vm_page_size(); }
  1.1103 +  address stack_red_zone_base()
  1.1104 +    { return (address)(stack_base() - (stack_size() - stack_red_zone_size())); }
  1.1105 +  size_t stack_red_zone_size()
  1.1106 +    { return StackRedPages * os::vm_page_size(); }
  1.1107 +  bool in_stack_yellow_zone(address a)
  1.1108 +    { return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base()); }
  1.1109 +  bool in_stack_red_zone(address a)
  1.1110 +    { return (a <= stack_red_zone_base()) && (a >= (address)((intptr_t)stack_base() - stack_size())); }
  1.1111 +
  1.1112 +  void create_stack_guard_pages();
  1.1113 +  void remove_stack_guard_pages();
  1.1114 +
  1.1115 +  void enable_stack_yellow_zone();
  1.1116 +  void disable_stack_yellow_zone();
  1.1117 +  void enable_stack_red_zone();
  1.1118 +  void disable_stack_red_zone();
  1.1119 +
  1.1120 +  inline bool stack_yellow_zone_disabled();
  1.1121 +  inline bool stack_yellow_zone_enabled();
  1.1122 +
  1.1123 +  // Attempt to reguard the stack after a stack overflow may have occurred.
  1.1124 +  // Returns true if (a) guard pages are not needed on this thread, (b) the
  1.1125 +  // pages are already guarded, or (c) the pages were successfully reguarded.
  1.1126 +  // Returns false if there is not enough stack space to reguard the pages, in
  1.1127 +  // which case the caller should unwind a frame and try again.  The argument
  1.1128 +  // should be the caller's (approximate) sp.
  1.1129 +  bool reguard_stack(address cur_sp);
   1.1130 +  // Similar to above but see if current stack pointer is out of the guard area
  1.1131 +  // and reguard if possible.
  1.1132 +  bool reguard_stack(void);
  1.1133 +
  1.1134 +  // Misc. accessors/mutators
  1.1135 +  void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
  1.1136 +  void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
  1.1137 +  bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
  1.1138 +
  1.1139 +#ifndef PRODUCT
  1.1140 +  void record_jump(address target, address instr, const char* file, int line);
  1.1141 +#endif /* PRODUCT */
  1.1142 +
  1.1143 +  // For assembly stub generation
  1.1144 +  static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj           ); }
  1.1145 +#ifndef PRODUCT
  1.1146 +  static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index      ); }
  1.1147 +  static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring            ); }
  1.1148 +#endif /* PRODUCT */
  1.1149 +  static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment     ); }
  1.1150 +  static ByteSize last_Java_sp_offset()          {
  1.1151 +    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  1.1152 +  }
  1.1153 +  static ByteSize last_Java_pc_offset()          {
  1.1154 +    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
  1.1155 +  }
  1.1156 +  static ByteSize frame_anchor_offset()          {
  1.1157 +    return byte_offset_of(JavaThread, _anchor);
  1.1158 +  }
  1.1159 +  static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target       ); }
  1.1160 +  static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result           ); }
  1.1161 +  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
  1.1162 +  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
  1.1163 +  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
  1.1164 +  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
  1.1165 +  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
  1.1166 +  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
  1.1167 +  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  1.1168 +  static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
  1.1169 +  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
  1.1170 +  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
  1.1171 +
  1.1172 +  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
  1.1173 +
  1.1174 +  // Returns the jni environment for this thread
  1.1175 +  JNIEnv* jni_environment()                      { return &_jni_environment; }
  1.1176 +
   1.1177 +  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
   1.1178 +    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));  // recover owner: env is the embedded _jni_environment field
   1.1179 +    // Only return NULL if thread is off the thread list; starting to
   1.1180 +    // exit should not return NULL.
   1.1181 +    if (thread_from_jni_env->is_terminated()) {
   1.1182 +       thread_from_jni_env->block_if_vm_exited();
   1.1183 +       return NULL;
   1.1184 +    } else {
   1.1185 +       return thread_from_jni_env;
   1.1186 +    }
   1.1187 +  }
  1.1188 +
   1.1189 +  // JNI critical regions. These can nest; _jni_active_critical tracks the nesting depth.
   1.1190 +  bool in_critical()    { return _jni_active_critical > 0; }
   1.1191 +  void enter_critical() { assert(Thread::current() == this,
   1.1192 +                                 "this must be current thread");
   1.1193 +                          _jni_active_critical++; }
   1.1194 +  void exit_critical()  { assert(Thread::current() == this,
   1.1195 +                                 "this must be current thread");
   1.1196 +                          _jni_active_critical--;
   1.1197 +                          assert(_jni_active_critical >= 0,
   1.1198 +                                 "JNI critical nesting problem?"); }
  1.1199 +
  1.1200 +  // For deadlock detection
  1.1201 +  int depth_first_number() { return _depth_first_number; }
  1.1202 +  void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
  1.1203 +
  1.1204 + private:
  1.1205 +  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }
  1.1206 +
  1.1207 + public:
  1.1208 +  MonitorChunk* monitor_chunks() const           { return _monitor_chunks; }
  1.1209 +  void add_monitor_chunk(MonitorChunk* chunk);
  1.1210 +  void remove_monitor_chunk(MonitorChunk* chunk);
  1.1211 +  bool in_deopt_handler() const                  { return _in_deopt_handler > 0; }
  1.1212 +  void inc_in_deopt_handler()                    { _in_deopt_handler++; }
   1.1213 +  void dec_in_deopt_handler()                    {
   1.1214 +    assert(_in_deopt_handler > 0, "mismatched deopt nesting");
   1.1215 +    if (_in_deopt_handler > 0) { // robustness: avoids underflow when the assert above is compiled out
   1.1216 +      _in_deopt_handler--;
   1.1217 +    }
   1.1218 +  }
  1.1219 +
  1.1220 + private:
  1.1221 +  void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }
  1.1222 +
  1.1223 + public:
  1.1224 +
  1.1225 +  // Frame iteration; calls the function f for all frames on the stack
  1.1226 +  void frames_do(void f(frame*, const RegisterMap*));
  1.1227 +
  1.1228 +  // Memory operations
  1.1229 +  void oops_do(OopClosure* f);
  1.1230 +
  1.1231 +  // Sweeper operations
  1.1232 +  void nmethods_do();
  1.1233 +
  1.1234 +  // Memory management operations
  1.1235 +  void gc_epilogue();
  1.1236 +  void gc_prologue();
  1.1237 +
  1.1238 +  // Misc. operations
  1.1239 +  char* name() const { return (char*)get_thread_name(); }
  1.1240 +  void print_on(outputStream* st) const;
  1.1241 +  void print() const { print_on(tty); }
  1.1242 +  void print_value();
  1.1243 +  void print_thread_state_on(outputStream* ) const      PRODUCT_RETURN;
  1.1244 +  void print_thread_state() const                       PRODUCT_RETURN;
  1.1245 +  void print_on_error(outputStream* st, char* buf, int buflen) const;
  1.1246 +  void verify();
  1.1247 +  const char* get_thread_name() const;
  1.1248 +private:
  1.1249 +  // factor out low-level mechanics for use in both normal and error cases
  1.1250 +  const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
  1.1251 +public:
  1.1252 +  const char* get_threadgroup_name() const;
  1.1253 +  const char* get_parent_name() const;
  1.1254 +
  1.1255 +  // Accessing frames
  1.1256 +  frame last_frame() {
  1.1257 +    _anchor.make_walkable(this);
  1.1258 +    return pd_last_frame();
  1.1259 +  }
  1.1260 +  javaVFrame* last_java_vframe(RegisterMap* reg_map);
  1.1261 +
  1.1262 +  // Returns method at 'depth' java or native frames down the stack
  1.1263 +  // Used for security checks
  1.1264 +  klassOop security_get_caller_class(int depth);
  1.1265 +
  1.1266 +  // Print stack trace in external format
  1.1267 +  void print_stack_on(outputStream* st);
  1.1268 +  void print_stack() { print_stack_on(tty); }
  1.1269 +
  1.1270 +  // Print stack traces in various internal formats
  1.1271 +  void trace_stack()                             PRODUCT_RETURN;
  1.1272 +  void trace_stack_from(vframe* start_vf)        PRODUCT_RETURN;
  1.1273 +  void trace_frames()                            PRODUCT_RETURN;
  1.1274 +
  1.1275 +  // Returns the number of stack frames on the stack
  1.1276 +  int depth() const;
  1.1277 +
  1.1278 +  // Function for testing deoptimization
  1.1279 +  void deoptimize();
  1.1280 +  void make_zombies();
  1.1281 +
  1.1282 +  void deoptimized_wrt_marked_nmethods();
  1.1283 +
  1.1284 +  // Profiling operation (see fprofile.cpp)
  1.1285 + public:
  1.1286 +   bool profile_last_Java_frame(frame* fr);
  1.1287 +
  1.1288 + private:
  1.1289 +   ThreadProfiler* _thread_profiler;
  1.1290 + private:
  1.1291 +   friend class FlatProfiler;                    // uses both [gs]et_thread_profiler.
  1.1292 +   friend class FlatProfilerTask;                // uses get_thread_profiler.
  1.1293 +   friend class ThreadProfilerMark;              // uses get_thread_profiler.
  1.1294 +   ThreadProfiler* get_thread_profiler()         { return _thread_profiler; }
  1.1295 +   ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
  1.1296 +     ThreadProfiler* result = _thread_profiler;
  1.1297 +     _thread_profiler = tp;
  1.1298 +     return result;
  1.1299 +   }
  1.1300 +
  1.1301 +  // Static operations
  1.1302 + public:
  1.1303 +  // Returns the running thread as a JavaThread
  1.1304 +  static inline JavaThread* current();
  1.1305 +
  1.1306 +  // Returns the active Java thread.  Do not use this if you know you are calling
  1.1307 +  // from a JavaThread, as it's slower than JavaThread::current.  If called from
  1.1308 +  // the VMThread, it also returns the JavaThread that instigated the VMThread's
  1.1309 +  // operation.  You may not want that either.
  1.1310 +  static JavaThread* active();
  1.1311 +
  1.1312 +  inline CompilerThread* as_CompilerThread();
  1.1313 +
  1.1314 + public:
  1.1315 +  virtual void run();
  1.1316 +  void thread_main_inner();
  1.1317 +
  1.1318 + private:
  1.1319 +  // PRIVILEGED STACK
  1.1320 +  PrivilegedElement*  _privileged_stack_top;
  1.1321 +  GrowableArray<oop>* _array_for_gc;
  1.1322 + public:
  1.1323 +
  1.1324 +  // Returns the privileged_stack information.
  1.1325 +  PrivilegedElement* privileged_stack_top() const       { return _privileged_stack_top; }
  1.1326 +  void set_privileged_stack_top(PrivilegedElement *e)   { _privileged_stack_top = e; }
  1.1327 +  void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }
  1.1328 +
  1.1329 + public:
  1.1330 +  // Thread local information maintained by JVMTI.
  1.1331 +  void set_jvmti_thread_state(JvmtiThreadState *value)                           { _jvmti_thread_state = value; }
  1.1332 +  JvmtiThreadState *jvmti_thread_state() const                                   { return _jvmti_thread_state; }
  1.1333 +  static ByteSize jvmti_thread_state_offset()                                    { return byte_offset_of(JavaThread, _jvmti_thread_state); }
  1.1334 +  void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; }
  1.1335 +  JvmtiGetLoadedClassesClosure* get_jvmti_get_loaded_classes_closure() const     { return _jvmti_get_loaded_classes_closure; }
  1.1336 +
  1.1337 +  // JVMTI PopFrame support
  1.1338 +  // Setting and clearing popframe_condition
  1.1339 +  // All of these enumerated values are bits. popframe_pending
  1.1340 +  // indicates that a PopFrame() has been requested and not yet been
  1.1341 +  // completed. popframe_processing indicates that that PopFrame() is in
  1.1342 +  // the process of being completed. popframe_force_deopt_reexecution_bit
  1.1343 +  // indicates that special handling is required when returning to a
  1.1344 +  // deoptimized caller.
  1.1345 +  enum PopCondition {
  1.1346 +    popframe_inactive                      = 0x00,
  1.1347 +    popframe_pending_bit                   = 0x01,
  1.1348 +    popframe_processing_bit                = 0x02,
  1.1349 +    popframe_force_deopt_reexecution_bit   = 0x04
  1.1350 +  };
  1.1351 +  PopCondition popframe_condition()                   { return (PopCondition) _popframe_condition; }
  1.1352 +  void set_popframe_condition(PopCondition c)         { _popframe_condition = c; }
  1.1353 +  void set_popframe_condition_bit(PopCondition c)     { _popframe_condition |= c; }
  1.1354 +  void clear_popframe_condition()                     { _popframe_condition = popframe_inactive; }
  1.1355 +  static ByteSize popframe_condition_offset()         { return byte_offset_of(JavaThread, _popframe_condition); }
  1.1356 +  bool has_pending_popframe()                         { return (popframe_condition() & popframe_pending_bit) != 0; }
  1.1357 +  bool popframe_forcing_deopt_reexecution()           { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
  1.1358 +  void clear_popframe_forcing_deopt_reexecution()     { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; }
  1.1359 +#ifdef CC_INTERP
  1.1360 +  bool pop_frame_pending(void)                        { return ((_popframe_condition & popframe_pending_bit) != 0); }
  1.1361 +  void clr_pop_frame_pending(void)                    { _popframe_condition = popframe_inactive; }
  1.1362 +  bool pop_frame_in_process(void)                     { return ((_popframe_condition & popframe_processing_bit) != 0); }
  1.1363 +  void set_pop_frame_in_process(void)                 { _popframe_condition |= popframe_processing_bit; }
  1.1364 +  void clr_pop_frame_in_process(void)                 { _popframe_condition &= ~popframe_processing_bit; }
  1.1365 +#endif
  1.1366 +
  1.1367 + private:
  1.1368 +  // Saved incoming arguments to popped frame.
  1.1369 +  // Used only when popped interpreted frame returns to deoptimized frame.
  1.1370 +  void*    _popframe_preserved_args;
  1.1371 +  int      _popframe_preserved_args_size;
  1.1372 +
  1.1373 + public:
  1.1374 +  void  popframe_preserve_args(ByteSize size_in_bytes, void* start);
  1.1375 +  void* popframe_preserved_args();
  1.1376 +  ByteSize popframe_preserved_args_size();
  1.1377 +  WordSize popframe_preserved_args_size_in_words();
  1.1378 +  void  popframe_free_preserved_args();
  1.1379 +
  1.1380 +
  1.1381 + private:
  1.1382 +  JvmtiThreadState *_jvmti_thread_state;
  1.1383 +  JvmtiGetLoadedClassesClosure* _jvmti_get_loaded_classes_closure;
  1.1384 +
  1.1385 +  // Used by the interpreter in fullspeed mode for frame pop, method
  1.1386 +  // entry, method exit and single stepping support. This field is
  1.1387 +  // only set to non-zero by the VM_EnterInterpOnlyMode VM operation.
  1.1388 +  // It can be set to zero asynchronously (i.e., without a VM operation
  1.1389 +  // or a lock) so we have to be very careful.
  1.1390 +  int               _interp_only_mode;
  1.1391 +
  1.1392 + public:
  1.1393 +  // used by the interpreter for fullspeed debugging support (see above)
  1.1394 +  static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
  1.1395 +  bool is_interp_only_mode()                { return (_interp_only_mode != 0); }
  1.1396 +  int get_interp_only_mode()                { return _interp_only_mode; }
  1.1397 +  void increment_interp_only_mode()         { ++_interp_only_mode; }
  1.1398 +  void decrement_interp_only_mode()         { --_interp_only_mode; }
  1.1399 +
  1.1400 + private:
  1.1401 +  ThreadStatistics *_thread_stat;
  1.1402 +
  1.1403 + public:
  1.1404 +  ThreadStatistics* get_thread_stat() const    { return _thread_stat; }
  1.1405 +
  1.1406 +  // Return a blocker object for which this thread is blocked parking.
  1.1407 +  oop current_park_blocker();
  1.1408 +
  1.1409 + private:
  1.1410 +  static size_t _stack_size_at_create;
  1.1411 +
  1.1412 + public:
  1.1413 +  static inline size_t stack_size_at_create(void) {
  1.1414 +    return _stack_size_at_create;
  1.1415 +  }
  1.1416 +  static inline void set_stack_size_at_create(size_t value) {
  1.1417 +    _stack_size_at_create = value;
  1.1418 +  }
  1.1419 +
  1.1420 +  // Machine dependent stuff
  1.1421 +  #include "incls/_thread_pd.hpp.incl"
  1.1422 +
  1.1423 + public:
  1.1424 +  void set_blocked_on_compilation(bool value) {
  1.1425 +    _blocked_on_compilation = value;
  1.1426 +  }
  1.1427 +
  1.1428 +  bool blocked_on_compilation() {
  1.1429 +    return _blocked_on_compilation;
  1.1430 +  }
  1.1431 + protected:
  1.1432 +  bool         _blocked_on_compilation;
  1.1433 +
  1.1434 +
  1.1435 +  // JSR166 per-thread parker
  1.1436 +private:
  1.1437 +  Parker*    _parker;
  1.1438 +public:
  1.1439 +  Parker*     parker() { return _parker; }
  1.1440 +
  1.1441 +  // Biased locking support
  1.1442 +private:
  1.1443 +  GrowableArray<MonitorInfo*>* _cached_monitor_info;
  1.1444 +public:
  1.1445 +  GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
  1.1446 +  void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }
  1.1447 +
  1.1448 +  // clearing/querying jni attach status
  1.1449 +  bool is_attaching() const { return _is_attaching; }
  1.1450 +  void set_attached() { _is_attaching = false; OrderAccess::fence(); }
  1.1451 +};
  1.1452 +
  1.1453 +// Inline implementation of JavaThread::current
  1.1454 +inline JavaThread* JavaThread::current() {
  1.1455 +  Thread* thread = ThreadLocalStorage::thread();
  1.1456 +  assert(thread != NULL && thread->is_Java_thread(), "just checking");
  1.1457 +  return (JavaThread*)thread;
  1.1458 +}
  1.1459 +
  1.1460 +inline CompilerThread* JavaThread::as_CompilerThread() {
  1.1461 +  assert(is_Compiler_thread(), "just checking");
  1.1462 +  return (CompilerThread*)this;
  1.1463 +}
  1.1464 +
  1.1465 +inline bool JavaThread::stack_yellow_zone_disabled() {
  1.1466 +  return _stack_guard_state == stack_guard_yellow_disabled;
  1.1467 +}
  1.1468 +
  1.1469 +inline bool JavaThread::stack_yellow_zone_enabled() {
  1.1470 +#ifdef ASSERT
  1.1471 +  if (os::uses_stack_guard_pages()) {
  1.1472 +    assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
  1.1473 +  }
  1.1474 +#endif
  1.1475 +    return _stack_guard_state == stack_guard_enabled;
  1.1476 +}
  1.1477 +
  1.1478 +inline size_t JavaThread::stack_available(address cur_sp) {
  1.1479 +  // This code assumes java stacks grow down
  1.1480 +  address low_addr; // Limit on the address for deepest stack depth
  1.1481 +  if ( _stack_guard_state == stack_guard_unused) {
  1.1482 +    low_addr =  stack_base() - stack_size();
  1.1483 +  } else {
  1.1484 +    low_addr = stack_yellow_zone_base();
  1.1485 +  }
  1.1486 +  return cur_sp > low_addr ? cur_sp - low_addr : 0;
  1.1487 +}
  1.1488 +
  1.1489 +// A JavaThread for low memory detection support
  1.1490 +class LowMemoryDetectorThread : public JavaThread {
  1.1491 +  friend class VMStructs;
  1.1492 +public:
  1.1493 +  LowMemoryDetectorThread(ThreadFunction entry_point) : JavaThread(entry_point) {};
  1.1494 +
  1.1495 +  // Hide this thread from external view.
  1.1496 +  bool is_hidden_from_external_view() const      { return true; }
  1.1497 +};
  1.1498 +
// A thread used for Compilation.
class CompilerThread : public JavaThread {
  friend class VMStructs;
 private:
  // NOTE(review): presumably perf counters for this compiler thread --
  // confirm against CompilerCounters' definition.
  CompilerCounters* _counters;

  ciEnv*        _env;     // current compilation environment
  CompileLog*   _log;     // compilation log; set once via init_log()
  CompileTask*  _task;    // task currently being compiled, if any
  CompileQueue* _queue;   // queue this thread takes tasks from

 public:

  // The running thread, downcast to CompilerThread (see inline impl below).
  static CompilerThread* current();

  CompilerThread(CompileQueue* queue, CompilerCounters* counters);

  bool is_Compiler_thread() const                { return true; }
  // Hide this compiler thread from external view.
  bool is_hidden_from_external_view() const      { return true; }

  CompileQueue* queue()                          { return _queue; }
  CompilerCounters* counters()                   { return _counters; }

  // Get/set the thread's compilation environment.
  ciEnv*        env()                            { return _env; }
  void          set_env(ciEnv* env)              { _env = env; }

  // Get/set the thread's logging information
  CompileLog*   log()                            { return _log; }
  void          init_log(CompileLog* log) {
    // Set once, for good.
    assert(_log == NULL, "set only once");
    _log = log;
  }

#ifndef PRODUCT
private:
  // Debug-only hook for dumping C2 ideal graphs.
  IdealGraphPrinter *_ideal_graph_printer;
public:
  IdealGraphPrinter *ideal_graph_printer()                       { return _ideal_graph_printer; }
  void set_ideal_graph_printer(IdealGraphPrinter *n)             { _ideal_graph_printer = n; }
#endif

  // Get/set the thread's current task
  CompileTask*  task()                           { return _task; }
  void          set_task(CompileTask* task)      { _task = task; }
};
  1.1547 +
  1.1548 +inline CompilerThread* CompilerThread::current() {
  1.1549 +  return JavaThread::current()->as_CompilerThread();
  1.1550 +}
  1.1551 +
  1.1552 +
// The active thread queue. It also keeps track of the current used
// thread priorities.
class Threads: AllStatic {
  friend class VMStructs;
 private:
  // NOTE(review): head of the active-thread list; mutation presumably
  // guarded by Threads_lock (see doLock comments below) -- confirm.
  static JavaThread* _thread_list;
  static int         _number_of_threads;
  static int         _number_of_non_daemon_threads;
  // NOTE(review): looks like the VM exit status -- confirm in thread.cpp.
  static int         _return_code;

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p);
  static bool includes(JavaThread* p);
  static JavaThread* first()                     { return _thread_list; }
  // Invokes tc->do_thread() for the threads (see ThreadClosure below).
  static void threads_do(ThreadClosure* tc);

  // Initializes the vm and creates the vm thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // Garbage collection
  static void follow_other_roots(void f(oop*));

  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f);
  // This version may be called by sequential or parallel code.
  static void possibly_parallel_oops_do(OopClosure* f);
  // This creates a list of GCTasks, one per thread.
  static void create_thread_roots_tasks(GCTaskQueue* q);
  // This creates a list of GCTasks, one per thread, for marking objects.
  static void create_thread_roots_marking_tasks(GCTaskQueue* q);

  // Apply "f->do_oop" to roots in all threads that
  // are part of compiled frames
  static void compiled_frame_oops_do(OopClosure* f);

  static void convert_hcode_pointers();
  static void restore_hcode_pointers();

  // Sweeper
  static void nmethods_do();

  static void gc_epilogue();
  static void gc_prologue();

  // Verification
  static void verify();
  static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks);
  static void print(bool print_stacks, bool internal_format) {
    // this function is only used by debug.cpp
    print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */);
  }
  static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);

  // Get Java threads that are waiting to enter a monitor. If doLock
  // is true, then Threads_lock is grabbed as needed. Otherwise, the
  // VM needs to be at a safepoint.
  static GrowableArray<JavaThread*>* get_pending_threads(int count,
    address monitor, bool doLock);

  // Get owning Java thread from the monitor's owner field. If doLock
  // is true, then Threads_lock is grabbed as needed. Otherwise, the
  // VM needs to be at a safepoint.
  static JavaThread *owning_thread_from_monitor_owner(address owner,
    bool doLock);

  // Number of threads on the active threads list
  static int number_of_threads()                 { return _number_of_threads; }
  // Number of non-daemon threads on the active threads list
  static int number_of_non_daemon_threads()      { return _number_of_non_daemon_threads; }

  // Deoptimizes all frames tied to marked nmethods
  static void deoptimized_wrt_marked_nmethods();

};
  1.1642 +
  1.1643 +
// Thread iterator
// Callback interface: subclass, implement do_thread(), and pass to
// Threads::threads_do() to visit threads.
class ThreadClosure: public StackObj {
 public:
  virtual void do_thread(Thread* thread) = 0;
};
  1.1649 +
  1.1650 +class SignalHandlerMark: public StackObj {
  1.1651 +private:
  1.1652 +  Thread* _thread;
  1.1653 +public:
  1.1654 +  SignalHandlerMark(Thread* t) {
  1.1655 +    _thread = t;
  1.1656 +    if (_thread) _thread->enter_signal_handler();
  1.1657 +  }
  1.1658 +  ~SignalHandlerMark() {
  1.1659 +    if (_thread) _thread->leave_signal_handler();
  1.1660 +    _thread = NULL;
  1.1661 +  }
  1.1662 +};
  1.1663 +
  1.1664 +// ParkEvents are type-stable and immortal.
  1.1665 +//
  1.1666 +// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
  1.1667 +// associated with the thread for the thread's entire lifetime - the relationship is
  1.1668 +// stable. A thread will be associated at most one ParkEvent.  When the thread
  1.1669 +// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
  1.1670 +// the EventFreeList before creating a new Event.  Type-stability frees us from
  1.1671 +// worrying about stale Event or Thread references in the objectMonitor subsystem.
  1.1672 +// (A reference to ParkEvent is always valid, even though the event may no longer be associated
  1.1673 +// with the desired or expected thread.  A key aspect of this design is that the callers of
  1.1674 +// park, unpark, etc must tolerate stale references and spurious wakeups).
  1.1675 +//
  1.1676 +// Only the "associated" thread can block (park) on the ParkEvent, although
  1.1677 +// any other thread can unpark a reachable parkevent.  Park() is allowed to
// return spuriously.  In fact park-unpark is really just an optimization to
  1.1679 +// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
  1.1680 +// A degenerate albeit "impolite" park-unpark implementation could simply return.
  1.1681 +// See http://blogs.sun.com/dave for more details.
  1.1682 +//
  1.1683 +// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
  1.1684 +// thread proxies, and simply make the THREAD structure type-stable and persistent.
  1.1685 +// Currently, we unpark events associated with threads, but ideally we'd just
  1.1686 +// unpark threads.
  1.1687 +//
  1.1688 +// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
  1.1689 +// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
  1.1690 +// is abstract -- that is, a PlatformEvent should never be instantiated except
  1.1691 +// as part of a ParkEvent.
  1.1692 +// Equivalently we could have defined a platform-independent base-class that
  1.1693 +// exported Allocate(), Release(), etc.  The platform-specific class would extend
  1.1694 +// that base-class, adding park(), unpark(), etc.
  1.1695 +//
  1.1696 +// A word of caution: The JVM uses 2 very similar constructs:
  1.1697 +// 1. ParkEvent are used for Java-level "monitor" synchronization.
  1.1698 +// 2. Parkers are used by JSR166-JUC park-unpark.
  1.1699 +//
  1.1700 +// We'll want to eventually merge these redundant facilities and use ParkEvent.
  1.1701 +
  1.1702 +
class ParkEvent : public os::PlatformEvent {
  private:
    // Link used while the event sits on the global FreeList below.
    ParkEvent * FreeNext ;

    // Current association
    Thread * AssociatedWith ;
    intptr_t RawThreadIdentity ;        // LWPID etc
    volatile int Incarnation ;

    // diagnostic : keep track of last thread to wake this thread.
    // this is useful for construction of dependency graphs.
    void * LastWaker ;

  public:
    // MCS-CLH list linkage and Native Mutex/Monitor
    ParkEvent * volatile ListNext ;
    ParkEvent * volatile ListPrev ;
    volatile intptr_t OnList ;
    volatile int TState ;
    volatile int Notified ;             // for native monitor construct
    volatile int IsWaiting ;            // Enqueued on WaitSet


  private:
    // Global free list of recycled events and the variable that serializes
    // access to it (NOTE(review): locking protocol lives in the .cpp --
    // confirm there).
    static ParkEvent * volatile FreeList ;
    static volatile int ListLock ;

    // It's prudent to mark the dtor as "private"
    // ensuring that it's not visible outside the package.
    // Unfortunately gcc warns about such usage, so
    // we revert to the less desirable "protected" visibility.
    // The other compilers accept private dtors.

  protected:        // Ensure dtor is never invoked
    ~ParkEvent() { guarantee (0, "invariant") ; }

    ParkEvent() : PlatformEvent() {
       AssociatedWith = NULL ;
       FreeNext       = NULL ;
       ListNext       = NULL ;
       ListPrev       = NULL ;
       OnList         = 0 ;
       TState         = 0 ;
       Notified       = 0 ;
       IsWaiting      = 0 ;
    }

    // We use placement-new to force ParkEvent instances to be
    // aligned on 256-byte address boundaries.  This ensures that the least
    // significant byte of a ParkEvent address is always 0.

    void * operator new (size_t sz) ;
    void operator delete (void * a) ;

  public:
    // Allocation goes through the free list when possible; Release returns
    // an event to it.  Instances are never destroyed (see dtor above).
    static ParkEvent * Allocate (Thread * t) ;
    static void Release (ParkEvent * e) ;
} ;

mercurial