src/share/vm/memory/gcLocker.hpp

changeset 3494
1a2723f7ad8e
parent 3156
f08d439fab8c
child 3500
0382d2b469b2
     1.1 --- a/src/share/vm/memory/gcLocker.hpp	Thu Jan 26 19:39:08 2012 -0800
     1.2 +++ b/src/share/vm/memory/gcLocker.hpp	Sun Jan 29 16:46:04 2012 -0800
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -51,53 +51,70 @@
    1.11  
    1.12  class GC_locker: public AllStatic {
    1.13   private:
    1.14 -  static volatile jint _jni_lock_count;  // number of jni active instances
    1.15 +  // The _jni_lock_count keeps track of the number of threads that are
    1.16 +  // currently in a critical region.  It's only kept up to date when
    1.17 +  // _needs_gc is true.  The current value is computed during
    1.18 +  // safepointing and decremented during the slow path of GC_locker
    1.19 +  // unlocking.
    1.20 +  static volatile jint _jni_lock_count;  // number of jni active instances.
    1.21 +
    1.22    static volatile jint _lock_count;      // number of other active instances
    1.23    static volatile bool _needs_gc;        // heap is filling, we need a GC
    1.24                                           // note: bool is typedef'd as jint
    1.25    static volatile bool _doing_gc;        // unlock_critical() is doing a GC
    1.26  
    1.27 +  static jlong         _wait_begin;      // Timestamp for the setting of _needs_gc.
    1.28 +                                         // Used only by printing code.
    1.29 +
    1.30 +#ifdef ASSERT
    1.31 +  // This lock count is updated for all operations and is used to
    1.32 +  // validate the jni_lock_count that is computed during safepoints.
    1.33 +  static volatile jint _debug_jni_lock_count;
    1.34 +#endif
    1.35 +
    1.36    // Accessors
    1.37    static bool is_jni_active() {
    1.38 +    assert(_needs_gc, "only valid when _needs_gc is set");
    1.39      return _jni_lock_count > 0;
    1.40    }
    1.41  
    1.42 -  static void set_needs_gc() {
    1.43 -    assert(SafepointSynchronize::is_at_safepoint(),
    1.44 -      "needs_gc is only set at a safepoint");
    1.45 -    _needs_gc = true;
    1.46 -  }
    1.47 +  // At a safepoint, visit all threads and count the number of active
    1.48 +  // critical sections.  This is used to ensure that all active
    1.49 +  // critical sections are exited before a new one is started.
    1.50 +  static void verify_critical_count() NOT_DEBUG_RETURN;
    1.51  
    1.52 -  static void clear_needs_gc() {
    1.53 -    assert_lock_strong(JNICritical_lock);
    1.54 -    _needs_gc = false;
    1.55 -  }
    1.56 -
    1.57 -  static void jni_lock() {
    1.58 -    Atomic::inc(&_jni_lock_count);
    1.59 -    CHECK_UNHANDLED_OOPS_ONLY(
    1.60 -      if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
    1.61 -    assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(),
    1.62 -           "locking failed");
    1.63 -  }
    1.64 -
    1.65 -  static void jni_unlock() {
    1.66 -    Atomic::dec(&_jni_lock_count);
    1.67 -    CHECK_UNHANDLED_OOPS_ONLY(
    1.68 -      if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
    1.69 -  }
    1.70 -
    1.71 -  static void jni_lock_slow();
    1.72 -  static void jni_unlock_slow();
    1.73 +  static void jni_lock(JavaThread* thread);
    1.74 +  static void jni_unlock(JavaThread* thread);
    1.75  
    1.76   public:
    1.77    // Accessors
    1.78    static bool is_active();
    1.79    static bool needs_gc()       { return _needs_gc;                        }
    1.80 +
    1.81    // Shorthand
    1.82 -  static bool is_active_and_needs_gc() { return is_active() && needs_gc();}
    1.83 +  static bool is_active_and_needs_gc() { return needs_gc() && is_active(); }
    1.84  
    1.85 -  // Calls set_needs_gc() if is_active() is true. Returns is_active().
    1.86 +  // In debug mode track the locking state at all times
    1.87 +  static void increment_debug_jni_lock_count() {
    1.88 +#ifdef ASSERT
    1.89 +    assert(_debug_jni_lock_count >= 0, "bad value");
    1.90 +    Atomic::inc(&_debug_jni_lock_count);
    1.91 +#endif
    1.92 +  }
    1.93 +  static void decrement_debug_jni_lock_count() {
    1.94 +#ifdef ASSERT
    1.95 +    assert(_debug_jni_lock_count > 0, "bad value");
    1.96 +    Atomic::dec(&_debug_jni_lock_count);
    1.97 +#endif
    1.98 +  }
    1.99 +
   1.100 +  // Set the current lock count
   1.101 +  static void set_jni_lock_count(int count) {
   1.102 +    _jni_lock_count = count;
   1.103 +    verify_critical_count();
   1.104 +  }
   1.105 +
   1.106 +  // Sets _needs_gc if is_active() is true. Returns is_active().
   1.107    static bool check_active_before_gc();
   1.108  
   1.109    // Stalls the caller (who should not be in a jni critical section)
   1.110 @@ -131,20 +148,20 @@
   1.111    // JNI critical regions are the only participants in this scheme
   1.112    // because they are, by spec, well bounded while in a critical region.
   1.113    //
   1.114 -  // Each of the following two method is split into a fast path and a slow
   1.115 -  // path. JNICritical_lock is only grabbed in the slow path.
     1.116 +  // Each of the following two methods is split into a fast path and a
   1.117 +  // slow path. JNICritical_lock is only grabbed in the slow path.
   1.118    // _needs_gc is initially false and every java thread will go
   1.119 -  // through the fast path (which does the same thing as the slow path
   1.120 -  // when _needs_gc is false). When GC happens at a safepoint,
   1.121 -  // GC_locker::is_active() is checked. Since there is no safepoint in the
   1.122 -  // fast path of lock_critical() and unlock_critical(), there is no race
   1.123 -  // condition between the fast path and GC. After _needs_gc is set at a
   1.124 -  // safepoint, every thread will go through the slow path after the safepoint.
   1.125 -  // Since after a safepoint, each of the following two methods is either
   1.126 -  // entered from the method entry and falls into the slow path, or is
   1.127 -  // resumed from the safepoints in the method, which only exist in the slow
   1.128 -  // path. So when _needs_gc is set, the slow path is always taken, till
   1.129 -  // _needs_gc is cleared.
   1.130 +  // through the fast path, which simply increments or decrements the
   1.131 +  // current thread's critical count.  When GC happens at a safepoint,
   1.132 +  // GC_locker::is_active() is checked. Since there is no safepoint in
   1.133 +  // the fast path of lock_critical() and unlock_critical(), there is
   1.134 +  // no race condition between the fast path and GC. After _needs_gc
   1.135 +  // is set at a safepoint, every thread will go through the slow path
   1.136 +  // after the safepoint.  Since after a safepoint, each of the
   1.137 +  // following two methods is either entered from the method entry and
   1.138 +  // falls into the slow path, or is resumed from the safepoints in
   1.139 +  // the method, which only exist in the slow path. So when _needs_gc
   1.140 +  // is set, the slow path is always taken, till _needs_gc is cleared.
   1.141    static void lock_critical(JavaThread* thread);
   1.142    static void unlock_critical(JavaThread* thread);
   1.143  };

mercurial