// The direct lock/unlock calls do not force a collection if an unlock
// decrements the count to zero. Avoid calling these if at all possible.

52 class GC_locker: public AllStatic { |
52 class GC_locker: public AllStatic { |
53 private: |
53 private: |
54 static volatile jint _jni_lock_count; // number of jni active instances |
54 // The _jni_lock_count keeps track of the number of threads that are |
|
55 // currently in a critical region. It's only kept up to date when |
|
56 // _needs_gc is true. The current value is computed during |
|
57 // safepointing and decremented during the slow path of GC_locker |
|
58 // unlocking. |
|
59 static volatile jint _jni_lock_count; // number of jni active instances. |
|
60 |
55 static volatile jint _lock_count; // number of other active instances |
61 static volatile jint _lock_count; // number of other active instances |
56 static volatile bool _needs_gc; // heap is filling, we need a GC |
62 static volatile bool _needs_gc; // heap is filling, we need a GC |
57 // note: bool is typedef'd as jint |
63 // note: bool is typedef'd as jint |
58 static volatile bool _doing_gc; // unlock_critical() is doing a GC |
64 static volatile bool _doing_gc; // unlock_critical() is doing a GC |
59 |
65 |
|
66 static jlong _wait_begin; // Timestamp for the setting of _needs_gc. |
|
67 // Used only by printing code. |
|
68 |
|
69 #ifdef ASSERT |
|
70 // This lock count is updated for all operations and is used to |
|
71 // validate the jni_lock_count that is computed during safepoints. |
|
72 static volatile jint _debug_jni_lock_count; |
|
73 #endif |
|
74 |
60 // Accessors |
75 // Accessors |
61 static bool is_jni_active() { |
76 static bool is_jni_active() { |
|
77 assert(_needs_gc, "only valid when _needs_gc is set"); |
62 return _jni_lock_count > 0; |
78 return _jni_lock_count > 0; |
63 } |
79 } |
64 |
80 |
65 static void set_needs_gc() { |
81 // At a safepoint, visit all threads and count the number of active |
66 assert(SafepointSynchronize::is_at_safepoint(), |
82 // critical sections. This is used to ensure that all active |
67 "needs_gc is only set at a safepoint"); |
83 // critical sections are exited before a new one is started. |
68 _needs_gc = true; |
84 static void verify_critical_count() NOT_DEBUG_RETURN; |
69 } |
85 |
70 |
86 static void jni_lock(JavaThread* thread); |
71 static void clear_needs_gc() { |
87 static void jni_unlock(JavaThread* thread); |
72 assert_lock_strong(JNICritical_lock); |
|
73 _needs_gc = false; |
|
74 } |
|
75 |
|
76 static void jni_lock() { |
|
77 Atomic::inc(&_jni_lock_count); |
|
78 CHECK_UNHANDLED_OOPS_ONLY( |
|
79 if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; }) |
|
80 assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(), |
|
81 "locking failed"); |
|
82 } |
|
83 |
|
84 static void jni_unlock() { |
|
85 Atomic::dec(&_jni_lock_count); |
|
86 CHECK_UNHANDLED_OOPS_ONLY( |
|
87 if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; }) |
|
88 } |
|
89 |
|
90 static void jni_lock_slow(); |
|
91 static void jni_unlock_slow(); |
|
92 |
88 |
93 public: |
89 public: |
94 // Accessors |
90 // Accessors |
95 static bool is_active(); |
91 static bool is_active(); |
96 static bool needs_gc() { return _needs_gc; } |
92 static bool needs_gc() { return _needs_gc; } |
|
93 |
97 // Shorthand |
94 // Shorthand |
98 static bool is_active_and_needs_gc() { return is_active() && needs_gc();} |
95 static bool is_active_and_needs_gc() { return needs_gc() && is_active(); } |
99 |
96 |
100 // Calls set_needs_gc() if is_active() is true. Returns is_active(). |
97 // In debug mode track the locking state at all times |
|
98 static void increment_debug_jni_lock_count() { |
|
99 #ifdef ASSERT |
|
100 assert(_debug_jni_lock_count >= 0, "bad value"); |
|
101 Atomic::inc(&_debug_jni_lock_count); |
|
102 #endif |
|
103 } |
|
104 static void decrement_debug_jni_lock_count() { |
|
105 #ifdef ASSERT |
|
106 assert(_debug_jni_lock_count > 0, "bad value"); |
|
107 Atomic::dec(&_debug_jni_lock_count); |
|
108 #endif |
|
109 } |
|
110 |
|
111 // Set the current lock count |
|
112 static void set_jni_lock_count(int count) { |
|
113 _jni_lock_count = count; |
|
114 verify_critical_count(); |
|
115 } |
|
116 |
|
117 // Sets _needs_gc if is_active() is true. Returns is_active(). |
101 static bool check_active_before_gc(); |
118 static bool check_active_before_gc(); |
102 |
119 |
103 // Stalls the caller (who should not be in a jni critical section) |
120 // Stalls the caller (who should not be in a jni critical section) |
104 // until needs_gc() clears. Note however that needs_gc() may be |
121 // until needs_gc() clears. Note however that needs_gc() may be |
105 // set at a subsequent safepoint and/or cleared under the |
122 // set at a subsequent safepoint and/or cleared under the |
129 // we must allow threads already in critical regions to continue. |
146 // we must allow threads already in critical regions to continue. |
130 // |
147 // |
131 // JNI critical regions are the only participants in this scheme |
148 // JNI critical regions are the only participants in this scheme |
132 // because they are, by spec, well bounded while in a critical region. |
149 // because they are, by spec, well bounded while in a critical region. |
133 // |
150 // |
134 // Each of the following two method is split into a fast path and a slow |
151 // Each of the following two method is split into a fast path and a |
135 // path. JNICritical_lock is only grabbed in the slow path. |
152 // slow path. JNICritical_lock is only grabbed in the slow path. |
136 // _needs_gc is initially false and every java thread will go |
153 // _needs_gc is initially false and every java thread will go |
137 // through the fast path (which does the same thing as the slow path |
154 // through the fast path, which simply increments or decrements the |
138 // when _needs_gc is false). When GC happens at a safepoint, |
155 // current thread's critical count. When GC happens at a safepoint, |
139 // GC_locker::is_active() is checked. Since there is no safepoint in the |
156 // GC_locker::is_active() is checked. Since there is no safepoint in |
140 // fast path of lock_critical() and unlock_critical(), there is no race |
157 // the fast path of lock_critical() and unlock_critical(), there is |
141 // condition between the fast path and GC. After _needs_gc is set at a |
158 // no race condition between the fast path and GC. After _needs_gc |
142 // safepoint, every thread will go through the slow path after the safepoint. |
159 // is set at a safepoint, every thread will go through the slow path |
143 // Since after a safepoint, each of the following two methods is either |
160 // after the safepoint. Since after a safepoint, each of the |
144 // entered from the method entry and falls into the slow path, or is |
161 // following two methods is either entered from the method entry and |
145 // resumed from the safepoints in the method, which only exist in the slow |
162 // falls into the slow path, or is resumed from the safepoints in |
146 // path. So when _needs_gc is set, the slow path is always taken, till |
163 // the method, which only exist in the slow path. So when _needs_gc |
147 // _needs_gc is cleared. |
164 // is set, the slow path is always taken, till _needs_gc is cleared. |
148 static void lock_critical(JavaThread* thread); |
165 static void lock_critical(JavaThread* thread); |
149 static void unlock_critical(JavaThread* thread); |
166 static void unlock_critical(JavaThread* thread); |
150 }; |
167 }; |
151 |
168 |
152 |
169 |