Mon, 25 Jan 2010 18:03:29 -0500
6919980: G1: remove +UseG1GC from under experimental options (second attempt)
Summary: Trying this again, as the original change was lost.
Reviewed-by: ysr, jmasa
/*
 * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_biasedLocking.cpp.incl"

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;
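
// Stacks used by preserve_marks() / restore_marks() (below) to save
// and reinstate the mark words of biased-and-locked objects across a
// GC that resets most object headers to the klass prototype.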
static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(klassOop k) {
  Klass::cast(k)->set_prototype_header(markOopDesc::biased_locking_prototype());
}
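
// VM operation that turns on biased locking for all currently loaded
// (and future) classes. When is_cheap_allocated is true the operation
// is C-heap allocated, runs as an asynchronous safepoint, and is freed
// by the VM thread, so the enqueueing thread need not block on it.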
class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
  }

  bool allow_nested_vm_operations() const { return false; }
};

// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking* op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};

void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}

bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*>* monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->owner_is_scalar_replaced()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
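
// Revokes the bias of a single object. Runs either at a safepoint or
// in the thread toward which the object is biased (the cases in which
// the header can be inspected and rewritten safely). If the biasing
// thread is alive and currently holds the lock, the header is
// converted to an ordinary stack lock with a displaced header written
// into the thread's BasicLock; otherwise the header is reset to the
// rebiasable or unbiased prototype, as requested.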
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    Klass::cast(obj->klass())->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  int age = mark->age();
  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(),
                  (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark
    obj->set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}

enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};
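
// Decides how to handle a revocation request for an object: revoke
// just this object's bias, bulk-rebias all instances of its klass
// (via an epoch bump), or bulk-revoke and disable biasing for the
// klass entirely, based on how many revocations the klass has already
// seen and how recently.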
static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->blueprint();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
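
// Performs a bulk operation over all objects of the given object's
// klass at a safepoint. For a bulk rebias, the klass's prototype
// header epoch is incremented, implicitly invalidating (and allowing
// reacquisition of) every existing bias of that klass; only
// currently-locked instances found on thread stacks must be touched
// individually. For a bulk revoke, the prototype header is reset to
// the unbiased state, disabling biasing for the klass altogether.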
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->blueprint()->set_last_biased_lock_bulk_revocation_time(cur_time);

  klassOop k_o = o->klass();
  Klass* klass = Klass::cast(k_o);

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}

static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
    thr->set_cached_monitor_info(NULL);
  }
}
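
// VM operation to revoke the bias of a single object or of a list of
// objects. The prologue cheaply verifies that at least one object
// still has the bias pattern set, so that no safepoint is taken when
// there is nothing to revoke.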
class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with potentially per-thread safepoint:");
      }
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
      clean_up_cached_monitor_info();
      return;
    } else {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with global safepoint:");
      }
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }
};
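
// VM operation to bulk-rebias or bulk-revoke a klass's biases based on
// a single representative object. doit_prologue() unconditionally
// returns true: by the time this operation is created, the heuristics
// have already decided that bulk work is required.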
class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};
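
// Entry point for revocation from a running Java thread. Tries a set
// of progressively more expensive strategies: a lock-free CAS for the
// anonymously-biased, stale-bias, and expired-epoch cases; a walk of
// the requesting thread's own stack when the object is biased toward
// that thread; and finally a VM operation (single or bulk) at a
// safepoint, as chosen by update_heuristics().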
BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass* k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
    } else {
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}
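
// Revokes the biases of all objects in the list, taking a single
// safepoint for the whole batch rather than one per object.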
void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}

void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}

void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}

void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack  == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*>* monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}

void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}
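
// Addresses of the individual counters, exported so that the biased
// locking statistics can be incremented in place (e.g. from generated
// fast-path code).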
int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }

// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  // Print to the stream we were handed rather than unconditionally to tty.
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}