/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}
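
//-----------------------------box_node-----------------------------------------
// Strip SpillCopy and Phi nodes added by the register allocator to recover
// the BoxLock node underlying a lock's box edge.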
BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode after RA which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is BoxLock node used for one simple lock region (same box and obj)?
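// Returns false if a lock or unlock using this box references a different
// object. If 'unique_lock' is non-NULL and exactly one Lock node uses this
// box/obj pair, that lock is returned through 'unique_lock'.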
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
    OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop(); // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock. Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}