src/share/vm/opto/locknode.cpp

changeset:   3407:35acf8f0a2e4
parent:      3406:e9a5e0a812c8
child:       3419:b0ff910edfc9
author:      kvn
date:        Tue, 10 Jan 2012 18:05:38 -0800
permissions: -rw-r--r--

7128352: assert(obj_node == obj) failed
Summary: Compare uncasted object nodes.
Reviewed-by: never
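The assert fired when a monitor's recorded object node was a casted version of
the object it was compared against. The fix switches the comparisons in
is_simple_lock_region() and its debug-only verification to Node::eqv_uncast(),
which strips pointer casts from both nodes before testing equality.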

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
// A box's input mask contains only its assigned stack slot.
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

// The box's value (the address of the on-stack BasicLock) is a machine pointer.
const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

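//-----------------------------BoxLockNode-------------------------------------
// Each box is pinned to one stack slot (the slot that will hold the
// BasicLock) and is flagged rematerializable, so the register allocator can
// recreate it rather than spill it.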
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node.
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

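// Return the box's stack slot as a register number.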
OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

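// Do two boxes use the same stack slot?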
bool BoxLockNode::same_slot(Node* box1, Node* box2) {
  return box_node(box1)->_slot == box_node(box2)->_slot;
}

// Is BoxLock node used for one simple lock region (same box and obj)?
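// If it is, and unique_lock is non-NULL, the region's single Lock node (when
// there is exactly one) is returned through *unique_lock.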
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_Phi())
      return false; // Merged regions
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        // 7128352: compare object nodes with casts stripped, so a casted and
        // an uncasted reference to the same object match.
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    if (n->is_SafePoint() && n->as_SafePoint()->jvms()) {
      SafePointNode* sfn = n->as_SafePoint();
      JVMState* youngest_jvms = sfn->jvms();
      int max_depth = youngest_jvms->depth();
      for (int depth = 1; depth <= max_depth; depth++) {
        JVMState* jvms = youngest_jvms->of_depth(depth);
        int num_mon = jvms->nof_monitors();
        // Loop over monitors
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
          if (box_node == this) {
            assert(obj_node->eqv_uncast(obj),"");
          }
        }
      }
    }
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
    OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node *obj = do_null_check(peek(), T_OBJECT);
  // Check for locking null object
  if (stopped()) return;

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop(); // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock. Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
