Thu, 12 Jan 2012 14:45:04 -0800
7128355: assert(!nocreate) failed: Cannot build a phi for a block already parsed
Summary: Do not common BoxLock nodes and avoid creating phis of boxes.
Reviewed-by: never
1 /*
2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "opto/locknode.hpp"
27 #include "opto/parse.hpp"
28 #include "opto/rootnode.hpp"
29 #include "opto/runtime.hpp"
31 //=============================================================================
32 const RegMask &BoxLockNode::in_RegMask(uint i) const {
33 return _inmask;
34 }
36 const RegMask &BoxLockNode::out_RegMask() const {
37 return *Matcher::idealreg2regmask[Op_RegP];
38 }
40 uint BoxLockNode::size_of() const { return sizeof(*this); }
42 BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
43 _slot(slot), _is_eliminated(false) {
44 init_class_id(Class_BoxLock);
45 init_flags(Flag_rematerialize);
46 OptoReg::Name reg = OptoReg::stack2reg(_slot);
47 _inmask.Insert(reg);
48 }
50 //-----------------------------hash--------------------------------------------
51 uint BoxLockNode::hash() const {
52 if (EliminateNestedLocks)
53 return NO_HASH; // Each locked region has own BoxLock node
54 return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
55 }
57 //------------------------------cmp--------------------------------------------
58 uint BoxLockNode::cmp( const Node &n ) const {
59 if (EliminateNestedLocks)
60 return (&n == this); // Always fail except on self
61 const BoxLockNode &bn = (const BoxLockNode &)n;
62 return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
63 }
65 BoxLockNode* BoxLockNode::box_node(Node* box) {
66 // Chase down the BoxNode after RA which may spill box nodes.
67 while (!box->is_BoxLock()) {
68 // if (box_node->is_SpillCopy()) {
69 // Node *m = box_node->in(1);
70 // if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
71 // box_node = m->in(m->as_Mach()->operand_index(2));
72 // continue;
73 // }
74 // }
75 assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
76 // Only BoxLock nodes with the same stack slot are merged.
77 // So it is enough to trace one path to find the slot value.
78 box = box->in(1);
79 }
80 return box->as_BoxLock();
81 }
83 OptoReg::Name BoxLockNode::reg(Node* box) {
84 return box_node(box)->in_RegMask(0).find_first_elem();
85 }
// Is BoxLock node used for one simple lock region (same box and obj)?
// Returns false as soon as some AbstractLock using this box locks a
// different (uncast-unequal) object; otherwise returns true.  When
// 'unique_lock' is non-NULL and exactly one distinct Lock node uses this
// box on 'obj', that Lock is stored through 'unique_lock'.
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;      // the single Lock seen so far, if any
  bool has_one_lock = false;  // true while exactly one distinct Lock is seen
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          // Track uniqueness only when the caller asked for it and this
          // is a Lock (not an Unlock).
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              // A second, different Lock uses this box: not unique.
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    if (n->is_SafePoint() && n->as_SafePoint()->jvms()) {
      SafePointNode* sfn = n->as_SafePoint();
      JVMState* youngest_jvms = sfn->jvms();
      int max_depth = youngest_jvms->depth();
      // Walk every inlining depth of the safepoint's JVM state.
      for (int depth = 1; depth <= max_depth; depth++) {
        JVMState* jvms = youngest_jvms->of_depth(depth);
        int num_mon = jvms->nof_monitors();
        // Loop over monitors
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
          if (box_node == this) {
            // Dump diagnostic state before the assert below fires.
            if (!obj_node->eqv_uncast(obj)) {
              tty->cr();
              tty->print_cr("=====monitor info has different obj=====");
              tty->print_cr("obj:");
              obj->dump(1); tty->cr();
              tty->print_cr("obj uncast:");
              obj->uncast()->dump(); tty->cr();
              tty->print_cr("obj_node:");
              obj_node->dump(1); tty->cr();
              tty->print_cr("obj_node uncast:");
              obj_node->uncast()->dump();
            }
            assert(obj_node->eqv_uncast(obj),"monitor info has different obj");
          }
        }
      }
    }
  }
#endif
  // Report the unique lock, if any, to the caller.
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}
158 //=============================================================================
159 //-----------------------------hash--------------------------------------------
160 uint FastLockNode::hash() const { return NO_HASH; }
162 //------------------------------cmp--------------------------------------------
163 uint FastLockNode::cmp( const Node &n ) const {
164 return (&n == this); // Always fail except on self
165 }
167 //=============================================================================
168 //-----------------------------hash--------------------------------------------
169 uint FastUnlockNode::hash() const { return NO_HASH; }
171 //------------------------------cmp--------------------------------------------
172 uint FastUnlockNode::cmp( const Node &n ) const {
173 return (&n == this); // Always fail except on self
174 }
176 //
177 // Create a counter which counts the number of times this lock is acquired
178 //
179 void FastLockNode::create_lock_counter(JVMState* state) {
180 BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
181 OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
182 _counters = blnc->counters();
183 }
185 //=============================================================================
186 //------------------------------do_monitor_enter-------------------------------
187 void Parse::do_monitor_enter() {
188 kill_dead_locals();
190 // Null check; get casted pointer.
191 Node *obj = do_null_check(peek(), T_OBJECT);
192 // Check for locking null object
193 if (stopped()) return;
195 // the monitor object is not part of debug info expression stack
196 pop();
198 // Insert a FastLockNode which takes as arguments the current thread pointer,
199 // the obj pointer & the address of the stack slot pair used for the lock.
200 shared_lock(obj);
201 }
203 //------------------------------do_monitor_exit--------------------------------
204 void Parse::do_monitor_exit() {
205 kill_dead_locals();
207 pop(); // Pop oop to unlock
208 // Because monitors are guaranteed paired (else we bail out), we know
209 // the matching Lock for this Unlock. Hence we know there is no need
210 // for a null check on Unlock.
211 shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
212 }