src/share/vm/opto/locknode.cpp

author      aoqi
date        Thu, 24 May 2018 19:26:50 +0800
changeset   8862:fd13a567f179
parent      6876:710a3c8b516e
permissions -rw-r--r--

#7046 C2 supports long branch
Contributed-by: fujie

/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
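// A BoxLockNode models the stack slot holding a monitor's BasicLock.
// Each node is pinned to a single slot: its input register mask contains
// exactly that stack location, so every spilled copy of a box traces back
// to the same slot.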
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

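// The box is marked rematerializable: rather than spilling it, the
// register allocator may simply recreate the node, since its value is
// just the address of a fixed stack slot.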
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

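// Strip any SpillCopy or Phi nodes the register allocator may have
// inserted between a user of the box (e.g. a safepoint's monitor debug
// info) and the underlying BoxLock.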
BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode after RA which may spill box nodes.
  while (!box->is_BoxLock()) {
    // if (box_node->is_SpillCopy()) {
    //   Node *m = box_node->in(1);
    //   if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //     box_node = m->in(m->as_Mach()->operand_index(2));
    //     continue;
    //   }
    // }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

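// Lock elimination uses this query: before treating a lock region as
// removable it must know that every lock and unlock hanging off this box
// works on one and the same object.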
// Is BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
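// FastLockNode and FastUnlockNode carry the inlined fast-path lock and
// unlock operations. Neither participates in value numbering (hash() is
// NO_HASH and cmp() matches only self): every lock site keeps its own node.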
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
          OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

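// Create RTM abort/retry counters when RTM profiling or precise RTM
// locking statistics are enabled; stack locks get a separate counter set
// when UseRTMForStackLocks is on.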
void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
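// Parsing of the monitorenter/monitorexit bytecodes. A Java block such as
//   synchronized (obj) { ... }
// compiles to a monitorenter/monitorexit pair; the parser expands each
// side into the inlined fast path (FastLock/FastUnlock) plus a runtime
// slow-path call (see shared_lock()/shared_unlock() in GraphKit).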
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop(); // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock. Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
