src/share/vm/opto/locknode.cpp

author       kvn
date         Sat, 07 Jan 2012 13:26:43 -0800
changeset    3406:e9a5e0a812c8
parent       2314:f95d63e2154a
child        3407:35acf8f0a2e4
permissions  -rw-r--r--

7125896: Eliminate nested locks
Summary: Nested lock elimination is performed before lock node expansion by looking for outer locks of the same object.
Reviewed-by: never, twisti
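
For context, the pattern this change targets looks like the following Java snippet (illustrative only; the class and method names are hypothetical). The inner synchronized block re-locks an object that an enclosing region already holds, so once the compiler proves both regions lock the same object, the inner monitor enter/exit pair can be eliminated. The EliminateNestedLocks flag tested in BoxLockNode::hash() and cmp() below gates this behavior.

    public class NestedLockExample {
        private final Object lock = new Object();
        private int count;

        public void update() {
            synchronized (lock) {       // outer lock region
                increment();            // typically inlined by the JIT
            }
        }

        private void increment() {
            synchronized (lock) {       // nested lock on the same object:
                count++;                // a candidate for elimination
            }
        }
    }

With EliminateNestedLocks enabled, hash() returns NO_HASH and cmp() matches only on identity, so each locked region keeps its own BoxLock node and nested regions can be identified separately.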

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

bool BoxLockNode::same_slot(Node* box1, Node* box2) {
  return box_node(box1)->_slot == box_node(box2)->_slot;
}

// Is BoxLock node used for one simple lock region (same box and obj)?
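// Returns false if this box is merged with another region through a Phi, or
// if a Lock/Unlock that uses this box as its box locks a different object.
// When it returns true and unique_lock is non-NULL, *unique_lock is set only
// when exactly one LockNode locks obj through this box.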
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_Phi())
      return false; // Merged regions
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node() == obj) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && (flock->obj_node() == obj),"");
    }
    if (n->is_SafePoint() && n->as_SafePoint()->jvms()) {
      SafePointNode* sfn = n->as_SafePoint();
      JVMState* youngest_jvms = sfn->jvms();
      int max_depth = youngest_jvms->depth();
      for (int depth = 1; depth <= max_depth; depth++) {
        JVMState* jvms = youngest_jvms->of_depth(depth);
        int num_mon  = jvms->nof_monitors();
        // Loop over monitors
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
          if (box_node == this) {
            assert(obj_node == obj,"");
          }
        }
      }
    }
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node *obj = do_null_check(peek(), T_OBJECT);
  // Check for locking null object
  if (stopped()) return;

  // the monitor object is not part of debug info expression stack
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
