Wed, 13 Apr 2011 14:33:03 -0700
6988308: assert((cnt > 0.0f) && (prob > 0.0f)) failed: Bad frequency assignment in if
Summary: Make sure cnt doesn't become negative and integer overflow doesn't happen.
Reviewed-by: kvn, twisti
duke@435 | 1 | /* |
stefank@2314 | 2 | * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "opto/locknode.hpp" |
stefank@2314 | 27 | #include "opto/parse.hpp" |
stefank@2314 | 28 | #include "opto/rootnode.hpp" |
stefank@2314 | 29 | #include "opto/runtime.hpp" |
duke@435 | 30 | |
//=============================================================================
// All inputs to a BoxLockNode are constrained to the single stack slot
// recorded in _inmask (populated by the constructor from _slot); the input
// index 'i' is irrelevant since every input gets the same mask.
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}
duke@435 | 35 | |
// The produced box value is an ordinary pointer: any register valid for
// the ideal Op_RegP class may hold it.
const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}
duke@435 | 39 | |
duke@435 | 40 | uint BoxLockNode::size_of() const { return sizeof(*this); } |
duke@435 | 41 | |
// Construct a BoxLockNode pinned to stack slot 'slot'.  The node hangs off
// the root (it has no real control/data dependence), is marked
// rematerializable so the register allocator may recreate it instead of
// spilling, and its register mask admits exactly the one stack slot.
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  // Translate the abstract stack slot number into an OptoReg name and make
  // it the only element of this node's input mask.
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}
duke@435 | 49 | |
//-----------------------------hash--------------------------------------------
// Value-numbering hash: mixes in the stack slot, and offsets eliminated
// boxes by fixed_slots() so an eliminated box never hashes equal to a live
// box for the same slot (cmp() below distinguishes them the same way).
uint BoxLockNode::hash() const {
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}
kvn@895 | 54 | |
duke@435 | 55 | //------------------------------cmp-------------------------------------------- |
duke@435 | 56 | uint BoxLockNode::cmp( const Node &n ) const { |
duke@435 | 57 | const BoxLockNode &bn = (const BoxLockNode &)n; |
kvn@895 | 58 | return bn._slot == _slot && bn._is_eliminated == _is_eliminated; |
duke@435 | 59 | } |
duke@435 | 60 | |
duke@435 | 61 | OptoReg::Name BoxLockNode::stack_slot(Node* box_node) { |
duke@435 | 62 | // Chase down the BoxNode |
duke@435 | 63 | while (!box_node->is_BoxLock()) { |
duke@435 | 64 | // if (box_node->is_SpillCopy()) { |
duke@435 | 65 | // Node *m = box_node->in(1); |
duke@435 | 66 | // if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) { |
duke@435 | 67 | // box_node = m->in(m->as_Mach()->operand_index(2)); |
duke@435 | 68 | // continue; |
duke@435 | 69 | // } |
duke@435 | 70 | // } |
duke@435 | 71 | assert(box_node->is_SpillCopy() || box_node->is_Phi(), "Bad spill of Lock."); |
duke@435 | 72 | box_node = box_node->in(1); |
duke@435 | 73 | } |
duke@435 | 74 | return box_node->in_RegMask(0).find_first_elem(); |
duke@435 | 75 | } |
duke@435 | 76 | |
//=============================================================================
//-----------------------------hash--------------------------------------------
// FastLockNodes are never value-numbered together; NO_HASH opts out of GVN.
uint FastLockNode::hash() const { return NO_HASH; }
duke@435 | 80 | |
//------------------------------cmp--------------------------------------------
// Identity-only comparison: a FastLockNode is equal only to itself, so no
// two distinct lock nodes can ever be commoned.
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}
duke@435 | 85 | |
//=============================================================================
//-----------------------------hash--------------------------------------------
// Like FastLockNode: never value-numbered; NO_HASH opts out of GVN.
uint FastUnlockNode::hash() const { return NO_HASH; }
duke@435 | 89 | |
//------------------------------cmp--------------------------------------------
// Identity-only comparison: an unlock node is equal only to itself.
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}
duke@435 | 94 | |
//
// Create a counter which counts the number of times this lock is acquired
//
// Allocates a named BiasedLocking counter tied to the given JVM state (so
// the counter can be attributed to a bytecode location) and caches its
// counters struct in _counters for the emitted lock code to update.
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}
duke@435 | 103 | |
//=============================================================================
//------------------------------do_monitor_enter-------------------------------
// Parse a monitorenter bytecode: null-check the object on top of the
// expression stack, then emit the fast-path locking subgraph for it.
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  // Note: peek() (not pop()) so the object stays on the stack across the
  // null check's possible uncommon trap, keeping debug info correct.
  Node *obj = do_null_check(peek(), T_OBJECT);
  // Check for locking null object
  if (stopped()) return;

  // the monitor object is not part of debug info expression stack
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}
duke@435 | 121 | |
//------------------------------do_monitor_exit--------------------------------
// Parse a monitorexit bytecode: pop the object and emit the unlocking
// subgraph using the innermost monitor recorded in the current map.
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}