Thu, 08 Oct 2015 09:37:23 +0200
8058737: CodeCache::find_blob fails with 'unsafe access to zombie method'
Summary: Remove active ICStubs from zombie nmethods
Reviewed-by: kvn, iveresov
1 /*
2 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_CODE_VMREG_HPP
26 #define SHARE_VM_CODE_VMREG_HPP
28 #include "memory/allocation.hpp"
29 #include "utilities/globalDefinitions.hpp"
30 #include "asm/register.hpp"
32 #ifdef COMPILER2
33 #include "opto/adlcVMDeps.hpp"
34 #include "utilities/ostream.hpp"
35 #if defined ADGLOBALS_MD_HPP
36 # include ADGLOBALS_MD_HPP
37 #elif defined TARGET_ARCH_MODEL_x86_32
38 # include "adfiles/adGlobals_x86_32.hpp"
39 #elif defined TARGET_ARCH_MODEL_x86_64
40 # include "adfiles/adGlobals_x86_64.hpp"
41 #elif defined TARGET_ARCH_MODEL_sparc
42 # include "adfiles/adGlobals_sparc.hpp"
43 #elif defined TARGET_ARCH_MODEL_zero
44 # include "adfiles/adGlobals_zero.hpp"
45 #elif defined TARGET_ARCH_MODEL_ppc_64
46 # include "adfiles/adGlobals_ppc_64.hpp"
47 #endif
48 #endif
50 //------------------------------VMReg------------------------------------------
51 // The VM uses 'unwarped' stack slots; the compiler uses 'warped' stack slots.
52 // Register numbers below VMRegImpl::stack0 are the same for both. Register
53 // numbers above stack0 are either warped (in the compiler) or unwarped
54 // (in the VM). Unwarped numbers represent stack indices, offsets from
55 // the current stack pointer. Warped numbers are required during compilation
56 // when we do not yet know how big the frame will be.
// A VMReg is an opaque handle, not a pointer to a real object: the pointer
// value itself encodes a machine-register number or stack-slot index (see
// VMRegImpl::value()/as_VMReg below, which simply cast between the pointer
// and an intptr_t). It must never be dereferenced as storage.
class VMRegImpl;
typedef VMRegImpl* VMReg;
// VMRegImpl gives the "pointer" VMReg its meaning. No VMRegImpl object is
// ever constructed: every member function reinterprets `this` as an integer
// register/slot number (see value()). Numbers in [0, stack0) name machine
// registers; numbers >= stack0 name stack slots; BAD_REG (-1) is invalid.
class VMRegImpl {
// friend class OopMap;
friend class VMStructs;   // VMStructs inspects the static fields (e.g. stack0)
friend class OptoReg;     // C2's OptoReg converts to/from VMReg numbering
// friend class Location;
private:
  enum {
    BAD_REG = -1          // sentinel for "no register"; see Bad()/is_valid()
  };

  // First stack-slot number: the boundary between machine registers and
  // stack slots (platform-defined, see the pd include at the bottom).
  static VMReg stack0;
  // Names for registers
  static const char *regName[];
  static const int register_count;

public:

  // Wrap an integer register/slot number as a VMReg. Only BAD_REG is allowed
  // below zero, and only when the caller passes bad_ok.
  static VMReg as_VMReg(int val, bool bad_ok = false) { assert(val > BAD_REG || bad_ok, "invalid"); return (VMReg) (intptr_t) val; }

  // Printable name; only meaningful for machine registers.
  const char* name() {
    if (is_reg()) {
      return regName[value()];
    } else if (!is_valid()) {
      return "BAD";
    } else {
      // shouldn't really be called with stack
      return "STACKED REG";
    }
  }
  // The invalid sentinel handle.
  static VMReg Bad() { return (VMReg) (intptr_t) BAD_REG; }
  // Note: these classify the encoded number carried in `this`, not memory.
  bool is_valid() const { return ((intptr_t) this) != BAD_REG; }
  bool is_stack() const { return (intptr_t) this >= (intptr_t) stack0; }
  bool is_reg() const { return is_valid() && !is_stack(); }

  // A concrete register is a value that returns true for is_reg() and is
  // also a register you could use in the assembler. On machines with
  // 64bit registers only one half of the VMReg (and OptoReg) is considered
  // concrete.
  bool is_concrete();

  // VMRegs are 4 bytes wide on all platforms
  static const int stack_slot_size;
  static const int slots_per_word;


  // This really ought to check that the register is "real" in the sense that
  // we don't try and get the VMReg number of a physical register that doesn't
  // have an expressible part. That would be pd specific code
  // Successor handle (number + 1); asserts we do not cross from the
  // register range into the stack range.
  VMReg next() {
    assert((is_reg() && value() < stack0->value() - 1) || is_stack(), "must be");
    return (VMReg)(intptr_t)(value() + 1);
  }
  // Handle i numbers further on; same range restriction as next().
  VMReg next(int i) {
    assert((is_reg() && value() < stack0->value() - i) || is_stack(), "must be");
    return (VMReg)(intptr_t)(value() + i);
  }
  // Predecessor handle (number - 1); must not step below slot 0 of either range.
  VMReg prev() {
    assert((is_stack() && value() > stack0->value()) || (is_reg() && value() != 0), "must be");
    return (VMReg)(intptr_t)(value() - 1);
  }


  // The raw encoded register/slot number (the pointer bits themselves).
  intptr_t value() const {return (intptr_t) this; }

  void print_on(outputStream* st) const;
  void print() const { print_on(tty); }

  // bias a stack slot.
  // Typically used to adjust a virtual frame slots by amounts that are offset by
  // amounts that are part of the native abi. The VMReg must be a stack slot
  // and the result must be also.

  VMReg bias(int offset) {
    assert(is_stack(), "must be");
    // VMReg res = VMRegImpl::as_VMReg(value() + offset);
    VMReg res = stack2reg(reg2stack() + offset);
    assert(res->is_stack(), "must be");
    return res;
  }

  // Convert register numbers to stack slots and vice versa
  static VMReg stack2reg( int idx ) {
    return (VMReg) (intptr_t) (stack0->value() + idx);
  }

  // Zero-based stack-slot index (offset of this slot from stack0).
  uintptr_t reg2stack() {
    assert( is_stack(), "Not a stack-based register" );
    return value() - stack0->value();
  }

  static void set_regName();

  // Platform-dependent members (e.g. as_Register()/as_FloatRegister
  // conversions) are textually spliced into the class body here.
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.hpp"
#endif

};
175 //---------------------------VMRegPair-------------------------------------------
176 // Pairs of 32-bit registers for arguments.
177 // SharedRuntime::java_calling_convention will overwrite the structs with
178 // the calling convention's registers. VMRegImpl::Bad is returned for any
179 // unused 32-bit register. This happens for the unused high half of Int
180 // arguments, or for 32-bit pointers or for longs in the 32-bit sparc build
181 // (which are passed to natives in low 32-bits of e.g. O0/O1 and the high
182 // 32-bits of O0/O1 are set to VMRegImpl::Bad). Longs in one register & doubles
183 // always return a high and a low register, as do 64-bit pointers.
184 //
185 class VMRegPair {
186 private:
187 VMReg _second;
188 VMReg _first;
189 public:
190 void set_bad ( ) { _second=VMRegImpl::Bad(); _first=VMRegImpl::Bad(); }
191 void set1 ( VMReg v ) { _second=VMRegImpl::Bad(); _first=v; }
192 void set2 ( VMReg v ) { _second=v->next(); _first=v; }
193 void set_pair( VMReg second, VMReg first ) { _second= second; _first= first; }
194 void set_ptr ( VMReg ptr ) {
195 #ifdef _LP64
196 _second = ptr->next();
197 #else
198 _second = VMRegImpl::Bad();
199 #endif
200 _first = ptr;
201 }
202 // Return true if single register, even if the pair is really just adjacent stack slots
203 bool is_single_reg() const {
204 return (_first->is_valid()) && (_first->value() + 1 == _second->value());
205 }
207 // Return true if single stack based "register" where the slot alignment matches input alignment
208 bool is_adjacent_on_stack(int alignment) const {
209 return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0));
210 }
212 // Return true if single stack based "register" where the slot alignment matches input alignment
213 bool is_adjacent_aligned_on_stack(int alignment) const {
214 return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0));
215 }
217 // Return true if single register but adjacent stack slots do not count
218 bool is_single_phys_reg() const {
219 return (_first->is_reg() && (_first->value() + 1 == _second->value()));
220 }
222 VMReg second() const { return _second; }
223 VMReg first() const { return _first; }
224 VMRegPair(VMReg s, VMReg f) { _second = s; _first = f; }
225 VMRegPair(VMReg f) { _second = VMRegImpl::Bad(); _first = f; }
226 VMRegPair() { _second = VMRegImpl::Bad(); _first = VMRegImpl::Bad(); }
227 };
229 #endif // SHARE_VM_CODE_VMREG_HPP