// Merged: Tue, 10 Mar 2009 08:52:16 -0700
1 /*
2 * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_c1_FrameMap_x86.cpp.incl"
// No stack space is reserved for C runtime arguments on x86.
const int FrameMap::pd_c_runtime_reserved_arg_size = 0;
30 LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
31 LIR_Opr opr = LIR_OprFact::illegalOpr;
32 VMReg r_1 = reg->first();
33 VMReg r_2 = reg->second();
34 if (r_1->is_stack()) {
35 // Convert stack slot to an SP offset
36 // The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
37 // so we must add it in here.
38 int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
39 opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type));
40 } else if (r_1->is_Register()) {
41 Register reg = r_1->as_Register();
42 if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
43 Register reg2 = r_2->as_Register();
44 #ifdef _LP64
45 assert(reg2 == reg, "must be same register");
46 opr = as_long_opr(reg);
47 #else
48 opr = as_long_opr(reg2, reg);
49 #endif // _LP64
50 } else if (type == T_OBJECT || type == T_ARRAY) {
51 opr = as_oop_opr(reg);
52 } else {
53 opr = as_opr(reg);
54 }
55 } else if (r_1->is_FloatRegister()) {
56 assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
57 int num = r_1->as_FloatRegister()->encoding();
58 if (type == T_FLOAT) {
59 opr = LIR_OprFact::single_fpu(num);
60 } else {
61 opr = LIR_OprFact::double_fpu(num);
62 }
63 } else if (r_1->is_XMMRegister()) {
64 assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
65 int num = r_1->as_XMMRegister()->encoding();
66 if (type == T_FLOAT) {
67 opr = LIR_OprFact::single_xmm(num);
68 } else {
69 opr = LIR_OprFact::double_xmm(num);
70 }
71 } else {
72 ShouldNotReachHere();
73 }
74 return opr;
75 }
// Canonical LIR operands for the general-purpose registers.
// All of these are filled in by FrameMap::init().
LIR_Opr FrameMap::rsi_opr;
LIR_Opr FrameMap::rdi_opr;
LIR_Opr FrameMap::rbx_opr;
LIR_Opr FrameMap::rax_opr;
LIR_Opr FrameMap::rdx_opr;
LIR_Opr FrameMap::rcx_opr;
LIR_Opr FrameMap::rsp_opr;
LIR_Opr FrameMap::rbp_opr;

// Operand holding the receiver according to the Java calling convention.
LIR_Opr FrameMap::receiver_opr;

// Oop-typed variants of the allocatable CPU registers.
LIR_Opr FrameMap::rsi_oop_opr;
LIR_Opr FrameMap::rdi_oop_opr;
LIR_Opr FrameMap::rbx_oop_opr;
LIR_Opr FrameMap::rax_oop_opr;
LIR_Opr FrameMap::rdx_oop_opr;
LIR_Opr FrameMap::rcx_oop_opr;

// Canonical operands for longs and floating-point values.
LIR_Opr FrameMap::long0_opr;
LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;
LIR_Opr FrameMap::xmm0_float_opr;
LIR_Opr FrameMap::xmm0_double_opr;

#ifdef _LP64

LIR_Opr FrameMap::r8_opr;
LIR_Opr FrameMap::r9_opr;
LIR_Opr FrameMap::r10_opr;
LIR_Opr FrameMap::r11_opr;
LIR_Opr FrameMap::r12_opr;
LIR_Opr FrameMap::r13_opr;
LIR_Opr FrameMap::r14_opr;
LIR_Opr FrameMap::r15_opr;

// r10 and r15 can never contain oops since they aren't available to
// the allocator
LIR_Opr FrameMap::r8_oop_opr;
LIR_Opr FrameMap::r9_oop_opr;
LIR_Opr FrameMap::r11_oop_opr;
LIR_Opr FrameMap::r12_oop_opr;
LIR_Opr FrameMap::r13_oop_opr;
LIR_Opr FrameMap::r14_oop_opr;
#endif // _LP64

// Caller-save register tables; zero-initialized here and populated
// by FrameMap::init().
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, };

// C1 register number -> XMMRegister mapping, populated by FrameMap::init().
XMMRegister FrameMap::_xmm_regs [] = { 0, };
130 XMMRegister FrameMap::nr2xmmreg(int rnr) {
131 assert(_init_done, "tables not initialized");
132 return _xmm_regs[rnr];
133 }
135 //--------------------------------------------------------
136 // FrameMap
137 //--------------------------------------------------------
139 void FrameMap::init() {
140 if (_init_done) return;
142 assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
143 map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0);
144 map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1);
145 map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2);
146 map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3);
147 map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4);
148 map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5);
150 #ifndef _LP64
151 // The unallocatable registers are at the end
152 map_register(6, rsp);
153 map_register(7, rbp);
154 #else
155 map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6);
156 map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7);
157 map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8);
158 map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9);
159 map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10);
160 map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11);
161 // The unallocatable registers are at the end
162 map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12);
163 map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
164 map_register(14, rsp);
165 map_register(15, rbp);
166 #endif // _LP64
168 #ifdef _LP64
169 long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
170 long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
171 #else
172 long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
173 long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
174 #endif // _LP64
175 fpu0_float_opr = LIR_OprFact::single_fpu(0);
176 fpu0_double_opr = LIR_OprFact::double_fpu(0);
177 xmm0_float_opr = LIR_OprFact::single_xmm(0);
178 xmm0_double_opr = LIR_OprFact::double_xmm(0);
180 _caller_save_cpu_regs[0] = rsi_opr;
181 _caller_save_cpu_regs[1] = rdi_opr;
182 _caller_save_cpu_regs[2] = rbx_opr;
183 _caller_save_cpu_regs[3] = rax_opr;
184 _caller_save_cpu_regs[4] = rdx_opr;
185 _caller_save_cpu_regs[5] = rcx_opr;
187 #ifdef _LP64
188 _caller_save_cpu_regs[6] = r8_opr;
189 _caller_save_cpu_regs[7] = r9_opr;
190 _caller_save_cpu_regs[8] = r11_opr;
191 _caller_save_cpu_regs[9] = r12_opr;
192 _caller_save_cpu_regs[10] = r13_opr;
193 _caller_save_cpu_regs[11] = r14_opr;
194 #endif // _LP64
197 _xmm_regs[0] = xmm0;
198 _xmm_regs[1] = xmm1;
199 _xmm_regs[2] = xmm2;
200 _xmm_regs[3] = xmm3;
201 _xmm_regs[4] = xmm4;
202 _xmm_regs[5] = xmm5;
203 _xmm_regs[6] = xmm6;
204 _xmm_regs[7] = xmm7;
206 #ifdef _LP64
207 _xmm_regs[8] = xmm8;
208 _xmm_regs[9] = xmm9;
209 _xmm_regs[10] = xmm10;
210 _xmm_regs[11] = xmm11;
211 _xmm_regs[12] = xmm12;
212 _xmm_regs[13] = xmm13;
213 _xmm_regs[14] = xmm14;
214 _xmm_regs[15] = xmm15;
215 #endif // _LP64
217 for (int i = 0; i < 8; i++) {
218 _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
219 }
221 for (int i = 0; i < nof_caller_save_xmm_regs ; i++) {
222 _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i);
223 }
225 _init_done = true;
227 rsi_oop_opr = as_oop_opr(rsi);
228 rdi_oop_opr = as_oop_opr(rdi);
229 rbx_oop_opr = as_oop_opr(rbx);
230 rax_oop_opr = as_oop_opr(rax);
231 rdx_oop_opr = as_oop_opr(rdx);
232 rcx_oop_opr = as_oop_opr(rcx);
234 rsp_opr = as_pointer_opr(rsp);
235 rbp_opr = as_pointer_opr(rbp);
237 #ifdef _LP64
238 r8_oop_opr = as_oop_opr(r8);
239 r9_oop_opr = as_oop_opr(r9);
240 r11_oop_opr = as_oop_opr(r11);
241 r12_oop_opr = as_oop_opr(r12);
242 r13_oop_opr = as_oop_opr(r13);
243 r14_oop_opr = as_oop_opr(r14);
244 #endif // _LP64
246 VMRegPair regs;
247 BasicType sig_bt = T_OBJECT;
248 SharedRuntime::java_calling_convention(&sig_bt, ®s, 1, true);
249 receiver_opr = as_oop_opr(regs.first()->as_Register());
251 }
254 Address FrameMap::make_new_address(ByteSize sp_offset) const {
255 // for rbp, based address use this:
256 // return Address(rbp, in_bytes(sp_offset) - (framesize() - 2) * 4);
257 return Address(rsp, in_bytes(sp_offset));
258 }
261 // ----------------mapping-----------------------
262 // all mapping is based on rbp, addressing, except for simple leaf methods where we access
263 // the locals rsp based (and no frame is built)
266 // Frame for simple leaf methods (quick entries)
267 //
268 // +----------+
269 // | ret addr | <- TOS
270 // +----------+
271 // | args |
272 // | ...... |
274 // Frame for standard methods
275 //
276 // | .........| <- TOS
277 // | locals |
278 // +----------+
279 // | old rbp, | <- EBP
280 // +----------+
281 // | ret addr |
282 // +----------+
283 // | args |
284 // | .........|
287 // For OopMaps, map a local variable or spill index to an VMRegImpl name.
288 // This is the offset from sp() in the frame of the slot for the index,
// skewed by VMRegImpl::stack0 to indicate a stack location (vs. a register.)
290 //
291 // framesize +
292 // stack0 stack0 0 <- VMReg
293 // | | <registers> |
294 // ...........|..............|.............|
295 // 0 1 2 3 x x 4 5 6 ... | <- local indices
296 // ^ ^ sp() ( x x indicate link
297 // | | and return addr)
298 // arguments non-argument locals
VMReg FrameMap::fpu_regname (int n) {
  // Return the VMReg name for the fpu stack slot "n".
  // A spilled fpu stack slot comprises two single-word OptoRegs.
  return as_FloatRegister(n)->as_VMReg();
}
307 LIR_Opr FrameMap::stack_pointer() {
308 return FrameMap::rsp_opr;
309 }
312 bool FrameMap::validate_frame() {
313 return true;
314 }