Wed, 13 Mar 2013 09:44:45 +0100
8009761: Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
Summary: deoptimization doesn't set up callee frames so that they restore caller frames correctly.
Reviewed-by: kvn
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->
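
// The '__' shorthand routes the pseudo-assembly in the stubs below through
// the LIR_Assembler's MacroAssembler (a standard HotSpot C1 idiom); it is
// #undef'd at the end of this file.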

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
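
// A note on a pattern that repeats throughout this file: SPARC calls and
// branches have a delay slot, so every call or branch is followed by
// __ delayed()->..., either a plain nop or a useful move hoisted into the
// slot. As the moves above suggest, the Runtime1 stub routines called here
// take their arguments in G4 (and G5 where a second one is needed).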

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_method->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
          relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
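
// The _nofpu stub variants above are chosen when the compilation contains no
// FPU code; they can avoid saving and restoring the floating-point registers
// around the runtime call.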

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
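
// Rough layout of a patch site while it is still unpatched, inferred from
// emit_code() below (a sketch for orientation, not an authoritative diagram):
//
//   _pc_start:                 jump to 'entry' in the stub (overwrites the
//                              original instructions; their bytes survive in
//                              the stub's inline buffer)
//   stub:
//     being_initialized_entry: copy of the original instructions
//                              (_bytes_to_copy bytes)
//     [load_mirror_id only]    being-initialized check, branch back to nmethod
//     patch record (4 bytes):  0, being_initialized_entry offset,
//                              bytes_to_skip, bytes_to_copy
//     entry / call_patch:      call into the Runtime1 patching stub
//
// The runtime locates the patch record via _patch_info_offset, the fixed
// (negative) distance from the end of the call sequence back to the record;
// the assert in emit_code() checks that this distance never changes.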

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // patch sites on sparc are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, metadata_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
    __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:   target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:  target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);  reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id || _id == load_mirror_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, reloc_type, relocInfo::none);
  }

}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}
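
// The slow path above performs an ordinary static Java call into the
// arraycopy entry (hence the static call stub emitted beside it); O0-O4 are
// SPARC's outgoing argument registers, which is why the five arraycopy
// arguments are moved there first. In non-product builds the slow-case
// counter is bumped for diagnostics.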

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
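  //
  // Background, for orientation: G1's SATB pre-barrier records the value a
  // field held before it is overwritten, so concurrent marking sees a
  // consistent snapshot-at-the-beginning. A null previous value needs no
  // logging, which is why the code below branches straight to _continuation
  // when pre_val is null.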

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(pre_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();

}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}
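
// For context: byte_map_base is the biased base of the card table, chosen so
// that the card for a heap address is byte_map_base + (addr >> card_shift);
// the base itself need not point into the table.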

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();

  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(new_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

#undef __