Mon, 11 Oct 2010 04:18:58 -0700
6829194: JSR 292 needs to support compressed oops
Reviewed-by: kvn, jrose
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_sparc.cpp.incl"
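
// Shorthand used throughout this file: __ expands to the LIR_Assembler's
// macro assembler, so "__ bind(...)", "__ call(...)" etc. emit instructions
// into the code buffer of the stub being generated.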
#define __ ce->masm()->

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}
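

// The failing index is passed to the runtime in G4. SPARC calls have a delay
// slot, which is filled with a nop here because nothing useful can be hoisted
// into it.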
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
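

// CounterOverflowStub: passes the bci in G4 and the method in G5 (moved in
// the call's delay slot), then branches back to the method code at
// _continuation.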
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_method->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
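

// DivByZeroStub: when the divide was compiled with an implicit check, _offset
// is the code offset of the trapping instruction and is recorded in the
// implicit exception table so the fault can be dispatched to this stub.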
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
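

// ImplicitNullCheckStub: the faulting access at _offset is mapped to this
// stub through the implicit exception table; the runtime call raises the
// NullPointerException and never returns.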
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
          relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
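// The optional stub argument (e.g. the object that failed a checkcast) is
// passed in G4; G0 is SPARC's hardwired zero register, so mov(G0, G4) passes
// a NULL argument.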
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of ArrayStoreExceptionStub

ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}


void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_array_store_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub
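// The slow path calls into Runtime1 with the klass in G5 (set in the call's
// delay slot); the new object comes back in O0 and is moved into the result
// register in the delay slot of the branch back to _continuation.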

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
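// Like NewInstanceStub, but the element count is passed to the runtime in G4
// in addition to the klass in G5.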
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
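// MonitorEnterStub passes the object in G4 and the BasicLock in G5. The
// "_nofpu" runtime entries skip saving and restoring the FPU state, which is
// only needed when the compiled method actually contains FPU code.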
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
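

// MonitorExitStub: when _compute_lock is set, the lock address was not kept
// live in a register, so it is recomputed from the monitor index before the
// runtime call (which takes the BasicLock address in G4).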
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
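
// The runtime finds the 4-byte patch record at this fixed negative offset
// from the pc following the call into the patching stub (see the assert in
// emit_code below).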

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on SPARC are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_oop_index >= 0, "must have oop index");
    __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
    __ cmp(G2_thread, G3);
    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
    __ delayed()->nop();

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
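
  // Overwrite the patch site with an unconditional jump to this stub; the
  // runtime copies the original instructions back once patching is done.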
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:   target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
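
  // While the site is unpatched it holds the jump emitted above rather than
  // the klass load, so its oop relocations must be switched off; the copy of
  // the instruction inside this stub carries the oop relocation instead.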
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, relocInfo::oop_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, relocInfo::oop_type, relocInfo::none);
  }

}
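

// DeoptimizeStub: transfers control to the deopt blob entry that deoptimizes
// the frame and re-executes the current bytecode in the interpreter; control
// never returns to the compiled code.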
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(), O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(), O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(), O4);

  ce->emit_static_call_stub();

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
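
  // In non-product builds, count how often the arraycopy slow path is taken.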
#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}


///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
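
// G1 pre-barrier (SATB): the value about to be overwritten must be recorded
// for the concurrent marker if it is non-NULL; a zero pre_val skips the
// runtime call.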
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                    pre_val_reg, _continuation);
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();

}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}
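
// G1 post-barrier: after the reference store, the card covering the updated
// location must be dirtied so that remembered sets see the new pointer; a
// NULL new value requires no card mark.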
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();
  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                    new_val_reg, _continuation);
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // SERIALGC
///////////////////////////////////////////////////////////////////////////////////

#undef __