/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_mips.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_mips.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;
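
// float_zero/double_zero are defined for the shared ConversionStub interface;
// on this port the f2i/d2i fixup is presumably handled inline by the LIR
// assembler, so emit_code below only binds its entry and asserts the bytecode.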
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
}

#ifdef TIERED
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b_far(_continuation);
  __ delayed()->nop();
}
#endif // TIERED

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(Runtime1::entry_for(stub_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

// T4 (A4 under _LP64) is used as the klass register and V0 as the result
// register; this MUST accord with Runtime1::generate_code_for.
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

// T2 is used as the length register, T4 (A4 under _LP64) as the klass register,
// and V0 as the result register; this MUST accord with Runtime1::generate_code_for.
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == T2, "length must be in T2");
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif

  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == T2, "length must be in T2");
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->is_single_cpu() ? _lock_reg->as_register() : _lock_reg->as_register_lo(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
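  // The _nofpu variants allow the runtime stub to skip saving and restoring
  // the FPU registers when the compiled method provably has no FPU code
  // (the save_fpu_registers distinction made in Runtime1's stub generation).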
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b_far(_continuation);
  __ delayed()->nop();
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, re-execute the instruction
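//
// Sketch of the stub laid out by emit_code below (sizes are approximate and
// depend on _id):
//
//   being_initialized_entry:
//     copy of the patched instructions, nop-padded to NativeCall::instruction_size
//     [being_initialized check, b_far back into the nmethod]   // load_mirror_id only
//     4-byte patch record (see the emit_int32 in emit_code)
//   entry / call_patch:                    // the patched site jumps here
//     li48 + jalr into the Runtime1 patching stub
//     b_far back to _patch_site_entry, then nop padding for deoptimization
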
//int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
int PatchingStub::_patch_info_offset = -NativeCall::instruction_size;
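
// _patch_info_offset above is the (negative) constant the runtime adds to the
// return address of the jalr into the patching stub in order to locate the
// patch record; the assert on patch_info_pc in emit_code checks that this
// distance stays in sync.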
void PatchingStub::align_patch_site(MacroAssembler* masm) {
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    // (start is needed unconditionally by the padding loop below, so it is not
    // wrapped in ASSERT)
    address start = __ pc();
    Metadata* o = NULL;
    RelocationHolder rspec = metadata_Relocation::spec(_index);
    __ relocate(rspec);
    __ li48(_obj, (long)o);
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // (start is needed unconditionally by the padding loop below)
    address start = __ pc();
    jobject o = NULL;
    RelocationHolder rspec = oop_Relocation::spec(_index);
    __ relocate(rspec);
    __ li48(_obj, (long)o);
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    assert((_bytes_to_copy & 3) == 0, "change this code");
    address start = __ pc();
    for (int i = 0; i < _bytes_to_copy; i += 4) {
      __ emit_int32(*(int*)(_pc_start + i));
      // make the site look like a nop (an all-zero word encodes sll zero, zero, 0)
      *(int*)(_pc_start + i) = 0;
    }
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != NOREG, "must be a valid register");
#ifndef OPT_THREAD
    Register tmp = AT;
    __ get_thread(tmp);
#else
    Register tmp = TREG;
#endif
    Register tmp2 = T9;
    __ ld_ptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ld_ptr(tmp2, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ bne(tmp, tmp2, call_patch);
    __ delayed()->nop();

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b_far(_patch_site_continuation);
    __ delayed()->nop();
    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but, for alignment, we
  // need 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  // The record packs three byte-sized fields: the being_initialized_entry
  // offset, the number of bytes to skip, and the patched code size.
  assert(being_initialized_entry_offset % 4 == 0, "must be a multiple of four");
  assert(bytes_to_skip % 4 == 0, "must be a multiple of four");
  // Shift right by two so the values fit in a char; refer to load_klass_or_mirror_patch_id.
  being_initialized_entry_offset >>= 2;
  bytes_to_skip >>= 2;
  assert((char)being_initialized_entry_offset == being_initialized_entry_offset, "just check");
  assert((char)bytes_to_skip == bytes_to_skip, "just check");
  assert((char)_bytes_to_copy == _bytes_to_copy, "just check");
  __ emit_int32(being_initialized_entry_offset << 8 | (bytes_to_skip << 16) | (_bytes_to_copy << 24));
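  // On this little-endian target the word just emitted is laid out in memory
  // as bytes [0, entry_offset, bytes_to_skip, _bytes_to_copy], so a decode on
  // the runtime side would look roughly like this (a sketch with hypothetical
  // names, not the actual runtime code; p is the patch_info_pc recorded below,
  // i.e., the address just past the record):
  //   int copy_bytes     = p[-1];
  //   int skip_bytes     = p[-2] << 2;   // undo the >>= 2 above
  //   int init_entry_off = p[-3] << 2;
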
  address patch_info_pc = __ pc();
  // FIXME: the unshifted bytes_to_skip cannot be contained in a byte
  bytes_to_skip <<= 2;
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)target);
  __ jalr(T9);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ b_far(_patch_site_entry);
  __ delayed()->nop();
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + NativeCall::instruction_size; j += 4) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// FIXME: it is unclear which register should be used here; A1 is assumed.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the methodOop and signature,
  // but it is not clear how to do that.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
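
  // java_calling_convention has filled args[i] with the location the Java
  // calling convention assigns to the i-th argument: either a machine
  // register or a stack slot at r_1->reg2stack() * wordSize from SP, which
  // is exactly what the store loop below relies on.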
  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments assigned to stack slots are stored there; the rest must
  // already be in the expected registers
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sw(r[i], SP, st_off);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());

#ifndef PRODUCT
#ifndef _LP64
  __ lui(T8, Assembler::split_high((int)&Runtime1::_arraycopy_slowcase_cnt));
  __ lw(AT, T8, Assembler::split_low((int)&Runtime1::_arraycopy_slowcase_cnt));
  __ addiu(AT, AT, 1);
  __ sw(AT, T8, Assembler::split_low((int)&Runtime1::_arraycopy_slowcase_cnt));
#else
  __ li(T8, (long)&Runtime1::_arraycopy_slowcase_cnt);
  __ lw(AT, T8, 0);
  __ daddiu(AT, AT, 1);
  __ sw(AT, T8, 0);
#endif
#endif

  __ b_far(_continuation);
  __ delayed()->nop();
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __