Thu, 24 May 2018 19:49:50 +0800
Some C1 fixes
Contributed-by: chenhaoxuan, zhaixiang, aoqi
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_mips.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_mips.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
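
// Shorthand: emit assembly through the current stub's LIR_Assembler.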
#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
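  // No fix-up code is emitted here: the inline conversion sequence is assumed to
  // handle out-of-range values itself (the float_zero/double_zero constants above
  // are carried over from the x86 version).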
}

#ifdef TIERED
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b_far(_continuation);
  __ delayed()->nop();
}
#endif // TIERED

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
#ifdef OPT_RANGECHECK
  if (_throw_pc != -1) {
    ce->compilation()->null_check_table()->append(_throw_pc, __ offset());
  }
#endif
  __ bind(_entry);
  // Pass the array index on the stack: the runtime stub adds register state
  // to the stack, so all registers must be preserved.
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { // Fu:20130814
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) { // Fu:20130814
  tty->print_cr("PredicateFailedStub::emit_code not implemented yet!");
  Unimplemented();
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

// We use T4 (A4 on LP64) as the klass register and V0 as the result register.
// This must agree with Runtime1::generate_code_for.
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  // FIXME: should this really be A4? (aoqi)
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

// We use T2 as the length register, T4 (A4 on LP64) as the klass register
// and V0 as the result register. This must agree with Runtime1::generate_code_for.
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == T2, "length must be in T2");
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  // FIXME: should this really be A4? (aoqi)
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif

  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == T2, "length must be in T2");
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
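  // Pass the object and the lock address through the stub frame; the runtime
  // stub preserves all registers.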
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->is_single_cpu() ? _lock_reg->as_register() : _lock_reg->as_register_lo(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b_far(_continuation);
  __ delayed()->nop();
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by the fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, re-execute instruction
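
// The runtime finds the patch-info word at a fixed negative offset from the
// call's return address: the call sequence emitted in emit_code() below is
// exactly one NativeCall in size (see the assert there).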
int PatchingStub::_patch_info_offset = -NativeCall::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // On x86 this aligns the patch site so that the 5-7 byte instruction being
  // patched never spans a cache line: it is mostly impossible on Intel to
  // simply invalidate other processors' icaches, and since they may prefetch
  // aggressively, one cannot guess what code might be in the icache.
  // The MIPS NativeJump support is not finished yet, so nothing is done
  // here for now. FIXME
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  Label call_patch;

  // Static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // Produce a copy of the load-klass instruction for use by the
    // being-initialized case.
    address start = __ pc();
    Metadata* o = NULL;
    RelocationHolder rspec = metadata_Relocation::spec(_index);
    __ relocate(rspec);
    __ li48(_obj, (long)o);
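    // pad with nops so the template is at least NativeCall::instruction_size bytes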
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    address start = __ pc();
    jobject o = NULL;
    RelocationHolder rspec = oop_Relocation::spec(_index);
    __ relocate(rspec);
    __ li48(_obj, (long)o);
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    assert((_bytes_to_copy & 3) == 0, "change this code");
    address start = __ pc();
    for (int i = 0; i < _bytes_to_copy; i += 4) {
      __ emit_int32(*(int*)(_pc_start + i));
      // make the patched site look like nops (0 encodes a nop on MIPS), @jerome
      *(int*)(_pc_start + i) = 0;
    }
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != NOREG, "must be a valid register");
    Register tmp = AT;
    Register tmp2 = T9;
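    // Take the patch path unless the current thread is the one initializing the klass.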
    __ ld_ptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ ld_ptr(tmp2, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ bne(tmp, tmp2, call_patch);
    __ delayed()->nop();

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b_far(_patch_site_continuation);
    __ delayed()->nop();
    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes, but we need 4 for alignment.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  // Patch record layout (one byte per field, low byte unused):
  // bits 8..15 = being_initialized entry offset, bits 16..23 = bytes to skip, bits 24..31 = patched code size.
  assert((char)being_initialized_entry_offset == being_initialized_entry_offset, "just check");
  assert((char)bytes_to_skip == bytes_to_skip, "just check");
  assert((char)_bytes_to_copy == _bytes_to_copy, "just check");
  __ emit_int32(being_initialized_entry_offset << 8 | (bytes_to_skip << 16) | (_bytes_to_copy << 24));

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
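  // Overwrite the start of the patch site with a jump to this stub, so that
  // executions of the not-yet-patched code branch into the runtime call below.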
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);    reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);   reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ li48(T9, (long)target);
  __ jalr(T9);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ b_far(_patch_site_entry);
  __ delayed()->nop();
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + NativeCall::instruction_size; j += 4) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();  // fill the branch delay slot, as for every other call in this file
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// FIXME: it is not clear which register should be used here; A1 is assumed.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments not assigned to registers by the calling convention get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sw(r[i], SP, st_off);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
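
  // In non-product builds, bump Runtime1::_arraycopy_slowcase_cnt for statistics.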
#ifndef PRODUCT
#ifndef _LP64
  __ lui(T8, Assembler::split_high((int)&Runtime1::_arraycopy_slowcase_cnt));
  __ lw(AT, T8, Assembler::split_low((int)&Runtime1::_arraycopy_slowcase_cnt));
  __ addiu(AT, AT, 1);
  __ sw(AT, T8, Assembler::split_low((int)&Runtime1::_arraycopy_slowcase_cnt));
#else
  __ li(T8, (long)&Runtime1::_arraycopy_slowcase_cnt);
  __ lw(AT, T8, 0);
  __ daddiu(AT, AT, 1);
  __ sw(AT, T8, 0);
#endif
#endif

  __ b_far(_continuation);
  __ delayed()->nop();
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __