src/cpu/mips/vm/c1_CodeStubs_mips.cpp

changeset 8865:ffcdff41a92f
parent    6880:52ea28d233d2
child     9127:0f3853aec741
@@ -49,10 +49,11 @@
 }
 
 #ifdef TIERED
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
+  ce->store_parameter(_method->as_register(), 1);
   ce->store_parameter(_bci, 0);
   __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
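The hunk above makes the stub pass the Method* alongside the bci to the counter_overflow runtime entry, using two numbered parameter slots. The standalone sketch below only illustrates that slot convention; the ParamArea type, slot layout and values are assumptions for illustration, not HotSpot code.

    #include <cassert>
    #include <cstdint>

    struct ParamArea { intptr_t slot[2]; };

    // Slot 0 carries the bci, slot 1 the method, mirroring the two
    // ce->store_parameter() calls in the hunk above (layout assumed).
    static void pass_counter_overflow_args(ParamArea* p, intptr_t method, int bci) {
      p->slot[1] = method;         // ce->store_parameter(_method->as_register(), 1)
      p->slot[0] = (intptr_t)bci;  // ce->store_parameter(_bci, 0)
    }

    int main() {
      ParamArea p;
      pass_counter_overflow_args(&p, 0x1234, 42);
      assert(p.slot[0] == 42 && p.slot[1] == 0x1234);
      return 0;
    }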
@@ -196,26 +197,20 @@
 
 
 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   assert(__ sp_offset() == 0, "frame size should be fixed");
   __ bind(_entry);
-  //assert(_length->as_register() == rbx, "length must in rbx,");
-  //assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
-  //__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
   assert(_length->as_register() == T2, "length must in ebx");
 #ifndef _LP64
   assert(_klass_reg->as_register() == T4, "klass_reg must in T4");
 #else
-  //FIXME. in A4? aoqi
   assert(_klass_reg->as_register() == A4, "klass_reg must in A4");
 #endif
   __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
-  //assert(_result->as_register() == rax, "result must in rax,");
-  //__ jmp(_continuation);
   assert(_result->as_register() == V0, "result must in eax");
   __ b_far(_continuation);
   __ delayed()->nop();
 }
 
@@ -232,24 +227,18 @@
 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   assert(__ sp_offset() == 0, "frame size should be fixed");
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(), 1);
   ce->store_parameter(_lock_reg->is_single_cpu()? _lock_reg->as_register() : _lock_reg->as_register_lo(), 0);
-  /*
-  Runtime1::StubID enter_id;
-  if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
-  } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
-  }
-  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
-  */
+  Runtime1::StubID enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
+    enter_id = Runtime1::monitorenter_id;
   } else {
-    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
+    enter_id = Runtime1::monitorenter_nofpu_id;
   }
+  //__ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
+  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b_far(_continuation);
   __ delayed()->nop();
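The hunk above (and the matching MonitorExitStub hunk below) restores the shared-code shape of choosing the Runtime1 stub id first and emitting a single call, rather than duplicating the call in both branches. A minimal standalone sketch of that select-then-call pattern, with illustrative names only:

    #include <cstdio>

    enum StubID { monitorenter_id, monitorenter_nofpu_id };

    // Stands in for __ call(Runtime1::entry_for(id), relocInfo::runtime_call_type).
    static void call_stub(StubID id) { std::printf("call stub %d\n", (int)id); }

    static void emit_monitor_enter(bool has_fpu_code) {
      StubID enter_id = has_fpu_code ? monitorenter_id : monitorenter_nofpu_id;
      call_stub(enter_id);  // one call site, as in the rewritten stub
    }

    int main() {
      emit_monitor_enter(true);
      emit_monitor_enter(false);
      return 0;
    }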
@@ -262,28 +251,20 @@
     // lock_reg was destroyed by fast unlocking attempt => recompute it
     ce->monitor_address(_monitor_ix, _lock_reg);
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  /*
-  Runtime1::StubID exit_id;
-  if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
-  } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
-  }
-  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
-  __ jmp(_continuation);
-  */
+  Runtime1::StubID exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
+    exit_id = Runtime1::monitorexit_id;
   } else {
-    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
+    exit_id = Runtime1::monitorexit_nofpu_id;
   }
-  __ delayed()->nop();
-
-  //__ jmp(_continuation);
+  //__ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
+  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
+  __ delayed()->nop();
+
   __ b_far(_continuation);
   __ delayed()->nop();
 }
 
 
@@ -306,11 +287,12 @@
   // very hard to make a guess about what code might be in the icache.
   // Force the instruction to be double word aligned so that it
   // doesn't span a cache line.
 
   // the NativeJump is not finished, i am not sure what to do here. FIXME
-  //masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+  // masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+  //tty->print_cr("align_patch_site has not finished yet!!!");
 }
 
 void PatchingStub::emit_code(LIR_Assembler* ce) {
   assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
   assert(_bytes_to_copy <= 0xFF, "not enough room for call");
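The still-commented-out masm->align(round_to(NativeGeneralJump::instruction_size, wordSize)) in the hunk above would round the jump size up to a word multiple and align on that boundary. A standalone sketch of that rounding arithmetic (the helper name and values here are assumptions, not HotSpot's round_to):

    #include <cassert>
    #include <cstddef>

    // Round x up to the next multiple of a power-of-two alignment.
    static size_t round_up(size_t x, size_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(round_up(20, 8) == 24);  // e.g. a 20-byte jump padded to a word multiple
      assert(round_up(24, 8) == 24);  // already-aligned sizes are unchanged
      return 0;
    }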
@@ -325,23 +307,35 @@
   if (CommentedAssembly) {
     __ block_comment(" patch template");
   }
   if (_id == load_klass_id) {
     // produce a copy of the load klass instruction for use by the being initialized case
+//#ifdef ASSERT
     address start = __ pc();
+//#endif
+    Metadata* o = NULL;
+    RelocationHolder rspec = metadata_Relocation::spec(_index);
+    __ relocate(rspec);
+    __ li48(_obj, (long)o);
+    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
+      __ nop();
+    }
+#ifdef ASSERT
+    for (int i = 0; i < _bytes_to_copy; i++) {
+      address ptr = (address)(_pc_start + i);
+      int a_byte = (*ptr) & 0xFF;
+      assert(a_byte == *start++, "should be the same code");
+    }
+#endif
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
+//#ifdef ASSERT
+    address start = __ pc();
+//#endif
     jobject o = NULL;
-    int oop_index = __ oop_recorder()->allocate_oop_index(o);
-    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+    RelocationHolder rspec = oop_Relocation::spec(_index);
     __ relocate(rspec);
-#ifndef _LP64
-    //by_css
-    __ lui(_obj, Assembler::split_high((int)o));
-    __ addiu(_obj, _obj, Assembler::split_low((int)o));
-#else
-    //This should be same as jobject2reg_with_patching.
     __ li48(_obj, (long)o);
-#endif
     while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
       __ nop();
     }
 #ifdef ASSERT
     for (int i = 0; i < _bytes_to_copy; i++) {
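In the hunk above, the load_klass_id template now materializes a NULL Metadata* (and the new load_mirror_id/load_appendix_id cases a NULL jobject) with a patching relocation, pads with nops up to NativeCall::instruction_size, and under ASSERT verifies the emitted template matches the bytes at _pc_start. A standalone sketch of just that byte-for-byte check, on plain buffers with made-up contents:

    #include <cassert>
    #include <cstring>

    int main() {
      unsigned char original[8] = {1, 2, 3, 4, 5, 6, 7, 8};  // stands in for the code at _pc_start
      unsigned char emitted[8];                              // stands in for the freshly emitted template
      std::memcpy(emitted, original, sizeof(original));

      const unsigned char* start = emitted;
      for (int i = 0; i < (int)sizeof(original); i++) {
        int a_byte = original[i] & 0xFF;                     // same masking as (*ptr) & 0xFF above
        assert(a_byte == *start++ && "should be the same code");
      }
      return 0;
    }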
@@ -364,25 +358,22 @@
     }
   }
 
   address end_of_patch = __ pc();
   int bytes_to_skip = 0;
-  if (_id == load_klass_id) {
+  if (_id == load_mirror_id) {
     int offset = __ offset();
     if (CommentedAssembly) {
       __ block_comment(" being_initialized check");
     }
     assert(_obj != NOREG, "must be a valid register");
-#ifndef OPT_THREAD
-    //FIXME, T8 need be saved ?
-    Register thread = T8;
-    __ get_thread(thread);
-#else
-    Register thread = TREG;
-#endif
-    __ ld(AT, _obj, in_bytes(InstanceKlass::init_thread_offset()));
-    __ bne(thread, AT, call_patch);
+    Register tmp = AT;
+    Register tmp2 = T9;
+    __ ld_ptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
+    __ get_thread(tmp);
+    __ ld_ptr(tmp2, Address(tmp2, InstanceKlass::init_thread_offset()));
+    __ bne(tmp, tmp2, call_patch);
     __ delayed()->nop();
 
     // access_field patches may execute the patched code before it's
     // copied back into place so we need to jump back into the main
     // code of the nmethod to continue execution.
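The being_initialized check in the hunk above is now keyed on load_mirror_id and walks from the mirror object to its InstanceKlass, then compares the klass's init_thread with the current thread; only the initializing thread may run the patched code directly, every other thread branches to call_patch. A standalone sketch of that logic with illustrative types (not HotSpot's):

    #include <cassert>

    struct KlassSketch  { void* init_thread; };
    struct MirrorSketch { KlassSketch* klass; };

    // Mirrors: ld_ptr(tmp2, Address(_obj, klass_offset)); ld_ptr(tmp2, init_thread_offset);
    // bne(tmp, tmp2, call_patch) -- take the patch call when the threads differ.
    static bool take_call_patch(void* current_thread, const MirrorSketch* mirror) {
      return current_thread != mirror->klass->init_thread;
    }

    int main() {
      KlassSketch k;
      MirrorSketch m = { &k };
      int other_thread_storage = 0;
      void* initializing_thread = &k;                      // pretend thread identities
      void* other_thread = &other_thread_storage;
      k.init_thread = initializing_thread;
      assert(!take_call_patch(initializing_thread, &m));   // falls through to the patched code
      assert( take_call_patch(other_thread, &m));          // branches to call_patch
      return 0;
    }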
@@ -400,60 +391,39 @@
     // need 4 bytes
     int sizeof_patch_record = 4;
     bytes_to_skip += sizeof_patch_record;
 
     // emit the offsets needed to find the code to patch
-    int being_initialized_entry_offset = __ pc() - being_initialized_entry + patch_info_size;
+    int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
 
-#ifdef _LP64
-    /* Jin: In MIPS64, byte_skip is much larger than that in X86. It can not be contained in a byte:
-     * - bytes_to_skip = 0x190;
-     * - _bytes_to_copy = 0x20;
-     * - being_initialized_entry_offset = 0x1b0;
-     *
-     * To minimize the modification of share codes, the values are decreased 4 times when generated,
-     * thus can be packed into a long type.
-     *
-     * See [share/vm/c1/c1_Runtime1.cpp 918] Runtime1::patch_code()
-     */
-    being_initialized_entry_offset /= 4;
-    _bytes_to_copy /= 4;
-    bytes_to_skip /= 4;
-#endif
     // patch_info_pc offset | size of b instruction(8)| patched code size
     assert((char)being_initialized_entry_offset==being_initialized_entry_offset, "just check");
     assert((char)bytes_to_skip==bytes_to_skip, "just check");
     assert((char)_bytes_to_copy==_bytes_to_copy, "just check");
     __ emit_int32(being_initialized_entry_offset<<8 | (bytes_to_skip<<16) | (_bytes_to_copy<<24) );
 
     address patch_info_pc = __ pc();
-#ifdef _LP64
-    assert(patch_info_pc - end_of_patch == bytes_to_skip * 4, "incorrect patch info");
-#else
     assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
-#endif
 
   address entry = __ pc();
   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
   address target = NULL;
+  relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
     case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
+    case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
 
 
   if (CommentedAssembly) {
     __ block_comment("patch entry point");
   }
-#ifndef _LP64
-  __ lui(T9, Assembler::split_high((int)target));
-  __ addiu(T9, T9, Assembler::split_low((int)target));
-#else
   __ li48(T9, (long)target);
-#endif
   __ jalr(T9);
   __ delayed()->nop();
   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
   ce->add_call_info_here(_info);
   int jmp_off = __ offset();
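With the MIPS64 divide-by-4 workaround removed in the hunk above, the three byte-sized fields are once again packed directly into the 32-bit patch record emitted by __ emit_int32. A standalone sketch of that packing and the matching unpacking, assuming the same bit layout (the values are arbitrary examples):

    #include <cassert>
    #include <cstdint>

    // bits 15..8: being_initialized_entry_offset, bits 23..16: bytes_to_skip,
    // bits 31..24: _bytes_to_copy -- the layout used by the emit_int32 call above.
    static uint32_t pack_patch_record(int being_initialized_entry_offset,
                                      int bytes_to_skip,
                                      int bytes_to_copy) {
      return ((uint32_t)being_initialized_entry_offset << 8) |
             ((uint32_t)bytes_to_skip << 16) |
             ((uint32_t)bytes_to_copy << 24);
    }

    int main() {
      uint32_t rec = pack_patch_record(0x30, 0x0c, 0x20);
      assert(((rec >>  8) & 0xFF) == 0x30);  // being_initialized_entry_offset
      assert(((rec >> 16) & 0xFF) == 0x0c);  // bytes_to_skip
      assert(((rec >> 24) & 0xFF) == 0x20);  // _bytes_to_copy
      return 0;
    }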
@@ -462,16 +432,24 @@
   // Add enough nops so deoptimization can overwrite the jmp above with a call
   // and not destroy the world.
   for (int j = __ offset(); j < jmp_off + NativeCall::instruction_size; j += 4 ) {
     __ nop();
   }
-  if (_id == load_klass_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
-    relocInfo::change_reloc_info_for_address(&iter, pc, relocInfo::oop_type, relocInfo::none);
+    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
   }
+}
+
+
+void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
+  ce->add_call_info_here(_info);
+  DEBUG_ONLY(__ should_not_reach_here());
 }
 
 
 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
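In the final hunk, the relocation at the patch site is rewritten using the reloc_type picked in the switch earlier (metadata_type for load_klass_id, oop_type for load_mirror_id/load_appendix_id) instead of hard-coding oop_type. A standalone sketch of that id-to-relocation mapping with plain enums (illustrative names only):

    #include <cassert>

    enum PatchID   { access_field_id, load_klass_id, load_mirror_id, load_appendix_id };
    enum RelocType { none, metadata_type, oop_type };

    static RelocType reloc_for(PatchID id) {
      switch (id) {
        case load_klass_id:    return metadata_type;
        case load_mirror_id:
        case load_appendix_id: return oop_type;
        default:               return none;  // access_field_id keeps relocInfo::none
      }
    }

    int main() {
      assert(reloc_for(load_klass_id)    == metadata_type);
      assert(reloc_for(load_mirror_id)   == oop_type);
      assert(reloc_for(load_appendix_id) == oop_type);
      assert(reloc_for(access_field_id)  == none);
      return 0;
    }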
