src/cpu/mips/vm/c1_CodeStubs_mips.cpp

changeset 8865:ffcdff41a92f
parent    6880:52ea28d233d2
child     9127:0f3853aec741
--- a/src/cpu/mips/vm/c1_CodeStubs_mips.cpp	Sat Jan 06 16:30:58 2018 +0800
+++ b/src/cpu/mips/vm/c1_CodeStubs_mips.cpp	Thu May 24 19:49:50 2018 +0800
@@ -51,6 +51,7 @@
 #ifdef TIERED
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
+  ce->store_parameter(_method->as_register(), 1);
   ce->store_parameter(_bci, 0);
   __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
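
The shared Runtime1 counter-overflow entry in this JDK 8 line takes the overflowing Method* as well as the bci, and this port had only been passing the bci; the hunk adds the missing parameter-1 store. Recomposed from the hunk (the tail of the stub lies outside the shown context), the patched body reads:

    void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
      __ bind(_entry);
      // parameter 1: the method whose counter overflowed
      ce->store_parameter(_method->as_register(), 1);
      // parameter 0: the bci at which the overflow was observed
      ce->store_parameter(_bci, 0);
      __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
      __ delayed()->nop();   // MIPS branch-delay slot
      // ... remainder unchanged, outside this hunk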
@@ -198,22 +199,16 @@
 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   assert(__ sp_offset() == 0, "frame size should be fixed");
   __ bind(_entry);
-  //assert(_length->as_register() == rbx, "length must in rbx,");
-  //assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
-  //__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
   assert(_length->as_register() == T2, "length must in ebx");
 #ifndef _LP64
   assert(_klass_reg->as_register() == T4, "klass_reg must in T4");
 #else
-  //FIXME. in A4? aoqi
   assert(_klass_reg->as_register() == A4, "klass_reg must in A4");
 #endif
   __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
-  //assert(_result->as_register() == rax, "result must in rax,");
-  //__ jmp(_continuation);
   assert(_result->as_register() == V0, "result must in eax");
   __ b_far(_continuation);
   __ delayed()->nop();
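
This hunk is pure cleanup: the commented-out x86 leftovers (the rbx/rdx/rax asserts and the RuntimeAddress call) and a stale FIXME are deleted. The surviving asserts pin down the slow-path register contract on MIPS, although their message strings still carry "ebx"/"eax" text from the original x86 port:

    // slow-path contract:  length in T2,
    //                      klass  in T4 (32-bit) or A4 (LP64),
    //                      result in V0
    assert(_length->as_register() == T2, "length must in ebx");  // stale x86 message text
    assert(_result->as_register() == V0, "result must in eax");  // likewise

The b_far(_continuation) epilogue, rather than a plain branch, is the pattern used throughout this file, presumably because a slow-case stub may be emitted beyond the reach of a short PC-relative branch.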
@@ -234,20 +229,14 @@
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(),  1);
   ce->store_parameter(_lock_reg->is_single_cpu()? _lock_reg->as_register() : _lock_reg->as_register_lo(), 0);
-  /*
-     Runtime1::StubID enter_id;
-     if (ce->compilation()->has_fpu_code()) {
-     enter_id = Runtime1::monitorenter_id;
-     } else {
-     enter_id = Runtime1::monitorenter_nofpu_id;
-     }
-     __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
-   */
+  Runtime1::StubID enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
+    enter_id = Runtime1::monitorenter_id;
   } else {
-    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
+    enter_id = Runtime1::monitorenter_nofpu_id;
   }
+  //__ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
+  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
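
Instead of duplicating the runtime call in both arms of the if, the hunk restores the shape of the previously commented-out upstream code: select the stub id first, then emit a single call. The _nofpu variant is the cheaper entry used when the compilation contains no FPU code, so FPU state need not be saved around the call:

    Runtime1::StubID enter_id;
    if (ce->compilation()->has_fpu_code()) {
      enter_id = Runtime1::monitorenter_id;        // saves/restores FPU state
    } else {
      enter_id = Runtime1::monitorenter_nofpu_id;  // skips the FPU save
    }
    __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
    __ delayed()->nop();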
@@ -264,24 +253,16 @@
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  /*
-     Runtime1::StubID exit_id;
-     if (ce->compilation()->has_fpu_code()) {
-     exit_id = Runtime1::monitorexit_id;
-     } else {
-     exit_id = Runtime1::monitorexit_nofpu_id;
-     }
-     __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
-     __ jmp(_continuation);
-   */
+  Runtime1::StubID exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
+    exit_id = Runtime1::monitorexit_id;
   } else {
-    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
+    exit_id = Runtime1::monitorexit_nofpu_id;
   }
+  //__ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
+  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
 
-  //__ jmp(_continuation);
   __ b_far(_continuation);
   __ delayed()->nop();
 }
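
MonitorExitStub gets the symmetric treatment with exit_id. As the pre-existing comment notes, monitorexit is a non-blocking leaf routine, so unlike the enter path no call info or oop map is recorded after the call; the stub just branches back to compiled code:

    __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ b_far(_continuation);   // long-range branch back to the fast path
    __ delayed()->nop();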
@@ -308,7 +289,8 @@
   // doesn't span a cache line.
 
   // the NativeJump is not finished, i am not sure what to do here. FIXME
-  //masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+  //  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
+  //tty->print_cr("align_patch_site has not finished yet!!!");
 }
 
 void PatchingStub::emit_code(LIR_Assembler* ce) {
@@ -327,19 +309,31 @@
   }
   if (_id == load_klass_id) {
     // produce a copy of the load klass instruction for use by the being initialized case
+//#ifdef ASSERT
     address start = __ pc();
+//#endif
+    Metadata* o = NULL;
+    RelocationHolder rspec = metadata_Relocation::spec(_index);
+    __ relocate(rspec);
+    __ li48(_obj, (long)o);
+    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
+      __ nop();
+    }
+#ifdef ASSERT
+    for (int i = 0; i < _bytes_to_copy; i++) {
+      address ptr = (address)(_pc_start + i);
+      int a_byte = (*ptr) & 0xFF;
+      assert(a_byte == *start++, "should be the same code");
+    }
+#endif
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
+//#ifdef ASSERT
+    address start = __ pc();
+//#endif
     jobject o = NULL;
-    int oop_index = __ oop_recorder()->allocate_oop_index(o);
-    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+    RelocationHolder rspec = oop_Relocation::spec(_index);
     __ relocate(rspec);
-#ifndef _LP64
-    //by_css
-    __ lui(_obj, Assembler::split_high((int)o));
-    __ addiu(_obj, _obj, Assembler::split_low((int)o));
-#else
-    //This should be same as jobject2reg_with_patching.
     __ li48(_obj, (long)o);
-#endif
     while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
     }
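
This is the core of the change. With the JDK 8 metadata split, a patched klass constant is a Metadata* carrying a metadata relocation, while class mirrors and MethodHandle appendixes are jobjects carrying oop relocations, so the old single load_klass_id arm becomes two arms. Both use the recorded _index rather than allocating a fresh oop index on the spot, and both now share one shape:

    Metadata* o = NULL;                             // placeholder, patched at runtime
    RelocationHolder rspec = metadata_Relocation::spec(_index);
    __ relocate(rspec);
    __ li48(_obj, (long)o);                         // fixed-length immediate load
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();                                     // pad up to the patchable size
    }

(The load_mirror/load_appendix arm is identical except for jobject o and oop_Relocation::spec(_index).) Note that address start is now needed unconditionally, not just under ASSERT, because the nop-padding loop measures against it; that is presumably why the #ifdef ASSERT guards around it are left commented out rather than restored.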
@@ -366,21 +360,18 @@
 
   address end_of_patch = __ pc();
   int bytes_to_skip = 0;
-  if (_id == load_klass_id) {
+  if (_id == load_mirror_id) {
     int offset = __ offset();
     if (CommentedAssembly) {
       __ block_comment(" being_initialized check");
     }
     assert(_obj != NOREG, "must be a valid register");
-#ifndef OPT_THREAD
-    //FIXME, T8 need be saved ?
-    Register thread = T8;
-    __ get_thread(thread);
-#else
-    Register thread = TREG;
-#endif
-    __ ld(AT, _obj, in_bytes(InstanceKlass::init_thread_offset()));
-    __ bne(thread, AT, call_patch);
+    Register tmp = AT;
+    Register tmp2 = T9;
+    __ ld_ptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
+    __ get_thread(tmp);
+    __ ld_ptr(tmp2, Address(tmp2, InstanceKlass::init_thread_offset()));
+    __ bne(tmp, tmp2, call_patch);
     __ delayed()->nop();
 
     // access_field patches may execute the patched code before it's
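
The being-initialized check moves from load_klass_id to load_mirror_id, and the addressing changes to match: _obj now holds the java.lang.Class mirror rather than the Klass*, so the Klass* is first loaded out of the mirror before its init_thread field is compared against the current thread. Only the initializing thread may run the patched code early; any other thread is sent to call_patch:

    Register tmp  = AT;
    Register tmp2 = T9;
    __ ld_ptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));  // mirror -> Klass*
    __ get_thread(tmp);                                                        // current thread
    __ ld_ptr(tmp2, Address(tmp2, InstanceKlass::init_thread_offset()));       // initializing thread
    __ bne(tmp, tmp2, call_patch);  // not the initializer: take the patch path
    __ delayed()->nop();

This also retires the OPT_THREAD/T8 workaround and its "T8 need be saved?" FIXME.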
@@ -402,23 +393,8 @@
   bytes_to_skip += sizeof_patch_record;
 
   // emit the offsets needed to find the code to patch
-  int being_initialized_entry_offset = __ pc() - being_initialized_entry + patch_info_size;
+  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
 
-#ifdef _LP64
-  /* Jin: In MIPS64, byte_skip is much larger than that in X86. It can not be contained in a byte:
-   *   - bytes_to_skip = 0x190;
-   *   - _bytes_to_copy = 0x20;
-   *   - being_initialized_entry_offset = 0x1b0;
-   *
-   *   To minimize the modification of share codes, the values are decreased 4 times when generated,
-   *   thus can be packed into a long type.
-   *
-   *   See [share/vm/c1/c1_Runtime1.cpp 918] Runtime1::patch_code()
-   */
-  being_initialized_entry_offset /= 4;
-  _bytes_to_copy /= 4;
-  bytes_to_skip /= 4;
-#endif
   // patch_info_pc offset | size of b instruction(8)| patched code size
   assert((char)being_initialized_entry_offset==being_initialized_entry_offset, "just check");
   assert((char)bytes_to_skip==bytes_to_skip, "just check");
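
The LP64 divide-by-4 packing trick is gone: offsets are no longer scaled down to fit a byte, so the matching unscaling in the shared Runtime1::patch_code (referenced by the deleted comment) is no longer needed, and the (char) asserts now genuinely enforce that each field fits in 8 bits. The three fields are packed into the single 32-bit patch record emitted just below; a minimal sketch of the layout (the local name record is illustrative only; the code passes the expression straight to emit_int32):

    // bits  0..7   left zero here (the "size of b instruction" slot per the comment)
    // bits  8..15  being_initialized_entry_offset
    // bits 16..23  bytes_to_skip
    // bits 24..31  _bytes_to_copy
    int record = (being_initialized_entry_offset << 8)
               | (bytes_to_skip << 16)
               | (_bytes_to_copy << 24);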
@@ -426,18 +402,17 @@
   __ emit_int32(being_initialized_entry_offset<<8 | (bytes_to_skip<<16) | (_bytes_to_copy<<24) );
 
   address patch_info_pc = __ pc();
-#ifdef _LP64
-  assert(patch_info_pc - end_of_patch == bytes_to_skip * 4, "incorrect patch info");
-#else
   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
-#endif
 
   address entry = __ pc();
   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
   address target = NULL;
+  relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
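
Each patching-stub kind now records which relocation type its placeholder instruction carries: metadata_type for klass loads, oop_type for mirror and appendix loads, and none for plain field access. The new reloc_type is consumed in the final hunk below, where the relocation at _pc_start is flipped to none, presumably because that instruction has just been overwritten with the jump to the stub and no longer embeds a live constant:

    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);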
@@ -446,12 +421,7 @@
   if (CommentedAssembly) {
     __ block_comment("patch entry point");
   }
-#ifndef _LP64
-  __ lui(T9, Assembler::split_high((int)target));
-  __ addiu(T9, T9, Assembler::split_low((int)target));
-#else
   __ li48(T9, (long)target);
-#endif
   __ jalr(T9);
   __ delayed()->nop();
   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
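
With the 32-bit lui/addiu arm removed, the call to the patching entry is always materialized through li48, which loads a 48-bit immediate in a fixed-length instruction sequence. The fixed size matters here: _patch_info_offset is asserted against the distance from patch_info_pc immediately afterwards, so the target load may not change size between emissions:

    __ li48(T9, (long)target);   // fixed-size load of the Runtime1 patching entry
    __ jalr(T9);
    __ delayed()->nop();
    assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");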
@@ -464,15 +434,23 @@
   for (int j = __ offset(); j < jmp_off + NativeCall::instruction_size; j += 4 ) {
     __ nop();
   }
-  if (_id == load_klass_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
-    relocInfo::change_reloc_info_for_address(&iter, pc, relocInfo::oop_type, relocInfo::none);
+    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
   }
 }
 
 
+void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
+  ce->add_call_info_here(_info);
+  DEBUG_ONLY(__ should_not_reach_here());
+}
+
+
 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   __ bind(_entry);
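
The last hunk threads reloc_type into change_reloc_info_for_address for all three load kinds, and adds a DeoptimizeStub::emit_code for this port: bind the entry, call the deoptimize blob, record the oop map, and assert that control never comes back:

    void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
      __ bind(_entry);
      __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
      ce->add_call_info_here(_info);           // oop map for the runtime call
      DEBUG_ONLY(__ should_not_reach_here());  // deoptimization does not return
    }

One point a reviewer might query: unlike every other runtime call in this file, no __ delayed()->nop() follows this call; whether the delay slot is handled inside call() here is not visible from the hunk.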
