#9815 Backport of #9802 Code cleanup

author     huangjia
date       Thu, 05 Sep 2019 13:10:50 +0800
changeset  9645:ac996ba07f9d
parent     9644:3089aa0aa0ee
child      9646:5734aa7c320f

#9815 Backport of #9802 Code cleanup
Reviewed-by: aoqi

src/cpu/mips/vm/assembler_mips.cpp
src/cpu/mips/vm/bytes_mips.hpp
src/cpu/mips/vm/c1_LIRAssembler_mips.cpp
src/cpu/mips/vm/c1_LIRGenerator_mips.cpp
src/cpu/mips/vm/c1_MacroAssembler_mips.cpp
src/cpu/mips/vm/c1_Runtime1_mips.cpp
src/cpu/mips/vm/compiledIC_mips.cpp
src/cpu/mips/vm/disassembler_mips.hpp
src/cpu/mips/vm/frame_mips.cpp
src/cpu/mips/vm/interp_masm_mips_64.cpp
src/cpu/mips/vm/mips_64.ad
src/cpu/mips/vm/nativeInst_mips.cpp
src/cpu/mips/vm/runtime_mips_64.cpp
src/cpu/mips/vm/sharedRuntime_mips_64.cpp
src/cpu/mips/vm/stubGenerator_mips_64.cpp
src/cpu/mips/vm/templateInterpreter_mips_64.cpp
src/cpu/mips/vm/templateTable_mips_64.cpp
src/cpu/mips/vm/vmreg_mips.hpp
src/os_cpu/linux_mips/vm/prefetch_linux_mips.inline.hpp
src/os_cpu/linux_mips/vm/thread_linux_mips.cpp
src/os_cpu/linux_mips/vm/thread_linux_mips.hpp
     1.1 --- a/src/cpu/mips/vm/assembler_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     1.2 +++ b/src/cpu/mips/vm/assembler_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     1.3 @@ -63,7 +63,6 @@
     1.4  
     1.5  // Implementation of Address
     1.6  
     1.7 -
     1.8  Address Address::make_array(ArrayAddress adr) {
     1.9    AddressLiteral base = adr.base();
    1.10    Address index = adr.index();
     2.1 --- a/src/cpu/mips/vm/bytes_mips.hpp	Thu Sep 05 13:07:31 2019 +0800
     2.2 +++ b/src/cpu/mips/vm/bytes_mips.hpp	Thu Sep 05 13:10:50 2019 +0800
     2.3 @@ -161,7 +161,7 @@
     2.4  
     2.5    // Efficient reading and writing of unaligned unsigned data in Java
     2.6    // byte ordering (i.e. big-endian ordering). Byte-order reversal is
     2.7 -  // needed since x86 CPUs use little-endian format.
     2.8 +  // needed since MIPS64EL CPUs use little-endian format.
     2.9    static inline u2   get_Java_u2(address p)           { return swap_u2(get_native_u2(p)); }
    2.10    static inline u4   get_Java_u4(address p)           { return swap_u4(get_native_u4(p)); }
    2.11    static inline u8   get_Java_u8(address p)           { return swap_u8(get_native_u8(p)); }
     3.1 --- a/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     3.2 +++ b/src/cpu/mips/vm/c1_LIRAssembler_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     3.3 @@ -1300,7 +1300,7 @@
     3.4  #endif
     3.5      __ ld_ptr(dest->as_register_lo(), src_addr_LO);
     3.6      NOT_LP64(__ ld_ptr(dest->as_register_hi(), src_addr_HI));
     3.7 -  }else if (dest->is_single_fpu()) {
     3.8 +  } else if (dest->is_single_fpu()) {
     3.9      Address addr = frame_map()->address_for_slot(src->single_stack_ix());
    3.10      __ lwc1(dest->as_float_reg(), addr);
    3.11    } else if (dest->is_double_fpu())  {
    3.12 @@ -3039,7 +3039,7 @@
    3.13            __ delayed()->nop();
    3.14  #else
    3.15            if (is_zero) {
    3.16 -            if(op->label()==NULL)  //by liaob2
    3.17 +            if(op->label()==NULL)
    3.18                __ b(*op->label());
    3.19              else
    3.20                __ b_far(*op->label());
    3.21 @@ -5832,7 +5832,7 @@
    3.22    } else if (left->is_double_fpu()) {
    3.23      //for mips , does it required ?
    3.24      __ neg_d(dest->as_double_reg(), left->as_double_reg());
    3.25 -  }else {
    3.26 +  } else {
    3.27      ShouldNotReachHere();
    3.28    }
    3.29  }
     4.1 --- a/src/cpu/mips/vm/c1_LIRGenerator_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     4.2 +++ b/src/cpu/mips/vm/c1_LIRGenerator_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     4.3 @@ -820,7 +820,7 @@
     4.4    set_no_result(x);
     4.5  }
     4.6  
     4.7 -void LIRGenerator::do_update_CRC32(Intrinsic* x) {    // Fu: 20130832
     4.8 +void LIRGenerator::do_update_CRC32(Intrinsic* x) {
     4.9    Unimplemented();
    4.10  }
    4.11  
     5.1 --- a/src/cpu/mips/vm/c1_MacroAssembler_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     5.2 +++ b/src/cpu/mips/vm/c1_MacroAssembler_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     5.3 @@ -218,7 +218,6 @@
     5.4        "header size is not a multiple of BytesPerWord");
     5.5    Register index = len_in_bytes;
     5.6  
     5.7 -//tty->print_cr("C1_MacroAssembler::initialize_body LEN=0x%x, hdr_size=0x%x", len_in_bytes, hdr_size_in_bytes);
     5.8    assert(is_simm16(hdr_size_in_bytes), "change this code");
     5.9    addi(index, index, - hdr_size_in_bytes);
    5.10    beq(index, R0, done);
    5.11 @@ -290,14 +289,13 @@
    5.12    //Merged from b25
    5.13    const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
    5.14  
    5.15 -  //  initialize_header(obj, klass, NOREG);
    5.16 -  initialize_header(obj, klass, NOREG,t1,t2);
    5.17 +  initialize_header(obj, klass, NOREG, t1, t2);
    5.18  
    5.19    // clear rest of allocated space
    5.20    const Register index = t2;
    5.21    //FIXME, x86 changed the value in jdk6
    5.22    // const int threshold = hdr_size_in_bytes + 36;
    5.23 -  // // approximate break even point for code size (see comments below)
    5.24 +  // approximate break even point for code size (see comments below)
    5.25    const int threshold = 6 * BytesPerWord;
    5.26    // approximate break even point for code size (see comments below)
    5.27    if (var_size_in_bytes != NOREG) {
    5.28 @@ -410,7 +408,6 @@
    5.29    // explicit NULL check not needed since load from [klass_offset] causes a trap
    5.30    // check against inline cache
    5.31    assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
    5.32 -  ///cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
    5.33    // if icache check fails, then jump to runtime routine
    5.34    // Note: RECEIVER must still contain the receiver!
    5.35    Label L;
    5.36 @@ -423,7 +420,6 @@
    5.37  #endif
    5.38    beq(T9, iCache, L);
    5.39    delayed()->nop();
    5.40 -  //  jmp(Runtime1::entry_for(Runtime1::handle_ic_miss_id), relocInfo::runtime_call_type);
    5.41    jmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
    5.42    delayed()->nop();
    5.43    bind(L);
    5.44 @@ -475,9 +471,7 @@
    5.45  void C1_MacroAssembler::verify_not_null_oop(Register r) {
    5.46    if (!VerifyOops) return;
    5.47    Label not_null;
    5.48 -  // testl(r, r);
    5.49 -  //jcc(Assembler::notZero, not_null);
    5.50 -  bne(r,R0,not_null);
    5.51 +  bne(r, R0, not_null);
    5.52    delayed()->nop();
    5.53    stop("non-null oop required");
    5.54    bind(not_null);
     6.1 --- a/src/cpu/mips/vm/c1_Runtime1_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     6.2 +++ b/src/cpu/mips/vm/c1_Runtime1_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     6.3 @@ -730,10 +730,6 @@
     6.4    return oop_maps;
     6.5  }
     6.6  
     6.7 -
     6.8 -
     6.9 -
    6.10 -
    6.11  void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
    6.12    // incoming parameters
    6.13    const Register exception_oop = V0;
     7.1 --- a/src/cpu/mips/vm/compiledIC_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     7.2 +++ b/src/cpu/mips/vm/compiledIC_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     7.3 @@ -65,7 +65,7 @@
     7.4  
     7.5    __ relocate(static_stub_Relocation::spec(mark), 0);
     7.6  
     7.7 -  // 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC
     7.8 +  // Rmethod contains methodOop, it should be relocated for GC
     7.9  
    7.10    // static stub relocation also tags the methodOop in the code-stream.
    7.11    __ patchable_set48(S3, (long)0);
     8.1 --- a/src/cpu/mips/vm/disassembler_mips.hpp	Thu Sep 05 13:07:31 2019 +0800
     8.2 +++ b/src/cpu/mips/vm/disassembler_mips.hpp	Thu Sep 05 13:10:50 2019 +0800
     8.3 @@ -1,6 +1,6 @@
     8.4  /*
     8.5   * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     8.6 - * Copyright (c) 2015, 2016, 2017 Loongson Technology. All rights reserved.
     8.7 + * Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
     8.8   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.9   *
    8.10   * This code is free software; you can redistribute it and/or modify it
     9.1 --- a/src/cpu/mips/vm/frame_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
     9.2 +++ b/src/cpu/mips/vm/frame_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
     9.3 @@ -229,7 +229,6 @@
     9.4    // we don't have to always save FP on entry and exit to c2 compiled
     9.5    // code, on entry will be enough.
     9.6  #ifdef COMPILER2
     9.7 -//FIXME aoqi
     9.8    if (map->update_map()) {
     9.9      map->set_location(FP->as_VMReg(), (address) addr_at(link_offset));
    9.10    }
    10.1 --- a/src/cpu/mips/vm/interp_masm_mips_64.cpp	Thu Sep 05 13:07:31 2019 +0800
    10.2 +++ b/src/cpu/mips/vm/interp_masm_mips_64.cpp	Thu Sep 05 13:10:50 2019 +0800
    10.3 @@ -48,8 +48,8 @@
    10.4  #endif // CC_INTERP
    10.5  
    10.6  void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(Register reg, Register tmp, int offset) {
    10.7 -  /* 2016/5/6 Jin: the runtime address of BCP may be unaligned.
    10.8 -   *   Refer to the SPARC implementation. */
    10.9 +  // The runtime address of BCP may be unaligned.
   10.10 +  // Refer to the SPARC implementation.
   10.11    lbu(reg, BCP, offset+1);
   10.12    lbu(tmp, BCP, offset);
   10.13  #ifdef _LP64
   10.14 @@ -387,7 +387,7 @@
   10.15    assert( Rsub_klass != T1, "T1 holds 2ndary super array length" );
   10.16    assert( Rsub_klass != T0, "T0 holds 2ndary super array scan ptr" );
   10.17    // Profile the not-null value's klass.
   10.18 -  // [20130904] Fu: Here T9 and T1 are used as temporary registers.
   10.19 +  // Here T9 and T1 are used as temporary registers.
   10.20    profile_typecheck(T9, Rsub_klass, T1); // blows T9, reloads T1
   10.21  
   10.22    // Do the check.
   10.23 @@ -432,7 +432,7 @@
   10.24  }
   10.25  
   10.26  void InterpreterMacroAssembler::push_i(Register r) {
   10.27 -  // 20170925: For compatibility reason, don't change to sw.
   10.28 +  // For compatibility reason, don't change to sw.
   10.29    sd(r, SP, - Interpreter::stackElementSize);
   10.30    daddiu(SP, SP, - Interpreter::stackElementSize);
   10.31  }
   10.32 @@ -567,7 +567,7 @@
   10.33       int table_size = (long)Interpreter::dispatch_table(itos) - (long)Interpreter::dispatch_table(stos);
   10.34       int table_offset = ((int)state - (int)itos) * table_size;
   10.35  
   10.36 -     // 2013/12/17 Fu: GP points to the starting address of Interpreter::dispatch_table(itos).
   10.37 +     // GP points to the starting address of Interpreter::dispatch_table(itos).
   10.38       // See StubGenerator::generate_call_stub(address& return_address) for the initialization of GP.
   10.39       if(table_offset != 0) {
   10.40          daddiu(T3, GP, table_offset);
   10.41 @@ -1178,8 +1178,6 @@
   10.42                                                       Register reg,
   10.43                                                       int offset_of_disp) {
   10.44    assert(ProfileInterpreter, "must be profiling interpreter");
   10.45 -//  Attention: Until now (20121217), we do not support this kind of addressing on Loongson.
   10.46 -//  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
   10.47    daddu(AT, reg, mdp_in);
   10.48    assert(Assembler::is_simm16(offset_of_disp), "offset is not an simm16");
   10.49    ld(AT, AT, offset_of_disp);
    11.1 --- a/src/cpu/mips/vm/mips_64.ad	Thu Sep 05 13:07:31 2019 +0800
    11.2 +++ b/src/cpu/mips/vm/mips_64.ad	Thu Sep 05 13:10:50 2019 +0800
    11.3 @@ -6480,7 +6480,7 @@
    11.4  
    11.5        __ relocate(rspec);
    11.6        __ patchable_set48(dst, (long)value);
    11.7 -    }else if($src->constant_reloc() == relocInfo::oop_type){
    11.8 +    } else if($src->constant_reloc() == relocInfo::oop_type){
    11.9        int oop_index = __ oop_recorder()->find_index((jobject)value);
   11.10        RelocationHolder rspec = oop_Relocation::spec(oop_index);
   11.11  
   11.12 @@ -6872,7 +6872,7 @@
   11.13  %}
   11.14  
   11.15  
   11.16 -// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
   11.17 +// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags!
   11.18  instruct jmpCon_flags_long(cmpOp cop, FlagsReg cr, label labl) %{
   11.19    match(If cop cr);
   11.20    effect(USE labl);
   11.21 @@ -7793,7 +7793,7 @@
   11.22  %}
   11.23  
   11.24  
   11.25 -// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
   11.26 +// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags!
   11.27  instruct jmpCon_flags_short(cmpOp cop, FlagsReg cr, label labl) %{
   11.28    match(If cop cr);
   11.29    effect(USE labl);
   11.30 @@ -11239,7 +11239,6 @@
   11.31  
   11.32      //if (UseLEXT1) {
   11.33      if (0) {
   11.34 -      // 2016.08.10
   11.35        // Experiments show that gsmod is slower that div+mfhi.
   11.36        // So I just disable it here.
   11.37        __ gsmod(dst, src1, src2);
    12.1 --- a/src/cpu/mips/vm/nativeInst_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
    12.2 +++ b/src/cpu/mips/vm/nativeInst_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
    12.3 @@ -426,7 +426,7 @@
    12.4  #endif
    12.5  }
    12.6  
    12.7 -// 2013/6/14 Jin: manual implementation of GSSQ
    12.8 +// manual implementation of GSSQ
    12.9  //
   12.10  //  00000001200009c0 <atomic_store128>:
   12.11  //     1200009c0:   0085202d        daddu   a0, a0, a1
   12.12 @@ -1405,7 +1405,7 @@
   12.13  #ifndef _LP64
   12.14    return (address)Assembler::merge(int_at(4)&0xffff, long_at(instruction_offset)&0xffff);
   12.15  #else
   12.16 -  // 2012/4/19 Jin: Assembler::merge() is not correct in MIPS_64!
   12.17 +  // Assembler::merge() is not correct in MIPS_64!
   12.18    //
   12.19    //   Example:
   12.20    //     hi16 = 0xfffd,
   12.21 @@ -1617,7 +1617,7 @@
   12.22    assert(NativeGeneralJump::instruction_size == NativeCall::instruction_size,
   12.23            "note::Runtime1::patch_code uses NativeCall::instruction_size");
   12.24  
   12.25 -  // 2013/6/13 Jin: ensure 100% atomicity
   12.26 +  // ensure 100% atomicity
   12.27    guarantee(!os::is_MP() || (((long)instr_addr % BytesPerWord) == 0), "destination must be aligned for SD");
   12.28  
   12.29    int *p = (int *)instr_addr;
   12.30 @@ -1630,7 +1630,7 @@
   12.31  
   12.32  // Must ensure atomicity
   12.33  void NativeGeneralJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   12.34 -  // 2013/11/5 Jin: ensure 100% atomicity.
   12.35 +  // ensure 100% atomicity.
   12.36    // The destination is fixed and can be cached in JavaThread.
   12.37    //
   12.38    // Destination must be aligned for GSSQ.
    13.1 --- a/src/cpu/mips/vm/runtime_mips_64.cpp	Thu Sep 05 13:07:31 2019 +0800
    13.2 +++ b/src/cpu/mips/vm/runtime_mips_64.cpp	Thu Sep 05 13:10:50 2019 +0800
    13.3 @@ -175,8 +175,6 @@
    13.4    //    Eclipse + Plugin + Debug As
    13.5    //  This is the only condition where C2 calls SharedRuntime::generate_deopt_blob()
    13.6    //
    13.7 -  //  Ref:  http://10.2.5.21:8000/projects/java/wiki/Jgj-log-2014-5-12_
    13.8 -  //
    13.9    __ move(V0, A0);
   13.10    __ move(V1, A1);
   13.11  
    14.1 --- a/src/cpu/mips/vm/sharedRuntime_mips_64.cpp	Thu Sep 05 13:07:31 2019 +0800
    14.2 +++ b/src/cpu/mips/vm/sharedRuntime_mips_64.cpp	Thu Sep 05 13:10:50 2019 +0800
    14.3 @@ -521,7 +521,7 @@
    14.4    Label L;
    14.5    __ verify_oop(Rmethod);
    14.6    __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset()));
    14.7 -  __ beq(AT,R0,L);
    14.8 +  __ beq(AT, R0, L);
    14.9    __ delayed()->nop();
   14.10    // Schedule the branch target address early.
   14.11    // Call into the VM to patch the caller, then jump to compiled callee
   14.12 @@ -675,17 +675,17 @@
   14.13  
   14.14          // Ref to is_Register condition
   14.15          if(sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
   14.16 -          __ st_ptr(AT,SP,st_off - 8);
   14.17 +          __ st_ptr(AT, SP, st_off - 8);
   14.18        }
   14.19      } else if (r_1->is_Register()) {
   14.20        Register r = r_1->as_Register();
   14.21        if (!r_2->is_valid()) {
   14.22 -          __ sd(r,SP, st_off);
   14.23 +          __ sd(r, SP, st_off);
   14.24        } else {
   14.25          //FIXME, mips will not enter here
   14.26          // long/double in gpr
   14.27 -        __ sd(r,SP, st_off);
   14.28 -        // Jin: In [java/util/zip/ZipFile.java]
   14.29 +        __ sd(r, SP, st_off);
   14.30 +        // In [java/util/zip/ZipFile.java]
   14.31          //
   14.32          //    private static native long open(String name, int mode, long lastModified);
   14.33          //    private static native int getTotal(long jzfile);
   14.34 @@ -732,23 +732,23 @@
   14.35          // So I stored another 8 bytes in the T_VOID slot. It then can be accessed from generate_native_entry().
   14.36          //
   14.37          if (sig_bt[i] == T_LONG)
   14.38 -          __ sd(r,SP, st_off - 8);
   14.39 +          __ sd(r, SP, st_off - 8);
   14.40        }
   14.41      } else if (r_1->is_FloatRegister()) {
   14.42        assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register");
   14.43  
   14.44        FloatRegister fr = r_1->as_FloatRegister();
   14.45        if (sig_bt[i] == T_FLOAT)
   14.46 -        __ swc1(fr,SP, st_off);
   14.47 +        __ swc1(fr, SP, st_off);
   14.48        else {
   14.49 -        __ sdc1(fr,SP, st_off);
   14.50 -        __ sdc1(fr,SP, st_off - 8);  // T_DOUBLE needs two slots
   14.51 +        __ sdc1(fr, SP, st_off);
   14.52 +        __ sdc1(fr, SP, st_off - 8);  // T_DOUBLE needs two slots
   14.53        }
   14.54      }
   14.55    }
   14.56  
   14.57    // Schedule the branch target address early.
   14.58 -  __ ld_ptr(AT, Rmethod,in_bytes(Method::interpreter_entry_offset()) );
   14.59 +  __ ld_ptr(AT, Rmethod, in_bytes(Method::interpreter_entry_offset()) );
   14.60    // And repush original return address
   14.61    __ move(RA, V0);
   14.62    __ jr (AT);
   14.63 @@ -850,7 +850,6 @@
   14.64          // ld_off is MSW so get LSW
   14.65          // st_off is LSW (i.e. reg.first())
   14.66  
   14.67 -        // 2012/4/9 Jin
   14.68          // [./org/eclipse/swt/graphics/GC.java]
   14.69          // void drawImageXRender(Image srcImage, int srcX, int srcY, int srcWidth, int srcHeight,
   14.70          //  int destX, int destY, int destWidth, int destHeight,
   14.71 @@ -873,7 +872,6 @@
   14.72          assert(r_2->as_Register() == r_1->as_Register(), "");
   14.73          __ ld(r, saved_sp, ld_off);
   14.74  
   14.75 -        // Jin:
   14.76          //
   14.77          // For T_LONG type, the real layout is as below:
   14.78          //
   14.79 @@ -990,7 +988,7 @@
   14.80    agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
   14.81  
   14.82    __ flush();
   14.83 -  return  AdapterHandlerLibrary::new_entry(fingerprint,i2c_entry, c2i_entry, c2i_unverified_entry);
   14.84 +  return  AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
   14.85  }
   14.86  
   14.87  int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
   14.88 @@ -1189,7 +1187,7 @@
   14.89      if (dst.first()->is_stack()) {
   14.90        // stack to stack
   14.91        __ lw(AT, FP, reg2offset_in(src.first()));
   14.92 -      __ sd(AT,SP, reg2offset_out(dst.first()));
   14.93 +      __ sd(AT, SP, reg2offset_out(dst.first()));
   14.94      } else {
   14.95        // stack to reg
   14.96        __ lw(dst.first()->as_Register(),  FP, reg2offset_in(src.first()));
   14.97 @@ -1223,12 +1221,12 @@
   14.98      Label nil;
   14.99      __ xorr(rHandle, rHandle, rHandle);
  14.100      __ ld(AT, FP, reg2offset_in(src.first()));
  14.101 -    __ beq(AT,R0, nil);
  14.102 +    __ beq(AT, R0, nil);
  14.103      __ delayed()->nop();
  14.104      __ lea(rHandle, Address(FP, reg2offset_in(src.first())));
  14.105      __ bind(nil);
  14.106      if(dst.first()->is_stack())__ sd( rHandle, SP, reg2offset_out(dst.first()));
  14.107 -    else                       __ move( (dst.first())->as_Register(),rHandle);
  14.108 +    else                       __ move( (dst.first())->as_Register(), rHandle);
  14.109      //if dst is register
  14.110      //FIXME, do mips need out preserve stack slots?
  14.111      int offset_in_older_frame = src.first()->reg2stack()
  14.112 @@ -1272,17 +1270,17 @@
  14.113  
  14.114    if (src.first()->is_stack()) {
  14.115      if (dst.first()->is_stack()) {
  14.116 -      __ lwc1(F12 , FP, reg2offset_in(src.first()));
  14.117 -      __ swc1(F12 ,SP, reg2offset_out(dst.first()));
  14.118 +      __ lwc1(F12, FP, reg2offset_in(src.first()));
  14.119 +      __ swc1(F12, SP, reg2offset_out(dst.first()));
  14.120      }
  14.121      else
  14.122 -      __ lwc1( dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first()));
  14.123 +      __ lwc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first()));
  14.124    } else {
  14.125      // reg to stack
  14.126      if(dst.first()->is_stack())
  14.127 -      __ swc1( src.first()->as_FloatRegister(),SP, reg2offset_out(dst.first()));
  14.128 +      __ swc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
  14.129      else
  14.130 -      __ mov_s( dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  14.131 +      __ mov_s(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  14.132    }
  14.133  }
  14.134  
  14.135 @@ -1305,7 +1303,7 @@
  14.136    } else {
  14.137      if( dst.first()->is_stack()){
  14.138        __ sd( (src.first())->as_Register(), SP, reg2offset_out(dst.first()));
  14.139 -    } else{
  14.140 +    } else {
  14.141        __ move( (dst.first())->as_Register() , (src.first())->as_Register());
  14.142      }
  14.143    }
  14.144 @@ -1329,7 +1327,7 @@
  14.145        __ ldc1(F12, FP, reg2offset_in(src.first()));
  14.146  
  14.147        __ sdc1(F12, SP, reg2offset_out(dst.first()));
  14.148 -    } else{
  14.149 +    } else {
  14.150        __ ldc1( (dst.first())->as_FloatRegister(), FP, reg2offset_in(src.first()));
  14.151      }
  14.152  
  14.153 @@ -1337,7 +1335,7 @@
  14.154      // reg to stack
  14.155      // No worries about stack alignment
  14.156      if( dst.first()->is_stack()){
  14.157 -      __ sdc1( src.first()->as_FloatRegister(),SP, reg2offset_out(dst.first()));
  14.158 +      __ sdc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
  14.159      }
  14.160      else
  14.161        __ mov_d( dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  14.162 @@ -1714,7 +1712,7 @@
  14.163      if (UseBiasedLocking) {
  14.164        // Check if biased and fall through to runtime if so
  14.165        __ andi (AT, result, markOopDesc::biased_lock_bit_in_place);
  14.166 -      __ bne(AT,R0, slowCase);
  14.167 +      __ bne(AT, R0, slowCase);
  14.168        __ delayed()->nop();
  14.169      }
  14.170      // get hash
  14.171 @@ -2100,7 +2098,7 @@
  14.172    // Unpack native results.
  14.173    switch (ret_type) {
  14.174    case T_BOOLEAN: __ c2bool(V0);            break;
  14.175 -  case T_CHAR   : __ andi(V0,V0, 0xFFFF);      break;
  14.176 +  case T_CHAR   : __ andi(V0, V0, 0xFFFF);      break;
  14.177    case T_BYTE   : __ sign_extend_byte (V0); break;
  14.178    case T_SHORT  : __ sign_extend_short(V0); break;
  14.179    case T_INT    : // nothing to do         break;
  14.180 @@ -2136,7 +2134,7 @@
  14.181      __ lw(A0, AT, 0);
  14.182      __ addi(AT, A0, -SafepointSynchronize::_not_synchronized);
  14.183      Label L;
  14.184 -    __ bne(AT,R0, L);
  14.185 +    __ bne(AT, R0, L);
  14.186      __ delayed()->nop();
  14.187      __ lw(AT, thread, in_bytes(JavaThread::suspend_flags_offset()));
  14.188      __ beq(AT, R0, Continue);
  14.189 @@ -2162,7 +2160,7 @@
  14.190      }
  14.191      __ move(SP, S2);     // use S2 as a sender SP holder
  14.192      __ pop(S2);
  14.193 -    __ addi(SP,SP, wordSize);
  14.194 +    __ addi(SP, SP, wordSize);
  14.195      //add for compressedoops
  14.196      __ reinit_heapbase();
  14.197      // Restore any method result value
  14.198 @@ -2361,7 +2359,7 @@
  14.199      __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
  14.200          relocInfo::runtime_call_type);
  14.201      __ delayed()->nop();
  14.202 -    __ addi(SP,SP, 2*wordSize);
  14.203 +    __ addi(SP, SP, 2*wordSize);
  14.204                  __ move(SP, S2);
  14.205      //add for compressedoops
  14.206      __ reinit_heapbase();
  14.207 @@ -3496,7 +3494,7 @@
  14.208      long save_pc = (long)__ pc() + 52;
  14.209      __ patchable_set48(AT, (long)save_pc);
  14.210    }
  14.211 -  __ sd(AT, thread,in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
  14.212 +  __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
  14.213  
  14.214    // Call C code.  Need thread but NOT official VM entry
  14.215    // crud.  We cannot block on this call, no GC can happen.  Call should
  14.216 @@ -3505,8 +3503,7 @@
  14.217    __ move(A1, Deoptimization::Unpack_uncommon_trap);
  14.218    __ patchable_call((address)Deoptimization::unpack_frames);
  14.219    // Set an oopmap for the call site
  14.220 -  //oop_maps->add_gc_map( __ offset(), true, new OopMap( framesize, 0 ) );
  14.221 -  oop_maps->add_gc_map( __ offset(),  new OopMap( framesize, 0 ) );//Fu
  14.222 +  oop_maps->add_gc_map( __ offset(),  new OopMap( framesize, 0 ) );
  14.223  
  14.224    __ reset_last_Java_frame(true);
  14.225  
    15.1 --- a/src/cpu/mips/vm/stubGenerator_mips_64.cpp	Thu Sep 05 13:07:31 2019 +0800
    15.2 +++ b/src/cpu/mips/vm/stubGenerator_mips_64.cpp	Thu Sep 05 13:10:50 2019 +0800
    15.3 @@ -133,7 +133,7 @@
    15.4    // -1 [ return address       ]
    15.5    //  0 [                      ] <--- old sp
    15.6    //
    15.7 -  // 2014/01/16 Fu: Find a right place in the call_stub for GP.
    15.8 +  // Find a right place in the call_stub for GP.
    15.9    // GP will point to the starting point of Interpreter::dispatch_table(itos).
   15.10    // It should be saved/restored before/after Java calls.
   15.11    //
   15.12 @@ -517,7 +517,7 @@
   15.13      __ bgtz(AT, no_overlap_target);
   15.14      __ delayed()->nop();
   15.15  
   15.16 -    // 2016/05/10 aoqi: If A0 = 0xf... and A1 = 0x0..., than goto no_overlap_target
   15.17 +    // If A0 = 0xf... and A1 = 0x0..., than goto no_overlap_target
   15.18      Label L;
   15.19      __ bgez(A0, L);
   15.20      __ delayed()->nop();
   15.21 @@ -931,7 +931,7 @@
   15.22      __ dadd(end_from, from, end_count);
   15.23      __ dadd(end_to, to, end_count);
   15.24  
   15.25 -    // 2016/05/08 aoqi: If end_from and end_to has differante alignment, unaligned copy is performed.
   15.26 +    // If end_from and end_to has differante alignment, unaligned copy is performed.
   15.27      __ andi(AT, end_from, 3);
   15.28      __ andi(T8, end_to, 3);
   15.29      __ bne(AT, T8, l_copy_byte);
   15.30 @@ -1543,7 +1543,7 @@
   15.31        nooverlap_target = aligned ?
   15.32                StubRoutines::arrayof_oop_disjoint_arraycopy() :
   15.33                StubRoutines::oop_disjoint_arraycopy();
   15.34 -    }else {
   15.35 +    } else {
   15.36        nooverlap_target = aligned ?
   15.37                StubRoutines::arrayof_jint_disjoint_arraycopy() :
   15.38                StubRoutines::jint_disjoint_arraycopy();
   15.39 @@ -1701,7 +1701,7 @@
   15.40        nooverlap_target = aligned ?
   15.41                StubRoutines::arrayof_oop_disjoint_arraycopy() :
   15.42                StubRoutines::oop_disjoint_arraycopy();
   15.43 -    }else {
   15.44 +    } else {
   15.45        nooverlap_target = aligned ?
   15.46                StubRoutines::arrayof_jlong_disjoint_arraycopy() :
   15.47                StubRoutines::jlong_disjoint_arraycopy();
    16.1 --- a/src/cpu/mips/vm/templateInterpreter_mips_64.cpp	Thu Sep 05 13:07:31 2019 +0800
    16.2 +++ b/src/cpu/mips/vm/templateInterpreter_mips_64.cpp	Thu Sep 05 13:10:50 2019 +0800
    16.3 @@ -181,7 +181,6 @@
    16.4    __ restore_bcp();
    16.5    __ restore_locals();
    16.6  
    16.7 -  // 2014/11/24 Fu
    16.8    // mdp: T8
    16.9    // ret: FSR
   16.10    // tmp: T9
   16.11 @@ -404,7 +403,6 @@
   16.12    // tos - the last parameter to Java method
   16.13    // SP - sender_sp
   16.14  
   16.15 -  //const Address size_of_parameters(Rmethod,in_bytes( Method::size_of_parameters_offset()));
   16.16  
   16.17    // the bcp is valid if and only if it's not null
   16.18    __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
   16.19 @@ -869,14 +867,14 @@
   16.20    address entry_point = __ pc();
   16.21  
   16.22  #ifndef CORE
   16.23 -  const Address invocation_counter(Rmethod,in_bytes(MethodCounters::invocation_counter_offset() +   // Fu: 20130814
   16.24 +  const Address invocation_counter(Rmethod,in_bytes(MethodCounters::invocation_counter_offset() +
   16.25    InvocationCounter::counter_offset()));
   16.26  #endif
   16.27  
   16.28    // get parameter size (always needed)
   16.29    // the size in the java stack
   16.30    __ ld(V0, Rmethod, in_bytes(Method::const_offset()));
   16.31 -  __ lhu(V0, V0, in_bytes(ConstMethod::size_of_parameters_offset()));   // Fu: 20130814
   16.32 +  __ lhu(V0, V0, in_bytes(ConstMethod::size_of_parameters_offset()));
   16.33  
   16.34    // native calls don't need the stack size check since they have no expression stack
   16.35    // and the arguments are already on the stack and we only add a handful of words
   16.36 @@ -1570,7 +1568,6 @@
   16.37  
   16.38  #ifndef CORE
   16.39  
   16.40 -  // 2014/11/24 Fu
   16.41    // mdp : T8
   16.42    // tmp1: T9
   16.43    // tmp2: T2
    17.1 --- a/src/cpu/mips/vm/templateTable_mips_64.cpp	Thu Sep 05 13:07:31 2019 +0800
    17.2 +++ b/src/cpu/mips/vm/templateTable_mips_64.cpp	Thu Sep 05 13:10:50 2019 +0800
    17.3 @@ -3716,7 +3716,7 @@
    17.4    if (load_receiver) {
    17.5      __ move(AT, ConstantPoolCacheEntry::parameter_size_mask);
    17.6      __ andr(recv, flags, AT);
    17.7 -    // 2014/07/31 Fu: Since we won't push RA on stack, no_return_pc_pushed_yet should be 0.
    17.8 +    // Since we won't push RA on stack, no_return_pc_pushed_yet should be 0.
    17.9      const int no_return_pc_pushed_yet = 0;  // argument slot correction before we push return address
   17.10      const int receiver_is_at_end      = -1;  // back off one slot to get receiver
   17.11      Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
   17.12 @@ -3780,7 +3780,6 @@
   17.13    // profile this call
   17.14    __ profile_final_call(T2);
   17.15  
   17.16 -  // 2014/11/24 Fu
   17.17    // T2: tmp, used for mdp
   17.18    // method: callee
   17.19    // T9: tmp
   17.20 @@ -3831,7 +3830,6 @@
   17.21    __ null_check(T3);
   17.22    __ profile_call(T9);
   17.23  
   17.24 -  // 2014/11/24 Fu
   17.25    // T8: tmp, used for mdp
   17.26    // Rmethod: callee
   17.27    // T9: tmp
   17.28 @@ -3839,7 +3837,7 @@
   17.29    __ profile_arguments_type(T8, Rmethod, T9, false);
   17.30  
   17.31    __ jump_from_interpreted(Rmethod, T9);
   17.32 -  __ move(T0, T3);//aoqi ?
   17.33 +  __ move(T0, T3);
   17.34  }
   17.35  
   17.36  void TemplateTable::invokestatic(int byte_no) {
   17.37 @@ -3850,7 +3848,6 @@
   17.38  
   17.39    __ profile_call(T9);
   17.40  
   17.41 -  // 2014/11/24 Fu
   17.42    // T8: tmp, used for mdp
   17.43    // Rmethod: callee
   17.44    // T9: tmp
   17.45 @@ -3992,7 +3989,6 @@
   17.46     // FIXME: profile the LambdaForm also
   17.47     __ profile_final_call(T9);
   17.48  
   17.49 -   // 2014/11/24 Fu
   17.50     // T8: tmp, used for mdp
   17.51     // T2_method: callee
   17.52     // T9: tmp
   17.53 @@ -4030,7 +4026,6 @@
   17.54     // profile this call
   17.55     __ profile_call(T9);
   17.56  
   17.57 -   // 2014/11/24 Fu
   17.58     // T8: tmp, used for mdp
   17.59     // Rmethod: callee
   17.60     // T9: tmp
   17.61 @@ -4275,11 +4270,11 @@
   17.62    __ beq(AT, R0, quicked);
   17.63    __ delayed()->nop();
   17.64  
   17.65 -  // 2012/6/2 Jin: In InterpreterRuntime::quicken_io_cc, lots of new classes may be loaded.
   17.66 -  //  Then, GC will move the object in V0 to another places in heap.
   17.67 -  //  Therefore, We should never save such an object in register.
   17.68 -  //  Instead, we should save it in the stack. It can be modified automatically by the GC thread.
   17.69 -  //  After GC, the object address in FSR is changed to a new place.
   17.70 +  // In InterpreterRuntime::quicken_io_cc, lots of new classes may be loaded.
   17.71 +  // Then, GC will move the object in V0 to another places in heap.
   17.72 +  // Therefore, We should never save such an object in register.
   17.73 +  // Instead, we should save it in the stack. It can be modified automatically by the GC thread.
   17.74 +  // After GC, the object address in FSR is changed to a new place.
   17.75    //
   17.76    __ push(atos);
   17.77    const Register thread = TREG;
    18.1 --- a/src/cpu/mips/vm/vmreg_mips.hpp	Thu Sep 05 13:07:31 2019 +0800
    18.2 +++ b/src/cpu/mips/vm/vmreg_mips.hpp	Thu Sep 05 13:10:50 2019 +0800
    18.3 @@ -1,6 +1,6 @@
    18.4  /*
    18.5   * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
    18.6 - * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
    18.7 + * Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
    18.8   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    18.9   *
   18.10   * This code is free software; you can redistribute it and/or modify it
   18.11 @@ -26,11 +26,10 @@
   18.12  #ifndef CPU_MIPS_VM_VMREG_MIPS_HPP
   18.13  #define CPU_MIPS_VM_VMREG_MIPS_HPP
   18.14  
   18.15 -  bool is_Register();
   18.16 -  Register as_Register();
   18.17 +bool is_Register();
   18.18 +Register as_Register();
   18.19  
   18.20 -  bool is_FloatRegister();
   18.21 -  FloatRegister as_FloatRegister();
   18.22 -
   18.23 +bool is_FloatRegister();
   18.24 +FloatRegister as_FloatRegister();
   18.25  
   18.26  #endif // CPU_MIPS_VM_VMREG_MIPS_HPP
    19.1 --- a/src/os_cpu/linux_mips/vm/prefetch_linux_mips.inline.hpp	Thu Sep 05 13:07:31 2019 +0800
    19.2 +++ b/src/os_cpu/linux_mips/vm/prefetch_linux_mips.inline.hpp	Thu Sep 05 13:10:50 2019 +0800
    19.3 @@ -1,6 +1,6 @@
    19.4  /*
    19.5   * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    19.6 - * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
    19.7 + * Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
    19.8   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.9   *
   19.10   * This code is free software; you can redistribute it and/or modify it
   19.11 @@ -28,7 +28,7 @@
   19.12  
   19.13  
   19.14  inline void Prefetch::read (void *loc, intx interval) {
   19.15 -        /* 2013.3.13 Jin: 'pref' is implemented as NOP in Loongson 3A */
   19.16 +        // 'pref' is implemented as NOP in Loongson 3A
   19.17          __asm__ __volatile__ (
   19.18                          "               .set push\n"
   19.19                          "               .set mips32\n"
    20.1 --- a/src/os_cpu/linux_mips/vm/thread_linux_mips.cpp	Thu Sep 05 13:07:31 2019 +0800
    20.2 +++ b/src/os_cpu/linux_mips/vm/thread_linux_mips.cpp	Thu Sep 05 13:10:50 2019 +0800
    20.3 @@ -1,6 +1,6 @@
    20.4  /*
    20.5   * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    20.6 - * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
    20.7 + * Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
    20.8   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.9   *
   20.10   * This code is free software; you can redistribute it and/or modify it
   20.11 @@ -32,12 +32,11 @@
   20.12  {
   20.13      _anchor.clear();
   20.14  
   20.15 -    /* A non-existing address as error detector */
   20.16 -/* Jin */
   20.17 +    // A non-existing address as error detector
   20.18      if (CompileBroker::get_compilation_id() > 0)
   20.19 -        _handle_wrong_method_stub = (address)SharedRuntime::get_handle_wrong_method_stub();
   20.20 +      _handle_wrong_method_stub = (address)SharedRuntime::get_handle_wrong_method_stub();
   20.21      else
   20.22 -        _handle_wrong_method_stub = (address)0x2B2B2B;
   20.23 +      _handle_wrong_method_stub = (address)0x2B2B2B;
   20.24  }
   20.25  
   20.26  // For Forte Analyzer AsyncGetCallTrace profiling support - thread is
    21.1 --- a/src/os_cpu/linux_mips/vm/thread_linux_mips.hpp	Thu Sep 05 13:07:31 2019 +0800
    21.2 +++ b/src/os_cpu/linux_mips/vm/thread_linux_mips.hpp	Thu Sep 05 13:10:50 2019 +0800
    21.3 @@ -1,6 +1,6 @@
    21.4  /*
    21.5   * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    21.6 - * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
    21.7 + * Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
    21.8   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.9   *
   21.10   * This code is free software; you can redistribute it and/or modify it
   21.11 @@ -63,7 +63,7 @@
   21.12    static void enable_register_stack_guard() {}
   21.13    static void disable_register_stack_guard() {}
   21.14  
   21.15 -  /* 2013/11/5 Jin: For convenient implementation of NativeGeneralJump::replace_mt_safe() */
   21.16 +  // For convenient implementation of NativeGeneralJump::replace_mt_safe()
   21.17    volatile address _handle_wrong_method_stub;
   21.18    static ByteSize handle_wrong_method_stub_offset()          { return byte_offset_of(JavaThread, _handle_wrong_method_stub); }
   21.19    void set_handle_wrong_method_stub(address stub)          { _handle_wrong_method_stub = stub; }
