src/share/vm/c1/c1_LIRGenerator.cpp

changeset 8865:ffcdff41a92f
parent    8860:43b19021a5a9
child     9041:95a08233f46c
     1.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Sat Jan 06 16:30:58 2018 +0800
     1.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu May 24 19:49:50 2018 +0800
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -312,12 +312,12 @@
    1.11  void LIRGenerator::init() {
    1.12    _bs = Universe::heap()->barrier_set();
    1.13  #ifdef MIPS64
    1.14 -        assert(_bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
    1.15 -        CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
    1.16 -        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
    1.17 -        //_card_table_base = new LIR_Const((intptr_t)ct->byte_map_base);
    1.18 -        //        //FIXME, untested in 32bit. by aoqi
    1.19 -        _card_table_base = new LIR_Const(ct->byte_map_base);
    1.20 +  assert(_bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
    1.21 +  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
    1.22 +  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
    1.23 +  //_card_table_base = new LIR_Const((intptr_t)ct->byte_map_base);
    1.24 +  //        //FIXME, untested in 32bit. by aoqi
    1.25 +  _card_table_base = new LIR_Const(ct->byte_map_base);
    1.26  #endif
    1.27  }
    1.28  
    1.29 @@ -528,10 +528,10 @@
    1.30      cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    1.31      __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
    1.32  #else
    1.33 -   LIR_Opr left = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(),T_INT));
    1.34 -   LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    1.35 -   __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
    1.36 -   __ branch(lir_cond_belowEqual,left, right ,T_INT, stub); // forward branch
    1.37 +    LIR_Opr left = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(),T_INT));
    1.38 +    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    1.39 +    __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
    1.40 +    __ branch(lir_cond_belowEqual,left, right ,T_INT, stub); // forward branch
    1.41  #endif
    1.42    } else {
    1.43  #ifndef MIPS64
    1.44 @@ -716,7 +716,8 @@
    1.45  #ifndef MIPS64
    1.46  void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
    1.47  #else
    1.48 -void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr scratch5, LIR_Opr scratch6, LIR_Opr klass_reg, CodeEmitInfo* info) {
    1.49 +void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3,
    1.50 +                                LIR_Opr scratch4, LIR_Opr scratch5, LIR_Opr scratch6,LIR_Opr klass_reg, CodeEmitInfo* info) {
    1.51  #endif
    1.52    klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
    1.53    // If klass is not loaded we do not know if the klass has finalizers:
    1.54 @@ -1030,50 +1031,48 @@
    1.55  }
    1.56  #else
    1.57  void LIRGenerator::profile_branch(If* if_instr, If::Condition cond , LIR_Opr left, LIR_Opr right) {
    1.58 -        if (if_instr->should_profile()) {
    1.59 -                ciMethod* method = if_instr->profiled_method();
    1.60 -                assert(method != NULL, "method should be set if branch is profiled");
    1.61 -                ciMethodData* md = method->method_data_or_null();
    1.62 -                if (md == NULL) {
    1.63 -                        bailout("out of memory building methodDataOop");
    1.64 -                        return;
    1.65 -                }
    1.66 -                ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    1.67 -                assert(data != NULL, "must have profiling data");
    1.68 -                assert(data->is_BranchData(), "need BranchData for two-way branches");
    1.69 -                int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    1.70 -                int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    1.71 -                if (if_instr->is_swapped()) {
    1.72 -                 int t = taken_count_offset;
    1.73 -                 taken_count_offset = not_taken_count_offset;
    1.74 -                 not_taken_count_offset = t;
    1.75 -                }
    1.76 -                LIR_Opr md_reg = new_register(T_METADATA);
    1.77 -                __ metadata2reg(md->constant_encoding(), md_reg);
    1.78 -                //__ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
    1.79 -                LIR_Opr data_offset_reg = new_pointer_register();
    1.80 -
    1.81 -                LIR_Opr opr1 =  LIR_OprFact::intConst(taken_count_offset);
    1.82 -                LIR_Opr opr2 =  LIR_OprFact::intConst(not_taken_count_offset);
    1.83 -                LabelObj* skip = new LabelObj();
    1.84 -
    1.85 -                __ move(opr1, data_offset_reg);
    1.86 -                __ branch( lir_cond(cond), left, right, skip->label());
    1.87 -                __ move(opr2, data_offset_reg);
    1.88 -                __ branch_destination(skip->label());
    1.89 -
    1.90 -                LIR_Opr data_reg = new_pointer_register();
    1.91 -                LIR_Opr tmp_reg = new_pointer_register();
    1.92 -                // LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    1.93 -                                __ move(data_offset_reg, tmp_reg);
    1.94 -                __ add(tmp_reg, md_reg, tmp_reg);
    1.95 -                LIR_Address* data_addr = new LIR_Address(tmp_reg, 0, T_INT);
    1.96 -                __ move(LIR_OprFact::address(data_addr), data_reg);
    1.97 -                LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    1.98 -                // Use leal instead of add to avoid destroying condition codes on x86
    1.99 -                                __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
   1.100 -                __ move(data_reg, LIR_OprFact::address(data_addr));
   1.101 -        }
   1.102 +  if (if_instr->should_profile()) {
   1.103 +    ciMethod* method = if_instr->profiled_method();
   1.104 +    assert(method != NULL, "method should be set if branch is profiled");
   1.105 +    ciMethodData* md = method->method_data_or_null();
   1.106 +    if (md == NULL) {
   1.107 +      bailout("out of memory building methodDataOop");
   1.108 +      return;
   1.109 +    }
   1.110 +    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
   1.111 +    assert(data != NULL, "must have profiling data");
   1.112 +    assert(data->is_BranchData(), "need BranchData for two-way branches");
   1.113 +    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
   1.114 +    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
   1.115 +    if (if_instr->is_swapped()) {
   1.116 +      int t = taken_count_offset;
   1.117 +      taken_count_offset = not_taken_count_offset;
   1.118 +      not_taken_count_offset = t;
   1.119 +    }
   1.120 +    LIR_Opr md_reg = new_register(T_METADATA);
   1.121 +    __ metadata2reg(md->constant_encoding(), md_reg);
   1.122 +    LIR_Opr data_offset_reg = new_pointer_register();
   1.123 +
   1.124 +    LIR_Opr opr1 =  LIR_OprFact::intptrConst(taken_count_offset);
   1.125 +    LIR_Opr opr2 =  LIR_OprFact::intptrConst(not_taken_count_offset);
   1.126 +    LabelObj* skip = new LabelObj();
   1.127 +
   1.128 +    __ move(opr1, data_offset_reg);
   1.129 +    __ branch( lir_cond(cond), left, right, skip->label());
   1.130 +    __ move(opr2, data_offset_reg);
   1.131 +    __ branch_destination(skip->label());
   1.132 +
   1.133 +    LIR_Opr data_reg = new_pointer_register();
   1.134 +    LIR_Opr tmp_reg = new_pointer_register();
   1.135 +    __ move(data_offset_reg, tmp_reg);
   1.136 +    __ add(tmp_reg, md_reg, tmp_reg);
   1.137 +    LIR_Address* data_addr = new LIR_Address(tmp_reg, 0, data_reg->type());
   1.138 +    __ move(LIR_OprFact::address(data_addr), data_reg);
   1.139 +    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
   1.140 +    // Use leal instead of add to avoid destroying condition codes on x86
   1.141 +    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
   1.142 +    __ move(data_reg, LIR_OprFact::address(data_addr));
   1.143 +  }
   1.144  }
   1.145  
   1.146  #endif
   1.147 @@ -1996,11 +1995,11 @@
   1.148        cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
   1.149        __ branch(lir_cond_belowEqual, T_INT, stub);
   1.150  #else
   1.151 -            LIR_Opr left = LIR_OprFact::address(new LIR_Address( buf.result(),
   1.152 +      LIR_Opr left = LIR_OprFact::address(new LIR_Address( buf.result(),
   1.153                                                  java_nio_Buffer::limit_offset(),T_INT));
   1.154 -        LIR_Opr right = LIR_OprFact::intConst(index.result()->as_jint());
   1.155 +      LIR_Opr right = LIR_OprFact::intConst(index.result()->as_jint());
   1.156        __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
   1.157 -            __ branch(lir_cond_belowEqual,left, right ,T_INT, stub); // forward branch
   1.158 +      __ branch(lir_cond_belowEqual,left, right ,T_INT, stub); // forward branch
   1.159  
   1.160  #endif
   1.161      } else {
   1.162 @@ -2009,10 +2008,10 @@
   1.163                    java_nio_Buffer::limit_offset(), T_INT, info);
   1.164        __ branch(lir_cond_aboveEqual, T_INT, stub);
   1.165  #else
   1.166 -            LIR_Opr right = LIR_OprFact::address(new LIR_Address( buf.result(), java_nio_Buffer::limit_offset(),T_INT));
   1.167 -            LIR_Opr left =  index.result();
   1.168 +      LIR_Opr right = LIR_OprFact::address(new LIR_Address( buf.result(), java_nio_Buffer::limit_offset(),T_INT));
   1.169 +      LIR_Opr left =  index.result();
   1.170        __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
   1.171 -            __ branch(lir_cond_aboveEqual, left, right , T_INT, stub); // forward branch
   1.172 +      __ branch(lir_cond_aboveEqual, left, right , T_INT, stub); // forward branch
   1.173  #endif
   1.174      }
   1.175      __ move(index.result(), result);
   1.176 @@ -2093,8 +2092,8 @@
   1.177  #ifndef MIPS64
   1.178        __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
   1.179  #else
   1.180 -     tty->print_cr("LIRGenerator::do_LoadIndexed(LoadIndexed* x) unimplemented yet!");
   1.181 -     Unimplemented();
   1.182 +      tty->print_cr("LIRGenerator::do_LoadIndexed(LoadIndexed* x) unimplemented yet!");
   1.183 +      Unimplemented();
   1.184  #endif
   1.185      } else if (use_length) {
   1.186        // TODO: use a (modified) version of array_range_check that does not require a
   1.187 @@ -2826,7 +2825,6 @@
   1.188      }
   1.189      LIR_Opr md_reg = new_register(T_METADATA);
   1.190      __ metadata2reg(md->constant_encoding(), md_reg);
   1.191 -
   1.192      increment_counter(new LIR_Address(md_reg, offset,
   1.193                                        NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
   1.194    }
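
For readers following the MIPS64 profile_branch hunk above, the following standalone C++ sketch models what the emitted LIR computes at run time. It is an illustration only, not HotSpot code: all names are hypothetical, the method-data block is reduced to a raw byte array, the two BranchData counters are assumed to be 32-bit slots at the given byte offsets (matching the T_INT loads in the hunk), and DataLayout::counter_increment is assumed to be 1.

// Illustrative sketch (hypothetical helper, not HotSpot API): run-time effect
// of the LIR emitted by the MIPS64 profile_branch shown in the diff above.
#include <cstdint>
#include <cstring>

// md_base       : start of the method-data block, modelled as raw bytes
// taken_off     : byte offset of the "taken" counter (BranchData::taken_offset)
// not_taken_off : byte offset of the "not taken" counter
// cond_true     : outcome of the compare that profile_branch branches on
static void profile_two_way_branch(uint8_t* md_base,
                                   int taken_off, int not_taken_off,
                                   bool cond_true) {
  // The branch over the `skip` label decides which offset stays in
  // data_offset_reg: taken when the condition holds, not-taken otherwise.
  int data_offset = cond_true ? taken_off : not_taken_off;
  // tmp_reg = md_reg + data_offset; data_reg = *tmp_reg (a T_INT load).
  uint8_t* counter_addr = md_base + data_offset;
  uint32_t counter;
  std::memcpy(&counter, counter_addr, sizeof(counter));
  // leal(fake_incr_value) adds DataLayout::counter_increment (assumed 1 here)
  // without touching condition codes; the sum is then stored back.
  counter += 1;
  std::memcpy(counter_addr, &counter, sizeof(counter));
}

In the actual LIR, the offset selection is done with a conditional branch around a move rather than a ternary, and the add is performed with leal so the flags set by the compare are preserved on x86-style targets.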
