src/share/vm/c1/c1_FrameMap.cpp

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "vmreg_mips.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif

//-----------------------------------------------------

// Convert method signature into an array of BasicTypes for the arguments
BasicTypeArray* FrameMap::signature_type_array_for(const ciMethod* method) {
  ciSignature* sig = method->signature();
  BasicTypeList* sta = new BasicTypeList(method->arg_size());
  // add receiver, if any
  if (!method->is_static()) sta->append(T_OBJECT);
  // add remaining arguments
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType t = type->basic_type();
    if (t == T_ARRAY) {
      t = T_OBJECT;
    }
    sta->append(t);
  }
  // done
  return sta;
}
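
// Illustrative example (added, not part of the original source): for a
// hypothetical instance method with descriptor (JLjava/lang/String;[I)V the
// list produced above is [T_OBJECT (receiver), T_LONG, T_OBJECT, T_OBJECT],
// since array types are widened to T_OBJECT and the return type is not part
// of the argument list.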

CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // compute the size of the arguments first.  The signature array
  // that java_calling_convention takes includes a T_VOID after double
  // work items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }
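  // Illustrative note (added): the loop above expands the compact signature
  // into the form SharedRuntime expects, e.g. [T_INT, T_LONG, T_OBJECT]
  // becomes [T_INT, T_LONG, T_VOID, T_OBJECT], with a T_VOID slot following
  // every two-word item.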

  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, outgoing);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}
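
// Note (added): this convention is used both for incoming arguments (see the
// FrameMap constructor below, which calls it with outgoing == false) and for
// outgoing Java calls; only the outgoing case updates the reserved argument area.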

CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // compute the size of the arguments first.  The signature array
  // that c_calling_convention takes includes a T_VOID after double
  // work items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, NULL, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // they might be of different types if for instance floating point
    // values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}
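
// Note (added): unlike java_calling_convention above, C calls are always
// treated as outgoing, so the reserved argument area is updated unconditionally.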

//--------------------------------------------------------
//               FrameMap
//--------------------------------------------------------

bool      FrameMap::_init_done = false;
Register  FrameMap::_cpu_rnr2reg [FrameMap::nof_cpu_regs];
int       FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];


FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");
  _reserved_argument_area_size = MAX2(4, reserved_argument_area_size) * BytesPerWord;

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();

  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }
}
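
// Note (added): at this point incoming arguments that arrived as sp-relative
// addresses have been rewritten as stack operands indexed by their Java
// argument number; their byte offsets are remembered in _argument_locations
// and are rebased in finalize_frame() once the frame size is known.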

bool FrameMap::finalize_frame(int nof_slots) {
  assert(nof_slots >= 0, "must not be negative");
  assert(_num_spills == -1, "can only be set once");
  _num_spills = nof_slots;
  assert(_framesize == -1, "should only be calculated once");
  _framesize = round_to(in_bytes(sp_offset_for_monitor_base(0)) +
                        _num_monitors * sizeof(BasicObjectLock) +
                        sizeof(intptr_t) +                        // offset of deopt orig pc
                        frame_pad_in_bytes,
                        StackAlignmentInBytes) / 4;
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      _argument_locations->at_put(java_index, in_bytes(framesize_in_bytes()) +
                                  _argument_locations->at(java_index));
    }
    java_index += type2size[opr->type()];
  }
  // make sure it's expressible on the platform
  return validate_frame();
}
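
// Note (added): _framesize is kept in 4-byte stack slots (hence the division
// by 4 after rounding the byte size up to StackAlignmentInBytes), which is the
// unit that sp_offset2vmreg() and the OopMap machinery below work in.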

VMReg FrameMap::sp_offset2vmreg(ByteSize offset) const {
  int offset_in_bytes = in_bytes(offset);
  assert(offset_in_bytes % 4 == 0, "must be multiple of 4 bytes");
  assert(offset_in_bytes / 4 < framesize() + oop_map_arg_count(), "out of range");
  return VMRegImpl::stack2reg(offset_in_bytes / 4);
}


bool FrameMap::location_for_sp_offset(ByteSize byte_offset_from_sp,
                                      Location::Type loc_type,
                                      Location* loc) const {
  int offset = in_bytes(byte_offset_from_sp);
  assert(offset >= 0, "incorrect offset");
  if (!Location::legal_offset_in_bytes(offset)) {
    return false;
  }
  Location tmp_loc = Location::new_stk_loc(loc_type, offset);
  *loc = tmp_loc;
  return true;
}


bool FrameMap::locations_for_slot(int index, Location::Type loc_type,
                                  Location* loc, Location* second) const {
  ByteSize offset_from_sp = sp_offset_for_slot(index);
  if (!location_for_sp_offset(offset_from_sp, loc_type, loc)) {
    return false;
  }
  if (second != NULL) {
    // two word item
    offset_from_sp = offset_from_sp + in_ByteSize(4);
    return location_for_sp_offset(offset_from_sp, loc_type, second);
  }
  return true;
}

//////////////////////
// Public accessors //
//////////////////////


ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }
  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}


ByteSize FrameMap::sp_offset_for_double_slot(const int index) const {
  ByteSize offset = sp_offset_for_slot(index);
  if (index >= argcount()) {
    assert(in_bytes(offset) + 4 < framesize() * 4, "spill outside of frame");
  }
  return offset;
}


ByteSize FrameMap::sp_offset_for_spill(const int index) const {
  assert(index >= 0 && index < _num_spills, "out of range");
  int offset = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    index * spill_slot_size_in_bytes;
  return in_ByteSize(offset);
}

ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
  int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    _num_spills * spill_slot_size_in_bytes;
  int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
  return in_ByteSize(offset);
}
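
// Note (added): as the two functions above show, the area reachable from sp is
// laid out as the reserved (ABI) argument area first, then the spill slots
// (double-aligned), then the monitors (BasicObjectLock), while incoming stack
// arguments live beyond framesize().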

ByteSize FrameMap::sp_offset_for_monitor_lock(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::lock_offset_in_bytes());
}

ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::obj_offset_in_bytes());
}

// For OopMaps, map a local variable or spill index to a VMReg.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by SharedInfo::stack0 to indicate a stack location (vs. a register.)
//
//         C ABI size +
//         framesize +     framesize +
//         stack0          stack0         stack0          0 <- VMReg->value()
//            |              |              | <registers> |
//  ..........|..............|..............|.............|
//    0 1 2 3 | <C ABI area> | 4 5 6 ...... |               <- local indices
//    ^                        ^          sp()
//    |                        |
//  arguments            non-argument locals


VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}



// ------------ extra spill slots ---------------
