src/share/vm/code/vmreg.hpp

changeset 7994:04ff2f6cd0eb (merge; parents 7598:ddce0b7cee93, 6876:710a3c8b516e), author aoqi, Tue, 17 Oct 2017 12:58:25 +0800

/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#ifndef SHARE_VM_CODE_VMREG_HPP
#define SHARE_VM_CODE_VMREG_HPP

#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "asm/register.hpp"

#ifdef COMPILER2
#include "opto/adlcVMDeps.hpp"
#include "utilities/ostream.hpp"
#if defined ADGLOBALS_MD_HPP
# include ADGLOBALS_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/adGlobals_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/adGlobals_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/adGlobals_ppc_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_mips_64
# include "adfiles/adGlobals_mips_64.hpp"
#endif
#endif
//------------------------------VMReg------------------------------------------
// The VM uses 'unwarped' stack slots; the compiler uses 'warped' stack slots.
// Register numbers below VMRegImpl::stack0 are the same for both.  Register
// numbers above stack0 are either warped (in the compiler) or unwarped
// (in the VM).  Unwarped numbers represent stack indices, offsets from
// the current stack pointer.  Warped numbers are required during compilation
// when we do not yet know how big the frame will be.
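//
// Illustrative sketch, not part of the original header: how an unwarped
// number maps onto the stack, assuming the 4-byte stack slots documented
// below.
//
//   VMReg slot = VMRegImpl::stack2reg(3);   // stack index 3
//   // unwarped: the slot lives at sp + 3 * VMRegImpl::stack_slot_size
//   assert(slot->is_stack() && slot->reg2stack() == 3, "round trip");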
class VMRegImpl;
typedef VMRegImpl* VMReg;

class VMRegImpl {
// friend class OopMap;
friend class VMStructs;
friend class OptoReg;
// friend class Location;
private:
  enum {
    BAD_REG = -1
  };

  static VMReg stack0;
  // Names for registers
  static const char *regName[];
  static const int register_count;

public:

  static VMReg  as_VMReg(int val, bool bad_ok = false) { assert(val > BAD_REG || bad_ok, "invalid"); return (VMReg) (intptr_t) val; }

  const char*  name() {
    if (is_reg()) {
      return regName[value()];
    } else if (!is_valid()) {
      return "BAD";
    } else {
      // shouldn't really be called with stack
      return "STACKED REG";
    }
  }
  static VMReg Bad() { return (VMReg) (intptr_t) BAD_REG; }
  bool is_valid() const { return ((intptr_t) this) != BAD_REG; }
  bool is_stack() const { return (intptr_t) this >= (intptr_t) stack0; }
  bool is_reg()   const { return is_valid() && !is_stack(); }

  // A concrete register is a value that returns true for is_reg() and is
  // also a register you could use in the assembler. On machines with
  // 64bit registers only one half of the VMReg (and OptoReg) is considered
  // concrete.
  bool is_concrete();

  // VMRegs are 4 bytes wide on all platforms
  static const int stack_slot_size;
  static const int slots_per_word;
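
  // Illustrative note, not part of the original header: since every slot is
  // 4 bytes wide, a 64-bit word spans two adjacent slots, which is why 64-bit
  // values are handled as (first, second) pairs further below.
  //
  //   // e.g. a long stored at stack index 6 occupies slots 6 and 7:
  //   VMReg lo = VMRegImpl::stack2reg(6);
  //   VMReg hi = lo->next();                 // stack index 7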
  // This really ought to check that the register is "real" in the sense that
  // we don't try to get the VMReg number of a physical register that doesn't
  // have an expressible part. That would be pd specific code.
  VMReg next() {
    assert((is_reg() && value() < stack0->value() - 1) || is_stack(), "must be");
    return (VMReg)(intptr_t)(value() + 1);
  }
  VMReg next(int i) {
    assert((is_reg() && value() < stack0->value() - i) || is_stack(), "must be");
    return (VMReg)(intptr_t)(value() + i);
  }
  VMReg prev() {
    assert((is_stack() && value() > stack0->value()) || (is_reg() && value() != 0), "must be");
    return (VMReg)(intptr_t)(value() - 1);
  }

  intptr_t value() const         { return (intptr_t) this; }

  void print_on(outputStream* st) const;
  void print() const { print_on(tty); }

  // Bias a stack slot.
  // Typically used to adjust a virtual frame slot by an offset that is part
  // of the native ABI. The VMReg must be a stack slot and so must the result.

  VMReg bias(int offset) {
    assert(is_stack(), "must be");
    // VMReg res = VMRegImpl::as_VMReg(value() + offset);
    VMReg res = stack2reg(reg2stack() + offset);
    assert(res->is_stack(), "must be");
    return res;
  }

  // Convert register numbers to stack slots and vice versa
  static VMReg stack2reg( int idx ) {
    return (VMReg) (intptr_t) (stack0->value() + idx);
  }

  uintptr_t reg2stack() {
    assert( is_stack(), "Not a stack-based register" );
    return value() - stack0->value();
  }
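
  // Illustrative sketch, not part of the original header: biasing a stack
  // slot and converting back. The two-slot offset is only an example value
  // standing in for a native-ABI adjustment.
  //
  //   VMReg slot  = VMRegImpl::stack2reg(5);
  //   VMReg moved = slot->bias(2);            // now names stack index 7
  //   assert(moved->reg2stack() == 7, "still a stack slot, shifted by 2");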
  static void set_regName();

#ifdef TARGET_ARCH_x86
# include "vmreg_x86.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "vmreg_mips.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.hpp"
#endif

};
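
// Illustrative sketch, not part of the original header: typical VMReg
// queries. The raw index 0 is an assumption; real callers usually obtain a
// VMReg from a machine Register via the platform-specific as_VMReg().
//
//   VMReg r = VMRegImpl::as_VMReg(0);
//   if (r->is_reg())   tty->print_cr("machine register %s", r->name());
//   if (r->is_stack()) tty->print_cr("stack index %d", (int) r->reg2stack());
//   VMReg hi = r->next();   // the adjacent VMReg, e.g. the second half of a 64-bit register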
//---------------------------VMRegPair-------------------------------------------
// Pairs of 32-bit registers for arguments.
// SharedRuntime::java_calling_convention will overwrite the structs with
// the calling convention's registers.  VMRegImpl::Bad is returned for any
// unused 32-bit register.  This happens for the unused high half of Int
// arguments, for 32-bit pointers, and for longs in the 32-bit sparc build
// (which are passed to natives in the low 32 bits of e.g. O0/O1, with the
// high 32 bits set to VMRegImpl::Bad).  Longs in one register and doubles
// always return a high and a low register, as do 64-bit pointers.
//
class VMRegPair {
private:
  VMReg _second;
  VMReg _first;
public:
  void set_bad (                   ) { _second=VMRegImpl::Bad(); _first=VMRegImpl::Bad(); }
  void set1    (         VMReg v  ) { _second=VMRegImpl::Bad(); _first=v; }
  void set2    (         VMReg v  ) { _second=v->next();  _first=v; }
  void set_pair( VMReg second, VMReg first    ) { _second= second;    _first= first; }
  void set_ptr ( VMReg ptr ) {
#ifdef _LP64
    _second = ptr->next();
#else
    _second = VMRegImpl::Bad();
#endif
    _first = ptr;
  }

  // Return true if single register, even if the pair is really just adjacent stack slots
  bool is_single_reg() const {
    return (_first->is_valid()) && (_first->value() + 1 == _second->value());
  }

  // Return true if single stack based "register" where the slot alignment matches input alignment
  bool is_adjacent_on_stack(int alignment) const {
    return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0));
  }

  // Return true if single stack based "register" where the slot alignment matches input alignment
  bool is_adjacent_aligned_on_stack(int alignment) const {
    return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0));
  }

  // Return true if single register but adjacent stack slots do not count
  bool is_single_phys_reg() const {
    return (_first->is_reg() && (_first->value() + 1 == _second->value()));
  }

  VMReg second() const { return _second; }
  VMReg first()  const { return _first; }
  VMRegPair(VMReg s, VMReg f) {  _second = s; _first = f; }
  VMRegPair(VMReg f) { _second = VMRegImpl::Bad(); _first = f; }
  VMRegPair() { _second = VMRegImpl::Bad(); _first = VMRegImpl::Bad(); }
};
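
// Illustrative sketch, not part of the original header: how a calling
// convention might fill a VMRegPair. The stack indices are assumptions
// chosen only to show the single-slot vs. two-slot cases.
//
//   VMRegPair pair;
//   pair.set_bad();                          // unused argument position
//   pair.set1(VMRegImpl::stack2reg(0));      // 32-bit value: one slot, second is Bad
//   pair.set2(VMRegImpl::stack2reg(2));      // 64-bit value: two adjacent slots
//   assert(pair.second() == pair.first()->next(), "high half follows low half");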
#endif // SHARE_VM_CODE_VMREG_HPP
