src/share/vm/asm/assembler.hpp

/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#ifndef SHARE_VM_ASM_ASSEMBLER_HPP
#define SHARE_VM_ASM_ASSEMBLER_HPP

#include "asm/codeBuffer.hpp"
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"

#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
# include "vm_version_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
# include "vm_version_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "register_zero.hpp"
# include "vm_version_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "register_arm.hpp"
# include "vm_version_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "register_ppc.hpp"
# include "vm_version_ppc.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "register_mips.hpp"
# include "vm_version_mips.hpp"
#endif

// This file contains platform-independent assembler declarations.

class MacroAssembler;
class AbstractAssembler;
class Label;

/**
 * Labels represent destinations for control transfer instructions.  Such
 * instructions can accept a Label as their target argument.  A Label is
 * bound to the current location in the code stream by calling the
 * MacroAssembler's 'bind' method, which in turn calls the Label's 'bind'
 * method.  A Label may be referenced by an instruction before it's bound
 * (i.e., 'forward referenced').  'bind' stores the current code offset
 * in the Label object.
 *
 * If an instruction references a bound Label, the offset field(s) within
 * the instruction are immediately filled in based on the Label's code
 * offset.  If an instruction references an unbound label, that
 * instruction is put on a list of instructions that must be patched
 * (i.e., 'resolved') when the Label is bound.
 *
 * 'bind' will call the platform-specific 'patch_instruction' method to
 * fill in the offset field(s) for each unresolved instruction (if there
 * are any).  'patch_instruction' lives in one of the
 * cpu/<arch>/vm/assembler_<arch>* files.
 *
 * Instead of using a linked list of unresolved instructions, a Label has
 * an array of unresolved instruction code offsets.  _patch_index
 * contains the total number of forward references.  If the Label's array
 * overflows (i.e., _patch_index grows larger than the array size), a
 * GrowableArray is allocated to hold the remaining offsets.  (The cache
 * size is 4 for now, which handles over 99.5% of the cases)
 *
 * Labels may only be used within a single CodeSection.  If you need
 * to create references between code sections, use explicit relocations.
 */
class Label VALUE_OBJ_CLASS_SPEC {
 private:
  enum { PatchCacheSize = 4 };

  // _loc encodes both the binding state (via its sign)
  // and the binding locator (via its value) of a label.
  //
  // _loc >= 0   bound label, loc() encodes the target (jump) position
  // _loc == -1  unbound label
  int _loc;

  // References to instructions that jump to this unresolved label.
  // These instructions need to be patched when the label is bound
  // using the platform-specific patchInstruction() method.
  //
  // To avoid having to allocate from the C-heap each time, we provide
  // a local cache and use the overflow only if we exceed the local cache
  int _patches[PatchCacheSize];
  int _patch_index;
  GrowableArray<int>* _patch_overflow;

  Label(const Label&) { ShouldNotReachHere(); }

 public:

  /**
   * After binding, be sure 'patch_instructions' is called later to link
   */
  void bind_loc(int loc) {
    assert(loc >= 0, "illegal locator");
    assert(_loc == -1, "already bound");
    _loc = loc;
  }
  void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }

#ifndef PRODUCT
  // Iterates over all unresolved instructions for printing
  void print_instructions(MacroAssembler* masm) const;
#endif // PRODUCT

  /**
   * Returns the position of the Label in the code buffer.
   * The position is a 'locator', which encodes both offset and section.
   */
  int loc() const {
    assert(_loc >= 0, "unbound label");
    return _loc;
  }
  int loc_pos()  const { return CodeBuffer::locator_pos(loc()); }
  int loc_sect() const { return CodeBuffer::locator_sect(loc()); }

  bool is_bound() const    { return _loc >=  0; }
  bool is_unbound() const  { return _loc == -1 && _patch_index > 0; }
  bool is_unused() const   { return _loc == -1 && _patch_index == 0; }

  /**
   * Adds a reference to an unresolved displacement instruction to
   * this unbound label
   *
   * @param cb         the code buffer being patched
   * @param branch_loc the locator of the branch instruction in the code buffer
   */
  void add_patch_at(CodeBuffer* cb, int branch_loc);

  /**
   * Iterate over the list of patches, resolving the instructions
   * Call patch_instruction on each 'branch_loc' value
   */
  void patch_instructions(MacroAssembler* masm);

  void init() {
    _loc = -1;
    _patch_index = 0;
    _patch_overflow = NULL;
  }

  Label() {
    init();
  }

  ~Label() {
    assert(is_bound() || is_unused(), "Label was never bound to a location, but it was used as a jmp target");
  }

  void reset() {
    init(); // leave _patch_overflow because it points to CodeBuffer.
  }
};
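
// A minimal usage sketch (illustrative only; the '__' shorthand and the 'jcc'
// mnemonic are assumptions borrowed from the x86 MacroAssembler, and actual
// branch instructions are platform-specific):
//
//   Label done;
//   __ jcc(Assembler::equal, done);   // forward reference; recorded via add_patch_at()
//   ...                               // more code
//   __ bind(done);                    // binds the label and patches the branch above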

// A union type for code which has to assemble both constant and
// non-constant operands, when the distinction cannot be made
// statically.
class RegisterOrConstant VALUE_OBJ_CLASS_SPEC {
 private:
  Register _r;
  intptr_t _c;

 public:
  RegisterOrConstant(): _r(noreg), _c(0) {}
  RegisterOrConstant(Register r): _r(r), _c(0) {}
  RegisterOrConstant(intptr_t c): _r(noreg), _c(c) {}

  Register as_register() const { assert(is_register(),""); return _r; }
  intptr_t as_constant() const { assert(is_constant(),""); return _c; }

  Register register_or_noreg() const { return _r; }
  intptr_t constant_or_zero() const  { return _c; }

  bool is_register() const { return _r != noreg; }
  bool is_constant() const { return _r == noreg; }
};
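
// Illustrative sketch (not from this file): an assembler helper that accepts an
// index which may be either a register or a compile-time constant can take a
// RegisterOrConstant and branch on is_constant() / is_register() at assembly time.
// The helper name below is hypothetical:
//
//   void emit_scaled_load(RegisterOrConstant index) {
//     if (index.is_constant()) {
//       // fold index.as_constant() into the addressing mode
//     } else {
//       // use index.as_register() in the address computation
//     }
//   }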

// The Abstract Assembler: Pure assembler doing NO optimizations on the
// instruction level; i.e., what you write is what you get.
// The Assembler generates code into a CodeBuffer.
class AbstractAssembler : public ResourceObj  {
  friend class Label;

 protected:
  CodeSection* _code_section;          // section within the code buffer
  OopRecorder* _oop_recorder;          // support for relocInfo::oop_type

 public:
  // Code emission & accessing
  address addr_at(int pos) const { return code_section()->start() + pos; }

 protected:
  // This routine is called when a label is used for an address.
  // Labels and displacements truck in offsets, but target must return a PC.
  address target(Label& L)             { return code_section()->target(L, pc()); }

  bool is8bit(int x) const             { return -0x80 <= x && x < 0x80; }
  bool isByte(int x) const             { return 0 <= x && x < 0x100; }
  bool isShiftCount(int x) const       { return 0 <= x && x < 32; }

  // Instruction boundaries (required when emitting relocatable values).
  class InstructionMark: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    InstructionMark(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->inst_mark() == NULL, "overlapping instructions");
      _assm->set_inst_mark();
    }
    ~InstructionMark() {
      _assm->clear_inst_mark();
    }
  };
  friend class InstructionMark;
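
  // Usage sketch (illustrative; the emitter and the displacement helper below
  // are hypothetical): an emitter that embeds a relocatable value brackets the
  // whole instruction with an InstructionMark so the relocation is anchored at
  // the instruction start rather than at the operand bytes.
  //
  //   void emit_call_literal(address entry) {
  //     InstructionMark im(this);                   // marks the instruction boundary
  //     relocate(relocInfo::runtime_call_type);     // relocation recorded at the mark
  //     emit_int8(0xE8);                            // opcode byte (example only)
  //     emit_int32(displacement_to(entry));         // hypothetical displacement helper
  //   }
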
#ifdef ASSERT
  // Make it return true on platforms which need to verify
  // instruction boundaries for some operations.
  static bool pd_check_instruction_mark();

  // Add delta to short branch distance to verify that it still fits into imm8.
  int _short_branch_delta;

  int  short_branch_delta() const { return _short_branch_delta; }
  void set_short_branch_delta()   { _short_branch_delta = 32; }
  void clear_short_branch_delta() { _short_branch_delta = 0; }

  class ShortBranchVerifier: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->short_branch_delta() == 0, "overlapping instructions");
      _assm->set_short_branch_delta();
    }
    ~ShortBranchVerifier() {
      _assm->clear_short_branch_delta();
    }
  };
#else
  // Dummy in product.
  class ShortBranchVerifier: public StackObj {
   public:
    ShortBranchVerifier(AbstractAssembler* assm) {}
  };
#endif

 public:

  // Creation
  AbstractAssembler(CodeBuffer* code);

  // ensure buf contains all code (call this before using/copying the code)
  void flush();

  void emit_int8(   int8_t  x) { code_section()->emit_int8(   x); }
  void emit_int16(  int16_t x) { code_section()->emit_int16(  x); }
  void emit_int32(  int32_t x) { code_section()->emit_int32(  x); }
  void emit_int64(  int64_t x) { code_section()->emit_int64(  x); }

  void emit_float(  jfloat  x) { code_section()->emit_float(  x); }
  void emit_double( jdouble x) { code_section()->emit_double( x); }
  void emit_address(address x) { code_section()->emit_address(x); }

  // min and max values for signed immediate ranges
  static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1))    ; }
  static int max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }

  // Define some:
  static int min_simm10() { return min_simm(10); }
  static int min_simm13() { return min_simm(13); }
  static int min_simm16() { return min_simm(16); }

  // Test if x is within signed immediate range for nbits
  static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }

  // Define some:
  static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
  static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
  static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
  static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
  static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
  static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
  static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
  static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
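
  // Worked example (added for clarity, not in the original source): for a
  // 16-bit signed immediate,
  //   min_simm(16) == -(1 << 15)     == -32768
  //   max_simm(16) ==  (1 << 15) - 1 ==  32767
  // so is_simm16(x) is true exactly when -32768 <= x <= 32767, i.e. when x can
  // be encoded directly in a 16-bit immediate field.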

  // Accessors
  CodeSection*  code_section() const   { return _code_section; }
  CodeBuffer*   code()         const   { return code_section()->outer(); }
  int           sect()         const   { return code_section()->index(); }
  address       pc()           const   { return code_section()->end();   }
  int           offset()       const   { return code_section()->size();  }
  int           locator()      const   { return CodeBuffer::locator(offset(), sect()); }

  OopRecorder*  oop_recorder() const   { return _oop_recorder; }
  void      set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }

  address       inst_mark() const { return code_section()->mark();       }
  void      set_inst_mark()       {        code_section()->set_mark();   }
  void    clear_inst_mark()       {        code_section()->clear_mark(); }

  // Constants in code
  void relocate(RelocationHolder const& rspec, int format = 0) {
    assert(!pd_check_instruction_mark()
        || inst_mark() == NULL || inst_mark() == code_section()->end(),
        "call relocate() between instructions");
    code_section()->relocate(code_section()->end(), rspec, format);
  }
  void relocate(   relocInfo::relocType rtype, int format = 0) {
    code_section()->relocate(code_section()->end(), rtype, format);
  }

  static int code_fill_byte();         // used to pad out odd-sized code buffers

  // Associate a comment with the current offset.  It will be printed
  // along with the disassembly when printing nmethods.  Currently
  // only supported in the instruction section of the code buffer.
  void block_comment(const char* comment);
  // Copy str to a buffer that has the same lifetime as the CodeBuffer
  const char* code_string(const char* str);

  // Label functions
  void bind(Label& L); // binds an unbound label L to the current code position

  // Move to a different section in the same code buffer.
  void set_code_section(CodeSection* cs);

  // Inform assembler when generating stub code and relocation info
  address    start_a_stub(int required_space);
  void       end_a_stub();
  // Ditto for constants.
  address    start_a_const(int required_space, int required_align = sizeof(double));
  void       end_a_const(CodeSection* cs);  // Pass the codesection to continue in (insts or stubs?).

  // constants support
  //
  // We must remember the code section (insts or stubs) in c1
  // so we can reset to the proper section in end_a_const().
  address long_constant(jlong c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_int64(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address double_constant(jdouble c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_double(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address float_constant(jfloat c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_float(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c, RelocationHolder const& rspec) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      relocate(rspec);
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }
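
  // Illustrative note (not part of the original header): each helper above
  // follows the same pattern -- remember the current CodeSection, switch to the
  // constants area via start_a_const(), emit the value, and switch back with
  // end_a_const().  A caller typically keeps the returned address in order to
  // materialize the constant later, e.g. (hypothetical platform code):
  //
  //   address cp = double_constant(3.14159265358979);  // emitted into the constants area
  //   if (cp == NULL) { /* CodeBuffer overflow; bail out */ }
  //   // ... load the double from 'cp' with a pc-relative or absolute load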

  // Bootstrapping aid to cope with delayed determination of constants.
  // Returns a static address which will eventually contain the constant.
  // The value zero (NULL) stands in for a constant which is still uncomputed.
  // Thus, the eventual value of the constant must not be zero.
  // This is fine, since this is designed for embedding object field
  // offsets in code which must be generated before the object class is loaded.
  // Field offsets are never zero, since an object's header (mark word)
  // is located at offset zero.
  RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0);
  RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0);
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0;
  // Last overloading is platform-dependent; look in assembler_<arch>.cpp.
  static intptr_t* delayed_value_addr(int(*constant_fn)());
  static intptr_t* delayed_value_addr(address(*constant_fn)());
  static void update_delayed_values();
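
  // Sketch of the intended use (the function and register names below are
  // purely illustrative, not from this file): suppose some_field_offset()
  // cannot be evaluated until its class is loaded.  Code generated earlier can
  // still reference it:
  //
  //   RegisterOrConstant off = delayed_value(some_field_offset, tmp_reg);
  //   // 'off' is either the already-known constant or 'tmp_reg' loaded from a
  //   // static cell; update_delayed_values() fills in such cells once the real
  //   // values become computable.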

  // Bang stack to trigger StackOverflowError at a safe location
  // implementation delegates to machine-specific bang_stack_with_offset
  void generate_stack_overflow_check( int frame_size_in_bytes );
  virtual void bang_stack_with_offset(int offset) = 0;

  /**
   * A platform-dependent method to patch a jump instruction that refers
   * to this label.
   *
   * @param branch the location of the instruction to patch
   * @param target the target address the patched instruction should jump to
   */
  void pd_patch_instruction(address branch, address target);

};

#ifdef TARGET_ARCH_x86
# include "assembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "assembler_mips.hpp"
#endif


#endif // SHARE_VM_ASM_ASSEMBLER_HPP
