src/cpu/x86/vm/c1_LinearScan_x86.hpp

Tue, 30 Nov 2010 23:23:40 -0800

author
iveresov
date
Tue, 30 Nov 2010 23:23:40 -0800
changeset 2344
ac637b7220d1
parent 2314
f95d63e2154a
child 6876
710a3c8b516e
permissions
-rw-r--r--

6985015: C1 needs to support compressed oops
Summary: This change implements compressed oops for C1 for x64 and sparc. The changes are mostly on the codegen level, with a few exceptions when we do access things outside of the heap that are uncompressed from the IR. Compressed oops are now also enabled with tiered.
Reviewed-by: twisti, kvn, never, phh

     1 /*
     2  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef CPU_X86_VM_C1_LINEARSCAN_X86_HPP
    26 #define CPU_X86_VM_C1_LINEARSCAN_X86_HPP
    28 inline bool LinearScan::is_processed_reg_num(int reg_num) {
    29 #ifndef _LP64
    30   // rsp and rbp (numbers 6 ancd 7) are ignored
    31   assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
    32   assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
    33   assert(reg_num >= 0, "invalid reg_num");
    34 #else
    35   // rsp and rbp, r10, r15 (numbers [12,15]) are ignored
    36   // r12 (number 11) is conditional on compressed oops.
    37   assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");
    38   assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
    39   assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
    40   assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
    41   assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
    42   assert(reg_num >= 0, "invalid reg_num");
    43 #endif // _LP64
    44   return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
    45 }
    47 inline int LinearScan::num_physical_regs(BasicType type) {
    48   // Intel requires two cpu registers for long,
    49   // but requires only one fpu register for double
    50   if (LP64_ONLY(false &&) type == T_LONG) {
    51     return 2;
    52   }
    53   return 1;
    54 }
    57 inline bool LinearScan::requires_adjacent_regs(BasicType type) {
    58   return false;
    59 }
    61 inline bool LinearScan::is_caller_save(int assigned_reg) {
    62   assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
    63   return true; // no callee-saved registers on Intel
    65 }
    68 inline void LinearScan::pd_add_temps(LIR_Op* op) {
    69   switch (op->code()) {
    70     case lir_tan:
    71     case lir_sin:
    72     case lir_cos: {
    73       // The slow path for these functions may need to save and
    74       // restore all live registers but we don't want to save and
    75       // restore everything all the time, so mark the xmms as being
    76       // killed.  If the slow path were explicit or we could propagate
    77       // live register masks down to the assembly we could do better
    78       // but we don't have any easy way to do that right now.  We
    79       // could also consider not killing all xmm registers if we
    80       // assume that slow paths are uncommon but it's not clear that
    81       // would be a good idea.
    82       if (UseSSE > 0) {
    83 #ifndef PRODUCT
    84         if (TraceLinearScanLevel >= 2) {
    85           tty->print_cr("killing XMMs for trig");
    86         }
    87 #endif
    88         int op_id = op->id();
    89         for (int xmm = 0; xmm < FrameMap::nof_caller_save_xmm_regs; xmm++) {
    90           LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(xmm);
    91           add_temp(reg_num(opr), op_id, noUse, T_ILLEGAL);
    92         }
    93       }
    94       break;
    95     }
    96   }
    97 }
   100 // Implementation of LinearScanWalker
   102 inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
   103   if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
   104     assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
   105     _first_reg = pd_first_byte_reg;
   106     _last_reg = FrameMap::last_byte_reg();
   107     return true;
   108   } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
   109     _first_reg = pd_first_xmm_reg;
   110     _last_reg = pd_last_xmm_reg;
   111     return true;
   112   }
   114   return false;
   115 }
// Rewrites fpu register operands in the LIR so that they refer to
// positions on the x87 fpu register stack, simulating the stack state
// (_sim) as it walks each block.
// NOTE(review): only the declaration is visible here; method bodies are
// presumably in the platform .cpp file — semantics below are hedged
// accordingly.
class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
 private:
  Compilation* _compilation;   // current compilation, used for bailout support
  LinearScan* _allocator;      // the linear scan allocator this pass follows

  LIR_OpVisitState visitor;    // reusable visit state for inspecting LIR ops

  LIR_List* _lir;              // instruction list currently being processed
  int _pos;                    // index of the current op within _lir
  FpuStackSim _sim;            // simulated fpu stack state at the current op
  FpuStackSim _temp_sim;       // scratch simulation (used during merges, presumably)

  bool _debug_information_computed;

  LinearScan*   allocator()                      { return _allocator; }
  Compilation*  compilation() const              { return _compilation; }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // Accessors for the current position within the LIR list being rewritten.
  int pos() { return _pos; }
  void set_pos(int pos) { _pos = pos; }
  LIR_Op* cur_op() { return lir()->instructions_list()->at(pos()); }
  LIR_List* lir() { return _lir; }
  void set_lir(LIR_List* lir) { _lir = lir; }
  FpuStackSim* sim() { return &_sim; }
  FpuStackSim* temp_sim() { return &_temp_sim; }

  // Mapping between an fpu operand and its simulated stack position.
  int fpu_num(LIR_Opr opr);
  int tos_offset(LIR_Opr opr);
  LIR_Opr to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset = false);

  // Helper functions for handling operations
  void insert_op(LIR_Op* op);
  void insert_exchange(int offset);
  void insert_exchange(LIR_Opr opr);
  void insert_free(int offset);
  void insert_free_if_dead(LIR_Opr opr);
  void insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore);
  void insert_copy(LIR_Opr from, LIR_Opr to);
  void do_rename(LIR_Opr from, LIR_Opr to);
  void do_push(LIR_Opr opr);
  void pop_if_last_use(LIR_Op* op, LIR_Opr opr);
  void pop_always(LIR_Op* op, LIR_Opr opr);
  void clear_fpu_stack(LIR_Opr preserve);
  void handle_op1(LIR_Op1* op1);
  void handle_op2(LIR_Op2* op2);
  void handle_opCall(LIR_OpCall* opCall);
  void compute_debug_information(LIR_Op* op);
  void allocate_exception_handler(XHandler* xhandler);
  void allocate_block(BlockBegin* block);

#ifndef PRODUCT
  void check_invalid_lir_op(LIR_Op* op);
#endif

  // Helper functions for merging of fpu stacks
  void merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg);
  void merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot);
  void merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim);
  bool merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot);
  void merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim);
  void merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs);
  bool merge_fpu_stack_with_successors(BlockBegin* block);

 public:
  LIR_Opr to_fpu_stack(LIR_Opr opr); // used by LinearScan for creation of debug information

  FpuStackAllocator(Compilation* compilation, LinearScan* allocator);
  void allocate();
};
   191 #endif // CPU_X86_VM_C1_LINEARSCAN_X86_HPP

mercurial