src/cpu/x86/vm/c1_Runtime1_x86.cpp

author      ysr
date        Thu, 05 Jun 2008 15:57:56 -0700
changeset   777:37f87013dfd8
parent      435:a61af66fc99e
child       797:f8199438385b
permissions -rw-r--r--

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr

     1 /*
     2  * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_c1_Runtime1_x86.cpp.incl"
    29 // Implementation of StubAssembler
    31 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
    32   // setup registers
    33   const Register thread = rdi; // is a callee-saved register (Visual C++ calling conventions)
    34   assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
    35   assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
    36   assert(args_size >= 0, "illegal args_size");
    38   set_num_rt_args(1 + args_size);
    40   // push java thread (becomes first argument of C function)
    41   get_thread(thread);
    42   pushl(thread);
    44   set_last_Java_frame(thread, noreg, rbp, NULL);
    45   // do the call
    46   call(RuntimeAddress(entry));
    47   int call_offset = offset();
    48   // verify callee-saved register
    49 #ifdef ASSERT
    50   guarantee(thread != rax, "change this code");
    51   pushl(rax);
    52   { Label L;
    53     get_thread(rax);
    54     cmpl(thread, rax);
    55     jcc(Assembler::equal, L);
    56     int3();
    57     stop("StubAssembler::call_RT: rdi not callee saved?");
    58     bind(L);
    59   }
    60   popl(rax);
    61 #endif
    62   reset_last_Java_frame(thread, true, false);
    64   // discard thread and arguments
    65   addl(rsp, (1 + args_size)*BytesPerWord);
    67   // check for pending exceptions
    68   { Label L;
    69     cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    70     jcc(Assembler::equal, L);
    71     // exception pending => remove activation and forward to exception handler
    72     movl(rax, Address(thread, Thread::pending_exception_offset()));
    73     // make sure that the vm_results are cleared
    74     if (oop_result1->is_valid()) {
    75       movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    76     }
    77     if (oop_result2->is_valid()) {
    78       movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    79     }
    80     if (frame_size() == no_frame_size) {
    81       leave();
    82       jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    83     } else if (_stub_id == Runtime1::forward_exception_id) {
    84       should_not_reach_here();
    85     } else {
    86       jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    87     }
    88     bind(L);
    89   }
    90   // get oop results if there are any and reset the values in the thread
    91   if (oop_result1->is_valid()) {
    92     movl(oop_result1, Address(thread, JavaThread::vm_result_offset()));
    93     movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    94     verify_oop(oop_result1);
    95   }
    96   if (oop_result2->is_valid()) {
    97     movl(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
    98     movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    99     verify_oop(oop_result2);
   100   }
   101   return call_offset;
   102 }
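       // Note: call_RT pushes the JavaThread* last, so the C-side runtime entry
       // receives it as its first parameter. A sketch of the receiving side,
       // assuming a hypothetical two-argument entry (names are illustrative,
       // not actual runtime signatures):
       //
       //   JRT_ENTRY(void, some_entry(JavaThread* thread, oopDesc* obj, int idx))
       //     // thread was pushed by call_RT; obj/idx by the wrappers below
       //   JRT_END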
   105 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
   106   pushl(arg1);
   107   return call_RT(oop_result1, oop_result2, entry, 1);
   108 }
   111 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
   112   pushl(arg2);
   113   pushl(arg1);
   114   return call_RT(oop_result1, oop_result2, entry, 2);
   115 }
   118 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
   119   pushl(arg3);
   120   pushl(arg2);
   121   pushl(arg1);
   122   return call_RT(oop_result1, oop_result2, entry, 3);
   123 }
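       // The wrappers above push their arguments right to left (C calling
       // convention), so arg1 always ends up immediately after the thread
       // argument that the base call_RT pushes on top.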
   126 // Implementation of StubFrame
   128 class StubFrame: public StackObj {
   129  private:
   130   StubAssembler* _sasm;
   132  public:
   133   StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
   134   void load_argument(int offset_in_words, Register reg);
   136   ~StubFrame();
   137 };
   140 #define __ _sasm->
   142 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
   143   _sasm = sasm;
   144   __ set_info(name, must_gc_arguments);
   145   __ enter();
   146 }
   148 // load parameters that were stored with LIR_Assembler::store_parameter
   149 // Note: offsets for store_parameter and load_argument must match
   150 void StubFrame::load_argument(int offset_in_words, Register reg) {
   151   // rbp, + 0: link
   152   //     + 1: return address
   153   //     + 2: argument with offset 0
   154   //     + 3: argument with offset 1
   155   //     + 4: ...
   157   __ movl(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
   158 }
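       // Worked example: after StubFrame's enter(), load_argument(1, reg) reads
       // Address(rbp, 3 * BytesPerWord) -- the slot that store_parameter wrote
       // at offset 1 (offset 0 lives at rbp + 2 words, above link and return pc).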
   161 StubFrame::~StubFrame() {
   162   __ leave();
   163   __ ret(0);
   164 }
   166 #undef __
   169 // Implementation of Runtime1
   171 #define __ sasm->
   173 const int float_regs_as_doubles_size_in_words = 16;
   174 const int xmm_regs_as_doubles_size_in_words = 16;
   176 // Stack layout for saving/restoring all the registers needed during a runtime
   177 // call (this includes deoptimization).
   178 // Note that users of this frame may well have arguments to some runtime call
   179 // while these values are on the stack. These positions neglect those arguments,
   180 // but the code in save_live_registers will take the argument count into
   181 // account.
   182 //
   183 enum reg_save_layout {
   184   dummy1,
   185   dummy2,
   186   // Two temps to be used as needed by users of save/restore callee registers
   187   temp_2_off,
   188   temp_1_off,
   189   xmm_regs_as_doubles_off,
   190   float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_words,
   191   fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_words,
   192   fpu_state_end_off = fpu_state_off + FPUStateSizeInWords,
   193   marker = fpu_state_end_off,
   194   extra_space_offset,
   195   rdi_off = extra_space_offset,
   196   rsi_off,
   197   rbp_off,
   198   rsp_off,
   199   rbx_off,
   200   rdx_off,
   201   rcx_off,
   202   rax_off,
   203   saved_rbp_off,
   204   return_off,
   205   reg_save_frame_size,  // As noted: neglects any parameters to runtime
   207   // equates
   209   // illegal instruction handler
   210   continue_dest_off = temp_1_off,
   212   // deoptimization equates
   213   fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
   214   xmm0_off = xmm_regs_as_doubles_off,  // slot for java float/double return value
   215   deopt_type = temp_2_off,             // slot for type of deopt in progress
   216   ret_type = temp_1_off                // slot for return type
   217 };
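       // Worked example: save_live_registers pushes num_rt_args extra words on
       // top of this layout, so after the runtime call rax sits in stack slot
       // (rax_off + num_rt_args) -- exactly the offset that generate_oop_map
       // below records via VMRegImpl::stack2reg.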
   221 // Save off registers which might be killed by calls into the runtime.
   222 // Tries to be smart about FP registers.  In particular we separate
   223 // saving and describing the FPU registers for deoptimization since we
   224 // have to save the FPU registers twice if we describe them and on P4
   225 // saving FPU registers which don't contain anything appears
   226 // expensive.  The deopt blob is the only thing which needs to
   227 // describe FPU registers.  In all other cases it should be sufficient
   228 // to simply save their current value.
   230 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
   231                                 bool save_fpu_registers = true) {
   232   int frame_size = reg_save_frame_size + num_rt_args; // args + thread
   233   sasm->set_frame_size(frame_size);
   235   // record saved value locations in an OopMap
   236   // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
   237   OopMap* map = new OopMap(frame_size, 0);
   238   map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
   239   map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
   240   map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
   241   map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
   242   map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
   243   map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
   245   if (save_fpu_registers) {
   246     if (UseSSE < 2) {
   247       int fpu_off = float_regs_as_doubles_off;
   248       for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
   249         VMReg fpu_name_0 = FrameMap::fpu_regname(n);
   250         map->set_callee_saved(VMRegImpl::stack2reg(fpu_off +     num_rt_args), fpu_name_0);
   251         // %%% This is really a waste but we'll keep things as they were for now
   252         if (true) {
   253           map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
   254         }
   255         fpu_off += 2;
   256       }
   257       assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
   258     }
   260     if (UseSSE >= 2) {
   261       int xmm_off = xmm_regs_as_doubles_off;
   262       for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
   263         VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
   264         map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
   265         // %%% This is really a waste but we'll keep things as they were for now
   266         if (true) {
   267           map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
   268         }
   269         xmm_off += 2;
   270       }
   271       assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
   273     } else if (UseSSE == 1) {
   274       int xmm_off = xmm_regs_as_doubles_off;
   275       for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
   276         VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
   277         map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
   278         xmm_off += 2;
   279       }
   280       assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
   281     }
   282   }
   284   return map;
   285 }
   287 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
   288                                    bool save_fpu_registers = true) {
   289   __ block_comment("save_live_registers");
   291   int frame_size = reg_save_frame_size + num_rt_args; // args + thread
   292   // frame_size = round_to(frame_size, 4);
   293   sasm->set_frame_size(frame_size);
   295   __ pushad();         // integer registers
   297   // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
   298   // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
   300   __ subl(rsp, extra_space_offset * wordSize);
   302 #ifdef ASSERT
   303   __ movl(Address(rsp, marker * wordSize), 0xfeedbeef);
   304 #endif
   306   if (save_fpu_registers) {
   307     if (UseSSE < 2) {
   308       // save FPU stack
   309       __ fnsave(Address(rsp, fpu_state_off * wordSize));
   310       __ fwait();
   312 #ifdef ASSERT
   313       Label ok;
   314       __ cmpw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std());
   315       __ jccb(Assembler::equal, ok);
   316       __ stop("corrupted control word detected");
   317       __ bind(ok);
   318 #endif
   320       // Reset the control word to guard against exceptions being unmasked
   321       // since fstp_d can cause FPU stack underflow exceptions.  Write it
   322       // into the on stack copy and then reload that to make sure that the
   323       // current and future values are correct.
   324       __ movw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std());
   325       __ frstor(Address(rsp, fpu_state_off * wordSize));
   327       // Save the FPU registers in de-opt-able form
   328       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord +  0));
   329       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord +  8));
   330       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 16));
   331       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 24));
   332       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 32));
   333       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 40));
   334       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 48));
   335       __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 56));
   336     }
   338     if (UseSSE >= 2) {
   339       // save XMM registers
   340       // XMM registers can contain float or double values, but this is not known here,
   341       // so always save them as doubles.
   342       // note that float values are _not_ converted automatically, so for float values
   343       // the second word contains only garbage data.
   344       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize +  0), xmm0);
   345       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize +  8), xmm1);
   346       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2);
   347       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3);
   348       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4);
   349       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5);
   350       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6);
   351       __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7);
   352     } else if (UseSSE == 1) {
   353       // save XMM registers as float because double not supported without SSE2
   354       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize +  0), xmm0);
   355       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize +  8), xmm1);
   356       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2);
   357       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3);
   358       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4);
   359       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5);
   360       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6);
   361       __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7);
   362     }
   363   }
   365   // FPU stack must be empty now
   366   __ verify_FPU(0, "save_live_registers");
   368   return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
   369 }
   372 static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
   373   if (restore_fpu_registers) {
   374     if (UseSSE >= 2) {
   375       // restore XMM registers
   376       __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize +  0));
   377       __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize +  8));
   378       __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16));
   379       __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24));
   380       __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32));
   381       __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40));
   382       __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48));
   383       __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56));
   384     } else if (UseSSE == 1) {
   385       // restore XMM registers
   386       __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize +  0));
   387       __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize +  8));
   388       __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16));
   389       __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24));
   390       __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32));
   391       __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40));
   392       __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48));
   393       __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56));
   394     }
   396     if (UseSSE < 2) {
   397       __ frstor(Address(rsp, fpu_state_off * wordSize));
   398     } else {
   399       // check that FPU stack is really empty
   400       __ verify_FPU(0, "restore_live_registers");
   401     }
   403   } else {
   404     // check that FPU stack is really empty
   405     __ verify_FPU(0, "restore_live_registers");
   406   }
   408 #ifdef ASSERT
   409   {
   410     Label ok;
   411     __ cmpl(Address(rsp, marker * wordSize), 0xfeedbeef);
   412     __ jcc(Assembler::equal, ok);
   413     __ stop("bad offsets in frame");
   414     __ bind(ok);
   415   }
   416 #endif
   418   __ addl(rsp, extra_space_offset * wordSize);
   419 }
   422 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
   423   __ block_comment("restore_live_registers");
   425   restore_fpu(sasm, restore_fpu_registers);
   426   __ popad();
   427 }
   430 static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
   431   __ block_comment("restore_live_registers_except_rax");
   433   restore_fpu(sasm, restore_fpu_registers);
   435   __ popl(rdi);
   436   __ popl(rsi);
   437   __ popl(rbp);
   438   __ popl(rbx); // skip this value
   439   __ popl(rbx);
   440   __ popl(rdx);
   441   __ popl(rcx);
   442   __ addl(rsp, 4);
   443 }
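       // Note on the pop sequence above: pushad stores rax, rcx, rdx, rbx, rsp,
       // rbp, rsi, rdi (in push order), so the pops run in reverse; rbx is
       // popped twice to discard the saved rsp slot, and the final addl(rsp, 4)
       // skips over rax so the return value from the runtime call survives.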
   446 void Runtime1::initialize_pd() {
   447   // nothing to do
   448 }
   451 // target: the entry point of the method that creates and posts the exception oop
   452 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
   454 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
   455   // preserve all registers
   456   int num_rt_args = has_argument ? 2 : 1;
   457   OopMap* oop_map = save_live_registers(sasm, num_rt_args);
   459   // now all registers are saved and can be used freely
   460   // verify that no old value is used accidentally
   461   __ invalidate_registers(true, true, true, true, true, true);
   463   // registers used by this stub
   464   const Register temp_reg = rbx;
   466   // load argument for exception that is passed as an argument into the stub
   467   if (has_argument) {
   468     __ movl(temp_reg, Address(rbp, 2*BytesPerWord));
   469     __ pushl(temp_reg);
   470   }
   472   int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
   474   OopMapSet* oop_maps = new OopMapSet();
   475   oop_maps->add_gc_map(call_offset, oop_map);
   477   __ stop("should not reach here");
   479   return oop_maps;
   480 }
   483 void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) {
   484   // incoming parameters
   485   const Register exception_oop = rax;
   486   const Register exception_pc = rdx;
   487   // other registers used in this stub
   488   const Register real_return_addr = rbx;
   489   const Register thread = rdi;
   491   __ block_comment("generate_handle_exception");
   493 #ifdef TIERED
   494   // C2 can leave the fpu stack dirty
   495   if (UseSSE < 2 ) {
   496     __ empty_FPU_stack();
   497   }
   498 #endif // TIERED
   500   // verify that only rax, and rdx are valid at this time
   501   __ invalidate_registers(false, true, true, false, true, true);
   502   // verify that rax, contains a valid exception
   503   __ verify_not_null_oop(exception_oop);
   505   // load address of JavaThread object for thread-local data
   506   __ get_thread(thread);
   508 #ifdef ASSERT
   509   // check that fields in JavaThread for exception oop and issuing pc are
   510   // empty before writing to them
   511   Label oop_empty;
   512   __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0);
   513   __ jcc(Assembler::equal, oop_empty);
   514   __ stop("exception oop already set");
   515   __ bind(oop_empty);
   517   Label pc_empty;
   518   __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
   519   __ jcc(Assembler::equal, pc_empty);
   520   __ stop("exception pc already set");
   521   __ bind(pc_empty);
   522 #endif
   524   // save exception oop and issuing pc into JavaThread
   525   // (exception handler will load it from here)
   526   __ movl(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
   527   __ movl(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
   529   // save real return address (pc that called this stub)
   530   __ movl(real_return_addr, Address(rbp, 1*BytesPerWord));
   531   __ movl(Address(rsp, temp_1_off * BytesPerWord), real_return_addr);
   533   // patch throwing pc into return address (has bci & oop map)
   534   __ movl(Address(rbp, 1*BytesPerWord), exception_pc);
   536   // compute the exception handler.
   537   // the exception oop and the throwing pc are read from the fields in JavaThread
   538   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
   539   oop_maps->add_gc_map(call_offset, oop_map);
   541   // rax,: handler address or NULL if no handler exists
   542   //      will be the deopt blob if nmethod was deoptimized while we looked up
   543   //      handler regardless of whether handler existed in the nmethod.
   545   // only rax, is valid at this time, all other registers have been destroyed by the runtime call
   546   __ invalidate_registers(false, true, true, true, true, true);
   548   // Do we have an exception handler in the nmethod?
   549   Label no_handler;
   550   Label done;
   551   __ testl(rax, rax);
   552   __ jcc(Assembler::zero, no_handler);
   554   // exception handler found
   555   // patch the return address -> the stub will directly return to the exception handler
   556   __ movl(Address(rbp, 1*BytesPerWord), rax);
   558   // restore registers
   559   restore_live_registers(sasm, save_fpu_registers);
   561   // return to exception handler
   562   __ leave();
   563   __ ret(0);
   565   __ bind(no_handler);
   566   // no exception handler found in this method, so the exception is
   567   // forwarded to the caller (using the unwind code of the nmethod)
   568   // there is no need to restore the registers
   570   // restore the real return address that was saved before the RT-call
   571   __ movl(real_return_addr, Address(rsp, temp_1_off * BytesPerWord));
   572   __ movl(Address(rbp, 1*BytesPerWord), real_return_addr);
   574   // load address of JavaThread object for thread-local data
   575   __ get_thread(thread);
   576   // restore exception oop into rax, (convention for unwind code)
   577   __ movl(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
   579   // clear exception fields in JavaThread because they are no longer needed
   580   // (fields must be cleared because they are processed by GC otherwise)
   581   __ movl(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
   582   __ movl(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
   584   // pop the stub frame off
   585   __ leave();
   587   generate_unwind_exception(sasm);
   588   __ stop("should not reach here");
   589 }
   592 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
   593   // incoming parameters
   594   const Register exception_oop = rax;
   595   // other registers used in this stub
   596   const Register exception_pc = rdx;
   597   const Register handler_addr = rbx;
   598   const Register thread = rdi;
   600   // verify that only rax, is valid at this time
   601   __ invalidate_registers(false, true, true, true, true, true);
   603 #ifdef ASSERT
   604   // check that fields in JavaThread for exception oop and issuing pc are empty
   605   __ get_thread(thread);
   606   Label oop_empty;
   607   __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0);
   608   __ jcc(Assembler::equal, oop_empty);
   609   __ stop("exception oop must be empty");
   610   __ bind(oop_empty);
   612   Label pc_empty;
   613   __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
   614   __ jcc(Assembler::equal, pc_empty);
   615   __ stop("exception pc must be empty");
   616   __ bind(pc_empty);
   617 #endif
   619   // clear the FPU stack in case any FPU results are left behind
   620   __ empty_FPU_stack();
   622   // leave activation of nmethod
   623   __ leave();
   624   // store return address (is on top of stack after leave)
   625   __ movl(exception_pc, Address(rsp, 0));
   627   __ verify_oop(exception_oop);
   629   // save exception oop from rax, to stack before call
   630   __ pushl(exception_oop);
   632   // search the exception handler address of the caller (using the return address)
   633   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
   634   // rax,: exception handler address of the caller
   636   // only rax, is valid at this time, all other registers have been destroyed by the call
   637   __ invalidate_registers(false, true, true, true, true, true);
   639   // move result of call into correct register
   640   __ movl(handler_addr, rax);
   642   // restore exception oop in rax, (required convention of exception handler)
   643   __ popl(exception_oop);
   645   __ verify_oop(exception_oop);
   647   // get throwing pc (= return address).
   648   // rdx has been destroyed by the call, so it must be set again
   649   // the pop is also necessary to simulate the effect of a ret(0)
   650   __ popl(exception_pc);
   652     // verify that there is really a valid exception in rax,
   653   __ verify_not_null_oop(exception_oop);
   655   // continue at exception handler (return address removed)
   656   // note: do *not* remove arguments when unwinding the
   657   //       activation since the caller assumes having
   658   //       all arguments on the stack when entering the
   659   //       runtime to determine the exception handler
   660   //       (GC happens at call site with arguments!)
   661   // rax,: exception oop
   662   // rdx: throwing pc
   663   // rbx,: exception handler
   664   __ jmp(handler_addr);
   665 }
   668 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   669   // use the maximum number of runtime-arguments here because it is difficult to
   670   // distinguish each RT-Call.
   671   // Note: This number also affects the RT-Call in generate_handle_exception because
   672   //       the oop-map is shared for all calls.
   673   const int num_rt_args = 2;  // thread + dummy
   675   DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
   676   assert(deopt_blob != NULL, "deoptimization blob must have been created");
   678   OopMap* oop_map = save_live_registers(sasm, num_rt_args);
   680   __ pushl(rax); // push dummy
   682   const Register thread = rdi; // is a callee-saved register (Visual C++ calling conventions)
   683   // push java thread (becomes first argument of C function)
   684   __ get_thread(thread);
   685   __ pushl(thread);
   686   __ set_last_Java_frame(thread, noreg, rbp, NULL);
   687   // do the call
   688   __ call(RuntimeAddress(target));
   689   OopMapSet* oop_maps = new OopMapSet();
   690   oop_maps->add_gc_map(__ offset(), oop_map);
   691   // verify callee-saved register
   692 #ifdef ASSERT
   693   guarantee(thread != rax, "change this code");
   694   __ pushl(rax);
   695   { Label L;
   696     __ get_thread(rax);
   697     __ cmpl(thread, rax);
   698     __ jcc(Assembler::equal, L);
   699     __ stop("StubAssembler::call_RT: rdi not callee saved?");
   700     __ bind(L);
   701   }
   702   __ popl(rax);
   703 #endif
   704   __ reset_last_Java_frame(thread, true, false);
   705   __ popl(rcx); // discard thread arg
   706   __ popl(rcx); // discard dummy
   708   // check for pending exceptions
   709   { Label L;
   710     __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
   711     __ jcc(Assembler::equal, L);
   712     // exception pending => remove activation and forward to exception handler
   714     __ testl(rax, rax);                                   // have we deoptimized?
   715     __ jump_cc(Assembler::equal,
   716                RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
   718     // the deopt blob expects exceptions in the special fields of
   719     // JavaThread, so copy and clear pending exception.
   721     // load and clear pending exception
   722     __ movl(rax, Address(thread, Thread::pending_exception_offset()));
   723     __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
   725     // check that there is really a valid exception
   726     __ verify_not_null_oop(rax);
   728     // load throwing pc: this is the return address of the stub
   729     __ movl(rdx, Address(rsp, return_off * BytesPerWord));
   731 #ifdef ASSERT
   732     // check that fields in JavaThread for exception oop and issuing pc are empty
   733     Label oop_empty;
   734     __ cmpoop(Address(thread, JavaThread::exception_oop_offset()), 0);
   735     __ jcc(Assembler::equal, oop_empty);
   736     __ stop("exception oop must be empty");
   737     __ bind(oop_empty);
   739     Label pc_empty;
   740     __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
   741     __ jcc(Assembler::equal, pc_empty);
   742     __ stop("exception pc must be empty");
   743     __ bind(pc_empty);
   744 #endif
   746     // store exception oop and throwing pc to JavaThread
   747     __ movl(Address(thread, JavaThread::exception_oop_offset()), rax);
   748     __ movl(Address(thread, JavaThread::exception_pc_offset()), rdx);
   750     restore_live_registers(sasm);
   752     __ leave();
   753     __ addl(rsp, 4);  // remove return address from stack
   755     // Forward the exception directly to deopt blob. We can blow no
   756     // registers and must leave throwing pc on the stack.  A patch may
   757     // have values live in registers, so we use the entry point that
   758     // expects the exception in tls.
   759     __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));
   761     __ bind(L);
   762   }
   765   // Runtime will return true if the nmethod has been deoptimized during
   766   // the patching process. In that case we must do a deopt reexecute instead.
   768   Label reexecuteEntry, cont;
   770   __ testl(rax, rax);                                   // have we deoptimized?
   771   __ jcc(Assembler::equal, cont);                       // no
   773   // Will reexecute. Proper return address is already on the stack; we just restore
   774   // registers, pop all of our frame but the return address, and jump to the deopt blob.
   775   restore_live_registers(sasm);
   776   __ leave();
   777   __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
   779   __ bind(cont);
   780   restore_live_registers(sasm);
   781   __ leave();
   782   __ ret(0);
   784   return oop_maps;
   786 }
   789 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   791   // for better readability
   792   const bool must_gc_arguments = true;
   793   const bool dont_gc_arguments = false;
   795   // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
   796   bool save_fpu_registers = true;
   798   // stub code & info for the different stubs
   799   OopMapSet* oop_maps = NULL;
   800   switch (id) {
   801     case forward_exception_id:
   802       {
   803         // we're handling an exception in the context of a compiled
   804         // frame.  The registers have been saved in the standard
   805         // places.  Perform an exception lookup in the caller and
   806         // dispatch to the handler if found.  Otherwise unwind and
   807         // dispatch to the callers exception handler.
   809         const Register thread = rdi;
   810         const Register exception_oop = rax;
   811         const Register exception_pc = rdx;
   813         // load pending exception oop into rax,
   814         __ movl(exception_oop, Address(thread, Thread::pending_exception_offset()));
   815         // clear pending exception
   816         __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
   818         // load issuing PC (the return address for this stub) into rdx
   819         __ movl(exception_pc, Address(rbp, 1*BytesPerWord));
   821         // make sure that the vm_results are cleared (may be unnecessary)
   822         __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
   823         __ movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
   825         // verify that there is really a valid exception in rax,
   826         __ verify_not_null_oop(exception_oop);
   829         oop_maps = new OopMapSet();
   830         OopMap* oop_map = generate_oop_map(sasm, 1);
   831         generate_handle_exception(sasm, oop_maps, oop_map);
   832         __ stop("should not reach here");
   833       }
   834       break;
   836     case new_instance_id:
   837     case fast_new_instance_id:
   838     case fast_new_instance_init_check_id:
   839       {
   840         Register klass = rdx; // Incoming
   841         Register obj   = rax; // Result
   843         if (id == new_instance_id) {
   844           __ set_info("new_instance", dont_gc_arguments);
   845         } else if (id == fast_new_instance_id) {
   846           __ set_info("fast new_instance", dont_gc_arguments);
   847         } else {
   848           assert(id == fast_new_instance_init_check_id, "bad StubID");
   849           __ set_info("fast new_instance init check", dont_gc_arguments);
   850         }
   852         if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
   853             UseTLAB && FastTLABRefill) {
   854           Label slow_path;
   855           Register obj_size = rcx;
   856           Register t1       = rbx;
   857           Register t2       = rsi;
   858           assert_different_registers(klass, obj, obj_size, t1, t2);
   860           __ pushl(rdi);
   861           __ pushl(rbx);
   863           if (id == fast_new_instance_init_check_id) {
   864             // make sure the klass is initialized
   865             __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
   866             __ jcc(Assembler::notEqual, slow_path);
   867           }
   869 #ifdef ASSERT
   870           // assert object can be fast path allocated
   871           {
   872             Label ok, not_ok;
   873             __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
   874             __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
   875             __ jcc(Assembler::lessEqual, not_ok);
   876             __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
   877             __ jcc(Assembler::zero, ok);
   878             __ bind(not_ok);
   879             __ stop("assert(can be fast path allocated)");
   880             __ should_not_reach_here();
   881             __ bind(ok);
   882           }
   883 #endif // ASSERT
   885           // if we got here then the TLAB allocation failed, so try
   886           // refilling the TLAB or allocating directly from eden.
   887           Label retry_tlab, try_eden;
   888           __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
   890           __ bind(retry_tlab);
   892           // get the instance size
   893           __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
   894           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
   895           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
   896           __ verify_oop(obj);
   897           __ popl(rbx);
   898           __ popl(rdi);
   899           __ ret(0);
   901           __ bind(try_eden);
   902           // get the instance size
   903           __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
   904           __ eden_allocate(obj, obj_size, 0, t1, slow_path);
   905           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
   906           __ verify_oop(obj);
   907           __ popl(rbx);
   908           __ popl(rdi);
   909           __ ret(0);
   911           __ bind(slow_path);
   912           __ popl(rbx);
   913           __ popl(rdi);
   914         }
   916         __ enter();
   917         OopMap* map = save_live_registers(sasm, 2);
   918         int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
   919         oop_maps = new OopMapSet();
   920         oop_maps->add_gc_map(call_offset, map);
   921         restore_live_registers_except_rax(sasm);
   922         __ verify_oop(obj);
   923         __ leave();
   924         __ ret(0);
   926         // rax,: new instance
   927       }
   929       break;
   931 #ifdef TIERED
   932     case counter_overflow_id:
   933       {
   934         Register bci = rax;
   935         __ enter();
   936         OopMap* map = save_live_registers(sasm, 2);
   937         // Retrieve bci
   938         __ movl(bci, Address(rbp, 2*BytesPerWord));
   939         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
   940         oop_maps = new OopMapSet();
   941         oop_maps->add_gc_map(call_offset, map);
   942         restore_live_registers(sasm);
   943         __ leave();
   944         __ ret(0);
   945       }
   946       break;
   947 #endif // TIERED
   949     case new_type_array_id:
   950     case new_object_array_id:
   951       {
   952         Register length   = rbx; // Incoming
   953         Register klass    = rdx; // Incoming
   954         Register obj      = rax; // Result
   956         if (id == new_type_array_id) {
   957           __ set_info("new_type_array", dont_gc_arguments);
   958         } else {
   959           __ set_info("new_object_array", dont_gc_arguments);
   960         }
   962 #ifdef ASSERT
   963         // assert object type is really an array of the proper kind
   964         {
   965           Label ok;
   966           Register t0 = obj;
   967           __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
   968           __ sarl(t0, Klass::_lh_array_tag_shift);
   969           int tag = ((id == new_type_array_id)
   970                      ? Klass::_lh_array_tag_type_value
   971                      : Klass::_lh_array_tag_obj_value);
   972           __ cmpl(t0, tag);
   973           __ jcc(Assembler::equal, ok);
   974           __ stop("assert(is an array klass)");
   975           __ should_not_reach_here();
   976           __ bind(ok);
   977         }
   978 #endif // ASSERT
   980         if (UseTLAB && FastTLABRefill) {
   981           Register arr_size = rsi;
   982           Register t1       = rcx;  // must be rcx for use as shift count
   983           Register t2       = rdi;
   984           Label slow_path;
   985           assert_different_registers(length, klass, obj, arr_size, t1, t2);
   987           // check that array length is small enough for fast path.
   988           __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
   989           __ jcc(Assembler::above, slow_path);
   991           // if we got here then the TLAB allocation failed, so try
   992           // refilling the TLAB or allocating directly from eden.
   993           Label retry_tlab, try_eden;
   994           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx
   996           __ bind(retry_tlab);
   998           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
   999           __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
  1000           __ movl(arr_size, length);
  1001           assert(t1 == rcx, "fixed register usage");
  1002           __ shll(arr_size /* by t1=rcx, mod 32 */);
  1003           __ shrl(t1, Klass::_lh_header_size_shift);
  1004           __ andl(t1, Klass::_lh_header_size_mask);
  1005           __ addl(arr_size, t1);
  1006           __ addl(arr_size, MinObjAlignmentInBytesMask); // align up
  1007           __ andl(arr_size, ~MinObjAlignmentInBytesMask);
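                 // Worked example (illustrative, assuming the usual layout_helper
                 // encoding): for new int[10], (layout_helper & 0x1F) == 2 and the
                 // header size is 12 bytes, so arr_size = (10 << 2) + 12, rounded
                 // up to MinObjAlignmentInBytes, i.e. 56 bytes on this 32-bit port.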
  1009           __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
  1011           __ initialize_header(obj, klass, length, t1, t2);
  1012           __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
  1013           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
  1014           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
  1015           __ andl(t1, Klass::_lh_header_size_mask);
  1016           __ subl(arr_size, t1);  // body length
  1017           __ addl(t1, obj);       // body start
  1018           __ initialize_body(t1, arr_size, 0, t2);
  1019           __ verify_oop(obj);
  1020           __ ret(0);
  1022           __ bind(try_eden);
  1023           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
  1024           __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
  1025           __ movl(arr_size, length);
  1026           assert(t1 == rcx, "fixed register usage");
  1027           __ shll(arr_size /* by t1=rcx, mod 32 */);
  1028           __ shrl(t1, Klass::_lh_header_size_shift);
  1029           __ andl(t1, Klass::_lh_header_size_mask);
  1030           __ addl(arr_size, t1);
  1031           __ addl(arr_size, MinObjAlignmentInBytesMask); // align up
  1032           __ andl(arr_size, ~MinObjAlignmentInBytesMask);
  1034           __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
  1036           __ initialize_header(obj, klass, length, t1, t2);
  1037           __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
  1038           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
  1039           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
  1040           __ andl(t1, Klass::_lh_header_size_mask);
  1041           __ subl(arr_size, t1);  // body length
  1042           __ addl(t1, obj);       // body start
  1043           __ initialize_body(t1, arr_size, 0, t2);
  1044           __ verify_oop(obj);
  1045           __ ret(0);
  1047           __ bind(slow_path);
  1048         }
  1050         __ enter();
  1051         OopMap* map = save_live_registers(sasm, 3);
  1052         int call_offset;
  1053         if (id == new_type_array_id) {
  1054           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
  1055         } else {
  1056           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
  1057         }
  1059         oop_maps = new OopMapSet();
  1060         oop_maps->add_gc_map(call_offset, map);
  1061         restore_live_registers_except_rax(sasm);
  1063         __ verify_oop(obj);
  1064         __ leave();
  1065         __ ret(0);
  1067         // rax,: new array
  1068       }
  1069       break;
  1071     case new_multi_array_id:
  1072       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
  1073         // rax,: klass
  1074         // rbx,: rank
  1075         // rcx: address of 1st dimension
  1076         OopMap* map = save_live_registers(sasm, 4);
  1077         int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
  1079         oop_maps = new OopMapSet();
  1080         oop_maps->add_gc_map(call_offset, map);
  1081         restore_live_registers_except_rax(sasm);
  1083         // rax,: new multi array
  1084         __ verify_oop(rax);
  1085       }
  1086       break;
  1088     case register_finalizer_id:
  1089       {
  1090         __ set_info("register_finalizer", dont_gc_arguments);
  1092         // The object is passed on the stack and we haven't pushed a
  1093         // frame yet, so it's one word away from the top of the stack.
  1094         __ movl(rax, Address(rsp, 1 * BytesPerWord));
  1095         __ verify_oop(rax);
  1097         // load the klass and check the has-finalizer flag
  1098         Label register_finalizer;
  1099         Register t = rsi;
  1100         __ movl(t, Address(rax, oopDesc::klass_offset_in_bytes()));
  1101         __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
  1102         __ testl(t, JVM_ACC_HAS_FINALIZER);
  1103         __ jcc(Assembler::notZero, register_finalizer);
  1104         __ ret(0);
  1106         __ bind(register_finalizer);
  1107         __ enter();
  1108         OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
  1109         int call_offset = __ call_RT(noreg, noreg,
  1110                                      CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
  1111         oop_maps = new OopMapSet();
  1112         oop_maps->add_gc_map(call_offset, oop_map);
  1114         // Now restore all the live registers
  1115         restore_live_registers(sasm);
  1117         __ leave();
  1118         __ ret(0);
  1119       }
  1120       break;
  1122     case throw_range_check_failed_id:
  1123       { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
  1124         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
  1125       }
  1126       break;
  1128     case throw_index_exception_id:
  1129       { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
  1130         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
  1131       }
  1132       break;
  1134     case throw_div0_exception_id:
  1135       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
  1136         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
  1137       }
  1138       break;
  1140     case throw_null_pointer_exception_id:
  1141       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
  1142         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
  1143       }
  1144       break;
  1146     case handle_exception_nofpu_id:
  1147       save_fpu_registers = false;
  1148       // fall through
  1149     case handle_exception_id:
  1150       { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
  1151         oop_maps = new OopMapSet();
  1152         OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
  1153         generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers);
  1154       }
  1155       break;
  1157     case unwind_exception_id:
  1158       { __ set_info("unwind_exception", dont_gc_arguments);
  1159         // note: no stubframe since we are about to leave the current
  1160         //       activation and we are calling a leaf VM function only.
  1161         generate_unwind_exception(sasm);
  1162       }
  1163       break;
  1165     case throw_array_store_exception_id:
  1166       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
  1167         // tos + 0: link
  1168         //     + 1: return address
  1169         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false);
  1170       }
  1171       break;
  1173     case throw_class_cast_exception_id:
  1174       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
  1175         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
  1176       }
  1177       break;
  1179     case throw_incompatible_class_change_error_id:
  1180       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
  1181         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
  1182       }
  1183       break;
  1185     case slow_subtype_check_id:
  1186       {
  1187         enum layout {
  1188           rax_off,
  1189           rcx_off,
  1190           rsi_off,
  1191           rdi_off,
  1192           saved_rbp_off,
  1193           return_off,
  1194           sub_off,
  1195           super_off,
  1196           framesize
  1197         };
  1199         __ set_info("slow_subtype_check", dont_gc_arguments);
  1200         __ pushl(rdi);
  1201         __ pushl(rsi);
  1202         __ pushl(rcx);
  1203         __ pushl(rax);
  1204         __ movl(rsi, Address(rsp, (super_off - 1) * BytesPerWord)); // super
  1205         __ movl(rax, Address(rsp, (sub_off   - 1) * BytesPerWord)); // sub
  1207         __ movl(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
  1208         __ movl(rcx,Address(rdi,arrayOopDesc::length_offset_in_bytes()));
  1209         __ addl(rdi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
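               // rdi now points at the first element of the secondary-supers array
               // and rcx holds its length; repne_scan compares the klass in rax
               // against successive words until it hits a match or rcx reaches zero.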
  1211         Label miss;
  1212         __ repne_scan();
  1213         __ jcc(Assembler::notEqual, miss);
  1214         __ movl(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
  1215         __ movl(Address(rsp, (super_off   - 1) * BytesPerWord), 1); // result
  1216         __ popl(rax);
  1217         __ popl(rcx);
  1218         __ popl(rsi);
  1219         __ popl(rdi);
  1220         __ ret(0);
  1222         __ bind(miss);
  1223         __ movl(Address(rsp, (super_off   - 1) * BytesPerWord), 0); // result
  1224         __ popl(rax);
  1225         __ popl(rcx);
  1226         __ popl(rsi);
  1227         __ popl(rdi);
  1228         __ ret(0);
  1229       }
  1230       break;
  1232     case monitorenter_nofpu_id:
  1233       save_fpu_registers = false;
  1234       // fall through
  1235     case monitorenter_id:
  1236       {
  1237         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
  1238         OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
  1240         f.load_argument(1, rax); // rax,: object
  1241         f.load_argument(0, rbx); // rbx,: lock address
  1243         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
  1245         oop_maps = new OopMapSet();
  1246         oop_maps->add_gc_map(call_offset, map);
  1247         restore_live_registers(sasm, save_fpu_registers);
  1248       }
  1249       break;
  1251     case monitorexit_nofpu_id:
  1252       save_fpu_registers = false;
  1253       // fall through
  1254     case monitorexit_id:
  1255       {
  1256         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
  1257         OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
  1259         f.load_argument(0, rax); // rax,: lock address
  1261         // note: really a leaf routine but must set up last java sp
  1262         //       => use call_RT for now (speed can be improved by
  1263         //       doing last java sp setup manually)
  1264         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);
  1266         oop_maps = new OopMapSet();
  1267         oop_maps->add_gc_map(call_offset, map);
  1268         restore_live_registers(sasm, save_fpu_registers);
  1270       }
  1271       break;
  1273     case access_field_patching_id:
  1274       { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
  1275         // we should set up register map
  1276         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
  1277       }
  1278       break;
  1280     case load_klass_patching_id:
  1281       { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
  1282         // we should set up register map
  1283         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
  1284       }
  1285       break;
  1287     case jvmti_exception_throw_id:
  1288       { // rax,: exception oop
  1289         StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
  1290         // Preserve all registers across this potentially blocking call
  1291         const int num_rt_args = 2;  // thread, exception oop
  1292         OopMap* map = save_live_registers(sasm, num_rt_args);
  1293         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
  1294         oop_maps = new OopMapSet();
  1295         oop_maps->add_gc_map(call_offset, map);
  1296         restore_live_registers(sasm);
  1297       }
  1298       break;
  1300     case dtrace_object_alloc_id:
  1301       { // rax,: object
  1302         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
  1303         // we can't gc here so skip the oopmap but make sure that all
  1304         // the live registers get saved.
  1305         save_live_registers(sasm, 1);
  1307         __ pushl(rax);
  1308         __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
  1309         __ popl(rax);
  1311         restore_live_registers(sasm);
  1312       }
  1313       break;
  1315     case fpu2long_stub_id:
  1316       {
  1317         // rax, and rdx are destroyed, but should be free since the result is returned there
  1318         // preserve rsi, rcx
  1319         __ pushl(rsi);
  1320         __ pushl(rcx);
  1322         // check for NaN
  1323         Label return0, do_return, return_min_jlong, do_convert;
  1325         Address value_high_word(rsp, 8);
  1326         Address value_low_word(rsp, 4);
  1327         Address result_high_word(rsp, 16);
  1328         Address result_low_word(rsp, 12);
  1330         __ subl(rsp, 20);
  1331         __ fst_d(value_low_word);
  1332         __ movl(rax, value_high_word);
  1333         __ andl(rax, 0x7ff00000);
  1334         __ cmpl(rax, 0x7ff00000);
  1335         __ jcc(Assembler::notEqual, do_convert);
  1336         __ movl(rax, value_high_word);
  1337         __ andl(rax, 0xfffff);
  1338         __ orl(rax, value_low_word);
  1339         __ jcc(Assembler::notZero, return0);
  1341         __ bind(do_convert);
  1342         __ fnstcw(Address(rsp, 0));
  1343         __ movzxw(rax, Address(rsp, 0));
  1344         __ orl(rax, 0xc00);
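               // 0xc00 sets the rounding-control bits (RC = 11b, round toward
               // zero) so the fistp_d below truncates, matching Java's
               // float/double -> long conversion semantics.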
  1345         __ movw(Address(rsp, 2), rax);
  1346         __ fldcw(Address(rsp, 2));
  1347         __ fwait();
  1348         __ fistp_d(result_low_word);
  1349         __ fldcw(Address(rsp, 0));
  1350         __ fwait();
  1351         __ movl(rax, result_low_word);
  1352         __ movl(rdx, result_high_word);
  1353         __ movl(rcx, rax);
  1354         // What the heck is the point of the next instruction???
  1355         __ xorl(rcx, 0x0);
  1356         __ movl(rsi, 0x80000000);
  1357         __ xorl(rsi, rdx);
  1358         __ orl(rcx, rsi);
  1359         __ jcc(Assembler::notEqual, do_return);
  1360         __ fldz();
  1361         __ fcomp_d(value_low_word);
  1362         __ fnstsw_ax();
  1363         __ sahf();
  1364         __ jcc(Assembler::above, return_min_jlong);
  1365         // return max_jlong
  1366         __ movl(rdx, 0x7fffffff);
  1367         __ movl(rax, 0xffffffff);
  1368         __ jmp(do_return);
  1370         __ bind(return_min_jlong);
  1371         __ movl(rdx, 0x80000000);
  1372         __ xorl(rax, rax);
  1373         __ jmp(do_return);
  1375         __ bind(return0);
  1376         __ fpop();
  1377         __ xorl(rdx,rdx);
  1378         __ xorl(rax,rax);
  1380         __ bind(do_return);
  1381         __ addl(rsp, 20);
  1382         __ popl(rcx);
  1383         __ popl(rsi);
  1384         __ ret(0);
  1385       }
  1386       break;
  1388 #ifndef SERIALGC
  1389     case g1_pre_barrier_slow_id:
  1390       {
  1391         StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
  1392         // arg0 : previous value of memory
  1394         BarrierSet* bs = Universe::heap()->barrier_set();
  1395         if (bs->kind() != BarrierSet::G1SATBCTLogging) {
  1396           __ movl(rax, (int)id);
  1397           __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
  1398           __ should_not_reach_here();
  1399           break;
  1400         }
  1402         __ pushl(rax);
  1403         __ pushl(rdx);
  1405         const Register pre_val = rax;
  1406         const Register thread = rax;
  1407         const Register tmp = rdx;
  1409         __ get_thread(thread);
  1411         Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
  1412                                              PtrQueue::byte_offset_of_active()));
  1414         Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
  1415                                              PtrQueue::byte_offset_of_index()));
  1416         Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
  1417                                         PtrQueue::byte_offset_of_buf()));
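               // A C-like sketch of the fast path below (field names follow the
               // PtrQueue accessors used above; illustrative only):
               //   if (index == 0) runtime(pre_val, thread);   // buffer is full
               //   else { index -= wordSize; buf[index / wordSize] = pre_val; }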
  1420         Label done;
  1421         Label runtime;
  1423         // Can we store original value in the thread's buffer?
  1425         __ cmpl(queue_index, 0);
  1426         __ jcc(Assembler::equal, runtime);
  1427         __ subl(queue_index, wordSize);
  1428         __ movl(tmp, buffer);
  1429         __ addl(tmp, queue_index);
  1430         // prev_val (rax)
  1431         f.load_argument(0, pre_val);
  1432         __ movl(Address(tmp, 0), pre_val);
  1433         __ jmp(done);
  1435         __ bind(runtime);
  1436         // load the pre-value
  1437         __ pushl(rcx);
  1438         f.load_argument(0, rcx);
  1439         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
  1440         __ popl(rcx);
  1442         __ bind(done);
  1443         __ popl(rdx);
  1444         __ popl(rax);
  1445       }
  1446       break;
  1448     case g1_post_barrier_slow_id:
  1449       {
  1450         StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
  1453         // arg0: store_address
  1454         Address store_addr(rbp, 2*BytesPerWord);
  1456         BarrierSet* bs = Universe::heap()->barrier_set();
  1457         CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  1458         Label done;
  1459         Label runtime;
  1461         // At this point we know new_value is non-NULL and the store crosses region boundaries.
  1462         // Must check to see if card is already dirty
  1464         const Register card_index = rdx;
  1466         const Register thread = rax;
  1467         Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
  1468                                              PtrQueue::byte_offset_of_index()));
  1469         Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
  1470                                         PtrQueue::byte_offset_of_buf()));
  1472         __ pushl(rax);
  1473         __ pushl(rdx);
  1475         __ movl(card_index, store_addr);
  1476         __ get_thread(rax);
  1477         __ shrl(card_index, CardTableModRefBS::card_shift);
  1478         assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  1480         ExternalAddress cardtable((address)ct->byte_map_base);
  1481         Address index(noreg, card_index, Address::times_1);
  1482         const Register card_addr = rdx;
  1483         __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
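               // Worked example: with the usual card_shift of 9 (512-byte cards),
               // card_addr = byte_map_base + (store_addr >> 9); a zero byte there
               // means the card is already dirty, so the barrier can exit early.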
  1484         __ cmpb(Address(card_addr, 0), 0);
  1485         __ jcc(Assembler::equal, done);
  1487         // storing region crossing non-NULL, card is clean.
  1488         // dirty card and log.
  1490         __ movb(Address(card_addr, 0), 0);
  1492         __ cmpl(queue_index, 0);
  1493         __ jcc(Assembler::equal, runtime);
  1494         __ subl(queue_index, wordSize);
  1496         const Register buffer_addr = rbx;
  1497         __ pushl(rbx);
  1499         __ movl(buffer_addr, buffer);
  1500         __ addl(buffer_addr, queue_index);
  1501         __ movl(Address(buffer_addr, 0), card_addr);
  1502         __ popl(rbx);
  1503         __ jmp(done);
  1505         __ bind(runtime);
  1506         __ pushl(rcx);
  1507         __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  1508         __ popl(rcx);
  1510         __ bind(done);
  1511         __ popl(rdx);
  1512         __ popl(rax);
  1514       }
  1515       break;
  1516 #endif // !SERIALGC
  1518     default:
  1519       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
  1520         __ movl(rax, (int)id);
  1521         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
  1522         __ should_not_reach_here();
  1523       }
  1524       break;
  1525   }
  1526   return oop_maps;
  1527 }
  1529 #undef __
