src/cpu/x86/vm/methodHandles_x86.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,629 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "asm/macroAssembler.hpp"
    1.30 +#include "interpreter/interpreter.hpp"
    1.31 +#include "interpreter/interpreterRuntime.hpp"
    1.32 +#include "memory/allocation.inline.hpp"
    1.33 +#include "prims/methodHandles.hpp"
    1.34 +
    1.35 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    1.36 +
    1.37 +#define __ _masm->
    1.38 +
    1.39 +#ifdef PRODUCT
    1.40 +#define BLOCK_COMMENT(str) /* nothing */
    1.41 +#define STOP(error) stop(error)
    1.42 +#else
    1.43 +#define BLOCK_COMMENT(str) __ block_comment(str)
    1.44 +#define STOP(error) block_comment(error); __ stop(error)
    1.45 +#endif
    1.46 +
    1.47 +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    1.48 +
    1.49 +void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
    1.50 +  if (VerifyMethodHandles)
    1.51 +    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
    1.52 +                 "MH argument is a Class");
    1.53 +  __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
    1.54 +}
    1.55 +
    1.56 +#ifdef ASSERT
    1.57 +static int check_nonzero(const char* xname, int x) {
    1.58 +  assert(x != 0, err_msg("%s should be nonzero", xname));
    1.59 +  return x;
    1.60 +}
    1.61 +#define NONZERO(x) check_nonzero(#x, x)
    1.62 +#else //ASSERT
    1.63 +#define NONZERO(x) (x)
    1.64 +#endif //ASSERT
    1.65 +
    1.66 +#ifdef ASSERT
    1.67 +void MethodHandles::verify_klass(MacroAssembler* _masm,
    1.68 +                                 Register obj, SystemDictionary::WKID klass_id,
    1.69 +                                 const char* error_message) {
    1.70 +  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
    1.71 +  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
    1.72 +  Register temp = rdi;
    1.73 +  Register temp2 = noreg;
    1.74 +  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
    1.75 +  Label L_ok, L_bad;
    1.76 +  BLOCK_COMMENT("verify_klass {");
    1.77 +  __ verify_oop(obj);
    1.78 +  __ testptr(obj, obj);
    1.79 +  __ jcc(Assembler::zero, L_bad);
    1.80 +  __ push(temp); if (temp2 != noreg)  __ push(temp2);
    1.81 +#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
    1.82 +  __ load_klass(temp, obj);
    1.83 +  __ cmpptr(temp, ExternalAddress((address) klass_addr));
    1.84 +  __ jcc(Assembler::equal, L_ok);
    1.85 +  intptr_t super_check_offset = klass->super_check_offset();
    1.86 +  __ movptr(temp, Address(temp, super_check_offset));
    1.87 +  __ cmpptr(temp, ExternalAddress((address) klass_addr));
    1.88 +  __ jcc(Assembler::equal, L_ok);
    1.89 +  UNPUSH;
    1.90 +  __ bind(L_bad);
    1.91 +  __ STOP(error_message);
    1.92 +  __ BIND(L_ok);
    1.93 +  UNPUSH;
    1.94 +  BLOCK_COMMENT("} verify_klass");
    1.95 +}
    1.96 +
    1.97 +void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
    1.98 +  Label L;
    1.99 +  BLOCK_COMMENT("verify_ref_kind {");
   1.100 +  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
   1.101 +  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
   1.102 +  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
   1.103 +  __ cmpl(temp, ref_kind);
   1.104 +  __ jcc(Assembler::equal, L);
   1.105 +  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
   1.106 +    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
   1.107 +    if (ref_kind == JVM_REF_invokeVirtual ||
   1.108 +        ref_kind == JVM_REF_invokeSpecial)
   1.109 +      // could do this for all ref_kinds, but would explode assembly code size
   1.110 +      trace_method_handle(_masm, buf);
   1.111 +    __ STOP(buf);
   1.112 +  }
   1.113 +  BLOCK_COMMENT("} verify_ref_kind");
   1.114 +  __ bind(L);
   1.115 +}
   1.116 +
   1.117 +#endif //ASSERT
   1.118 +
   1.119 +void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
   1.120 +                                            bool for_compiler_entry) {
   1.121 +  assert(method == rbx, "interpreter calling convention");
   1.122 +
   1.123 +   Label L_no_such_method;
   1.124 +   __ testptr(rbx, rbx);
   1.125 +   __ jcc(Assembler::zero, L_no_such_method);
   1.126 +
   1.127 +  __ verify_method_ptr(method);
   1.128 +
   1.129 +  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
   1.130 +    Label run_compiled_code;
   1.131 +    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
   1.132 +    // compiled code in threads for which the event is enabled.  Check here for
   1.133 +    // interp_only_mode if these events CAN be enabled.
   1.134 +#ifdef _LP64
   1.135 +    Register rthread = r15_thread;
   1.136 +#else
   1.137 +    Register rthread = temp;
   1.138 +    __ get_thread(rthread);
   1.139 +#endif
   1.140 +    // interp_only is an int, on little endian it is sufficient to test the byte only
   1.141 +    // Is a cmpl faster?
   1.142 +    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
   1.143 +    __ jccb(Assembler::zero, run_compiled_code);
   1.144 +    __ jmp(Address(method, Method::interpreter_entry_offset()));
   1.145 +    __ BIND(run_compiled_code);
   1.146 +  }
   1.147 +
   1.148 +  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
   1.149 +                                                     Method::from_interpreted_offset();
   1.150 +  __ jmp(Address(method, entry_offset));
   1.151 +
   1.152 +  __ bind(L_no_such_method);
   1.153 +  __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
   1.154 +}
   1.155 +
   1.156 +void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
   1.157 +                                        Register recv, Register method_temp,
   1.158 +                                        Register temp2,
   1.159 +                                        bool for_compiler_entry) {
   1.160 +  BLOCK_COMMENT("jump_to_lambda_form {");
   1.161 +  // This is the initial entry point of a lazy method handle.
   1.162 +  // After type checking, it picks up the invoker from the LambdaForm.
   1.163 +  assert_different_registers(recv, method_temp, temp2);
   1.164 +  assert(recv != noreg, "required register");
   1.165 +  assert(method_temp == rbx, "required register for loading method");
   1.166 +
   1.167 +  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });
   1.168 +
   1.169 +  // Load the invoker, as MH -> MH.form -> LF.vmentry
   1.170 +  __ verify_oop(recv);
   1.171 +  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
   1.172 +  __ verify_oop(method_temp);
   1.173 +  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
   1.174 +  __ verify_oop(method_temp);
   1.175 +  // the following assumes that a Method* is normally compressed in the vmtarget field:
   1.176 +  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
   1.177 +
   1.178 +  if (VerifyMethodHandles && !for_compiler_entry) {
   1.179 +    // make sure recv is already on stack
   1.180 +    __ movptr(temp2, Address(method_temp, Method::const_offset()));
   1.181 +    __ load_sized_value(temp2,
   1.182 +                        Address(temp2, ConstMethod::size_of_parameters_offset()),
   1.183 +                        sizeof(u2), /*is_signed*/ false);
   1.184 +    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
   1.185 +    Label L;
   1.186 +    __ cmpptr(recv, __ argument_address(temp2, -1));
   1.187 +    __ jcc(Assembler::equal, L);
   1.188 +    __ movptr(rax, __ argument_address(temp2, -1));
   1.189 +    __ STOP("receiver not on stack");
   1.190 +    __ BIND(L);
   1.191 +  }
   1.192 +
   1.193 +  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
   1.194 +  BLOCK_COMMENT("} jump_to_lambda_form");
   1.195 +}
   1.196 +
   1.197 +
   1.198 +// Code generation
   1.199 +address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
   1.200 +                                                                vmIntrinsics::ID iid) {
   1.201 +  const bool not_for_compiler_entry = false;  // this is the interpreter entry
   1.202 +  assert(is_signature_polymorphic(iid), "expected invoke iid");
   1.203 +  if (iid == vmIntrinsics::_invokeGeneric ||
   1.204 +      iid == vmIntrinsics::_compiledLambdaForm) {
   1.205 +    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
   1.206 +    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
   1.207 +    // They all allow an appendix argument.
   1.208 +    __ hlt();           // empty stubs make SG sick
   1.209 +    return NULL;
   1.210 +  }
   1.211 +
   1.212 +  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
   1.213 +  // rbx: Method*
   1.214 +  // rdx: argument locator (parameter slot count, added to rsp)
   1.215 +  // rcx: used as temp to hold mh or receiver
   1.216 +  // rax, rdi: garbage temps, blown away
   1.217 +  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
   1.218 +  Register rax_temp   = rax;
   1.219 +  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
   1.220 +  Register rbx_method = rbx;   // eventual target of this invocation
   1.221 +
   1.222 +  // here's where control starts out:
   1.223 +  __ align(CodeEntryAlignment);
   1.224 +  address entry_point = __ pc();
   1.225 +
   1.226 +  if (VerifyMethodHandles) {
   1.227 +    Label L;
   1.228 +    BLOCK_COMMENT("verify_intrinsic_id {");
   1.229 +    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
   1.230 +    __ jcc(Assembler::equal, L);
   1.231 +    if (iid == vmIntrinsics::_linkToVirtual ||
   1.232 +        iid == vmIntrinsics::_linkToSpecial) {
   1.233 +      // could do this for all kinds, but would explode assembly code size
   1.234 +      trace_method_handle(_masm, "bad Method*::intrinsic_id");
   1.235 +    }
   1.236 +    __ STOP("bad Method*::intrinsic_id");
   1.237 +    __ bind(L);
   1.238 +    BLOCK_COMMENT("} verify_intrinsic_id");
   1.239 +  }
   1.240 +
   1.241 +  // First task:  Find out how big the argument list is.
   1.242 +  Address rdx_first_arg_addr;
   1.243 +  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
   1.244 +  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
   1.245 +  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
   1.246 +    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
   1.247 +    __ load_sized_value(rdx_argp,
   1.248 +                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
   1.249 +                        sizeof(u2), /*is_signed*/ false);
   1.250 +    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
   1.251 +    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
   1.252 +  } else {
   1.253 +    DEBUG_ONLY(rdx_argp = noreg);
   1.254 +  }
   1.255 +
   1.256 +  if (!is_signature_polymorphic_static(iid)) {
   1.257 +    __ movptr(rcx_mh, rdx_first_arg_addr);
   1.258 +    DEBUG_ONLY(rdx_argp = noreg);
   1.259 +  }
   1.260 +
   1.261 +  // rdx_first_arg_addr is live!
   1.262 +
   1.263 +  trace_method_handle_interpreter_entry(_masm, iid);
   1.264 +
   1.265 +  if (iid == vmIntrinsics::_invokeBasic) {
   1.266 +    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);
   1.267 +
   1.268 +  } else {
   1.269 +    // Adjust argument list by popping the trailing MemberName argument.
   1.270 +    Register rcx_recv = noreg;
   1.271 +    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
   1.272 +      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
   1.273 +      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
   1.274 +    }
   1.275 +    DEBUG_ONLY(rdx_argp = noreg);
   1.276 +    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
   1.277 +    __ pop(rax_temp);           // return address
   1.278 +    __ pop(rbx_member);         // extract last argument
   1.279 +    __ push(rax_temp);          // re-push return address
   1.280 +    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
   1.281 +  }
   1.282 +
   1.283 +  return entry_point;
   1.284 +}
   1.285 +
   1.286 +void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
   1.287 +                                                    vmIntrinsics::ID iid,
   1.288 +                                                    Register receiver_reg,
   1.289 +                                                    Register member_reg,
   1.290 +                                                    bool for_compiler_entry) {
   1.291 +  assert(is_signature_polymorphic(iid), "expected invoke iid");
   1.292 +  Register rbx_method = rbx;   // eventual target of this invocation
   1.293 +  // temps used in this code are not used in *either* compiled or interpreted calling sequences
   1.294 +#ifdef _LP64
   1.295 +  Register temp1 = rscratch1;
   1.296 +  Register temp2 = rscratch2;
   1.297 +  Register temp3 = rax;
   1.298 +  if (for_compiler_entry) {
   1.299 +    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
   1.300 +    assert_different_registers(temp1,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
   1.301 +    assert_different_registers(temp2,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
   1.302 +    assert_different_registers(temp3,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
   1.303 +  }
   1.304 +#else
   1.305 +  Register temp1 = (for_compiler_entry ? rsi : rdx);
   1.306 +  Register temp2 = rdi;
   1.307 +  Register temp3 = rax;
   1.308 +  if (for_compiler_entry) {
   1.309 +    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
   1.310 +    assert_different_registers(temp1,        rcx, rdx);
   1.311 +    assert_different_registers(temp2,        rcx, rdx);
   1.312 +    assert_different_registers(temp3,        rcx, rdx);
   1.313 +  }
   1.314 +#endif
   1.315 +  else {
   1.316 +    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
   1.317 +  }
   1.318 +  assert_different_registers(temp1, temp2, temp3, receiver_reg);
   1.319 +  assert_different_registers(temp1, temp2, temp3, member_reg);
   1.320 +
   1.321 +  if (iid == vmIntrinsics::_invokeBasic) {
   1.322 +    // indirect through MH.form.vmentry.vmtarget
   1.323 +    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);
   1.324 +
   1.325 +  } else {
   1.326 +    // The method is a member invoker used by direct method handles.
   1.327 +    if (VerifyMethodHandles) {
   1.328 +      // make sure the trailing argument really is a MemberName (caller responsibility)
   1.329 +      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
   1.330 +                   "MemberName required for invokeVirtual etc.");
   1.331 +    }
   1.332 +
   1.333 +    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
   1.334 +    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
   1.335 +    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
   1.336 +
   1.337 +    Register temp1_recv_klass = temp1;
   1.338 +    if (iid != vmIntrinsics::_linkToStatic) {
   1.339 +      __ verify_oop(receiver_reg);
   1.340 +      if (iid == vmIntrinsics::_linkToSpecial) {
   1.341 +        // Don't actually load the klass; just null-check the receiver.
   1.342 +        __ null_check(receiver_reg);
   1.343 +      } else {
   1.344 +        // load receiver klass itself
   1.345 +        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
   1.346 +        __ load_klass(temp1_recv_klass, receiver_reg);
   1.347 +        __ verify_klass_ptr(temp1_recv_klass);
   1.348 +      }
   1.349 +      BLOCK_COMMENT("check_receiver {");
   1.350 +      // The receiver for the MemberName must be in receiver_reg.
   1.351 +      // Check the receiver against the MemberName.clazz
   1.352 +      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
   1.353 +        // Did not load it above...
   1.354 +        __ load_klass(temp1_recv_klass, receiver_reg);
   1.355 +        __ verify_klass_ptr(temp1_recv_klass);
   1.356 +      }
   1.357 +      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
   1.358 +        Label L_ok;
   1.359 +        Register temp2_defc = temp2;
   1.360 +        __ load_heap_oop(temp2_defc, member_clazz);
   1.361 +        load_klass_from_Class(_masm, temp2_defc);
   1.362 +        __ verify_klass_ptr(temp2_defc);
   1.363 +        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
   1.364 +        // If we get here, the type check failed!
   1.365 +        __ STOP("receiver class disagrees with MemberName.clazz");
   1.366 +        __ bind(L_ok);
   1.367 +      }
   1.368 +      BLOCK_COMMENT("} check_receiver");
   1.369 +    }
   1.370 +    if (iid == vmIntrinsics::_linkToSpecial ||
   1.371 +        iid == vmIntrinsics::_linkToStatic) {
   1.372 +      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
   1.373 +    }
   1.374 +
   1.375 +    // Live registers at this point:
   1.376 +    //  member_reg - MemberName that was the trailing argument
   1.377 +    //  temp1_recv_klass - klass of stacked receiver, if needed
   1.378 +    //  rsi/r13 - interpreter linkage (if interpreted)
   1.379 +    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)
   1.380 +
   1.381 +    Label L_incompatible_class_change_error;
   1.382 +    switch (iid) {
   1.383 +    case vmIntrinsics::_linkToSpecial:
   1.384 +      if (VerifyMethodHandles) {
   1.385 +        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
   1.386 +      }
   1.387 +      __ movptr(rbx_method, member_vmtarget);
   1.388 +      break;
   1.389 +
   1.390 +    case vmIntrinsics::_linkToStatic:
   1.391 +      if (VerifyMethodHandles) {
   1.392 +        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
   1.393 +      }
   1.394 +      __ movptr(rbx_method, member_vmtarget);
   1.395 +      break;
   1.396 +
   1.397 +    case vmIntrinsics::_linkToVirtual:
   1.398 +    {
   1.399 +      // same as TemplateTable::invokevirtual,
   1.400 +      // minus the CP setup and profiling:
   1.401 +
   1.402 +      if (VerifyMethodHandles) {
   1.403 +        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
   1.404 +      }
   1.405 +
   1.406 +      // pick out the vtable index from the MemberName, and then we can discard it:
   1.407 +      Register temp2_index = temp2;
   1.408 +      __ movptr(temp2_index, member_vmindex);
   1.409 +
   1.410 +      if (VerifyMethodHandles) {
   1.411 +        Label L_index_ok;
   1.412 +        __ cmpl(temp2_index, 0);
   1.413 +        __ jcc(Assembler::greaterEqual, L_index_ok);
   1.414 +        __ STOP("no virtual index");
   1.415 +        __ BIND(L_index_ok);
   1.416 +      }
   1.417 +
   1.418 +      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
   1.419 +      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.
   1.420 +
   1.421 +      // get target Method* & entry point
   1.422 +      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
   1.423 +      break;
   1.424 +    }
   1.425 +
   1.426 +    case vmIntrinsics::_linkToInterface:
   1.427 +    {
   1.428 +      // same as TemplateTable::invokeinterface
   1.429 +      // (minus the CP setup and profiling, with different argument motion)
   1.430 +      if (VerifyMethodHandles) {
   1.431 +        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
   1.432 +      }
   1.433 +
   1.434 +      Register temp3_intf = temp3;
   1.435 +      __ load_heap_oop(temp3_intf, member_clazz);
   1.436 +      load_klass_from_Class(_masm, temp3_intf);
   1.437 +      __ verify_klass_ptr(temp3_intf);
   1.438 +
   1.439 +      Register rbx_index = rbx_method;
   1.440 +      __ movptr(rbx_index, member_vmindex);
   1.441 +      if (VerifyMethodHandles) {
   1.442 +        Label L;
   1.443 +        __ cmpl(rbx_index, 0);
   1.444 +        __ jcc(Assembler::greaterEqual, L);
   1.445 +        __ STOP("invalid vtable index for MH.invokeInterface");
   1.446 +        __ bind(L);
   1.447 +      }
   1.448 +
   1.449 +      // given intf, index, and recv klass, dispatch to the implementation method
   1.450 +      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
   1.451 +                                 // note: next two args must be the same:
   1.452 +                                 rbx_index, rbx_method,
   1.453 +                                 temp2,
   1.454 +                                 L_incompatible_class_change_error);
   1.455 +      break;
   1.456 +    }
   1.457 +
   1.458 +    default:
   1.459 +      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
   1.460 +      break;
   1.461 +    }
   1.462 +
   1.463 +    // Live at this point:
   1.464 +    //   rbx_method
   1.465 +    //   rsi/r13 (if interpreted)
   1.466 +
   1.467 +    // After figuring out which concrete method to call, jump into it.
   1.468 +    // Note that this works in the interpreter with no data motion.
   1.469 +    // But the compiled version will require that rcx_recv be shifted out.
   1.470 +    __ verify_method_ptr(rbx_method);
   1.471 +    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
   1.472 +
   1.473 +    if (iid == vmIntrinsics::_linkToInterface) {
   1.474 +      __ bind(L_incompatible_class_change_error);
   1.475 +      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
   1.476 +    }
   1.477 +  }
   1.478 +}
   1.479 +
   1.480 +#ifndef PRODUCT
   1.481 +void trace_method_handle_stub(const char* adaptername,
   1.482 +                              oop mh,
   1.483 +                              intptr_t* saved_regs,
   1.484 +                              intptr_t* entry_sp) {
   1.485 +  // called as a leaf from native code: do not block the JVM!
   1.486 +  bool has_mh = (strstr(adaptername, "/static") == NULL &&
   1.487 +                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
   1.488 +  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
   1.489 +  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
   1.490 +                adaptername, mh_reg_name,
   1.491 +                (void *)mh, entry_sp);
   1.492 +
   1.493 +  if (Verbose) {
   1.494 +    tty->print_cr("Registers:");
   1.495 +    const int saved_regs_count = RegisterImpl::number_of_registers;
   1.496 +    for (int i = 0; i < saved_regs_count; i++) {
   1.497 +      Register r = as_Register(i);
   1.498 +      // The registers are stored in reverse order on the stack (by pusha).
   1.499 +      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
   1.500 +      if ((i + 1) % 4 == 0) {
   1.501 +        tty->cr();
   1.502 +      } else {
   1.503 +        tty->print(", ");
   1.504 +      }
   1.505 +    }
   1.506 +    tty->cr();
   1.507 +
   1.508 +    {
   1.509 +     // dumping last frame with frame::describe
   1.510 +
   1.511 +      JavaThread* p = JavaThread::active();
   1.512 +
   1.513 +      ResourceMark rm;
   1.514 +      PRESERVE_EXCEPTION_MARK; // may not be needed but is safer and inexpensive here
   1.515 +      FrameValues values;
   1.516 +
   1.517 +      // Note: We want to allow trace_method_handle from any call site.
   1.518 +      // While trace_method_handle creates a frame, it may be entered
   1.519 +      // without a PC on the stack top (e.g. not just after a call).
   1.520 +      // Walking that frame could lead to failures due to that invalid PC.
   1.521 +      // => carefully detect that frame when doing the stack walking
   1.522 +
   1.523 +      // Current C frame
   1.524 +      frame cur_frame = os::current_frame();
   1.525 +
   1.526 +      // Robust search of trace_calling_frame (independent of inlining).
   1.527 +      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
   1.528 +      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
   1.529 +      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
   1.530 +      while (trace_calling_frame.fp() < saved_regs) {
   1.531 +        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
   1.532 +      }
   1.533 +
   1.534 +      // safely create a frame and call frame::describe
   1.535 +      intptr_t *dump_sp = trace_calling_frame.sender_sp();
   1.536 +      intptr_t *dump_fp = trace_calling_frame.link();
   1.537 +
   1.538 +      bool walkable = has_mh; // whether the traced frame should be walkable
   1.539 +
   1.540 +      if (walkable) {
   1.541 +        // The previous definition of walkable may have to be refined
   1.542 +        // if new call sites cause the next frame constructor to start
   1.543 +        // failing. Alternatively, frame constructors could be
   1.544 +        // modified to support the current or future non walkable
   1.545 +        // frames (but this is more intrusive and is not considered as
   1.546 +        // part of this RFE, which will instead use a simpler output).
   1.547 +        frame dump_frame = frame(dump_sp, dump_fp);
   1.548 +        dump_frame.describe(values, 1);
   1.549 +      } else {
   1.550 +        // Stack may not be walkable (invalid PC above FP):
   1.551 +        // Add descriptions without building a Java frame to avoid issues
   1.552 +        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
   1.553 +        values.describe(-1, dump_sp, "sp for #1");
   1.554 +      }
   1.555 +      values.describe(-1, entry_sp, "raw top of stack");
   1.556 +
   1.557 +      tty->print_cr("Stack layout:");
   1.558 +      values.print(p);
   1.559 +    }
   1.560 +    if (has_mh && mh->is_oop()) {
   1.561 +      mh->print();
   1.562 +      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
   1.563 +        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
   1.564 +          java_lang_invoke_MethodHandle::form(mh)->print();
   1.565 +      }
   1.566 +    }
   1.567 +  }
   1.568 +}
   1.569 +
   1.570 +// The stub wraps the arguments in a struct on the stack to avoid
   1.571 +// dealing with the different calling conventions for passing 6
   1.572 +// arguments.
   1.573 +struct MethodHandleStubArguments {
   1.574 +  const char* adaptername;
   1.575 +  oopDesc* mh;
   1.576 +  intptr_t* saved_regs;
   1.577 +  intptr_t* entry_sp;
   1.578 +};
   1.579 +void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
   1.580 +  trace_method_handle_stub(args->adaptername,
   1.581 +                           args->mh,
   1.582 +                           args->saved_regs,
   1.583 +                           args->entry_sp);
   1.584 +}
   1.585 +
   1.586 +void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   1.587 +  if (!TraceMethodHandles)  return;
   1.588 +  BLOCK_COMMENT("trace_method_handle {");
   1.589 +  __ enter();
   1.590 +  __ andptr(rsp, -16); // align stack if needed for FPU state
   1.591 +  __ pusha();
   1.592 +  __ mov(rbx, rsp); // for retrieving saved_regs
   1.593 +  // Note: saved_regs must be in the entered frame for the
   1.594 +  // robust stack walking implemented in trace_method_handle_stub.
   1.595 +
   1.596 +  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
   1.597 +  __ increment(rsp, -2 * wordSize);
   1.598 +  if  (UseSSE >= 2) {
   1.599 +    __ movdbl(Address(rsp, 0), xmm0);
   1.600 +  } else if (UseSSE == 1) {
   1.601 +    __ movflt(Address(rsp, 0), xmm0);
   1.602 +  } else {
   1.603 +    __ fst_d(Address(rsp, 0));
   1.604 +  }
   1.605 +
   1.606 +  // Incoming state:
   1.607 +  // rcx: method handle
   1.608 +  //
   1.609 +  // To avoid calling convention issues, build a record on the stack
   1.610 +  // and pass the pointer to that instead.
   1.611 +  __ push(rbp);               // entry_sp (with extra align space)
   1.612 +  __ push(rbx);               // pusha saved_regs
   1.613 +  __ push(rcx);               // mh
   1.614 +  __ push(rcx);               // slot for adaptername
   1.615 +  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
   1.616 +  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
   1.617 +  __ increment(rsp, sizeof(MethodHandleStubArguments));
   1.618 +
   1.619 +  if  (UseSSE >= 2) {
   1.620 +    __ movdbl(xmm0, Address(rsp, 0));
   1.621 +  } else if (UseSSE == 1) {
   1.622 +    __ movflt(xmm0, Address(rsp, 0));
   1.623 +  } else {
   1.624 +    __ fld_d(Address(rsp, 0));
   1.625 +  }
   1.626 +  __ increment(rsp, 2 * wordSize);
   1.627 +
   1.628 +  __ popa();
   1.629 +  __ leave();
   1.630 +  BLOCK_COMMENT("} trace_method_handle");
   1.631 +}
   1.632 +#endif //PRODUCT

mercurial