Wed, 29 Mar 2017 09:41:51 +0800
#4662 TieredCompilation is turned off; it is not supported yet.
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
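
// MIPS note: every conditional branch and indirect jump emitted below is
// followed by a __ nop() to fill the architectural branch delay slot.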

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ ld(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  /*
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = S0;
  // Register temp2 = noreg;
  // LP64_ONLY(temp2 = S1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  // __ testptr(obj, obj);
  // __ jcc(Assembler::zero, L_bad);
  __ beq(obj, R0, L_bad);
  __ nop();
  __ push(temp); //if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { __ pop(temp); }
  __ load_klass(temp, obj);
  // __ cmpptr(temp, ExternalAddress((address) klass_addr));
  // __ jcc(Assembler::equal, L_ok);
  __ li(AT, (long)klass_addr);
  __ ld(AT, AT, 0);
  __ beq(temp, AT, L_ok);
  __ nop();
  intptr_t super_check_offset = klass->super_check_offset();
  __ ld(temp, Address(temp, super_check_offset));
  // __ cmpptr(temp, ExternalAddress((address) klass_addr));
  // __ jcc(Assembler::equal, L_ok);
  __ li(AT, (long)klass_addr);
  __ ld(AT, AT, 0);
  __ beq(AT, temp, L_ok);
  __ nop();
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
  */
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ lw(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  // __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ sra(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  // __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ move(AT, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ andr(temp, temp, AT);
  // __ cmpl(temp, ref_kind);
  // __ jcc(Assembler::equal, L);
  __ move(AT, ref_kind);
  __ beq(temp, AT, L);
  __ nop();
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif //ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == Rmethod, "interpreter calling convention");

  Label L_no_such_method;
  __ beq(method, R0, L_no_such_method);
  __ nop();

  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
#ifdef _LP64
    Register rthread = TREG;
#else
    Register rthread = temp;
    __ get_thread(rthread);
#endif
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    // __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
    __ lbu(AT, rthread, in_bytes(JavaThread::interp_only_mode_offset()));
    // __ jccb(Assembler::zero, run_compiled_code);
    __ beq(AT, R0, run_compiled_code);
    __ nop();
    // __ jmp(Address(method, Method::interpreter_entry_offset()));
    __ ld(T9, method, in_bytes(Method::interpreter_entry_offset()));
    __ jr(T9);
    __ nop();
    __ BIND(run_compiled_code);
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  // __ jmp(Address(method, entry_offset));
  __ ld(T9, method, in_bytes(entry_offset));
  __ jr(T9);
  __ nop();

  __ bind(L_no_such_method);
  // __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
  address wrong_method = StubRoutines::throw_AbstractMethodError_entry();
  __ jmp(wrong_method, relocInfo::runtime_call_type);
  __ nop();
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == Rmethod, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  // __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
  __ ld(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
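  // (On this port the vmtarget Method* is loaded as an uncompressed 64-bit
  // pointer, hence the plain ld above rather than load_heap_oop.)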

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    // __ movptr(temp2, Address(method_temp, Method::const_offset()));
    __ ld(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    // __ cmpptr(recv, __ argument_address(temp2, -1));
    // __ jcc(Assembler::equal, L);
    Address recv_addr = __ argument_address(temp2, -1);
    __ ld(AT, recv_addr);
    __ beq(recv, AT, L);
    __ nop();

    // __ movptr(rax, __ argument_address(temp2, -1));
    recv_addr = __ argument_address(temp2, -1);
    __ ld(V0, recv_addr);
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}

// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    // __ hlt();  // empty stubs make SG sick
    __ stop("empty stubs make SG sick");
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
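  // (The register roles above are described in terms of the original x86
  // port; the MIPS registers standing in for them are assigned below.)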
  Register rdx_argp   = T9;       // argument list ptr, live on error paths
  // Register rax_temp = rax;
  Register rcx_mh     = S7;       // MH receiver; dies quickly and is recycled
  Register rbx_method = Rmethod;  // eventual target of this invocation

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    // __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    // __ jcc(Assembler::equal, L);
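    // MIPS branches compare two registers and have no compare-with-immediate
    // form, so load the id byte, subtract iid, and branch on a zero result.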
    __ lbu(AT, rbx_method, Method::intrinsic_id_offset_in_bytes());
    guarantee(Assembler::is_simm16(iid), "Oops, iid is not simm16! Change the instructions.");
    __ addiu(AT, AT, -1 * (int) iid);
    __ beq(AT, R0, L);
    __ nop();
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ ld(rdx_argp, Address(rbx_method, Method::const_offset()));
    __ load_sized_value(rdx_argp,
                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ ld(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!

  trace_method_handle_interpreter_entry(_masm, iid);

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ld(rcx_recv = T2, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    // __ pop(AT);         // return address
    __ pop(rbx_member);    // extract last argument
    // __ push(AT);        // re-push return address
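    // Unlike x86, the return address is kept in RA rather than on the stack,
    // so only the trailing MemberName argument needs to be popped here.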
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = Rmethod;  // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register j_rarg0 = T0;
  Register j_rarg1 = A0;
  Register j_rarg2 = A1;
  Register j_rarg3 = A2;
  Register j_rarg4 = A3;
  Register j_rarg5 = A4;

  Register temp1 = T8;
  Register temp2 = T9;
  Register temp3 = V0;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  /*
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1, rcx, rdx);
    assert_different_registers(temp2, rcx, rdx);
    assert_different_registers(temp3, rcx, rdx);
  }
  */
#endif
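  // (This else pairs with the LP64 "if (for_compiler_entry)" above; the
  // 32-bit register block is left unimplemented.)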
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ ld(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ ld(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ld(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        // __ cmpl(temp2_index, 0);
        // __ jcc(Assembler::greaterEqual, L_index_ok);
        __ slt(AT, temp2_index, R0);
        __ beq(AT, R0, L_index_ok);
        __ nop();
468 __ STOP("no virtual index");
469 __ BIND(L_index_ok);
470 }
472 // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
473 // at this point. And VerifyMethodHandles has already checked clazz, if needed.
475 // get target Method* & entry point
476 __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
477 break;
478 }
    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ ld(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        // __ cmpl(rbx_index, 0);
        // __ jcc(Assembler::greaterEqual, L);
        __ slt(AT, rbx_index, R0);
        __ beq(AT, R0, L);
        __ nop();
502 __ STOP("invalid vtable index for MH.invokeInterface");
503 __ bind(L);
504 }
506 // given intf, index, and recv klass, dispatch to the implementation method
507 __ lookup_interface_method(temp1_recv_klass, temp3_intf,
508 // note: next two args must be the same:
509 rbx_index, rbx_method,
510 temp2,
511 L_incompatible_class_change_error);
512 break;
513 }
515 default:
516 fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
517 break;
518 }
520 // Live at this point:
521 // rbx_method
522 // rsi/r13 (if interpreted)
524 // After figuring out which concrete method to call, jump into it.
525 // Note that this works in the interpreter with no data motion.
526 // But the compiled version will require that rcx_recv be shifted out.
527 __ verify_method_ptr(rbx_method);
528 jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
530 if (iid == vmIntrinsics::_linkToInterface) {
531 __ bind(L_incompatible_class_change_error);
532 // __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
533 address icce_entry= StubRoutines::throw_IncompatibleClassChangeError_entry();
534 __ jmp(icce_entry, relocInfo::runtime_call_type);
535 __ nop();
536 }
537 }
538 }

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT,
                adaptername, mh_reg_name,
                (void *)mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh; // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing. Alternatively, frame constructors could be
        // modified to support the current or future non walkable
        // frames (but this is more intrusive and is not considered as
        // part of this RFE, which will instead use a simpler output).
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};

void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp);
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
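  // The commented-out body below is the x86 implementation, retained as a
  // reference; a MIPS version has not been provided yet.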
  /*
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ enter();
  __ andptr(rsp, -16); // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp); // for retrieving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // Incoming state:
  // rcx: method handle
  //
  // To avoid calling convention issues, build a record on the stack
  // and pass the pointer to that instead.
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ increment(rsp, sizeof(MethodHandleStubArguments));

  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
  */
}
#endif //PRODUCT