Thu, 15 Aug 2013 20:04:10 -0400
8003424: Enable Class Data Sharing for CompressedOops
8016729: ObjectAlignmentInBytes=16 now forces the use of heap based compressed oops
8005933: The -Xshare:auto option is ignored for -server
Summary: Move klass metaspace above the heap and support CDS with compressed klass ptrs.
Reviewed-by: coleenp, kvn, mgerdin, tschatzl, stefank
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif
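
// Bind a label and, in debug builds, emit a block comment naming it.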
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
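  // Not an exact match; also accept a subtype: the slot at the expected
  // klass's super_check_offset holds that klass whenever obj's class is a
  // subtype of it (fast subtype-check display).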
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
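  // Extract the REFERENCE_KIND bit-field from MemberName.flags and compare it
  // against the expected ref_kind.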
  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmpl(temp, ref_kind);
  __ jcc(Assembler::equal, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif //ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == rbx, "interpreter calling convention");
  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
#ifdef _LP64
    Register rthread = r15_thread;
#else
    Register rthread = temp;
    __ get_thread(rthread);
#endif
    // interp_only is an int; on little-endian hardware it is sufficient to test just the low byte.
    // Is a cmpl faster?
    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
    __ jccb(Assembler::zero, run_compiled_code);
    __ jmp(Address(method, Method::interpreter_entry_offset()));
    __ BIND(run_compiled_code);
  }
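
  // Jump to the method's entry point for the requested calling convention.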
  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ jmp(Address(method, entry_offset));
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rbx, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ movptr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ cmpptr(recv, __ argument_address(temp2, -1));
    __ jcc(Assembler::equal, L);
    __ movptr(rax, __ argument_address(temp2, -1));
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}

// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt();           // empty stubs make SG sick
    return NULL;
  }

  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rbx: Method*
  // rdx: argument locator (parameter slot count, added to rsp)
  // rcx: used as temp to hold mh or receiver
  // rax, rdi: garbage temps, blown away
  Register rdx_argp   = rdx;   // argument list ptr, live on error paths
  Register rax_temp   = rax;
  Register rcx_mh     = rcx;   // MH receiver; dies quickly and is recycled
  Register rbx_method = rbx;   // eventual target of this invocation

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
    __ jcc(Assembler::equal, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  Address rdx_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
    __ load_sized_value(rdx_argp,
                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
  } else {
    DEBUG_ONLY(rdx_argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ movptr(rcx_mh, rdx_first_arg_addr);
    DEBUG_ONLY(rdx_argp = noreg);
  }

  // rdx_first_arg_addr is live!

  trace_method_handle_interpreter_entry(_masm, iid);

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register rcx_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ movptr(rcx_recv = rcx, rdx_first_arg_addr);
    }
    DEBUG_ONLY(rdx_argp = noreg);
    Register rbx_member = rbx_method;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rax_temp);           // return address
    __ pop(rbx_member);         // extract last argument
    __ push(rax_temp);          // re-push return address
    generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry);
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1, rcx, rdx);
    assert_different_registers(temp2, rcx, rdx);
    assert_different_registers(temp3, rcx, rdx);
  }
#endif
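  // Note: this 'else' pairs with the 'if (for_compiler_entry)' inside
  // whichever preprocessor branch was compiled above.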
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   rbx_method
    //   rsi/r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(rbx_method);
    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT,
                adaptername, mh_reg_name,
                mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh;  // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing.  Alternatively, frame constructors could be modified
        // to support the current or future non-walkable frames (but this
        // is more intrusive and is not considered part of this RFE, which
        // will instead use a simpler output).
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing 6
// arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
  trace_method_handle_stub(args->adaptername,
                           args->mh,
                           args->saved_regs,
                           args->entry_sp);
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ enter();
  __ andptr(rsp, -16);  // align stack if needed for FPU state
  __ pusha();
  __ mov(rbx, rsp);  // for retrieving saved_regs
  // Note: saved_regs must be in the entered frame for the
  // robust stack walking implemented in trace_method_handle_stub.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  __ increment(rsp, -2 * wordSize);
  if (UseSSE >= 2) {
    __ movdbl(Address(rsp, 0), xmm0);
  } else if (UseSSE == 1) {
    __ movflt(Address(rsp, 0), xmm0);
  } else {
    __ fst_d(Address(rsp, 0));
  }

  // Incoming state:
  // rcx: method handle
  //
  // To avoid calling convention issues, build a record on the stack
  // and pass the pointer to that instead.
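  // The fields are pushed in reverse declaration order, so the finished
  // record at rsp lines up with the MethodHandleStubArguments layout.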
  __ push(rbp);               // entry_sp (with extra align space)
  __ push(rbx);               // pusha saved_regs
  __ push(rcx);               // mh
  __ push(rcx);               // slot for adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
  __ increment(rsp, sizeof(MethodHandleStubArguments));

  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
  } else if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
  } else {
    __ fld_d(Address(rsp, 0));
  }
  __ increment(rsp, 2 * wordSize);

  __ popa();
  __ leave();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT