Mon, 26 Sep 2011 10:24:05 -0700
7081933: Use zeroing elimination optimization for large array
Summary: Don't zero new typeArray during runtime call if the allocation is followed by arraycopy into it.
Reviewed-by: twisti
/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                            address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}
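
// Illustrative sketch (an assumption, not part of the original file): the two
// helpers above bracket an entry so that the Data record sits immediately
// before the first instruction, reachable from one base pointer.
//
//   address start = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
//   // ... emit the stub's instructions ...
//   MethodHandleEntry* me = MethodHandleEntry::finish_compiled_entry(_masm, start);
//
// Memory layout, low to high:  [ MethodHandleEntry::Data | machine code ... end_address )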

// stack walking support

frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
  //RicochetFrame* f = RicochetFrame::from_frame(fr);
  // Cf. is_interpreted_frame path of frame::sender
  intptr_t* younger_sp = fr.sp();
  intptr_t* sp         = fr.sender_sp();
  map->make_integer_regs_unsaved();
  map->shift_window(sp, younger_sp);
  bool this_frame_adjusted_stack = true;  // I5_savedSP is live in this RF
  return frame(sp, younger_sp, this_frame_adjusted_stack);
}

void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
  ResourceMark rm;
  RicochetFrame* f = RicochetFrame::from_frame(fr);

  // pick up the argument type descriptor:
  Thread* thread = Thread::current();
  Handle cookie(thread, f->compute_saved_args_layout(true, true));

  // process fixed part
  blk->do_oop((oop*)f->saved_target_addr());
  blk->do_oop((oop*)f->saved_args_layout_addr());

  // process variable arguments:
  if (cookie.is_null())  return;  // no arguments to describe

  // the cookie is actually the invokeExact method for my target
  // its argument signature is what I'm interested in
  assert(cookie->is_method(), "");
  methodHandle invoker(thread, methodOop(cookie()));
  assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
  assert(!invoker->is_static(), "must have MH argument");
  int slot_count = invoker->size_of_parameters();
  assert(slot_count >= 1, "must include 'this'");
  intptr_t* base = f->saved_args_base();
  intptr_t* retval = NULL;
  if (f->has_return_value_slot())
    retval = f->return_value_slot_addr();
  int slot_num = slot_count - 1;
  intptr_t* loc = &base[slot_num];
  //blk->do_oop((oop*) loc);   // original target, which is irrelevant
  int arg_num = 0;
  for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
    if (ss.at_return_type())  continue;
    BasicType ptype = ss.type();
    if (ptype == T_ARRAY)  ptype = T_OBJECT;  // fold all refs to T_OBJECT
    assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
    slot_num -= type2size[ptype];
    loc = &base[slot_num];
    bool is_oop = (ptype == T_OBJECT && loc != retval);
    if (is_oop)  blk->do_oop((oop*)loc);
    arg_num += 1;
  }
  assert(slot_num == 0, "must have processed all the arguments");
}
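
// Illustrative sketch (an assumption, not part of the original file): how the
// loop above maps parameter types to interpreter slots for a hypothetical
// signature.  Slots are numbered from the end of the argument list, and
// two-slot types (long/double) drop slot_num by 2, so only one-slot oops are
// visited:
//
//   // invoker params (MH recv, int, Object, long):  slot_count = 5
//   // recv   -> slot 4 (the saved target; its do_oop is commented out above)
//   // int    -> slot_num -= 1  -> slot 3, not an oop
//   // Object -> slot_num -= 1  -> slot 2, blk->do_oop(&base[2])
//   // long   -> slot_num -= 2  -> slot 0, not an oop; loop ends with slot_num == 0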

// Ricochet Frames
const Register MethodHandles::RicochetFrame::L1_continuation      = L1;
const Register MethodHandles::RicochetFrame::L2_saved_target      = L2;
const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3;
const Register MethodHandles::RicochetFrame::L4_saved_args_base   = L4;  // cf. Gargs = G4
const Register MethodHandles::RicochetFrame::L5_conversion        = L5;
#ifdef ASSERT
const Register MethodHandles::RicochetFrame::L0_magic_number_1    = L0;
#endif //ASSERT

oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
  if (read_cache) {
    oop cookie = saved_args_layout();
    if (cookie != NULL)  return cookie;
  }
  oop target = saved_target();
  oop mtype  = java_lang_invoke_MethodHandle::type(target);
  oop mtform = java_lang_invoke_MethodType::form(mtype);
  oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
  if (write_cache) {
    (*saved_args_layout_addr()) = cookie;
  }
  return cookie;
}

void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
                                                          // output params:
                                                          int* bounce_offset,
                                                          int* exception_offset,
                                                          int* frame_size_in_words) {
  (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;

  address start = __ pc();

#ifdef ASSERT
  __ illtrap(0); __ illtrap(0); __ illtrap(0);
  // here's a hint of something special:
  __ set(MAGIC_NUMBER_1, G0);
  __ set(MAGIC_NUMBER_2, G0);
#endif //ASSERT
  __ illtrap(0);  // not reached

  // Return values are in registers.
  // L1_continuation contains a cleanup continuation we must return
  // to.

  (*bounce_offset) = __ pc() - start;
  BLOCK_COMMENT("ricochet_blob.bounce");

  if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
  trace_method_handle(_masm, "ricochet_blob.bounce");

  __ JMP(L1_continuation, 0);
  __ delayed()->nop();
  __ illtrap(0);

  DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0));

  (*exception_offset) = __ pc() - start;
  BLOCK_COMMENT("ricochet_blob.exception");

  // compare this to Interpreter::rethrow_exception_entry, which is parallel code
  // for example, see TemplateInterpreterGenerator::generate_throw_exception
  // Live registers in:
  //   Oexception  (O0): exception
  //   Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr)
  __ verify_oop(Oexception);

  // Take down the frame.

  // Cf. InterpreterMacroAssembler::remove_activation.
  leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)
  __ mov(Oexception, Oexception->after_save());  // get exception in I0 so it will be on O0 after restore
  __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ call_VM_leaf(L7_thread_cache,
                  CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals.  Undo that adjustment.
  __ JMP(O0, 0);  // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}

void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
                                                        Register recv_reg,
                                                        Register argv_reg,
                                                        address return_handler) {
  // does not include the __ save()
  assert(argv_reg == Gargs, "");
  Address G3_mh_vmtarget(   recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
  Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());

  // Create the RicochetFrame.
  // Unlike on x86 we can store all required information in local
  // registers.
  BLOCK_COMMENT("push RicochetFrame {");
  __ set(ExternalAddress(return_handler), L1_continuation);
  __ load_heap_oop(G3_mh_vmtarget, L2_saved_target);
  __ mov(G0, L3_saved_args_layout);
  __ mov(Gargs, L4_saved_args_base);
  __ lduw(G3_amh_conversion, L5_conversion);  // 32-bit field
  // I5, I6, I7 are already set up
  DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1, L0_magic_number_1));
  BLOCK_COMMENT("} RicochetFrame");
}

void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
                                                        Register recv_reg,
                                                        Register new_sp_reg,
                                                        Register sender_pc_reg) {
  assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place");
  assert(sender_pc_reg == I7, "in a fixed place");
  // does not include the __ ret() & __ restore()
  assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg);
  // Take down the frame.
  // Cf. InterpreterMacroAssembler::remove_activation.
  BLOCK_COMMENT("end_ricochet_frame {");
  if (recv_reg->is_valid())
    __ mov(L2_saved_target, recv_reg);
  BLOCK_COMMENT("} end_ricochet_frame");
}

// Emit code to verify that FP is pointing at a valid ricochet frame.
#ifdef ASSERT
enum {
  ARG_LIMIT = 255, SLOP = 45,
  // use this parameter for checking for garbage stack movements:
  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
  // the slop defends against false alarms due to fencepost errors
};

void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
  // The stack should look like this:
  //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
  // Check various invariants.

  Register O7_temp = O7, O5_temp = O5;

  Label L_ok_1, L_ok_2, L_ok_3, L_ok_4;
  BLOCK_COMMENT("verify_clean {");
  // Magic numbers must check out:
  __ set((int32_t) MAGIC_NUMBER_1, O7_temp);
  __ cmp_and_br_short(O7_temp, L0_magic_number_1, Assembler::equal, Assembler::pt, L_ok_1);
  __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found");

  __ BIND(L_ok_1);

  // Arguments pointer must look reasonable:
#ifdef _LP64
  Register FP_temp = O5_temp;
  __ add(FP, STACK_BIAS, FP_temp);
#else
  Register FP_temp = FP;
#endif
  __ cmp_and_brx_short(L4_saved_args_base, FP_temp, Assembler::greaterEqualUnsigned, Assembler::pt, L_ok_2);
  __ stop("damaged ricochet frame: L4 < FP");

  __ BIND(L_ok_2);
  // Disabled until we decide on its fate
  // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
  // __ cmp(O7_temp, FP_temp);
  // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
  // __ delayed()->nop();
  // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");

  __ BIND(L_ok_3);
  extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
  __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4);
  extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
  __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
  assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
  __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4);
  __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
  __ BIND(L_ok_4);
  BLOCK_COMMENT("} verify_clean");
}
#endif //ASSERT

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
                 "AMH argument is a Class");
  __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
}

void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) {
  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
  assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load");
  __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg);
}

void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
  __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg);
}

void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
  __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg);
  __ and3(reg, 0x0F, reg);
}
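
// Illustrative sketch (an assumption, not part of the original file): the two
// extractors above are plain shift-and-mask operations on the packed 32-bit
// AMH conversion word.  A C model, with the CONV_* constants standing in for
// the values declared in prims/methodHandles.hpp:
//
//   static int extract_vminfo(int32_t conv)    { return conv & CONV_VMINFO_MASK; }  // CONV_VMINFO_SHIFT == 0
//   static int extract_dest_type(int32_t conv) { return (conv >> CONV_DEST_TYPE_SHIFT) & 0x0F; }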

void MethodHandles::load_stack_move(MacroAssembler* _masm,
                                    Address G3_amh_conversion,
                                    Register stack_move_reg) {
  BLOCK_COMMENT("load_stack_move {");
  __ ldsw(G3_amh_conversion, stack_move_reg);
  __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg);
  if (VerifyMethodHandles) {
    Label L_ok, L_bad;
    int32_t stack_move_limit = 0x0800;  // extra-large
    __ cmp_and_br_short(stack_move_reg, stack_move_limit, Assembler::greaterEqual, Assembler::pn, L_bad);
    __ cmp(stack_move_reg, -stack_move_limit);
    __ br(Assembler::greater, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ BIND(L_bad);
    __ stop("load_stack_move of garbage value");
    __ BIND(L_ok);
  }
  BLOCK_COMMENT("} load_stack_move");
}

#ifdef ASSERT
void MethodHandles::RicochetFrame::verify() const {
  assert(magic_number_1() == MAGIC_NUMBER_1, "");
  if (!Universe::heap()->is_gc_active()) {
    if (saved_args_layout() != NULL) {
      assert(saved_args_layout()->is_method(), "must be valid oop");
    }
    if (saved_target() != NULL) {
      assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
    }
  }
  int conv_op = adapter_conversion_op(conversion());
  assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
         conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
         conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
         "must be a sane conversion");
  if (has_return_value_slot()) {
    assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
  }
}

void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_argslot {");
  __ cmp_and_brx_short(Gargs, argslot_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad);
  __ add(FP, STACK_BIAS, temp_reg);  // STACK_BIAS is zero on !_LP64
  __ cmp_and_brx_short(argslot_reg, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok);
  __ BIND(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}

void MethodHandles::verify_argslots(MacroAssembler* _masm,
                                    RegisterOrConstant arg_slots,
                                    Register arg_slot_base_reg,
                                    Register temp_reg,
                                    Register temp2_reg,
                                    bool negate_argslots,
                                    const char* error_message) {
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_argslots {");
  if (negate_argslots) {
    if (arg_slots.is_constant()) {
      arg_slots = -1 * arg_slots.as_constant();
    } else {
      __ neg(arg_slots.as_register(), temp_reg);
      arg_slots = temp_reg;
    }
  }
  __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg);
  __ add(FP, STACK_BIAS, temp2_reg);  // STACK_BIAS is zero on !_LP64
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad);
  // Gargs points to the first word so adjust by BytesPerWord
  __ add(arg_slot_base_reg, BytesPerWord, temp_reg);
  __ cmp_and_brx_short(Gargs, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok);
  __ BIND(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  BLOCK_COMMENT("} verify_argslots");
}

// Make sure that arg_slots has the same sign as the given direction.
// If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
void MethodHandles::verify_stack_move(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots, int direction) {
  enum { UNREASONABLE_STACK_MOVE = 256 * 4 };  // limit of 255 arguments
  bool allow_zero = arg_slots.is_constant();
  if (direction == 0) { direction = +1; allow_zero = true; }
  assert(stack_move_unit() == -1, "else add extra checks here");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    BLOCK_COMMENT("verify_stack_move {");
    // __ btst(-stack_move_unit() - 1, arg_slots.as_register());  // no need
    // __ br(Assembler::notZero, false, Assembler::pn, L_bad);
    // __ delayed()->nop();
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    if (direction > 0) {
      __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad);
      __ delayed()->nop();
      __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
      __ br(Assembler::less, false, Assembler::pn, L_ok);
      __ delayed()->nop();
    } else {
      __ br(allow_zero ? Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad);
      __ delayed()->nop();
      __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
      __ br(Assembler::greater, false, Assembler::pn, L_ok);
      __ delayed()->nop();
    }
    __ BIND(L_bad);
    if (direction > 0)
      __ stop("assert arg_slots > 0");
    else
      __ stop("assert arg_slots < 0");
    __ BIND(L_ok);
    BLOCK_COMMENT("} verify_stack_move");
  } else {
    intptr_t size = arg_slots.as_constant();
    if (direction < 0)  size = -size;
    assert(size >= 0, "correct direction of constant move");
    assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
  }
}

void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, KlassHandle klass,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  oop* klass_addr = klass.raw_value();
  assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
         klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
         "must be one of the SystemDictionaryHandles");
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  __ br_null_short(obj_reg, Assembler::pn, L_bad);
  __ load_klass(obj_reg, temp_reg);
  __ set(ExternalAddress(klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
  __ set(ExternalAddress(klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  __ BIND(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  BLOCK_COMMENT("} verify_klass");
}
#endif // ASSERT


void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) {
  assert(method == G5_method, "interpreter calling convention");
  __ verify_oop(method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    __ verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    __ ld(interp_only, temp);
    __ tst(temp);
    __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
    __ bind(skip_compiled_code);
  }
  __ jmp(target, 0);
  __ delayed()->nop();
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP/O5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1, O2, O3, O4: garbage temps, blown away
  Register O0_mtype   = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register O4_argslot = O4;
  Register O4_argbase = O4;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
  __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
  __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
  __ delayed()->nop();
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  __ mov(G3_method_handle, G3_method_handle);  // already in this register
  // O0 will be filled in with JavaThread in stub
  __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle
  // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
  // This would simplify several touchy bits of code.
  // See 6984712: JSR 292 method handle calls need a clean argument base pointer
  {
    Register tem = G5_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), O0_mtype);
      tem = O0_mtype;  // in case there is another indirection
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
  __ ldsw(         Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
  __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase);
  // Note: argument_address uses its input as a scratch register!
  Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize);
  __ ld_ptr(mh_receiver_slot_addr, G3_method_handle);

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);

  // Nobody uses the MH receiver slot after this.  Make sure.
  DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr));

  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  // for invokeGeneric (only), apply argument and result conversions on the fly
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  if (VerifyMethodHandles) {
    Label L;
    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT

  // make room on the stack for another pointer:
  insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch);
  // load up an adapter from the calling type (Java weaves this)
  Register O2_form    = O2_scratch;
  Register O3_adapter = O3_scratch;
  __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,               O1_scratch)), O2_form);
  __ load_heap_oop(Address(O2_form,  __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  __ verify_oop(O3_adapter);
  __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ st_ptr(O0_mtype, Address(O4_argbase, 0 * Interpreter::stackElementSize));
  // FIXME: assert that O3_adapter is of the right method-type.
  __ mov(O3_adapter, G3_method_handle);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}

// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}

static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) {
  __ ldsw(vmargslot_addr, result);
}

static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm,
                                                            RegisterOrConstant arg_slots,
                                                            Register temp_reg, Register temp2_reg) {
  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  if (arg_slots.is_constant()) {
    const int offset = arg_slots.as_constant() << LogBytesPerWord;
    const int masked_offset  = round_to(offset, 2 * BytesPerWord);
    const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask;
    assert(masked_offset == masked_offset2, "must agree");
    __ sub(Gargs,        offset, Gargs);
    __ sub(SP,    masked_offset, SP   );
    return offset;
  } else {
#ifdef ASSERT
    {
      Label L_ok;
      __ cmp_and_br_short(arg_slots.as_register(), 0, Assembler::greaterEqual, Assembler::pt, L_ok);
      __ stop("negative arg_slots");
      __ bind(L_ok);
    }
#endif
    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
    __ add( temp_reg,  1*BytesPerWord, temp2_reg);
    __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg);
    __ sub(Gargs, temp_reg,  Gargs);
    __ sub(SP,    temp2_reg, SP   );
    return temp_reg;
  }
}

static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm,
                                                          RegisterOrConstant arg_slots,
                                                          Register temp_reg, Register temp2_reg) {
  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  if (arg_slots.is_constant()) {
    const int offset = arg_slots.as_constant() << LogBytesPerWord;
    const int masked_offset = offset & ~TwoWordAlignmentMask;
    __ add(Gargs,        offset, Gargs);
    __ add(SP,    masked_offset, SP   );
    return offset;
  } else {
    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
    __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg);
    __ add(Gargs, temp_reg,  Gargs);
    __ add(SP,    temp2_reg, SP   );
    return temp_reg;
  }
}
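
// Illustrative sketch (an assumption, not part of the original file): the
// alignment arithmetic above, worked for a 64-bit word (LogBytesPerWord == 3,
// so TwoWordAlignmentMask == 0xF).  Gargs moves by the exact byte count while
// SP moves by that count rounded to preserve 16-byte alignment:
//
//   // going down by 3 slots: offset = 3 << 3 = 24; SP moves by round_to(24, 16) = 32
//   // going up   by 3 slots: offset = 24;          SP moves by 24 & ~0xF       = 16
//
// Rounding up on the way down and down on the way up keeps SP at or below the
// Gargs area, so the argument slots always stay inside the frame.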

// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and < 0
// argslot_reg is decremented to point to the new (shifted) location of the argslot
// But, temp_reg ends up holding the original value of argslot_reg.
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  // allow constant zero
  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
    return;

  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  BLOCK_COMMENT("insert_arg_slots {");
  if (VerifyMethodHandles)
    verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (VerifyMethodHandles)
    verify_stack_move(_masm, arg_slots, -1);

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;

  // offset is temp3_reg in case of arg_slots being a register.
  RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
  __ sub(Gargs, offset, temp_reg);  // source pointer for copy

  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp_and_brx_short(temp_reg, argslot_reg, Assembler::lessUnsigned, Assembler::pt, loop);
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
  BLOCK_COMMENT("} insert_arg_slots");
}
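
// Illustrative sketch (an assumption, not part of the original file): a plain-C
// model of the copy loop above, with the stack viewed as an array whose low
// indices are the shallow end.  Everything shallower than the insertion point
// slides down by |size| words into the freshly opened space:
//
//   static void model_insert(intptr_t* sp, intptr_t** argslot, int size /* > 0 */) {
//     for (intptr_t* p = sp; p < *argslot; p++)
//       p[-size] = p[0];            // pull each word down into the new space
//     *argslot -= size;             // argslot now names the opened-up slots
//   }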


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and > 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  // allow constant zero
  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
    return;
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  BLOCK_COMMENT("remove_arg_slots {");
  if (VerifyMethodHandles)
    verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false,
                    "deleted argument(s) must fall within current frame");
  if (VerifyMethodHandles)
    verify_stack_move(_masm, arg_slots, +1);

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy

  {
    Label L_loop;
    __ BIND(L_loop);
    // pull one word up each time through the loop
    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp_and_brx_short(temp_reg, Gargs, Assembler::greaterEqualUnsigned, Assembler::pt, L_loop);
  }

  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // We don't need the offset at this point anymore, just adjust SP and Gargs.
  (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);

  BLOCK_COMMENT("} remove_arg_slots");
}
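
// Illustrative sketch (an assumption, not part of the original file): the C
// model of the removal loop, the mirror image of the insertion sketch above.
// Words are walked deepest-first so nothing is overwritten before it is read:
//
//   static void model_remove(intptr_t* sp, intptr_t** argslot, int size /* > 0 */) {
//     for (intptr_t* p = *argslot - 1; p >= sp; p--)
//       p[size] = p[0];             // push each word up over the deleted slots
//     *argslot += size;             // argslot now names the deletion point
//   }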

// Helper to copy argument slots to the top of the stack.
// The sequence starts with argslot_reg and is counted by slot_count
// slot_count must be a multiple of stack_move_unit() and >= 0
// This function blows the temps but does not change argslot_reg.
void MethodHandles::push_arg_slots(MacroAssembler* _masm,
                                   Register argslot_reg,
                                   RegisterOrConstant slot_count,
                                   Register temp_reg, Register temp2_reg) {
  // allow constant zero
  if (slot_count.is_constant() && slot_count.as_constant() == 0)
    return;
  assert_different_registers(argslot_reg, temp_reg, temp2_reg,
                             (!slot_count.is_register() ? Gargs : slot_count.as_register()),
                             SP);
  assert(Interpreter::stackElementSize == wordSize, "else change this code");

  BLOCK_COMMENT("push_arg_slots {");
  if (VerifyMethodHandles)
    verify_stack_move(_masm, slot_count, 0);

  RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg);

  if (slot_count.is_constant()) {
    for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
      __ ld_ptr(          Address(argslot_reg, i * wordSize), temp_reg);
      __ st_ptr(temp_reg, Address(Gargs,       i * wordSize));
    }
  } else {
    Label L_plural, L_loop, L_break;
    // Emit code to dynamically check for the common cases, zero and one slot.
    __ cmp(slot_count.as_register(), (int32_t) 1);
    __ br(Assembler::greater, false, Assembler::pn, L_plural);
    __ delayed()->nop();
    __ br(Assembler::less, false, Assembler::pn, L_break);
    __ delayed()->nop();
    __ ld_ptr(          Address(argslot_reg, 0), temp_reg);
    __ st_ptr(temp_reg, Address(Gargs,       0));
    __ ba_short(L_break);
    __ BIND(L_plural);

    // Loop for 2 or more:
    //   top = &argslot[slot_count]
    //   while (top > argslot)  *(--Gargs) = *(--top)
    Register top_reg = temp_reg;
    __ add(argslot_reg, offset, top_reg);
    __ add(Gargs,       offset, Gargs  );  // move back up again so we can go down
    __ BIND(L_loop);
    __ sub(top_reg, wordSize, top_reg);
    __ sub(Gargs,   wordSize, Gargs  );
    __ ld_ptr(           Address(top_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(Gargs,   0));
    __ cmp_and_brx_short(top_reg, argslot_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop);
    __ BIND(L_break);
  }
  BLOCK_COMMENT("} push_arg_slots");
}

// in-place movement; no change to Gargs
// blows temp_reg, temp2_reg
void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
                                      Register bottom_reg,  // invariant
                                      Address  top_addr,    // can use temp_reg
                                      RegisterOrConstant positive_distance_in_slots,  // destroyed if register
                                      Register temp_reg, Register temp2_reg) {
  assert_different_registers(bottom_reg,
                             temp_reg, temp2_reg,
                             positive_distance_in_slots.register_or_noreg());
  BLOCK_COMMENT("move_arg_slots_up {");
  Label L_loop, L_break;
  Register top_reg = temp_reg;
  if (!top_addr.is_same_address(Address(top_reg, 0))) {
    __ add(top_addr, top_reg);
  }
  // Detect empty (or broken) loop:
#ifdef ASSERT
  if (VerifyMethodHandles) {
    // Verify that &bottom < &top (non-empty interval)
    Label L_ok, L_bad;
    if (positive_distance_in_slots.is_register()) {
      __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0);
      __ br(Assembler::lessEqual, false, Assembler::pn, L_bad);
      __ delayed()->nop();
    }
    __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
    __ BIND(L_bad);
    __ stop("valid bounds (copy up)");
    __ BIND(L_ok);
  }
#endif
  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break);
  // work top down to bottom, copying contiguous data upwards
  // In pseudo-code:
  //   while (--top >= bottom)  *(top + distance) = *(top + 0);
  RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg());
  __ BIND(L_loop);
  __ sub(top_reg, wordSize, top_reg);
  __ ld_ptr(           Address(top_reg, 0     ), temp2_reg);
  __ st_ptr(temp2_reg, Address(top_reg, offset)           );
  __ cmp_and_brx_short(top_reg, bottom_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop);
  assert(Interpreter::stackElementSize == wordSize, "else change loop");
  __ BIND(L_break);
  BLOCK_COMMENT("} move_arg_slots_up");
}

// in-place movement; no change to rsp
// blows temp_reg, temp2_reg
void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
                                        Address  bottom_addr,  // can use temp_reg
                                        Register top_reg,      // invariant
                                        RegisterOrConstant negative_distance_in_slots,  // destroyed if register
                                        Register temp_reg, Register temp2_reg) {
  assert_different_registers(top_reg,
                             negative_distance_in_slots.register_or_noreg(),
                             temp_reg, temp2_reg);
  BLOCK_COMMENT("move_arg_slots_down {");
  Label L_loop, L_break;
  Register bottom_reg = temp_reg;
  if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) {
    __ add(bottom_addr, bottom_reg);
  }
  // Detect empty (or broken) loop:
#ifdef ASSERT
  assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
  if (VerifyMethodHandles) {
    // Verify that &bottom < &top (non-empty interval)
    Label L_ok, L_bad;
    if (negative_distance_in_slots.is_register()) {
      __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0);
      __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
      __ delayed()->nop();
    }
    __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
    __ BIND(L_bad);
    __ stop("valid bounds (copy down)");
    __ BIND(L_ok);
  }
#endif
  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break);
  // work bottom up to top, copying contiguous data downwards
  // In pseudo-code:
  //   while (bottom < top)  *(bottom - distance) = *(bottom + 0), bottom++;
  RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg());
  __ BIND(L_loop);
  __ ld_ptr(           Address(bottom_reg, 0     ), temp2_reg);
  __ st_ptr(temp2_reg, Address(bottom_reg, offset)           );
  __ add(bottom_reg, wordSize, bottom_reg);
  __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_loop);
  assert(Interpreter::stackElementSize == wordSize, "else change loop");
  __ BIND(L_break);
  BLOCK_COMMENT("} move_arg_slots_down");
}
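
// Illustrative sketch (an assumption, not part of the original file): a C
// model of both in-place moves.  "Up" walks top-to-bottom and "down" walks
// bottom-to-top, so each source word is read before its slot is clobbered:
//
//   static void model_move_up(intptr_t* bottom, intptr_t* top, int dist /* > 0 */) {
//     while (--top >= bottom)  top[dist] = top[0];
//   }
//   static void model_move_down(intptr_t* bottom, intptr_t* top, int dist /* < 0 */) {
//     for (; bottom < top; bottom++)  bottom[dist] = bottom[0];
//   }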

// Copy from a field or array element to a stacked argument slot.
// is_element (ignored) says whether caller is loading an array element instead of an instance field.
void MethodHandles::move_typed_arg(MacroAssembler* _masm,
                                   BasicType type, bool is_element,
                                   Address value_src, Address slot_dest,
                                   Register temp_reg) {
  assert(!slot_dest.uses(temp_reg), "must be different register");
  BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
  if (type == T_OBJECT || type == T_ARRAY) {
    __ load_heap_oop(value_src, temp_reg);
    __ verify_oop(temp_reg);
    __ st_ptr(temp_reg, slot_dest);
  } else if (type != T_VOID) {
    int  arg_size      = type2aelembytes(type);
    bool arg_is_signed = is_signed_subword_type(type);
    int  slot_size     = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size;  // store int sub-words as int
    __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed);
    __ store_sized_value(temp_reg, slot_dest, slot_size              );
  }
  BLOCK_COMMENT("} move_typed_arg");
}
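
// Illustrative sketch (an assumption, not part of the original file): what the
// sized load/store pair above does for a sub-word type.  A T_SHORT value is
// loaded sign-extended and then occupies a full int-sized stack slot:
//
//   // type = T_SHORT:  arg_size = 2, arg_is_signed = true, slot_size = 4
//   // value_src holds the 2-byte pattern 0xFFFE (-2 as a short)
//   // temp_reg  after load_sized_value:  0xFFFFFFFE (sign-extended)
//   // slot_dest after store_sized_value: the 4-byte int value -2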

// Cf. TemplateInterpreterGenerator::generate_return_entry_for and
// InterpreterMacroAssembler::save_return_value
void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
                                      Address return_slot) {
  BLOCK_COMMENT("move_return_value {");
  // Look at the type and pull the value out of the corresponding register.
  if (type == T_VOID) {
    // nothing to do
  } else if (type == T_OBJECT) {
    __ verify_oop(O0);
    __ st_ptr(O0, return_slot);
  } else if (type == T_INT || is_subword_type(type)) {
    int type_size = type2aelembytes(T_INT);
    __ store_sized_value(O0, return_slot, type_size);
  } else if (type == T_LONG) {
    // store the value by parts
    // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
#if !defined(_LP64) && defined(COMPILER2)
    __ stx(G1, return_slot);
#else
#ifdef _LP64
    __ stx(O0, return_slot);
#else
    if (return_slot.has_disp()) {
      // The displacement is a constant
      __ st(O0, return_slot);
      __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize));
    } else {
      __ std(O0, return_slot);
    }
#endif
#endif
  } else if (type == T_FLOAT) {
    __ stf(FloatRegisterImpl::S, Ftos_f, return_slot);
  } else if (type == T_DOUBLE) {
    __ stf(FloatRegisterImpl::D, Ftos_f, return_slot);
  } else {
    ShouldNotReachHere();
  }
  BLOCK_COMMENT("} move_return_value");
}

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_sp) {
  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
  tty->print_cr("MH %s mh=" INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, (intptr_t) saved_sp);
  if (has_mh)
    print_method_handle(mh);
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);
  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(I5_savedSP, O2);
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
          // OP_PRIM_TO_REF is below...
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
          // OP_COLLECT_ARGS is below...
         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
         |(!UseRicochetFrames ? 0 :
           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
           |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
           )
          )
         );
}
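
// Illustrative sketch (an assumption, not part of the original file): callers
// probe this mask one conversion op at a time, e.g.:
//
//   int mask = MethodHandles::adapter_conversion_ops_supported_mask();
//   bool has_check_cast =
//       (mask & (1 << java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)) != 0;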

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  MethodHandles::EntryKind ek_orig = ek_original_kind(ek);

  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  const Register O0_scratch = O0;
  const Register O1_scratch = O1;
  const Register O2_scratch = O2;
  const Register O3_scratch = O3;
  const Register O4_scratch = O4;
  const Register G5_scratch = G5;

  // Often used names:
  const Register O0_argslot = O0;

  // Argument registers for _raise_exception:
  const Register O0_code     = O0;
  const Register O1_actual   = O1;
  const Register O2_required = O2;

  guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G3_mh_vmtarget(   G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  For sharing purposes the arguments are passed into registers
      // and then placed in the interpreter calling convention here.
1118 assert(raise_exception_method(), "must be set");
1119 assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
1121 __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
1122 __ ld_ptr(Address(G5_method, 0), G5_method);
1124 const int jobject_oop_offset = 0;
1125 __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
1127 adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg);
1129 __ st (O0_code, __ argument_address(constant(2), noreg, 0));
1130 __ st_ptr(O1_actual, __ argument_address(constant(1), noreg, 0));
1131 __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0));
1132 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
1133 }
1134 break;
1136 case _invokestatic_mh:
1137 case _invokespecial_mh:
1138 {
1139 __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop
1140 // Same as TemplateTable::invokestatic or invokespecial,
1141 // minus the CP setup and profiling:
1142 if (ek == _invokespecial_mh) {
1143 // Must load & check the first argument before entering the target method.
1144 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
1145 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
1146 __ null_check(G3_method_handle);
1147 __ verify_oop(G3_method_handle);
1148 }
1149 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
1150 }
1151 break;
1153 case _invokevirtual_mh:
1154 {
1155 // Same as TemplateTable::invokevirtual,
1156 // minus the CP setup and profiling:
1158 // Pick out the vtable index and receiver offset from the MH,
1159 // and then we can discard it:
1160 Register O2_index = O2_scratch;
1161 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
1162 __ ldsw(G3_dmh_vmindex, O2_index);
1163 // Note: The verifier allows us to ignore G3_mh_vmtarget.
1164 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
1165 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
1167 // Get receiver klass:
1168 Register O0_klass = O0_argslot;
1169 __ load_klass(G3_method_handle, O0_klass);
1170 __ verify_oop(O0_klass);
1172 // Get target methodOop & entry point:
1173 const int base = instanceKlass::vtable_start_offset() * wordSize;
1174 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
1176 __ sll_ptr(O2_index, LogBytesPerWord, O2_index);
1177 __ add(O0_klass, O2_index, O0_klass);
1178 Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
1179 __ ld_ptr(vtable_entry_addr, G5_method);
1181 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
1182 }
1183 break;
1185 case _invokeinterface_mh:
1186 {
1187 // Same as TemplateTable::invokeinterface,
1188 // minus the CP setup and profiling:
1189 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
1190 Register O1_intf = O1_scratch;
1191 Register G5_index = G5_scratch;
1192 __ load_heap_oop(G3_mh_vmtarget, O1_intf);
1193 __ ldsw(G3_dmh_vmindex, G5_index);
1194 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
1195 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
1197 // Get receiver klass:
1198 Register O0_klass = O0_argslot;
1199 __ load_klass(G3_method_handle, O0_klass);
1200 __ verify_oop(O0_klass);
1202 // Get interface:
1203 Label no_such_interface;
1204 __ verify_oop(O1_intf);
1205 __ lookup_interface_method(O0_klass, O1_intf,
1206 // Note: next two args must be the same:
1207 G5_index, G5_method,
1208 O2_scratch,
1209 O3_scratch,
1210 no_such_interface);
1212 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
1214 __ bind(no_such_interface);
1215 // Throw an exception.
1216 // For historical reasons, it will be IncompatibleClassChangeError.
1217 __ unimplemented("not tested yet");
1218 __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required); // required interface
1219 __ mov( O0_klass, O1_actual); // bad receiver
1220 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
1221 __ delayed()->mov(Bytecodes::_invokeinterface, O0_code); // who is complaining?
1222 }
1223 break;
1225 case _bound_ref_mh:
1226 case _bound_int_mh:
1227 case _bound_long_mh:
1228 case _bound_ref_direct_mh:
1229 case _bound_int_direct_mh:
1230 case _bound_long_direct_mh:
1231 {
1232 const bool direct_to_method = (ek >= _bound_ref_direct_mh);
1233 BasicType arg_type = ek_bound_mh_arg_type(ek);
1234 int arg_slots = type2size[arg_type];
1236 // Make room for the new argument:
1237 load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot);
1238 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
1240 insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
1242 // Store bound argument into the new stack slot:
1243 __ load_heap_oop(G3_bmh_argument, O1_scratch);
1244 if (arg_type == T_OBJECT) {
1245 __ st_ptr(O1_scratch, Address(O0_argslot, 0));
1246 } else {
1247 Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
1248 move_typed_arg(_masm, arg_type, false,
1249 prim_value_addr,
1250 Address(O0_argslot, 0),
1251 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
1252 }
1254 if (direct_to_method) {
1255 __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop
1256 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
1257 } else {
1258 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); // target is a methodOop
1259 __ verify_oop(G3_method_handle);
1260 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1261 }
1262 }
1263 break;
1265 case _adapter_opt_profiling:
1266 if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) {
1267 Address G3_mh_vmcount(G3_method_handle, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes());
1268 __ ld(G3_mh_vmcount, O1_scratch);
1269 __ add(O1_scratch, 1, O1_scratch);
1270 __ st(O1_scratch, G3_mh_vmcount);
1271 }
1272 // fall through
1274 case _adapter_retype_only:
1275 case _adapter_retype_raw:
1276 // Immediately jump to the next MH layer:
1277 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1278 __ verify_oop(G3_method_handle);
1279 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1280 // This is OK when all parameter types widen.
1281 // It is also OK when a return type narrows.
1282 break;
1284 case _adapter_check_cast:
1285 {
1286 // Check a reference argument before jumping to the next layer of MH:
1287 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
1288 Address vmarg = __ argument_address(O0_argslot, O0_argslot);
1290 // What class are we casting to?
1291 Register O1_klass = O1_scratch; // Interesting AMH data.
1292 __ load_heap_oop(G3_amh_argument, O1_klass); // This is a Class object!
1293 load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch);
1295 Label L_done;
1296 __ ld_ptr(vmarg, O2_scratch);
1297 __ br_null_short(O2_scratch, Assembler::pn, L_done); // No cast if null.
1298 __ load_klass(O2_scratch, O2_scratch);
1300 // Live at this point:
1301 // - O0_argslot : argslot index in vmarg; may be required in the failing path
1302 // - O1_klass : klass required by the target method
1303 // - O2_scratch : argument klass to test
1304 // - G3_method_handle: adapter method handle
1305 __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done);
1307 // If we get here, the type check failed!
1308 __ load_heap_oop(G3_amh_argument, O2_required); // required class
1309 __ ld_ptr( vmarg, O1_actual); // bad object
1310 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
1311 __ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining?
1313 __ BIND(L_done);
1314 // Get the new MH:
1315 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1316 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1317 }
1318 break;
1320 case _adapter_prim_to_prim:
1321 case _adapter_ref_to_prim:
1322 // Handled completely by optimized cases.
1323 __ stop("init_AdapterMethodHandle should not issue this");
1324 break;
1326 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
1327 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim
1328 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim
1329 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim
1330 {
1331 // Perform an in-place conversion to int or an int subword.
1332 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
1333 Address value;
1334 Address vmarg;
1335 bool value_left_justified = false;
1337 switch (ek) {
1338 case _adapter_opt_i2i:
1339 value = vmarg = __ argument_address(O0_argslot, O0_argslot);
1340 break;
1341 case _adapter_opt_l2i:
1342 {
1343 // just delete the extra slot
1344 #ifdef _LP64
1345 // In V9, longs are given 2 64-bit slots in the interpreter, but the
1346 // data is passed in only 1 slot.
1347 // Keep the second slot.
1348 __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot);
1349 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
1350 value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value.
1351 vmarg = Address(O0_argslot, Interpreter::stackElementSize);
1352 #else
1353 // Keep the first slot.
1354 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
1355 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
1356 value = Address(O0_argslot, 0);
1357 vmarg = value;
1358 #endif
1359 }
1360 break;
1361 case _adapter_opt_unboxi:
1362 {
1363 vmarg = __ argument_address(O0_argslot, O0_argslot);
1364 // Load the value up from the heap.
1365 __ ld_ptr(vmarg, O1_scratch);
1366 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
1367 #ifdef ASSERT
1368 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
1369 if (is_subword_type(BasicType(bt)))
1370 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
1371 }
1372 #endif
1373 __ null_check(O1_scratch, value_offset);
1374 value = Address(O1_scratch, value_offset);
1375 #ifdef _BIG_ENDIAN
1376 // Values stored in objects are packed.
1377 value_left_justified = true;
1378 #endif
1379 }
1380 break;
1381 default:
1382 ShouldNotReachHere();
1383 }
1385 // Load the conversion word; its low bits give the shift count used below, and CONV_VMINFO_SIGN_FLAG selects sign- vs. zero-extension.
1386 Register G5_vminfo = G5_scratch;
1387 __ ldsw(G3_amh_conversion, G5_vminfo);
1388 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
1390 // Original 32-bit vmdata word must be of this form:
1391 // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
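// Worked example (a sketch): for int->byte the shift count in vminfo is 24
// and CONV_VMINFO_SIGN_FLAG is set, so the code emits sll 24; sra 24
// (sign-extending the low byte). For int->char the count is 16 and the flag
// is clear, giving sll 16; srl 16 (zero-extending the low 16 bits).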
1392 __ lduw(value, O1_scratch);
1393 if (!value_left_justified)
1394 __ sll(O1_scratch, G5_vminfo, O1_scratch);
1395 Label zero_extend, done;
1396 __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
1397 __ br(Assembler::zero, false, Assembler::pn, zero_extend);
1398 __ delayed()->nop();
1400 // This path is taken for int->byte and int->short (sign-extension).
1401 __ sra(O1_scratch, G5_vminfo, O1_scratch);
1402 __ ba_short(done);
1404 __ bind(zero_extend);
1405 // This path is taken for int->char (zero-extension).
1406 __ srl(O1_scratch, G5_vminfo, O1_scratch);
1408 __ bind(done);
1409 __ st(O1_scratch, vmarg);
1411 // Get the new MH:
1412 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1413 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1414 }
1415 break;
1417 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim
1418 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim
1419 {
1420 // Perform an in-place int-to-long or ref-to-long conversion.
1421 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
1423 // On a big-endian machine we duplicate the slot and store the MSW
1424 // in the first slot.
1425 __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot);
1427 insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
1429 Address arg_lsw(O0_argslot, 0);
1430 Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
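// Slot sketch: arg_lsw (offset 0) still holds the incoming int or boxed
// reference; arg_msw (offset -stackElementSize) is where st_long below
// deposits the widened 64-bit value.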
1432 switch (ek) {
1433 case _adapter_opt_i2l:
1434 {
1435 #ifdef _LP64
1436 __ ldsw(arg_lsw, O2_scratch); // Load LSW sign-extended
1437 #else
1438 __ ldsw(arg_lsw, O3_scratch); // Load LSW sign-extended
1439 __ srlx(O3_scratch, BitsPerInt, O2_scratch); // Move MSW value to lower 32-bits for std
1440 #endif
1441 __ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64
1442 }
1443 break;
1444 case _adapter_opt_unboxl:
1445 {
1446 // Load the value up from the heap.
1447 __ ld_ptr(arg_lsw, O1_scratch);
1448 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
1449 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
1450 __ null_check(O1_scratch, value_offset);
1451 __ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64
1452 __ st_long(O2_scratch, arg_msw);
1453 }
1454 break;
1455 default:
1456 ShouldNotReachHere();
1457 }
1459 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1460 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1461 }
1462 break;
1464 case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim
1465 case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim
1466 {
1467 // perform an in-place floating primitive conversion
1468 __ unimplemented(entry_name(ek));
1469 }
1470 break;
1472 case _adapter_prim_to_ref:
1473 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
1474 break;
1476 case _adapter_swap_args:
1477 case _adapter_rot_args:
1478 // handled completely by optimized cases
1479 __ stop("init_AdapterMethodHandle should not issue this");
1480 break;
1482 case _adapter_opt_swap_1:
1483 case _adapter_opt_swap_2:
1484 case _adapter_opt_rot_1_up:
1485 case _adapter_opt_rot_1_down:
1486 case _adapter_opt_rot_2_up:
1487 case _adapter_opt_rot_2_down:
1488 {
1489 int swap_slots = ek_adapter_opt_swap_slots(ek);
1490 int rotate = ek_adapter_opt_swap_mode(ek);
1492 // 'argslot' is the position of the first argument to swap.
1493 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
1494 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
1495 if (VerifyMethodHandles)
1496 verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame");
1498 // 'vminfo' is the second.
1499 Register O1_destslot = O1_scratch;
1500 load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot);
1501 __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot);
1502 if (VerifyMethodHandles)
1503 verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame");
1505 assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
1506 if (!rotate) {
1507 // simple swap
1508 for (int i = 0; i < swap_slots; i++) {
1509 __ ld_ptr( Address(O0_argslot, i * wordSize), O2_scratch);
1510 __ ld_ptr( Address(O1_destslot, i * wordSize), O3_scratch);
1511 __ st_ptr(O3_scratch, Address(O0_argslot, i * wordSize));
1512 __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize));
1513 }
1514 } else {
1515 // A rotate is actually a pair of moves, with an "odd slot" (or pair)
1516 // changing place with a series of other slots.
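// Sketch for swap_slots == 1, rotate > 0: the odd slot is loaded into
// O3_scratch, move_arg_slots_up shifts the slots in [destslot, argslot)
// up by one, and the odd slot is then stored at destslot.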
1517 // First, push the "odd slot", which is going to get overwritten
1518 switch (swap_slots) {
1519 case 2 : __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru
1520 case 1 : __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break;
1521 default: ShouldNotReachHere();
1522 }
1523 if (rotate > 0) {
1524 // Here is rotate > 0:
1525 // (low mem) (high mem)
1526 // | dest: more_slots... | arg: odd_slot :arg+1 |
1527 // =>
1528 // | dest: odd_slot | dest+1: more_slots... :arg+1 |
1529 // work argslot down to destslot, copying contiguous data upwards
1530 // pseudo-code:
1531 // argslot = src_addr - swap_bytes
1532 // destslot = dest_addr
1533 // while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--;
1534 move_arg_slots_up(_masm,
1535 O1_destslot,
1536 Address(O0_argslot, 0),
1537 swap_slots,
1538 O0_argslot, O2_scratch);
1539 } else {
1540 // Here is the other direction, rotate < 0:
1541 // (low mem) (high mem)
1542 // | arg: odd_slot | arg+1: more_slots... :dest+1 |
1543 // =>
1544 // | arg: more_slots... | dest: odd_slot :dest+1 |
1545 // work argslot up to destslot, copying contiguous data downwards
1546 // pseudo-code:
1547 // argslot = src_addr + swap_bytes
1548 // destslot = dest_addr
1549 // while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++;
1550 // dest_slot denotes an exclusive upper limit
1551 int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
1552 if (limit_bias != 0)
1553 __ add(O1_destslot, - limit_bias * wordSize, O1_destslot);
1554 move_arg_slots_down(_masm,
1555 Address(O0_argslot, swap_slots * wordSize),
1556 O1_destslot,
1557 -swap_slots,
1558 O0_argslot, O2_scratch);
1560 __ sub(O1_destslot, swap_slots * wordSize, O1_destslot);
1561 }
1562 // pop the original first chunk into the destination slot, now free
1563 switch (swap_slots) {
1564 case 2 : __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru
1565 case 1 : __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break;
1566 default: ShouldNotReachHere();
1567 }
1568 }
1570 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1571 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1572 }
1573 break;
1575 case _adapter_dup_args:
1576 {
1577 // 'argslot' is the position of the first argument to duplicate.
1578 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
1579 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
1581 // 'stack_move' is the negative of the number of words to duplicate.
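// E.g. (a sketch): duplicating two one-word arguments arrives here as
// stack_move == -2; the __ neg below flips the sign, and push_arg_slots
// then copies those 2 slots to the bottom of the argument list.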
1582 Register O1_stack_move = O1_scratch;
1583 load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
1585 if (VerifyMethodHandles) {
1586 verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true,
1587 "copied argument(s) must fall within current frame");
1588 }
1590 // insert location is always the bottom of the argument list:
1591 __ neg(O1_stack_move);
1592 push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch);
1594 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1595 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1596 }
1597 break;
1599 case _adapter_drop_args:
1600 {
1601 // 'argslot' is the position of the first argument to nuke.
1602 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
1603 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
1605 // 'stack_move' is the number of words to drop.
1606 Register O1_stack_move = O1_scratch;
1607 load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
1609 remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch);
1611 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
1612 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
1613 }
1614 break;
1616 case _adapter_collect_args:
1617 case _adapter_fold_args:
1618 case _adapter_spread_args:
1619 // Handled completely by optimized cases.
1620 __ stop("init_AdapterMethodHandle should not issue this");
1621 break;
1623 case _adapter_opt_collect_ref:
1624 case _adapter_opt_collect_int:
1625 case _adapter_opt_collect_long:
1626 case _adapter_opt_collect_float:
1627 case _adapter_opt_collect_double:
1628 case _adapter_opt_collect_void:
1629 case _adapter_opt_collect_0_ref:
1630 case _adapter_opt_collect_1_ref:
1631 case _adapter_opt_collect_2_ref:
1632 case _adapter_opt_collect_3_ref:
1633 case _adapter_opt_collect_4_ref:
1634 case _adapter_opt_collect_5_ref:
1635 case _adapter_opt_filter_S0_ref:
1636 case _adapter_opt_filter_S1_ref:
1637 case _adapter_opt_filter_S2_ref:
1638 case _adapter_opt_filter_S3_ref:
1639 case _adapter_opt_filter_S4_ref:
1640 case _adapter_opt_filter_S5_ref:
1641 case _adapter_opt_collect_2_S0_ref:
1642 case _adapter_opt_collect_2_S1_ref:
1643 case _adapter_opt_collect_2_S2_ref:
1644 case _adapter_opt_collect_2_S3_ref:
1645 case _adapter_opt_collect_2_S4_ref:
1646 case _adapter_opt_collect_2_S5_ref:
1647 case _adapter_opt_fold_ref:
1648 case _adapter_opt_fold_int:
1649 case _adapter_opt_fold_long:
1650 case _adapter_opt_fold_float:
1651 case _adapter_opt_fold_double:
1652 case _adapter_opt_fold_void:
1653 case _adapter_opt_fold_1_ref:
1654 case _adapter_opt_fold_2_ref:
1655 case _adapter_opt_fold_3_ref:
1656 case _adapter_opt_fold_4_ref:
1657 case _adapter_opt_fold_5_ref:
1658 {
1659 // Given a fresh incoming stack frame, build a new ricochet frame.
1660 // On entry, O7 holds the return address, and FP is the caller's frame ptr.
1661 // Gargs has the caller's argument base, which we must also preserve.
1662 // G3_method_handle contains an AdapterMethodHandle of the indicated kind.
1664 // Relevant AMH fields:
1665 // amh.vmargslot:
1666 // points to the trailing edge of the arguments
1667 // to filter, collect, or fold. For a boxing operation,
1668 // it points just after the single primitive value.
1669 // amh.argument:
1670 // recursively called MH, on |collect| arguments
1671 // amh.vmtarget:
1672 // final destination MH, on return value, etc.
1673 // amh.conversion.dest:
1674 // tells what is the type of the return value
1675 // (not needed here, since dest is also derived from ek)
1676 // amh.conversion.vminfo:
1677 // points to the trailing edge of the return value
1678 // when the vmtarget is to be called; this is
1679 // equal to vmargslot + (retained ? |collect| : 0)
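// Worked example (a sketch): folding |collect| == 2 arguments whose trailing
// edge is at vmargslot == 3 gives vminfo == 5; a plain (non-retaining)
// collect of the same arguments leaves vminfo == 3.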
1681 // Pass 0 or more argument slots to the recursive target.
1682 int collect_count_constant = ek_adapter_opt_collect_count(ek);
1684 // The collected arguments are copied from the saved argument list:
1685 int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
1687 assert(ek_orig == _adapter_collect_args ||
1688 ek_orig == _adapter_fold_args, "");
1689 bool retain_original_args = (ek_orig == _adapter_fold_args);
1691 // The return value is replaced (or inserted) at the 'vminfo' argslot.
1692 // Sometimes we can compute this statically.
1693 int dest_slot_constant = -1;
1694 if (!retain_original_args)
1695 dest_slot_constant = collect_slot_constant;
1696 else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
1697 // We are preserving all the arguments, and the return value is prepended,
1698 // so the return slot is to the left of (above) the |collect| sequence.
1699 dest_slot_constant = collect_slot_constant + collect_count_constant;
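// E.g. (a sketch): with collect_slot == 1 and |collect| == 2, a plain
// collect gives dest_slot == 1 (the return value replaces the collected
// args), while a fold gives dest_slot == 1 + 2 == 3 (the return value
// lands just above the retained args).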
1701 // Replace all those slots by the result of the recursive call.
1702 // The result type can be one of ref, int, long, float, double, void.
1703 // In the case of void, nothing is pushed on the stack after return.
1704 BasicType dest = ek_adapter_opt_collect_type(ek);
1705 assert(dest == type2wfield[dest], "dest is a stack slot type");
1706 int dest_count = type2size[dest];
1707 assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
1709 // Choose a return continuation.
1710 EntryKind ek_ret = _adapter_opt_return_any;
1711 if (dest != T_CONFLICT && OptimizeMethodHandles) {
1712 switch (dest) {
1713 case T_INT : ek_ret = _adapter_opt_return_int; break;
1714 case T_LONG : ek_ret = _adapter_opt_return_long; break;
1715 case T_FLOAT : ek_ret = _adapter_opt_return_float; break;
1716 case T_DOUBLE : ek_ret = _adapter_opt_return_double; break;
1717 case T_OBJECT : ek_ret = _adapter_opt_return_ref; break;
1718 case T_VOID : ek_ret = _adapter_opt_return_void; break;
1719 default : ShouldNotReachHere();
1720 }
1721 if (dest == T_OBJECT && dest_slot_constant >= 0) {
1722 EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
1723 if (ek_try <= _adapter_opt_return_LAST &&
1724 ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
1725 ek_ret = ek_try;
1726 }
1727 }
1728 assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
1729 }
1731 // Already pushed: ... keep1 | collect | keep2 |
1733 // Push a few extra argument words, if we need them to store the return value.
1734 {
1735 int extra_slots = 0;
1736 if (retain_original_args) {
1737 extra_slots = dest_count;
1738 } else if (collect_count_constant == -1) {
1739 extra_slots = dest_count; // collect_count might be zero; be generous
1740 } else if (dest_count > collect_count_constant) {
1741 extra_slots = (dest_count - collect_count_constant);
1742 } else {
1743 // We know we have enough dead space in |collect| to repurpose for return values.
1744 }
1745 if (extra_slots != 0) {
1746 __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP);
1747 }
1748 }
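// E.g. (a sketch): a fold producing a long (dest_count == 2) always needs
// 2 extra slots (rounded to an even count for SP alignment); a plain
// collect of 3 slots into an int needs none, because the dead |collect|
// space is reused for the return value.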
1750 // Set up Ricochet Frame.
1751 __ mov(SP, O5_savedSP); // record SP for the callee
1753 // One extra (empty) slot for outgoing target MH (see Gargs computation below).
1754 __ save_frame(2); // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23.
1756 // Note: Gargs is live throughout the following, until we make our recursive call.
1757 // And the RF saves a copy in L4_saved_args_base.
1759 RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs,
1760 entry(ek_ret)->from_interpreted_entry());
1762 // Compute argument base:
1763 // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above).
1764 __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs);
1766 // Now pushed: ... keep1 | collect | keep2 | extra | [RF]
1768 #ifdef ASSERT
1769 if (VerifyMethodHandles && dest != T_CONFLICT) {
1770 BLOCK_COMMENT("verify AMH.conv.dest {");
1771 extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch);
1772 Label L_dest_ok;
1773 __ cmp(O1_scratch, (int) dest);
1774 __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
1775 __ delayed()->nop();
1776 if (dest == T_INT) {
1777 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
1778 if (is_subword_type(BasicType(bt))) {
1779 __ cmp(O1_scratch, (int) bt);
1780 __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
1781 __ delayed()->nop();
1782 }
1783 }
1784 }
1785 __ stop("bad dest in AMH.conv");
1786 __ BIND(L_dest_ok);
1787 BLOCK_COMMENT("} verify AMH.conv.dest");
1788 }
1789 #endif //ASSERT
1791 // Find out where the original copy of the recursive argument sequence begins.
1792 Register O0_coll = O0_scratch;
1793 {
1794 RegisterOrConstant collect_slot = collect_slot_constant;
1795 if (collect_slot_constant == -1) {
1796 load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch);
1797 collect_slot = O1_scratch;
1798 }
1799 // collect_slot might be 0, but we need the move anyway.
1800 __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll);
1801 // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2|
1802 }
1804 // Replace the old AMH with the recursive MH. (No going back now.)
1805 // In the case of a boxing call, the recursive call is to a 'boxer' method,
1806 // such as Integer.valueOf or Long.valueOf. In the case of a filter
1807 // or collect call, it will take one or more arguments, transform them,
1808 // and return some result, to store back into argument_base[vminfo].
1809 __ load_heap_oop(G3_amh_argument, G3_method_handle);
1810 if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch);
1812 // Calculate |collect|, the number of arguments we are collecting.
1813 Register O1_collect_count = O1_scratch;
1814 RegisterOrConstant collect_count;
1815 if (collect_count_constant < 0) {
1816 __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch);
1817 collect_count = O1_collect_count;
1818 } else {
1819 collect_count = collect_count_constant;
1820 #ifdef ASSERT
1821 if (VerifyMethodHandles) {
1822 BLOCK_COMMENT("verify collect_count_constant {");
1823 __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch);
1824 Label L_count_ok;
1825 __ cmp_and_br_short(O3_scratch, collect_count_constant, Assembler::equal, Assembler::pt, L_count_ok);
1826 __ stop("bad vminfo in AMH.conv");
1827 __ BIND(L_count_ok);
1828 BLOCK_COMMENT("} verify collect_count_constant");
1829 }
1830 #endif //ASSERT
1831 }
1833 // copy |collect| slots directly to TOS:
1834 push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch);
1835 // Now pushed: ... keep1 | collect | keep2 | RF... | collect |
1836 // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2|
1838 // If necessary, adjust the saved arguments to make room for the eventual return value.
1839 // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
1840 // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect |
1841 // In the non-retaining case, this might move keep2 either up or down.
1842 // We don't have to copy the whole | RF... collect | complex,
1843 // but we must adjust RF.saved_args_base.
1844 // Also, from now on, we will forget about the original copy of |collect|.
1845 // If we are retaining it, we will treat it as part of |keep2|.
1846 // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
1848 BLOCK_COMMENT("adjust trailing arguments {");
1849 // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
1850 int open_count = dest_count;
1851 RegisterOrConstant close_count = collect_count_constant;
1852 Register O1_close_count = O1_collect_count;
1853 if (retain_original_args) {
1854 close_count = constant(0);
1855 } else if (collect_count_constant == -1) {
1856 close_count = O1_collect_count;
1857 }
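// E.g. (a sketch): collecting 2 slots into an int gives open_count == 1
// and close_count == 2, so |keep3| must move up by one slot overall
// (the emit_move_up path below).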
1859 // How many slots need moving? This is simply dest_slot (0 => no |keep3|).
1860 RegisterOrConstant keep3_count;
1861 Register O2_keep3_count = O2_scratch;
1862 if (dest_slot_constant < 0) {
1863 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count);
1864 keep3_count = O2_keep3_count;
1865 } else {
1866 keep3_count = dest_slot_constant;
1867 #ifdef ASSERT
1868 if (VerifyMethodHandles) { // dest_slot_constant >= 0 in this branch, so the vminfo check below is live
1869 BLOCK_COMMENT("verify dest_slot_constant {");
1870 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch);
1871 Label L_vminfo_ok;
1872 __ cmp_and_br_short(O3_scratch, dest_slot_constant, Assembler::equal, Assembler::pt, L_vminfo_ok);
1873 __ stop("bad vminfo in AMH.conv");
1874 __ BIND(L_vminfo_ok);
1875 BLOCK_COMMENT("} verify dest_slot_constant");
1876 }
1877 #endif //ASSERT
1878 }
1880 // tasks remaining:
1881 bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
1882 bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
1883 bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
1885 // Old and new argument locations (based at slot 0).
1886 // Net shift (&new_argv - &old_argv) is (close_count - open_count).
1887 bool zero_open_count = (open_count == 0); // remember this bit of info
1888 if (move_keep3 && fix_arg_base) {
1889 // It will be easier to have everything in one register:
1890 if (close_count.is_register()) {
1891 // Deduct open_count from close_count register to get a clean +/- value.
1892 __ sub(close_count.as_register(), open_count, close_count.as_register());
1893 } else {
1894 close_count = close_count.as_constant() - open_count;
1895 }
1896 open_count = 0;
1897 }
1898 Register L4_old_argv = RicochetFrame::L4_saved_args_base;
1899 Register O3_new_argv = O3_scratch;
1900 if (fix_arg_base) {
1901 __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv,
1902 -(open_count * Interpreter::stackElementSize));
1903 }
1905 // First decide if any actual data are to be moved.
1906 // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
1907 // (As it happens, all movements involve an argument list size change.)
1909 // If there are variable parameters, use dynamic checks to skip around the whole mess.
1910 Label L_done;
1911 if (keep3_count.is_register()) {
1912 __ cmp_and_br_short(keep3_count.as_register(), 0, Assembler::equal, Assembler::pn, L_done);
1913 }
1914 if (close_count.is_register()) {
1915 __ cmp_and_br_short(close_count.as_register(), open_count, Assembler::equal, Assembler::pn, L_done);
1916 }
1918 if (move_keep3 && fix_arg_base) {
1919 bool emit_move_down = false, emit_move_up = false, emit_guard = false;
1920 if (!close_count.is_constant()) {
1921 emit_move_down = emit_guard = !zero_open_count;
1922 emit_move_up = true;
1923 } else if (open_count != close_count.as_constant()) {
1924 emit_move_down = (open_count > close_count.as_constant());
1925 emit_move_up = !emit_move_down;
1926 }
1927 Label L_move_up;
1928 if (emit_guard) {
1929 __ cmp(close_count.as_register(), open_count);
1930 __ br(Assembler::greater, false, Assembler::pn, L_move_up);
1931 __ delayed()->nop();
1932 }
1934 if (emit_move_down) {
1935 // Move arguments down if |+dest+| > |-collect-|
1936 // (This is rare, except when arguments are retained.)
1937 // This opens space for the return value.
1938 if (keep3_count.is_constant()) {
1939 for (int i = 0; i < keep3_count.as_constant(); i++) {
1940 __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
1941 __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) );
1942 }
1943 } else {
1944 // Live: O1_close_count, O2_keep3_count, O3_new_argv
1945 Register argv_top = O0_scratch;
1946 __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top);
1947 move_arg_slots_down(_masm,
1948 Address(L4_old_argv, 0), // beginning of old argv
1949 argv_top, // end of old argv
1950 close_count, // distance to move down (must be negative)
1951 O4_scratch, G5_scratch);
1952 }
1953 }
1955 if (emit_guard) {
1956 __ ba_short(L_done); // assumes emit_move_up is true also
1957 __ BIND(L_move_up);
1958 }
1960 if (emit_move_up) {
1961 // Move arguments up if |+dest+| < |-collect-|
1962 // (This is the usual case, except when |keep3| is empty.)
1963 // This closes up the space occupied by the now-deleted collect values.
1964 if (keep3_count.is_constant()) {
1965 for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
1966 __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
1967 __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) );
1968 }
1969 } else {
1970 Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch));
1971 // Live: O1_close_count, O2_keep3_count, O3_new_argv
1972 move_arg_slots_up(_masm,
1973 L4_old_argv, // beginning of old argv
1974 argv_top, // end of old argv
1975 close_count, // distance to move up (must be positive)
1976 O4_scratch, G5_scratch);
1977 }
1978 }
1979 }
1980 __ BIND(L_done);
1982 if (fix_arg_base) {
1983 // adjust RF.saved_args_base
1984 __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base);
1985 }
1987 if (stomp_dest) {
1988 // Stomp the return slot, so it doesn't hold garbage.
1989 // This isn't strictly necessary, but it may help detect bugs.
1990 __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch);
1991 __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base,
1992 __ argument_offset(keep3_count, keep3_count.register_or_noreg()))); // uses O2_keep3_count
1993 }
1994 BLOCK_COMMENT("} adjust trailing arguments");
1996 BLOCK_COMMENT("do_recursive_call");
1997 __ mov(SP, O5_savedSP); // record SP for the callee
1998 __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7);
1999 // The globally unique bounce address has two purposes:
2000 // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
2001 // 2. When returned to, it cuts back the stack and redirects control flow
2002 // to the return handler.
2003 // The return handler will further cut back the stack when it takes
2004 // down the RF. Perhaps there is a way to streamline this further.
2006 // State during recursive call:
2007 // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
2008 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
2009 }
2010 break;
2012 case _adapter_opt_return_ref:
2013 case _adapter_opt_return_int:
2014 case _adapter_opt_return_long:
2015 case _adapter_opt_return_float:
2016 case _adapter_opt_return_double:
2017 case _adapter_opt_return_void:
2018 case _adapter_opt_return_S0_ref:
2019 case _adapter_opt_return_S1_ref:
2020 case _adapter_opt_return_S2_ref:
2021 case _adapter_opt_return_S3_ref:
2022 case _adapter_opt_return_S4_ref:
2023 case _adapter_opt_return_S5_ref:
2024 {
2025 BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
2026 int dest_slot_constant = ek_adapter_opt_return_slot(ek);
2028 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2030 if (dest_slot_constant == -1) {
2031 // The current stub is a general handler for this dest_type.
2032 // It can be called from _adapter_opt_return_any below.
2033 // Stash the address in a little table.
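// (Sketch: the table is indexed by BasicType, so _adapter_opt_return_any
//  below can simply load _adapter_return_handlers[dest_type] and jump.)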
2034 assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
2035 address return_handler = __ pc();
2036 _adapter_return_handlers[dest_type_constant] = return_handler;
2037 if (dest_type_constant == T_INT) {
2038 // do the subword types too
2039 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
2040 if (is_subword_type(BasicType(bt)) &&
2041 _adapter_return_handlers[bt] == NULL) {
2042 _adapter_return_handlers[bt] = return_handler;
2043 }
2044 }
2045 }
2046 }
2048 // On entry to this continuation handler, make Gargs live again.
2049 __ mov(RicochetFrame::L4_saved_args_base, Gargs);
2051 Register O7_temp = O7;
2052 Register O5_vminfo = O5;
2054 RegisterOrConstant dest_slot = dest_slot_constant;
2055 if (dest_slot_constant == -1) {
2056 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo);
2057 dest_slot = O5_vminfo;
2058 }
2059 // Store the result back into the argslot.
2060 // This code uses the interpreter calling sequence, in which the return value
2061 // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
2062 // There are certain irregularities with floating point values, which can be seen
2063 // in TemplateInterpreterGenerator::generate_return_entry_for.
2064 move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp));
2066 RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7);
2068 // Load the final target and go.
2069 if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch);
2070 __ restore(I5_savedSP, G0, SP);
2071 __ jump_to_method_handle_entry(G3_method_handle, O0_scratch);
2072 __ illtrap(0);
2073 }
2074 break;
2076 case _adapter_opt_return_any:
2077 {
2078 Register O7_temp = O7;
2079 Register O5_dest_type = O5;
2081 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2082 extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type);
2083 __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp);
2084 __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type);
2085 __ ld_ptr(O7_temp, O5_dest_type, O7_temp);
2087 #ifdef ASSERT
2088 { Label L_ok;
2089 __ br_notnull_short(O7_temp, Assembler::pt, L_ok);
2090 __ stop("bad method handle return");
2091 __ BIND(L_ok);
2092 }
2093 #endif //ASSERT
2094 __ JMP(O7_temp, 0);
2095 __ delayed()->nop();
2096 }
2097 break;
2099 case _adapter_opt_spread_0:
2100 case _adapter_opt_spread_1_ref:
2101 case _adapter_opt_spread_2_ref:
2102 case _adapter_opt_spread_3_ref:
2103 case _adapter_opt_spread_4_ref:
2104 case _adapter_opt_spread_5_ref:
2105 case _adapter_opt_spread_ref:
2106 case _adapter_opt_spread_byte:
2107 case _adapter_opt_spread_char:
2108 case _adapter_opt_spread_short:
2109 case _adapter_opt_spread_int:
2110 case _adapter_opt_spread_long:
2111 case _adapter_opt_spread_float:
2112 case _adapter_opt_spread_double:
2113 {
2114 // spread an array out into a group of arguments
2115 int length_constant = ek_adapter_opt_spread_count(ek);
2116 bool length_can_be_zero = (length_constant == 0);
2117 if (length_constant < 0) {
2118 // some adapters with variable length must handle the zero case
2119 if (!OptimizeMethodHandles ||
2120 ek_adapter_opt_spread_type(ek) != T_OBJECT)
2121 length_can_be_zero = true;
2122 }
2124 // find the address of the array argument
2125 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
2126 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
2128 // O0_argslot points both to the array and to the first output arg
2129 Address vmarg = Address(O0_argslot, 0);
2131 // Get the array value.
2132 Register O1_array = O1_scratch;
2133 Register O2_array_klass = O2_scratch;
2134 BasicType elem_type = ek_adapter_opt_spread_type(ek);
2135 int elem_slots = type2size[elem_type]; // 1 or 2
2136 int array_slots = 1; // array is always a T_OBJECT
2137 int length_offset = arrayOopDesc::length_offset_in_bytes();
2138 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
2139 __ ld_ptr(vmarg, O1_array);
2141 Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
2142 if (length_can_be_zero) {
2143 // handle the null pointer case, if zero is allowed
2144 Label L_skip;
2145 if (length_constant < 0) {
2146 load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
2147 __ cmp_zero_and_br(Assembler::notZero, O3_scratch, L_skip);
2148 __ delayed()->nop(); // to avoid back-to-back cbcond instructions
2149 }
2150 __ br_null_short(O1_array, Assembler::pn, L_array_is_empty);
2151 __ BIND(L_skip);
2152 }
2153 __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
2154 __ load_klass(O1_array, O2_array_klass);
2156 // Check the array type.
2157 Register O3_klass = O3_scratch;
2158 __ load_heap_oop(G3_amh_argument, O3_klass); // this is a Class object!
2159 load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch);
2161 Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
2162 __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
2163 // If we get here, the type check failed!
2164 __ ba_short(L_bad_array_klass);
2165 __ BIND(L_ok_array_klass);
2167 // Check length.
2168 if (length_constant >= 0) {
2169 __ ldsw(Address(O1_array, length_offset), O4_scratch);
2170 __ cmp(O4_scratch, length_constant);
2171 } else {
2172 Register O3_vminfo = O3_scratch;
2173 load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo);
2174 __ ldsw(Address(O1_array, length_offset), O4_scratch);
2175 __ cmp(O3_vminfo, O4_scratch);
2176 }
2177 __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length);
2178 __ delayed()->nop();
2180 Register O2_argslot_limit = O2_scratch;
2182 // Array length checks out. Now insert any required stack slots.
2183 if (length_constant == -1) {
2184 // Form a pointer to the end of the affected region.
2185 __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit);
2186 // 'stack_move' is the negative of the number of words to insert.
2187 // This number already accounts for elem_slots.
2188 Register O3_stack_move = O3_scratch;
2189 load_stack_move(_masm, G3_amh_conversion, O3_stack_move);
2190 __ cmp(O3_stack_move, 0);
2191 assert(stack_move_unit() < 0, "else change this comparison");
2192 __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space);
2193 __ delayed()->nop();
2194 __ br(Assembler::equal, false, Assembler::pn, L_copy_args);
2195 __ delayed()->nop();
2196 // single argument case, with no array movement
2197 __ BIND(L_array_is_empty);
2198 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2199 O0_argslot, O1_scratch, O2_scratch, O3_scratch);
2200 __ ba_short(L_args_done); // no spreading to do
2201 __ BIND(L_insert_arg_space);
2202 // come here in the usual case, stack_move < 0 (2 or more spread arguments)
2203 // Live: O1_array, O2_argslot_limit, O3_stack_move
2204 insert_arg_slots(_masm, O3_stack_move,
2205 O0_argslot, O4_scratch, G5_scratch, O1_scratch);
2206 // Reload O1_array from O2_argslot_limit, since O0_argslot is now decremented.
2207 __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array);
2208 } else if (length_constant >= 1) {
2209 int new_slots = (length_constant * elem_slots) - array_slots;
2210 insert_arg_slots(_masm, new_slots * stack_move_unit(),
2211 O0_argslot, O2_scratch, O3_scratch, O4_scratch);
2212 } else if (length_constant == 0) {
2213 __ BIND(L_array_is_empty);
2214 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2215 O0_argslot, O1_scratch, O2_scratch, O3_scratch);
2216 } else {
2217 ShouldNotReachHere();
2218 }
2220 // Copy from the array to the new slots.
2221 // Note: Stack change code preserves integrity of O0_argslot pointer.
2222 // So even after slot insertions, O0_argslot still points to first argument.
2223 // Beware: Arguments that are shallow on the stack are deep in the array,
2224 // and vice versa. So a downward-growing stack (the usual) has to be copied
2225 // elementwise in reverse order from the source array.
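// Loop sketch for the variable-length path below: O4_fill_ptr starts at
// O2_argslot_limit and steps down one element per iteration while O1_source
// steps up through the array, so a[0] lands in the highest slot and the
// last element ends up at O0_argslot.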
2226 __ BIND(L_copy_args);
2227 if (length_constant == -1) {
2228 // [O0_argslot, O2_argslot_limit) is the area we are inserting into.
2229 // Array element [0] goes at O2_argslot_limit[-wordSize].
2230 Register O1_source = O1_array;
2231 __ add(Address(O1_array, elem0_offset), O1_source);
2232 Register O4_fill_ptr = O4_scratch;
2233 __ mov(O2_argslot_limit, O4_fill_ptr);
2234 Label L_loop;
2235 __ BIND(L_loop);
2236 __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr);
2237 move_typed_arg(_masm, elem_type, true,
2238 Address(O1_source, 0), Address(O4_fill_ptr, 0),
2239 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
2240 __ add(O1_source, type2aelembytes(elem_type), O1_source);
2241 __ cmp_and_brx_short(O4_fill_ptr, O0_argslot, Assembler::greaterUnsigned, Assembler::pt, L_loop);
2242 } else if (length_constant == 0) {
2243 // nothing to copy
2244 } else {
2245 int elem_offset = elem0_offset;
2246 int slot_offset = length_constant * Interpreter::stackElementSize;
2247 for (int index = 0; index < length_constant; index++) {
2248 slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward
2249 move_typed_arg(_masm, elem_type, true,
2250 Address(O1_array, elem_offset), Address(O0_argslot, slot_offset),
2251 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
2252 elem_offset += type2aelembytes(elem_type);
2253 }
2254 }
2255 __ BIND(L_args_done);
2257 // Arguments are spread. Move to next method handle.
2258 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
2259 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
2261 __ BIND(L_bad_array_klass);
2262 assert(!vmarg.uses(O2_required), "must be different registers");
2263 __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required); // required class
2264 __ ld_ptr( vmarg, O1_actual); // bad object
2265 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
2266 __ delayed()->mov(Bytecodes::_aaload, O0_code); // who is complaining?
2268 __ bind(L_bad_array_length);
2269 assert(!vmarg.uses(O2_required), "must be different registers");
2270 __ mov( G3_method_handle, O2_required); // "required" operand: the AMH itself, which encodes the expected length
2271 __ ld_ptr(vmarg, O1_actual); // bad object
2272 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
2273 __ delayed()->mov(Bytecodes::_arraylength, O0_code); // who is complaining?
2274 }
2275 break;
2277 default:
2278 DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek)));
2279 ShouldNotReachHere();
2280 }
2281 BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
2283 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
2284 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
2286 init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
2287 }