src/cpu/x86/vm/methodHandles_x86.cpp

changeset 2895:167b70ff3abc
parent    2868:2e038ad0c1d0
child     2903:fabcf26ee72f
67 me->set_end_address(__ pc()); 67 me->set_end_address(__ pc());
68 68
69 return me; 69 return me;
70 } 70 }
71 71
72 // stack walking support
73
74 frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
75 RicochetFrame* f = RicochetFrame::from_frame(fr);
76 if (map->update_map())
77 frame::update_map_with_saved_link(map, &f->_sender_link);
78 return frame(f->extended_sender_sp(), f->exact_sender_sp(), f->sender_link(), f->sender_pc());
79 }
80
81 void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
82 RicochetFrame* f = RicochetFrame::from_frame(fr);
83
84 // pick up the argument type descriptor:
85 Thread* thread = Thread::current();
86 Handle cookie(thread, f->compute_saved_args_layout(true, true));
87
88 // process fixed part
89 blk->do_oop((oop*)f->saved_target_addr());
90 blk->do_oop((oop*)f->saved_args_layout_addr());
91
92 // process variable arguments:
93 if (cookie.is_null()) return; // no arguments to describe
94
95 // the cookie is actually the invokeExact method for my target
96 // his argument signature is what I'm interested in
97 assert(cookie->is_method(), "");
98 methodHandle invoker(thread, methodOop(cookie()));
99 assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
100 assert(!invoker->is_static(), "must have MH argument");
101 int slot_count = invoker->size_of_parameters();
102 assert(slot_count >= 1, "must include 'this'");
103 intptr_t* base = f->saved_args_base();
104 intptr_t* retval = NULL;
105 if (f->has_return_value_slot())
106 retval = f->return_value_slot_addr();
107 int slot_num = slot_count;
108 intptr_t* loc = &base[slot_num -= 1];
109 //blk->do_oop((oop*) loc); // original target, which is irrelevant
110 int arg_num = 0;
111 for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
112 if (ss.at_return_type()) continue;
113 BasicType ptype = ss.type();
114 if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT
115 assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
116 loc = &base[slot_num -= type2size[ptype]];
117 bool is_oop = (ptype == T_OBJECT && loc != retval);
118 if (is_oop) blk->do_oop((oop*)loc);
119 arg_num += 1;
120 }
121 assert(slot_num == 0, "must have processed all the arguments");
122 }
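// For reference, a worked sketch of the slot arithmetic above, using a
// hypothetical invoker signature (Ljava/lang/Object;JI)V on invokeExact:
//   size_of_parameters() = 1 (MH receiver) + 1 (Object) + 2 (long) + 1 (int) = 5
//   base[4]    the MH target itself  -> skipped (its do_oop is commented out above)
//   base[3]    Object argument       -> reported to the OopClosure
//   base[1..2] long argument         -> not an oop, skipped
//   base[0]    int argument          -> not an oop, skipped
// slot_num walks 5 -> 4 -> 3 -> 1 -> 0, satisfying the final assert.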
123
124 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
125 oop cookie = NULL;
126 if (read_cache) {
127 cookie = saved_args_layout();
128 if (cookie != NULL) return cookie;
129 }
130 oop target = saved_target();
131 oop mtype = java_lang_invoke_MethodHandle::type(target);
132 oop mtform = java_lang_invoke_MethodType::form(mtype);
133 cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
134 if (write_cache) {
135 (*saved_args_layout_addr()) = cookie;
136 }
137 return cookie;
138 }
139
140 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
141 // output params:
142 int* frame_size_in_words,
143 int* bounce_offset,
144 int* exception_offset) {
145 (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
146
147 address start = __ pc();
148
72 #ifdef ASSERT 149 #ifdef ASSERT
73 static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, 150 __ hlt(); __ hlt(); __ hlt();
74 const char* error_message) { 151 // here's a hint of something special:
152 __ push(MAGIC_NUMBER_1);
153 __ push(MAGIC_NUMBER_2);
154 #endif //ASSERT
155 __ hlt(); // not reached
156
157 // A return PC has just been popped from the stack.
158 // Return values are in registers.
159 // The ebp points into the RicochetFrame, which contains
160 // a cleanup continuation we must return to.
161
162 (*bounce_offset) = __ pc() - start;
163 BLOCK_COMMENT("ricochet_blob.bounce");
164
165 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
166 trace_method_handle(_masm, "ricochet_blob.bounce");
167
168 __ jmp(frame_address(continuation_offset_in_bytes()));
169 __ hlt();
170 DEBUG_ONLY(__ push(MAGIC_NUMBER_2));
171
172 (*exception_offset) = __ pc() - start;
173 BLOCK_COMMENT("ricochet_blob.exception");
174
175 // compare this to Interpreter::rethrow_exception_entry, which is parallel code
176 // for example, see TemplateInterpreterGenerator::generate_throw_exception
177 // Live registers in:
178 // rax: exception
179 // rdx: return address/pc that threw exception (ignored, always equal to bounce addr)
180 __ verify_oop(rax);
181
182 // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed
183
184 // Take down the frame.
185
186 // Cf. InterpreterMacroAssembler::remove_activation.
187 leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg,
188 saved_last_sp_register(),
189 /*sender_pc_reg=*/ rdx);
190
191 // In between activations - previous activation type unknown yet
192 // compute continuation point - the continuation point expects the
193 // following registers set up:
194 //
195 // rax: exception
196 // rdx: return address/pc that threw exception
197 // rsp: expression stack of caller
198 // rbp: ebp of caller
199 __ push(rax); // save exception
200 __ push(rdx); // save return address
201 Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi);
202 NOT_LP64(__ get_thread(thread_reg));
203 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
204 SharedRuntime::exception_handler_for_return_address),
205 thread_reg, rdx);
206 __ mov(rbx, rax); // save exception handler
207 __ pop(rdx); // restore return address
208 __ pop(rax); // restore exception
209 __ jmp(rbx); // jump to exception
210 // handler of caller
211 }
212
213 void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
214 Register rcx_recv,
215 Register rax_argv,
216 address return_handler,
217 Register rbx_temp) {
218 const Register saved_last_sp = saved_last_sp_register();
219 Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
220 Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
221
222 // Push the RicochetFrame a word at a time.
223 // This creates something similar to an interpreter frame.
224 // Cf. TemplateInterpreterGenerator::generate_fixed_frame.
225 BLOCK_COMMENT("push RicochetFrame {");
226 DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame));
227 assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), "");
228 #define RF_FIELD(push_value, name) \
229 { push_value; \
230 assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); }
231 RF_FIELD(__ push(rbp), sender_link);
232 RF_FIELD(__ push(saved_last_sp), exact_sender_sp); // rsi/r13
233 RF_FIELD(__ pushptr(rcx_amh_conversion), conversion);
234 RF_FIELD(__ push(rax_argv), saved_args_base); // can be updated if args are shifted
235 RF_FIELD(__ push((int32_t) NULL_WORD), saved_args_layout); // cache for GC layout cookie
236 if (UseCompressedOops) {
237 __ load_heap_oop(rbx_temp, rcx_mh_vmtarget);
238 RF_FIELD(__ push(rbx_temp), saved_target);
239 } else {
240 RF_FIELD(__ pushptr(rcx_mh_vmtarget), saved_target);
241 }
242 __ lea(rbx_temp, ExternalAddress(return_handler));
243 RF_FIELD(__ push(rbx_temp), continuation);
244 #undef RF_FIELD
245 assert(rfo == 0, "fully initialized the RicochetFrame");
246 // compute new frame pointer:
247 __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes()));
248 // Push guard word #1 in debug mode.
249 DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1));
250 // For debugging, leave behind an indication of which stub built this frame.
251 DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); });
252 BLOCK_COMMENT("} RicochetFrame");
253 }
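// The pushes above build the frame bottom-up; together with the offset asserts
// they imply a layout like the following sketch (lowest address first). The
// RicochetFrame declaration itself remains the authoritative definition.
struct RicochetFrameSketch {
  address   _continuation;       // return handler, pushed last
  oopDesc*  _saved_target;       // vmtarget of the adapter MH
  oopDesc*  _saved_args_layout;  // GC layout cookie, starts out NULL
  intptr_t* _saved_args_base;    // rax_argv at entry; may move if args are shifted
  intptr_t  _conversion;         // AdapterMethodHandle.conversion word
  intptr_t* _exact_sender_sp;    // saved_last_sp (rsi/r13)
  intptr_t* _sender_link;        // caller's rbp; the new rbp points here
  address   _sender_pc;          // return address already on the stack
};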
254
255 void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
256 Register rcx_recv,
257 Register new_sp_reg,
258 Register sender_pc_reg) {
259 assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg);
260 const Register saved_last_sp = saved_last_sp_register();
261 // Take down the frame.
262 // Cf. InterpreterMacroAssembler::remove_activation.
263 BLOCK_COMMENT("end_ricochet_frame {");
264 // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down.
265 // This will keep stack in bounds even with unlimited tailcalls, each with an adapter.
266 if (rcx_recv->is_valid())
267 __ movptr(rcx_recv, RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes()));
268 __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes()));
269 __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()));
270 __ movptr(rbp, RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes()));
271 __ mov(rsp, new_sp_reg);
272 BLOCK_COMMENT("} end_ricochet_frame");
273 }
274
275 // Emit code to verify that RBP is pointing at a valid ricochet frame.
276 #ifdef ASSERT
277 enum {
278 ARG_LIMIT = 255, SLOP = 4,
279 // use this parameter for checking for garbage stack movements:
280 UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
281 // the slop defends against false alarms due to fencepost errors
282 };
283
284 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
285 // The stack should look like this:
286 // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
287 // Check various invariants.
288 verify_offsets();
289
290 Register rdi_temp = rdi;
291 Register rcx_temp = rcx;
292 { __ push(rdi_temp); __ push(rcx_temp); }
293 #define UNPUSH_TEMPS \
294 { __ pop(rcx_temp); __ pop(rdi_temp); }
295
296 Address magic_number_1_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes());
297 Address magic_number_2_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes());
298 Address continuation_addr = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes());
299 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
300 Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
301
302 Label L_bad, L_ok;
303 BLOCK_COMMENT("verify_clean {");
304 // Magic numbers must check out:
305 __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1);
306 __ jcc(Assembler::notEqual, L_bad);
307 __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2);
308 __ jcc(Assembler::notEqual, L_bad);
309
310 // Arguments pointer must look reasonable:
311 __ movptr(rcx_temp, saved_args_base_addr);
312 __ cmpptr(rcx_temp, rbp);
313 __ jcc(Assembler::below, L_bad);
314 __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize);
315 __ cmpptr(rcx_temp, rbp);
316 __ jcc(Assembler::above, L_bad);
317
318 load_conversion_dest_type(_masm, rdi_temp, conversion_addr);
319 __ cmpl(rdi_temp, T_VOID);
320 __ jcc(Assembler::equal, L_ok);
321 __ movptr(rcx_temp, saved_args_base_addr);
322 load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
323 __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()),
324 (int32_t) RETURN_VALUE_PLACEHOLDER);
325 __ jcc(Assembler::equal, L_ok);
326 __ BIND(L_bad);
327 UNPUSH_TEMPS;
328 __ stop("damaged ricochet frame");
329 __ BIND(L_ok);
330 UNPUSH_TEMPS;
331 BLOCK_COMMENT("} verify_clean");
332
333 #undef UNPUSH_TEMPS
334
335 }
336 #endif //ASSERT
337
338 void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
339 if (VerifyMethodHandles)
340 verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(),
341 "AMH argument is a Class");
342 __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
343 }
344
345 void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
346 int bits = BitsPerByte;
347 int offset = (CONV_VMINFO_SHIFT / bits);
348 int shift = (CONV_VMINFO_SHIFT % bits);
349 __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
350 assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load");
351 assert(shift == 0, "no shift needed");
352 }
353
354 void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) {
355 int bits = BitsPerByte;
356 int offset = (CONV_DEST_TYPE_SHIFT / bits);
357 int shift = (CONV_DEST_TYPE_SHIFT % bits);
358 __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset));
359 assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load");
360 __ shrl(reg, shift);
361 DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1));
362 assert((shift + conv_type_bits) == bits, "left justified in byte");
363 }
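// Both helpers are byte-granular forms of a plain bit-field extraction. A scalar
// sketch of what they compute, assuming the MethodHandles::CONV_* constants named
// in the asserts are in scope (not a drop-in replacement for the assembly):
static int conversion_vminfo_sketch(jint conv) {
  return (conv >> CONV_VMINFO_SHIFT) & CONV_VMINFO_MASK;     // shift within the byte is 0
}
static int conversion_dest_type_sketch(jint conv) {
  return (conv >> CONV_DEST_TYPE_SHIFT) & CONV_TYPE_MASK;    // left-justified in its byte
}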
364
365 void MethodHandles::load_stack_move(MacroAssembler* _masm,
366 Register rdi_stack_move,
367 Register rcx_amh,
368 bool might_be_negative) {
369 BLOCK_COMMENT("load_stack_move");
370 Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
371 __ movl(rdi_stack_move, rcx_amh_conversion);
372 __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
373 #ifdef _LP64
374 if (might_be_negative) {
375 // clean high bits of stack motion register (was loaded as an int)
376 __ movslq(rdi_stack_move, rdi_stack_move);
377 }
378 #endif //_LP64
379 if (VerifyMethodHandles) {
380 Label L_ok, L_bad;
381 int32_t stack_move_limit = 0x4000; // extra-large
382 __ cmpptr(rdi_stack_move, stack_move_limit);
383 __ jcc(Assembler::greaterEqual, L_bad);
384 __ cmpptr(rdi_stack_move, -stack_move_limit);
385 __ jcc(Assembler::greater, L_ok);
386 __ bind(L_bad);
387 __ stop("load_stack_move of garbage value");
388 __ BIND(L_ok);
389 }
390 }
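// The stack-move field is the signed high-order part of the 32-bit conversion
// word, so an arithmetic shift recovers it; movslq then widens a possibly
// negative value to 64 bits on LP64. A scalar sketch under the same assumptions:
static intptr_t conversion_stack_move_sketch(jint conv) {
  return (intptr_t)(conv >> CONV_STACK_MOVE_SHIFT);          // sign-preserving shift
}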
391
392 #ifndef PRODUCT
393 void MethodHandles::RicochetFrame::verify_offsets() {
394 // Check compatibility of this struct with the more generally used offsets of class frame:
395 int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value
396 assert(ebp_off + wordSize*frame::interpreter_frame_method_offset == saved_args_base_offset_in_bytes(), "");
397 assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset == conversion_offset_in_bytes(), "");
398 assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset == exact_sender_sp_offset_in_bytes(), "");
399 // These last two have to be exact:
400 assert(ebp_off + wordSize*frame::link_offset == sender_link_offset_in_bytes(), "");
401 assert(ebp_off + wordSize*frame::return_addr_offset == sender_pc_offset_in_bytes(), "");
402 }
403
404 void MethodHandles::RicochetFrame::verify() const {
405 verify_offsets();
406 assert(magic_number_1() == MAGIC_NUMBER_1, "");
407 assert(magic_number_2() == MAGIC_NUMBER_2, "");
408 if (!Universe::heap()->is_gc_active()) {
409 if (saved_args_layout() != NULL) {
410 assert(saved_args_layout()->is_method(), "must be valid oop");
411 }
412 if (saved_target() != NULL) {
413 assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
414 }
415 }
416 int conv_op = adapter_conversion_op(conversion());
417 assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
418 conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
419 conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
420 "must be a sane conversion");
421 if (has_return_value_slot()) {
422 assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
423 }
424 }
425 #endif //PRODUCT
426
427 #ifdef ASSERT
428 void MethodHandles::verify_argslot(MacroAssembler* _masm,
429 Register argslot_reg,
430 const char* error_message) {
75 // Verify that argslot lies within (rsp, rbp]. 431 // Verify that argslot lies within (rsp, rbp].
76 Label L_ok, L_bad; 432 Label L_ok, L_bad;
77 BLOCK_COMMENT("{ verify_argslot"); 433 BLOCK_COMMENT("verify_argslot {");
78 __ cmpptr(argslot_reg, rbp); 434 __ cmpptr(argslot_reg, rbp);
79 __ jccb(Assembler::above, L_bad); 435 __ jccb(Assembler::above, L_bad);
80 __ cmpptr(rsp, argslot_reg); 436 __ cmpptr(rsp, argslot_reg);
81 __ jccb(Assembler::below, L_ok); 437 __ jccb(Assembler::below, L_ok);
82 __ bind(L_bad); 438 __ bind(L_bad);
83 __ stop(error_message); 439 __ stop(error_message);
84 __ bind(L_ok); 440 __ BIND(L_ok);
85 BLOCK_COMMENT("} verify_argslot"); 441 BLOCK_COMMENT("} verify_argslot");
86 } 442 }
87 #endif 443
88 444 void MethodHandles::verify_argslots(MacroAssembler* _masm,
445 RegisterOrConstant arg_slots,
446 Register arg_slot_base_reg,
447 bool negate_argslots,
448 const char* error_message) {
449 // Verify that [argslot..argslot+size) lies within (rsp, rbp).
450 Label L_ok, L_bad;
451 Register rdi_temp = rdi;
452 BLOCK_COMMENT("verify_argslots {");
453 __ push(rdi_temp);
454 if (negate_argslots) {
455 if (arg_slots.is_constant()) {
456 arg_slots = -1 * arg_slots.as_constant();
457 } else {
458 __ movptr(rdi_temp, arg_slots);
459 __ negptr(rdi_temp);
460 arg_slots = rdi_temp;
461 }
462 }
463 __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale()));
464 __ cmpptr(rdi_temp, rbp);
465 __ pop(rdi_temp);
466 __ jcc(Assembler::above, L_bad);
467 __ cmpptr(rsp, arg_slot_base_reg);
468 __ jcc(Assembler::below, L_ok);
469 __ bind(L_bad);
470 __ stop(error_message);
471 __ BIND(L_ok);
472 BLOCK_COMMENT("} verify_argslots");
473 }
474
475 // Make sure that arg_slots has the same sign as the given direction.
476 // If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
477 void MethodHandles::verify_stack_move(MacroAssembler* _masm,
478 RegisterOrConstant arg_slots, int direction) {
479 bool allow_zero = arg_slots.is_constant();
480 if (direction == 0) { direction = +1; allow_zero = true; }
481 assert(stack_move_unit() == -1, "else add extra checks here");
482 if (arg_slots.is_register()) {
483 Label L_ok, L_bad;
484 BLOCK_COMMENT("verify_stack_move {");
485 // testl(arg_slots.as_register(), -stack_move_unit() - 1); // no need
486 // jcc(Assembler::notZero, L_bad);
487 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
488 if (direction > 0) {
489 __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad);
490 __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
491 __ jcc(Assembler::less, L_ok);
492 } else {
493 __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad);
494 __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
495 __ jcc(Assembler::greater, L_ok);
496 }
497 __ bind(L_bad);
498 if (direction > 0)
499 __ stop("assert arg_slots > 0");
500 else
501 __ stop("assert arg_slots < 0");
502 __ BIND(L_ok);
503 BLOCK_COMMENT("} verify_stack_move");
504 } else {
505 intptr_t size = arg_slots.as_constant();
506 if (direction < 0) size = -size;
507 assert(size >= 0, "correct direction of constant move");
508 assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
509 }
510 }
511
512 void MethodHandles::verify_klass(MacroAssembler* _masm,
513 Register obj, KlassHandle klass,
514 const char* error_message) {
515 oop* klass_addr = klass.raw_value();
516 assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
517 klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
518 "must be one of the SystemDictionaryHandles");
519 Register temp = rdi;
520 Label L_ok, L_bad;
521 BLOCK_COMMENT("verify_klass {");
522 __ verify_oop(obj);
523 __ testptr(obj, obj);
524 __ jcc(Assembler::zero, L_bad);
525 __ push(temp);
526 __ load_klass(temp, obj);
527 __ cmpptr(temp, ExternalAddress((address) klass_addr));
528 __ jcc(Assembler::equal, L_ok);
529 intptr_t super_check_offset = klass->super_check_offset();
530 __ movptr(temp, Address(temp, super_check_offset));
531 __ cmpptr(temp, ExternalAddress((address) klass_addr));
532 __ jcc(Assembler::equal, L_ok);
533 __ pop(temp);
534 __ bind(L_bad);
535 __ stop(error_message);
536 __ BIND(L_ok);
537 __ pop(temp);
538 BLOCK_COMMENT("} verify_klass");
539 }
540 #endif //ASSERT
89 541
90 // Code generation 542 // Code generation
91 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { 543 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
92 // rbx: methodOop 544 // rbx: methodOop
93 // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots]) 545 // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
114 // here's where control starts out: 566 // here's where control starts out:
115 __ align(CodeEntryAlignment); 567 __ align(CodeEntryAlignment);
116 address entry_point = __ pc(); 568 address entry_point = __ pc();
117 569
118 // fetch the MethodType from the method handle into rax (the 'check' register) 570 // fetch the MethodType from the method handle into rax (the 'check' register)
571 // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
572 // This would simplify several touchy bits of code.
573 // See 6984712: JSR 292 method handle calls need a clean argument base pointer
119 { 574 {
120 Register tem = rbx_method; 575 Register tem = rbx_method;
121 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { 576 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
122 __ movptr(rax_mtype, Address(tem, *pchase)); 577 __ movptr(rax_mtype, Address(tem, *pchase));
123 tem = rax_mtype; // in case there is another indirection 578 tem = rax_mtype; // in case there is another indirection
126 581
127 // given the MethodType, find out where the MH argument is buried 582 // given the MethodType, find out where the MH argument is buried
128 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); 583 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
129 Register rdx_vmslots = rdx_temp; 584 Register rdx_vmslots = rdx_temp;
130 __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); 585 __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
131 __ movptr(rcx_recv, __ argument_address(rdx_vmslots)); 586 Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots);
587 __ movptr(rcx_recv, mh_receiver_slot_addr);
132 588
133 trace_method_handle(_masm, "invokeExact"); 589 trace_method_handle(_masm, "invokeExact");
134 590
135 __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type); 591 __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
592
593 // Nobody uses the MH receiver slot after this. Make sure.
594 DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999));
595
136 __ jump_to_method_handle_entry(rcx_recv, rdi_temp); 596 __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
137 597
138 // for invokeGeneric (only), apply argument and result conversions on the fly 598 // for invokeGeneric (only), apply argument and result conversions on the fly
139 __ bind(invoke_generic_slow_path); 599 __ bind(invoke_generic_slow_path);
140 #ifdef ASSERT 600 #ifdef ASSERT
141 { Label L; 601 if (VerifyMethodHandles) {
602 Label L;
142 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); 603 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
143 __ jcc(Assembler::equal, L); 604 __ jcc(Assembler::equal, L);
144 __ stop("bad methodOop::intrinsic_id"); 605 __ stop("bad methodOop::intrinsic_id");
145 __ bind(L); 606 __ bind(L);
146 } 607 }
148 Register rbx_temp = rbx_method; // don't need it now 609 Register rbx_temp = rbx_method; // don't need it now
149 610
150 // make room on the stack for another pointer: 611 // make room on the stack for another pointer:
151 Register rcx_argslot = rcx_recv; 612 Register rcx_argslot = rcx_recv;
152 __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); 613 __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
153 insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, 614 insert_arg_slots(_masm, 2 * stack_move_unit(),
154 rcx_argslot, rbx_temp, rdx_temp); 615 rcx_argslot, rbx_temp, rdx_temp);
155 616
156 // load up an adapter from the calling type (Java weaves this) 617 // load up an adapter from the calling type (Java weaves this)
157 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); 618 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
158 Register rdx_adapter = rdx_temp; 619 Register rdx_adapter = rdx_temp;
183 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); 644 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
184 645
185 return entry_point; 646 return entry_point;
186 } 647 }
187 648
649 // Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
650 static RegisterOrConstant constant(int value) {
651 return RegisterOrConstant(value);
652 }
653
188 // Helper to insert argument slots into the stack. 654 // Helper to insert argument slots into the stack.
189 // arg_slots must be a multiple of stack_move_unit() and <= 0 655 // arg_slots must be a multiple of stack_move_unit() and < 0
656 // rax_argslot is decremented to point to the new (shifted) location of the argslot
657 // But, rdx_temp ends up holding the original value of rax_argslot.
190 void MethodHandles::insert_arg_slots(MacroAssembler* _masm, 658 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
191 RegisterOrConstant arg_slots, 659 RegisterOrConstant arg_slots,
192 int arg_mask,
193 Register rax_argslot, 660 Register rax_argslot,
194 Register rbx_temp, Register rdx_temp, Register temp3_reg) { 661 Register rbx_temp, Register rdx_temp) {
195 assert(temp3_reg == noreg, "temp3 not required"); 662 // allow constant zero
663 if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
664 return;
196 assert_different_registers(rax_argslot, rbx_temp, rdx_temp, 665 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
197 (!arg_slots.is_register() ? rsp : arg_slots.as_register())); 666 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
198 667 if (VerifyMethodHandles)
199 #ifdef ASSERT 668 verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
200 verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); 669 if (VerifyMethodHandles)
201 if (arg_slots.is_register()) { 670 verify_stack_move(_masm, arg_slots, -1);
202 Label L_ok, L_bad;
203 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
204 __ jccb(Assembler::greater, L_bad);
205 __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
206 __ jccb(Assembler::zero, L_ok);
207 __ bind(L_bad);
208 __ stop("assert arg_slots <= 0 and clear low bits");
209 __ bind(L_ok);
210 } else {
211 assert(arg_slots.as_constant() <= 0, "");
212 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
213 }
214 #endif //ASSERT
215
216 #ifdef _LP64
217 if (arg_slots.is_register()) {
218 // clean high bits of stack motion register (was loaded as an int)
219 __ movslq(arg_slots.as_register(), arg_slots.as_register());
220 }
221 #endif
222 671
223 // Make space on the stack for the inserted argument(s). 672 // Make space on the stack for the inserted argument(s).
224 // Then pull down everything shallower than rax_argslot. 673 // Then pull down everything shallower than rax_argslot.
225 // The stacked return address gets pulled down with everything else. 674 // The stacked return address gets pulled down with everything else.
226 // That is, copy [rsp, argslot) downward by -size words. In pseudo-code: 675 // That is, copy [rsp, argslot) downward by -size words. In pseudo-code:
228 // for (rdx = rsp + size; rdx < argslot; rdx++) 677 // for (rdx = rsp + size; rdx < argslot; rdx++)
229 // rdx[-size] = rdx[0] 678 // rdx[-size] = rdx[0]
230 // argslot -= size; 679 // argslot -= size;
231 BLOCK_COMMENT("insert_arg_slots {"); 680 BLOCK_COMMENT("insert_arg_slots {");
232 __ mov(rdx_temp, rsp); // source pointer for copy 681 __ mov(rdx_temp, rsp); // source pointer for copy
233 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); 682 __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
234 { 683 {
235 Label loop; 684 Label loop;
236 __ BIND(loop); 685 __ BIND(loop);
237 // pull one word down each time through the loop 686 // pull one word down each time through the loop
238 __ movptr(rbx_temp, Address(rdx_temp, 0)); 687 __ movptr(rbx_temp, Address(rdx_temp, 0));
239 __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); 688 __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
240 __ addptr(rdx_temp, wordSize); 689 __ addptr(rdx_temp, wordSize);
241 __ cmpptr(rdx_temp, rax_argslot); 690 __ cmpptr(rdx_temp, rax_argslot);
242 __ jccb(Assembler::less, loop); 691 __ jcc(Assembler::less, loop);
243 } 692 }
244 693
245 // Now move the argslot down, to point to the opened-up space. 694 // Now move the argslot down, to point to the opened-up space.
246 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); 695 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
247 BLOCK_COMMENT("} insert_arg_slots"); 696 BLOCK_COMMENT("} insert_arg_slots");
248 } 697 }
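// A C-level sketch of the net effect of insert_arg_slots (arg_slots < 0, counted
// in words; the names are illustrative only, not a real helper in this file):
static void insert_arg_slots_sketch(intptr_t*& rsp, intptr_t*& argslot, int arg_slots) {
  intptr_t* src = rsp;              // shallowest existing word (includes the return PC)
  rsp += arg_slots;                 // grow the stack by -arg_slots words
  for (; src < argslot; src++)
    src[arg_slots] = src[0];        // slide each word down into the new space
  argslot += arg_slots;             // argslot now points at the opened-up slot(s)
}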
249 698
250 // Helper to remove argument slots from the stack. 699 // Helper to remove argument slots from the stack.
251 // arg_slots must be a multiple of stack_move_unit() and >= 0 700 // arg_slots must be a multiple of stack_move_unit() and > 0
252 void MethodHandles::remove_arg_slots(MacroAssembler* _masm, 701 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
253 RegisterOrConstant arg_slots, 702 RegisterOrConstant arg_slots,
254 Register rax_argslot, 703 Register rax_argslot,
255 Register rbx_temp, Register rdx_temp, Register temp3_reg) { 704 Register rbx_temp, Register rdx_temp) {
256 assert(temp3_reg == noreg, "temp3 not required"); 705 // allow constant zero
706 if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
707 return;
257 assert_different_registers(rax_argslot, rbx_temp, rdx_temp, 708 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
258 (!arg_slots.is_register() ? rsp : arg_slots.as_register())); 709 (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
259 710 if (VerifyMethodHandles)
260 #ifdef ASSERT 711 verify_argslots(_masm, arg_slots, rax_argslot, false,
261 // Verify that [argslot..argslot+size) lies within (rsp, rbp). 712 "deleted argument(s) must fall within current frame");
262 __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr)); 713 if (VerifyMethodHandles)
263 verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame"); 714 verify_stack_move(_masm, arg_slots, +1);
264 if (arg_slots.is_register()) {
265 Label L_ok, L_bad;
266 __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
267 __ jccb(Assembler::less, L_bad);
268 __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
269 __ jccb(Assembler::zero, L_ok);
270 __ bind(L_bad);
271 __ stop("assert arg_slots >= 0 and clear low bits");
272 __ bind(L_ok);
273 } else {
274 assert(arg_slots.as_constant() >= 0, "");
275 assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
276 }
277 #endif //ASSERT
278
279 #ifdef _LP64
280 if (false) { // not needed, since register is positive
281 // clean high bits of stack motion register (was loaded as an int)
282 if (arg_slots.is_register())
283 __ movslq(arg_slots.as_register(), arg_slots.as_register());
284 }
285 #endif
286 715
287 BLOCK_COMMENT("remove_arg_slots {"); 716 BLOCK_COMMENT("remove_arg_slots {");
288 // Pull up everything shallower than rax_argslot. 717 // Pull up everything shallower than rax_argslot.
289 // Then remove the excess space on the stack. 718 // Then remove the excess space on the stack.
290 // The stacked return address gets pulled up with everything else. 719 // The stacked return address gets pulled up with everything else.
297 { 726 {
298 Label loop; 727 Label loop;
299 __ BIND(loop); 728 __ BIND(loop);
300 // pull one word up each time through the loop 729 // pull one word up each time through the loop
301 __ movptr(rbx_temp, Address(rdx_temp, 0)); 730 __ movptr(rbx_temp, Address(rdx_temp, 0));
302 __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); 731 __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp);
303 __ addptr(rdx_temp, -wordSize); 732 __ addptr(rdx_temp, -wordSize);
304 __ cmpptr(rdx_temp, rsp); 733 __ cmpptr(rdx_temp, rsp);
305 __ jccb(Assembler::greaterEqual, loop); 734 __ jcc(Assembler::greaterEqual, loop);
306 } 735 }
307 736
308 // Now move the argslot up, to point to the just-copied block. 737 // Now move the argslot up, to point to the just-copied block.
309 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); 738 __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale()));
310 // And adjust the argslot address to point at the deletion point. 739 // And adjust the argslot address to point at the deletion point.
311 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); 740 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale()));
312 BLOCK_COMMENT("} remove_arg_slots"); 741 BLOCK_COMMENT("} remove_arg_slots");
313 } 742 }
743
744 // Helper to copy argument slots to the top of the stack.
745 // The sequence starts with rax_argslot and is counted by slot_count
746 // slot_count must be a multiple of stack_move_unit() and >= 0
747 // This function blows the temps but does not change rax_argslot.
748 void MethodHandles::push_arg_slots(MacroAssembler* _masm,
749 Register rax_argslot,
750 RegisterOrConstant slot_count,
751 int skip_words_count,
752 Register rbx_temp, Register rdx_temp) {
753 assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
754 (!slot_count.is_register() ? rbp : slot_count.as_register()),
755 rsp);
756 assert(Interpreter::stackElementSize == wordSize, "else change this code");
757
758 if (VerifyMethodHandles)
759 verify_stack_move(_masm, slot_count, 0);
760
761 // allow constant zero
762 if (slot_count.is_constant() && slot_count.as_constant() == 0)
763 return;
764
765 BLOCK_COMMENT("push_arg_slots {");
766
767 Register rbx_top = rbx_temp;
768
769 // There is at most 1 word to carry down with the TOS.
770 switch (skip_words_count) {
771 case 1: __ pop(rdx_temp); break;
772 case 0: break;
773 default: ShouldNotReachHere();
774 }
775
776 if (slot_count.is_constant()) {
777 for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
778 __ pushptr(Address(rax_argslot, i * wordSize));
779 }
780 } else {
781 Label L_plural, L_loop, L_break;
782 // Emit code to dynamically check for the common cases, zero and one slot.
783 __ cmpl(slot_count.as_register(), (int32_t) 1);
784 __ jccb(Assembler::greater, L_plural);
785 __ jccb(Assembler::less, L_break);
786 __ pushptr(Address(rax_argslot, 0));
787 __ jmpb(L_break);
788 __ BIND(L_plural);
789
790 // Loop for 2 or more:
791 // rbx = &rax[slot_count]
792 // while (rbx > rax) *(--rsp) = *(--rbx)
793 __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr));
794 __ BIND(L_loop);
795 __ subptr(rbx_top, wordSize);
796 __ pushptr(Address(rbx_top, 0));
797 __ cmpptr(rbx_top, rax_argslot);
798 __ jcc(Assembler::above, L_loop);
799 __ bind(L_break);
800 }
801 switch (skip_words_count) {
802 case 1: __ push(rdx_temp); break;
803 case 0: break;
804 default: ShouldNotReachHere();
805 }
806 BLOCK_COMMENT("} push_arg_slots");
807 }
808
809 // in-place movement; no change to rsp
810 // blows rax_temp, rdx_temp
811 void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
812 Register rbx_bottom, // invariant
813 Address top_addr, // can use rax_temp
814 RegisterOrConstant positive_distance_in_slots,
815 Register rax_temp, Register rdx_temp) {
816 BLOCK_COMMENT("move_arg_slots_up {");
817 assert_different_registers(rbx_bottom,
818 rax_temp, rdx_temp,
819 positive_distance_in_slots.register_or_noreg());
820 Label L_loop, L_break;
821 Register rax_top = rax_temp;
822 if (!top_addr.is_same_address(Address(rax_top, 0)))
823 __ lea(rax_top, top_addr);
824 // Detect empty (or broken) loop:
825 #ifdef ASSERT
826 if (VerifyMethodHandles) {
827 // Verify that &bottom < &top (non-empty interval)
828 Label L_ok, L_bad;
829 if (positive_distance_in_slots.is_register()) {
830 __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0);
831 __ jcc(Assembler::lessEqual, L_bad);
832 }
833 __ cmpptr(rbx_bottom, rax_top);
834 __ jcc(Assembler::below, L_ok);
835 __ bind(L_bad);
836 __ stop("valid bounds (copy up)");
837 __ BIND(L_ok);
838 }
839 #endif
840 __ cmpptr(rbx_bottom, rax_top);
841 __ jccb(Assembler::aboveEqual, L_break);
842 // work rax down to rbx, copying contiguous data upwards
843 // In pseudo-code:
844 // [rbx, rax) = &[bottom, top)
845 // while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--;
846 __ BIND(L_loop);
847 __ subptr(rax_top, wordSize);
848 __ movptr(rdx_temp, Address(rax_top, 0));
849 __ movptr( Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp);
850 __ cmpptr(rax_top, rbx_bottom);
851 __ jcc(Assembler::above, L_loop);
852 assert(Interpreter::stackElementSize == wordSize, "else change loop");
853 __ bind(L_break);
854 BLOCK_COMMENT("} move_arg_slots_up");
855 }
856
857 // in-place movement; no change to rsp
858 // blows rax_temp, rdx_temp
859 void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
860 Address bottom_addr, // can use rax_temp
861 Register rbx_top, // invariant
862 RegisterOrConstant negative_distance_in_slots,
863 Register rax_temp, Register rdx_temp) {
864 BLOCK_COMMENT("move_arg_slots_down {");
865 assert_different_registers(rbx_top,
866 negative_distance_in_slots.register_or_noreg(),
867 rax_temp, rdx_temp);
868 Label L_loop, L_break;
869 Register rax_bottom = rax_temp;
870 if (!bottom_addr.is_same_address(Address(rax_bottom, 0)))
871 __ lea(rax_bottom, bottom_addr);
872 // Detect empty (or broken) loop:
873 #ifdef ASSERT
874 assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
875 if (VerifyMethodHandles) {
876 // Verify that &bottom < &top (non-empty interval)
877 Label L_ok, L_bad;
878 if (negative_distance_in_slots.is_register()) {
879 __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0);
880 __ jcc(Assembler::greaterEqual, L_bad);
881 }
882 __ cmpptr(rax_bottom, rbx_top);
883 __ jcc(Assembler::below, L_ok);
884 __ bind(L_bad);
885 __ stop("valid bounds (copy down)");
886 __ BIND(L_ok);
887 }
888 #endif
889 __ cmpptr(rax_bottom, rbx_top);
890 __ jccb(Assembler::aboveEqual, L_break);
891 // work rax up to rbx, copying contiguous data downwards
892 // In pseudo-code:
893 // [rax, rbx) = &[bottom, top)
894 // while (rax < rbx) *(rax - distance) = *(rax + 0), rax++;
895 __ BIND(L_loop);
896 __ movptr(rdx_temp, Address(rax_bottom, 0));
897 __ movptr( Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp);
898 __ addptr(rax_bottom, wordSize);
899 __ cmpptr(rax_bottom, rbx_top);
900 __ jcc(Assembler::below, L_loop);
901 assert(Interpreter::stackElementSize == wordSize, "else change loop");
902 __ bind(L_break);
903 BLOCK_COMMENT("} move_arg_slots_down");
904 }
905
906 // Copy from a field or array element to a stacked argument slot.
907 // is_element (ignored) says whether caller is loading an array element instead of an instance field.
908 void MethodHandles::move_typed_arg(MacroAssembler* _masm,
909 BasicType type, bool is_element,
910 Address slot_dest, Address value_src,
911 Register rbx_temp, Register rdx_temp) {
912 BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
913 if (type == T_OBJECT || type == T_ARRAY) {
914 __ load_heap_oop(rbx_temp, value_src);
915 __ movptr(slot_dest, rbx_temp);
916 } else if (type != T_VOID) {
917 int arg_size = type2aelembytes(type);
918 bool arg_is_signed = is_signed_subword_type(type);
919 int slot_size = (arg_size > wordSize) ? arg_size : wordSize;
920 __ load_sized_value( rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp);
921 __ store_sized_value( slot_dest, rdx_temp, slot_size, rbx_temp);
922 }
923 BLOCK_COMMENT("} move_typed_arg");
924 }
925
926 void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
927 Address return_slot) {
928 BLOCK_COMMENT("move_return_value {");
929 // Old versions of the JVM must clean the FPU stack after every return.
930 #ifndef _LP64
931 #ifdef COMPILER2
932 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
933 if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) {
934 for (int i = 1; i < 8; i++) {
935 __ ffree(i);
936 }
937 } else if (UseSSE < 2) {
938 __ empty_FPU_stack();
939 }
940 #endif //COMPILER2
941 #endif //!_LP64
942
943 // Look at the type and pull the value out of the corresponding register.
944 if (type == T_VOID) {
945 // nothing to do
946 } else if (type == T_OBJECT) {
947 __ movptr(return_slot, rax);
948 } else if (type == T_INT || is_subword_type(type)) {
949 // write the whole word, even if only 32 bits is significant
950 __ movptr(return_slot, rax);
951 } else if (type == T_LONG) {
952 // store the value by parts
953 // Note: We assume longs are contiguous (even if misaligned) on the interpreter stack.
954 __ store_sized_value(return_slot, rax, BytesPerLong, rdx);
955 } else if (NOT_LP64((type == T_FLOAT && UseSSE < 1) ||
956 (type == T_DOUBLE && UseSSE < 2) ||)
957 false) {
958 // Use old x86 FPU registers:
959 if (type == T_FLOAT)
960 __ fstp_s(return_slot);
961 else
962 __ fstp_d(return_slot);
963 } else if (type == T_FLOAT) {
964 __ movflt(return_slot, xmm0);
965 } else if (type == T_DOUBLE) {
966 __ movdbl(return_slot, xmm0);
967 } else {
968 ShouldNotReachHere();
969 }
970 BLOCK_COMMENT("} move_return_value");
971 }
972
314 973
315 #ifndef PRODUCT 974 #ifndef PRODUCT
316 extern "C" void print_method_handle(oop mh); 975 extern "C" void print_method_handle(oop mh);
317 void trace_method_handle_stub(const char* adaptername, 976 void trace_method_handle_stub(const char* adaptername,
977 oop mh,
978 intptr_t* saved_regs,
979 intptr_t* entry_sp,
318 intptr_t* saved_sp, 980 intptr_t* saved_sp,
319 oop mh, 981 intptr_t* saved_bp) {
320 intptr_t* sp) {
321 // called as a leaf from native code: do not block the JVM! 982 // called as a leaf from native code: do not block the JVM!
322 intptr_t* entry_sp = sp + LP64_ONLY(16) NOT_LP64(8); 983 intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
323 tty->print_cr("MH %s mh="INTPTR_FORMAT" sp="INTPTR_FORMAT" saved_sp="INTPTR_FORMAT")", 984 intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
324 adaptername, (intptr_t)mh, (intptr_t)entry_sp, saved_sp); 985 tty->print_cr("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT,
986 adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
987 if (last_sp != saved_sp && last_sp != NULL)
988 tty->print_cr("*** last_sp="INTPTR_FORMAT, (intptr_t)last_sp);
325 if (Verbose) { 989 if (Verbose) {
990 tty->print(" reg dump: ");
991 int saved_regs_count = (entry_sp-1) - saved_regs;
992 // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
993 int i;
994 for (i = 0; i <= saved_regs_count; i++) {
995 if (i > 0 && i % 4 == 0 && i != saved_regs_count) {
996 tty->cr();
997 tty->print(" + dump: ");
998 }
999 tty->print(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
1000 }
1001 tty->cr();
1002 int stack_dump_count = 16;
1003 if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
1004 stack_dump_count = (int)(saved_bp + 2 - saved_sp);
1005 if (stack_dump_count > 64) stack_dump_count = 48;
1006 for (i = 0; i < stack_dump_count; i += 4) {
1007 tty->print_cr(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT,
1008 i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
1009 }
326 print_method_handle(mh); 1010 print_method_handle(mh);
327 } 1011 }
328 } 1012 }
1013
1014 // The stub wraps the arguments in a struct on the stack to avoid
1015 // dealing with the different calling conventions for passing 6
1016 // arguments.
1017 struct MethodHandleStubArguments {
1018 const char* adaptername;
1019 oopDesc* mh;
1020 intptr_t* saved_regs;
1021 intptr_t* entry_sp;
1022 intptr_t* saved_sp;
1023 intptr_t* saved_bp;
1024 };
1025 void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
1026 trace_method_handle_stub(args->adaptername,
1027 args->mh,
1028 args->saved_regs,
1029 args->entry_sp,
1030 args->saved_sp,
1031 args->saved_bp);
1032 }
1033
329 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { 1034 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
330 if (!TraceMethodHandles) return; 1035 if (!TraceMethodHandles) return;
331 BLOCK_COMMENT("trace_method_handle {"); 1036 BLOCK_COMMENT("trace_method_handle {");
1037 __ push(rax);
1038 __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp
332 __ pusha(); 1039 __ pusha();
333 #ifdef _LP64 1040 __ mov(rbx, rsp);
334 // Pass arguments carefully since the registers overlap with the calling convention. 1041 __ enter();
1042 // incoming state:
335 // rcx: method handle 1043 // rcx: method handle
336 // r13: saved sp 1044 // r13 or rsi: saved sp
337 __ mov(c_rarg2, rcx); // mh 1045 // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
338 __ mov(c_rarg1, r13); // saved sp 1046 __ push(rbp); // saved_bp
339 __ mov(c_rarg3, rsp); // sp 1047 __ push(rsi); // saved_sp
340 __ movptr(c_rarg0, (intptr_t) adaptername); 1048 __ push(rax); // entry_sp
341 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), c_rarg0, c_rarg1, c_rarg2, c_rarg3); 1049 __ push(rbx); // pusha saved_regs
342 #else 1050 __ push(rcx); // mh
343 // arguments: 1051 __ push(rcx); // adaptername
344 // rcx: method handle 1052 __ movptr(Address(rsp, 0), (intptr_t) adaptername);
345 // rsi: saved sp 1053 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
346 __ movptr(rbx, (intptr_t) adaptername); 1054 __ leave();
347 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), rbx, rsi, rcx, rsp);
348 #endif
349 __ popa(); 1055 __ popa();
1056 __ pop(rax);
350 BLOCK_COMMENT("} trace_method_handle"); 1057 BLOCK_COMMENT("} trace_method_handle");
351 } 1058 }
352 #endif //PRODUCT 1059 #endif //PRODUCT
353 1060
354 // which conversion op types are implemented here? 1061 // which conversion op types are implemented here?
356 return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY) 1063 return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
357 |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) 1064 |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
358 |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) 1065 |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
359 |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) 1066 |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
360 |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) 1067 |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
1068 //OP_PRIM_TO_REF is below...
361 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) 1069 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
362 |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) 1070 |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
363 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) 1071 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
364 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) 1072 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
365 //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! 1073 //OP_COLLECT_ARGS is below...
1074 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
1075 |(!UseRicochetFrames ? 0 :
1076 LP64_ONLY(FLAG_IS_DEFAULT(UseRicochetFrames) ? 0 :)
1077 java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
1078 ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
1079 |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
1080 |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
1081 ))
366 ); 1082 );
367 // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
368 } 1083 }
369 1084
370 //------------------------------------------------------------------------------ 1085 //------------------------------------------------------------------------------
371 // MethodHandles::generate_method_handle_stub 1086 // MethodHandles::generate_method_handle_stub
372 // 1087 //
373 // Generate an "entry" field for a method handle. 1088 // Generate an "entry" field for a method handle.
374 // This determines how the method handle will respond to calls. 1089 // This determines how the method handle will respond to calls.
375 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { 1090 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
1091 MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
1092
376 // Here is the register state during an interpreted call, 1093 // Here is the register state during an interpreted call,
377 // as set up by generate_method_handle_interpreter_entry(): 1094 // as set up by generate_method_handle_interpreter_entry():
378 // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused) 1095 // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
379 // - rcx: receiver method handle 1096 // - rcx: receiver method handle
380 // - rax: method handle type (only used by the check_mtype entry point) 1097 // - rax: method handle type (only used by the check_mtype entry point)
383 1100
384 const Register rcx_recv = rcx; 1101 const Register rcx_recv = rcx;
385 const Register rax_argslot = rax; 1102 const Register rax_argslot = rax;
386 const Register rbx_temp = rbx; 1103 const Register rbx_temp = rbx;
387 const Register rdx_temp = rdx; 1104 const Register rdx_temp = rdx;
1105 const Register rdi_temp = rdi;
388 1106
389 // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls) 1107 // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
390 // and gen_c2i_adapter (from compiled calls): 1108 // and gen_c2i_adapter (from compiled calls):
391 const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi); 1109 const Register saved_last_sp = saved_last_sp_register();
392 1110
393 // Argument registers for _raise_exception. 1111 // Argument registers for _raise_exception.
394 // 32-bit: Pass first two oop/int args in registers ECX and EDX. 1112 // 32-bit: Pass first two oop/int args in registers ECX and EDX.
395 const Register rarg0_code = LP64_ONLY(j_rarg0) NOT_LP64(rcx); 1113 const Register rarg0_code = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
396 const Register rarg1_actual = LP64_ONLY(j_rarg1) NOT_LP64(rdx); 1114 const Register rarg1_actual = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
418 1136
419 if (have_entry(ek)) { 1137 if (have_entry(ek)) {
420 __ nop(); // empty stubs make SG sick 1138 __ nop(); // empty stubs make SG sick
421 return; 1139 return;
422 } 1140 }
1141
1142 #ifdef ASSERT
1143 __ push((int32_t) 0xEEEEEEEE);
1144 __ push((int32_t) (intptr_t) entry_name(ek));
1145 LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek))));
1146 __ push((int32_t) 0x33333333);
1147 #endif //ASSERT
423 1148
424 address interp_entry = __ pc(); 1149 address interp_entry = __ pc();
425 1150
426 trace_method_handle(_masm, entry_name(ek)); 1151 trace_method_handle(_masm, entry_name(ek));
427 1152
534 // get receiver klass 1259 // get receiver klass
535 Register rax_klass = rax_argslot; 1260 Register rax_klass = rax_argslot;
536 __ load_klass(rax_klass, rcx_recv); 1261 __ load_klass(rax_klass, rcx_recv);
537 __ verify_oop(rax_klass); 1262 __ verify_oop(rax_klass);
538 1263
539 Register rdi_temp = rdi;
540 Register rbx_method = rbx_index; 1264 Register rbx_method = rbx_index;
541 1265
542 // get interface klass 1266 // get interface klass
543 Label no_such_interface; 1267 Label no_such_interface;
544 __ verify_oop(rdx_intf); 1268 __ verify_oop(rdx_intf);
570 case _bound_ref_direct_mh: 1294 case _bound_ref_direct_mh:
571 case _bound_int_direct_mh: 1295 case _bound_int_direct_mh:
572 case _bound_long_direct_mh: 1296 case _bound_long_direct_mh:
573 { 1297 {
574 bool direct_to_method = (ek >= _bound_ref_direct_mh); 1298 bool direct_to_method = (ek >= _bound_ref_direct_mh);
575 BasicType arg_type = T_ILLEGAL; 1299 BasicType arg_type = ek_bound_mh_arg_type(ek);
576 int arg_mask = _INSERT_NO_MASK; 1300 int arg_slots = type2size[arg_type];
577 int arg_slots = -1;
578 get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
579 1301
580 // make room for the new argument: 1302 // make room for the new argument:
581 __ movl(rax_argslot, rcx_bmh_vmargslot); 1303 __ movl(rax_argslot, rcx_bmh_vmargslot);
582 __ lea(rax_argslot, __ argument_address(rax_argslot)); 1304 __ lea(rax_argslot, __ argument_address(rax_argslot));
583 1305
584 insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp); 1306 insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp);
585 1307
586 // store bound argument into the new stack slot: 1308 // store bound argument into the new stack slot:
587 __ load_heap_oop(rbx_temp, rcx_bmh_argument); 1309 __ load_heap_oop(rbx_temp, rcx_bmh_argument);
588 if (arg_type == T_OBJECT) { 1310 if (arg_type == T_OBJECT) {
589 __ movptr(Address(rax_argslot, 0), rbx_temp); 1311 __ movptr(Address(rax_argslot, 0), rbx_temp);
590 } else { 1312 } else {
591 Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); 1313 Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
592 const int arg_size = type2aelembytes(arg_type); 1314 move_typed_arg(_masm, arg_type, false,
593 __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp); 1315 Address(rax_argslot, 0),
594 __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp); 1316 prim_value_addr,
1317 rbx_temp, rdx_temp);
595 } 1318 }
596 1319
597 if (direct_to_method) { 1320 if (direct_to_method) {
598 Register rbx_method = rbx_temp; 1321 Register rbx_method = rbx_temp;
599 __ load_heap_oop(rbx_method, rcx_mh_vmtarget); 1322 __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
626 __ movl(rax_argslot, rcx_amh_vmargslot); 1349 __ movl(rax_argslot, rcx_amh_vmargslot);
627 vmarg = __ argument_address(rax_argslot); 1350 vmarg = __ argument_address(rax_argslot);
628 1351
629 // What class are we casting to? 1352 // What class are we casting to?
630 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! 1353 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
631 __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); 1354 load_klass_from_Class(_masm, rbx_klass);
632 1355
633 Label done; 1356 Label done;
634 __ movptr(rdx_temp, vmarg); 1357 __ movptr(rdx_temp, vmarg);
635 __ testptr(rdx_temp, rdx_temp); 1358 __ testptr(rdx_temp, rdx_temp);
636 __ jcc(Assembler::zero, done); // no cast if null 1359 __ jcc(Assembler::zero, done); // no cast if null
661 } 1384 }
662 break; 1385 break;
663 1386
664 case _adapter_prim_to_prim: 1387 case _adapter_prim_to_prim:
665 case _adapter_ref_to_prim: 1388 case _adapter_ref_to_prim:
1389 case _adapter_prim_to_ref:
666 // handled completely by optimized cases 1390 // handled completely by optimized cases
667 __ stop("init_AdapterMethodHandle should not issue this"); 1391 __ stop("init_AdapterMethodHandle should not issue this");
668 break; 1392 break;
669 1393
670 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim 1394 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim
712 ShouldNotReachHere(); 1436 ShouldNotReachHere();
713 } 1437 }
714 1438
715 // Do the requested conversion and store the value. 1439 // Do the requested conversion and store the value.
716 Register rbx_vminfo = rbx_temp; 1440 Register rbx_vminfo = rbx_temp;
717 __ movl(rbx_vminfo, rcx_amh_conversion); 1441 load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
718 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
719 1442
720 // get the new MH: 1443 // get the new MH:
721 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 1444 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
722 // (now we are done with the old MH) 1445 // (now we are done with the old MH)
723 1446
751 // perform an in-place int-to-long or ref-to-long conversion 1474 // perform an in-place int-to-long or ref-to-long conversion
752 __ movl(rax_argslot, rcx_amh_vmargslot); 1475 __ movl(rax_argslot, rcx_amh_vmargslot);
753 1476
754 // on a little-endian machine we keep the first slot and add another after 1477 // on a little-endian machine we keep the first slot and add another after
755 __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); 1478 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
756 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, 1479 insert_arg_slots(_masm, stack_move_unit(),
757 rax_argslot, rbx_temp, rdx_temp); 1480 rax_argslot, rbx_temp, rdx_temp);
758 Address vmarg1(rax_argslot, -Interpreter::stackElementSize); 1481 Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
759 Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize); 1482 Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
760 1483
761 switch (ek) { 1484 switch (ek) {
803 { 1526 {
804 // perform an in-place floating primitive conversion 1527 // perform an in-place floating primitive conversion
805 __ movl(rax_argslot, rcx_amh_vmargslot); 1528 __ movl(rax_argslot, rcx_amh_vmargslot);
806 __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); 1529 __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
807 if (ek == _adapter_opt_f2d) { 1530 if (ek == _adapter_opt_f2d) {
808 insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, 1531 insert_arg_slots(_masm, stack_move_unit(),
809 rax_argslot, rbx_temp, rdx_temp); 1532 rax_argslot, rbx_temp, rdx_temp);
810 } 1533 }
811 Address vmarg(rax_argslot, -Interpreter::stackElementSize); 1534 Address vmarg(rax_argslot, -Interpreter::stackElementSize);
812 1535
813 #ifdef _LP64 1536 #ifdef _LP64
838 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 1561 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
839 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 1562 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
840 } 1563 }
841 break; 1564 break;
842 1565
843 case _adapter_prim_to_ref:
844 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
845 break;
846
847 case _adapter_swap_args: 1566 case _adapter_swap_args:
848 case _adapter_rot_args: 1567 case _adapter_rot_args:
849 // handled completely by optimized cases 1568 // handled completely by optimized cases
850 __ stop("init_AdapterMethodHandle should not issue this"); 1569 __ stop("init_AdapterMethodHandle should not issue this");
851 break; 1570 break;
855 case _adapter_opt_rot_1_up: 1574 case _adapter_opt_rot_1_up:
856 case _adapter_opt_rot_1_down: 1575 case _adapter_opt_rot_1_down:
857 case _adapter_opt_rot_2_up: 1576 case _adapter_opt_rot_2_up:
858 case _adapter_opt_rot_2_down: 1577 case _adapter_opt_rot_2_down:
859 { 1578 {
860 int swap_bytes = 0, rotate = 0; 1579 int swap_slots = ek_adapter_opt_swap_slots(ek);
861 get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate); 1580 int rotate = ek_adapter_opt_swap_mode(ek);
862 1581
863 // 'argslot' is the position of the first argument to swap 1582 // 'argslot' is the position of the first argument to swap
864 __ movl(rax_argslot, rcx_amh_vmargslot); 1583 __ movl(rax_argslot, rcx_amh_vmargslot);
865 __ lea(rax_argslot, __ argument_address(rax_argslot)); 1584 __ lea(rax_argslot, __ argument_address(rax_argslot));
866 1585
867 // 'vminfo' is the second 1586 // 'vminfo' is the second
868 Register rbx_destslot = rbx_temp; 1587 Register rbx_destslot = rbx_temp;
869 __ movl(rbx_destslot, rcx_amh_conversion); 1588 load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion);
870 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
871 __ andl(rbx_destslot, CONV_VMINFO_MASK);
872 __ lea(rbx_destslot, __ argument_address(rbx_destslot)); 1589 __ lea(rbx_destslot, __ argument_address(rbx_destslot));
873 DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame")); 1590 if (VerifyMethodHandles)
874 1591 verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame");
1592
1593 assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
875 if (!rotate) { 1594 if (!rotate) {
876 for (int i = 0; i < swap_bytes; i += wordSize) { 1595 // simple swap
877 __ movptr(rdx_temp, Address(rax_argslot , i)); 1596 for (int i = 0; i < swap_slots; i++) {
878 __ push(rdx_temp); 1597 __ movptr(rdi_temp, Address(rax_argslot, i * wordSize));
879 __ movptr(rdx_temp, Address(rbx_destslot, i)); 1598 __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize));
880 __ movptr(Address(rax_argslot, i), rdx_temp); 1599 __ movptr(Address(rax_argslot, i * wordSize), rdx_temp);
881 __ pop(rdx_temp); 1600 __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
882 __ movptr(Address(rbx_destslot, i), rdx_temp);
883 } 1601 }
884 } else { 1602 } else {
885 // push the first chunk, which is going to get overwritten 1603 // A rotate is actually a pair of moves, with an "odd slot" (or pair)
886 for (int i = swap_bytes; (i -= wordSize) >= 0; ) { 1604 // changing place with a series of other slots.
887 __ movptr(rdx_temp, Address(rax_argslot, i)); 1605 // First, push the "odd slot", which is going to get overwritten
888 __ push(rdx_temp); 1606 for (int i = swap_slots - 1; i >= 0; i--) {
1607 // handle one with rdi_temp instead of a push:
1608 if (i == 0) __ movptr(rdi_temp, Address(rax_argslot, i * wordSize));
1609 else __ pushptr( Address(rax_argslot, i * wordSize));
889 } 1610 }
890
891 if (rotate > 0) { 1611 if (rotate > 0) {
892 // rotate upward 1612 // Here is rotate > 0:
893 __ subptr(rax_argslot, swap_bytes); 1613 // (low mem) (high mem)
894 #ifdef ASSERT 1614 // | dest: more_slots... | arg: odd_slot :arg+1 |
895 { 1615 // =>
896 // Verify that argslot > destslot, by at least swap_bytes. 1616 // | dest: odd_slot | dest+1: more_slots... :arg+1 |
897 Label L_ok;
898 __ cmpptr(rax_argslot, rbx_destslot);
899 __ jccb(Assembler::aboveEqual, L_ok);
900 __ stop("source must be above destination (upward rotation)");
901 __ bind(L_ok);
902 }
903 #endif
904 // work argslot down to destslot, copying contiguous data upwards 1617 // work argslot down to destslot, copying contiguous data upwards
905 // pseudo-code: 1618 // pseudo-code:
906 // rax = src_addr - swap_bytes 1619 // rax = src_addr - swap_bytes
907 // rbx = dest_addr 1620 // rbx = dest_addr
908 // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; 1621 // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
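A minimal standalone C++ model of the upward rotation described above (the downward case mirrors it); the container, function name, and indices are illustrative only and are not HotSpot code.

#include <vector>
#include <cassert>

// Illustrative model only: the slots in [dest, arg) shift up by 'swap'
// positions, and the 'swap' odd slot(s) starting at 'arg' land at 'dest'.
static void rotate_up(std::vector<long>& slots, int dest, int arg, int swap) {
  std::vector<long> odd(slots.begin() + arg, slots.begin() + arg + swap); // save the odd chunk
  for (int i = arg - 1; i >= dest; i--)   // copy contiguous data upward
    slots[i + swap] = slots[i];
  for (int i = 0; i < swap; i++)          // drop the odd chunk into the freed hole
    slots[dest + i] = odd[i];
}

int main() {
  std::vector<long> s = {0, 1, 2, 3, 4};  // a 5-slot region of the model stack
  rotate_up(s, /*dest*/1, /*arg*/3, /*swap*/1);
  std::vector<long> expect = {0, 3, 1, 2, 4};
  assert(s == expect);
  return 0;
}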
909 Label loop; 1622 move_arg_slots_up(_masm,
910 __ bind(loop); 1623 rbx_destslot,
911 __ movptr(rdx_temp, Address(rax_argslot, 0)); 1624 Address(rax_argslot, 0),
912 __ movptr(Address(rax_argslot, swap_bytes), rdx_temp); 1625 swap_slots,
913 __ addptr(rax_argslot, -wordSize); 1626 rax_argslot, rdx_temp);
914 __ cmpptr(rax_argslot, rbx_destslot);
915 __ jccb(Assembler::aboveEqual, loop);
916 } else { 1627 } else {
917 __ addptr(rax_argslot, swap_bytes); 1628 // Here is the other direction, rotate < 0:
918 #ifdef ASSERT 1629 // (low mem) (high mem)
919 { 1630 // | arg: odd_slot | arg+1: more_slots... :dest+1 |
920 // Verify that argslot < destslot, by at least swap_bytes. 1631 // =>
921 Label L_ok; 1632 // | arg: more_slots... | dest: odd_slot :dest+1 |
922 __ cmpptr(rax_argslot, rbx_destslot);
923 __ jccb(Assembler::belowEqual, L_ok);
924 __ stop("source must be below destination (downward rotation)");
925 __ bind(L_ok);
926 }
927 #endif
928 // work argslot up to destslot, copying contiguous data downwards 1633 // work argslot up to destslot, copying contiguous data downwards
929 // pseudo-code: 1634 // pseudo-code:
930 // rax = src_addr + swap_bytes 1635 // rax = src_addr + swap_bytes
931 // rbx = dest_addr 1636 // rbx = dest_addr
932 // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; 1637 // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
933 Label loop; 1638 __ addptr(rbx_destslot, wordSize);
934 __ bind(loop); 1639 move_arg_slots_down(_masm,
935 __ movptr(rdx_temp, Address(rax_argslot, 0)); 1640 Address(rax_argslot, swap_slots * wordSize),
936 __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp); 1641 rbx_destslot,
937 __ addptr(rax_argslot, wordSize); 1642 -swap_slots,
938 __ cmpptr(rax_argslot, rbx_destslot); 1643 rax_argslot, rdx_temp);
939 __ jccb(Assembler::belowEqual, loop); 1644
1645 __ subptr(rbx_destslot, wordSize);
940 } 1646 }
941
942 // pop the original first chunk into the destination slot, now free 1647 // pop the original first chunk into the destination slot, now free
943 for (int i = 0; i < swap_bytes; i += wordSize) { 1648 for (int i = 0; i < swap_slots; i++) {
944 __ pop(rdx_temp); 1649 if (i == 0) __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp);
945 __ movptr(Address(rbx_destslot, i), rdx_temp); 1650 else __ popptr(Address(rbx_destslot, i * wordSize));
946 } 1651 }
947 } 1652 }
948 1653
949 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 1654 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
950 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 1655 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
956 // 'argslot' is the position of the first argument to duplicate 1661 // 'argslot' is the position of the first argument to duplicate
957 __ movl(rax_argslot, rcx_amh_vmargslot); 1662 __ movl(rax_argslot, rcx_amh_vmargslot);
958 __ lea(rax_argslot, __ argument_address(rax_argslot)); 1663 __ lea(rax_argslot, __ argument_address(rax_argslot));
959 1664
960 // 'stack_move' is negative number of words to duplicate 1665 // 'stack_move' is negative number of words to duplicate
961 Register rdx_stack_move = rdx_temp; 1666 Register rdi_stack_move = rdi_temp;
962 __ movl2ptr(rdx_stack_move, rcx_amh_conversion); 1667 load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
963 __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT); 1668
964 1669 if (VerifyMethodHandles) {
965 int argslot0_num = 0; 1670 verify_argslots(_masm, rdi_stack_move, rax_argslot, true,
966 Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num)); 1671 "copied argument(s) must fall within current frame");
967 assert(argslot0.base() == rsp, ""); 1672 }
968 int pre_arg_size = argslot0.disp(); 1673
969 assert(pre_arg_size % wordSize == 0, ""); 1674 // insert location is always the bottom of the argument list:
970 assert(pre_arg_size > 0, "must include PC"); 1675 Address insert_location = __ argument_address(constant(0));
971 1676 int pre_arg_words = insert_location.disp() / wordSize; // return PC is pushed
972 // remember the old rsp+1 (argslot[0]) 1677 assert(insert_location.base() == rsp, "");
973 Register rbx_oldarg = rbx_temp; 1678
974 __ lea(rbx_oldarg, argslot0); 1679 __ negl(rdi_stack_move);
975 1680 push_arg_slots(_masm, rax_argslot, rdi_stack_move,
976 // move rsp down to make room for dups 1681 pre_arg_words, rbx_temp, rdx_temp);
977 __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));
978
979 // compute the new rsp+1 (argslot[0])
980 Register rdx_newarg = rdx_temp;
981 __ lea(rdx_newarg, argslot0);
982
983 __ push(rdi); // need a temp
984 // (preceding push must be done after arg addresses are taken!)
985
986 // pull down the pre_arg_size data (PC)
987 for (int i = -pre_arg_size; i < 0; i += wordSize) {
988 __ movptr(rdi, Address(rbx_oldarg, i));
989 __ movptr(Address(rdx_newarg, i), rdi);
990 }
991
992 // copy from rax_argslot[0...] down to new_rsp[1...]
993 // pseudo-code:
994 // rbx = old_rsp+1
995 // rdx = new_rsp+1
996 // rax = argslot
997 // while (rdx < rbx) *rdx++ = *rax++
998 Label loop;
999 __ bind(loop);
1000 __ movptr(rdi, Address(rax_argslot, 0));
1001 __ movptr(Address(rdx_newarg, 0), rdi);
1002 __ addptr(rax_argslot, wordSize);
1003 __ addptr(rdx_newarg, wordSize);
1004 __ cmpptr(rdx_newarg, rbx_oldarg);
1005 __ jccb(Assembler::less, loop);
1006
1007 __ pop(rdi); // restore temp
1008 1682
1009 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 1683 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1010 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 1684 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1011 } 1685 }
1012 break; 1686 break;
1015 { 1689 {
1016 // 'argslot' is the position of the first argument to nuke 1690 // 'argslot' is the position of the first argument to nuke
1017 __ movl(rax_argslot, rcx_amh_vmargslot); 1691 __ movl(rax_argslot, rcx_amh_vmargslot);
1018 __ lea(rax_argslot, __ argument_address(rax_argslot)); 1692 __ lea(rax_argslot, __ argument_address(rax_argslot));
1019 1693
1020 __ push(rdi); // need a temp
1021 // (must do previous push after argslot address is taken) 1694 // (must do previous push after argslot address is taken)
1022 1695
1023 // 'stack_move' is number of words to drop 1696 // 'stack_move' is number of words to drop
1024 Register rdi_stack_move = rdi; 1697 Register rdi_stack_move = rdi_temp;
1025 __ movl2ptr(rdi_stack_move, rcx_amh_conversion); 1698 load_stack_move(_masm, rdi_stack_move, rcx_recv, false);
1026 __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
1027 remove_arg_slots(_masm, rdi_stack_move, 1699 remove_arg_slots(_masm, rdi_stack_move,
1028 rax_argslot, rbx_temp, rdx_temp); 1700 rax_argslot, rbx_temp, rdx_temp);
1029 1701
1030 __ pop(rdi); // restore temp
1031
1032 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 1702 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1033 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 1703 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1034 } 1704 }
1035 break; 1705 break;
1036 1706
1037 case _adapter_collect_args: 1707 case _adapter_collect_args:
1038 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI 1708 case _adapter_fold_args:
1039 break;
1040
1041 case _adapter_spread_args: 1709 case _adapter_spread_args:
1042 // handled completely by optimized cases 1710 // handled completely by optimized cases
1043 __ stop("init_AdapterMethodHandle should not issue this"); 1711 __ stop("init_AdapterMethodHandle should not issue this");
1044 break; 1712 break;
1045 1713
1714 case _adapter_opt_collect_ref:
1715 case _adapter_opt_collect_int:
1716 case _adapter_opt_collect_long:
1717 case _adapter_opt_collect_float:
1718 case _adapter_opt_collect_double:
1719 case _adapter_opt_collect_void:
1720 case _adapter_opt_collect_0_ref:
1721 case _adapter_opt_collect_1_ref:
1722 case _adapter_opt_collect_2_ref:
1723 case _adapter_opt_collect_3_ref:
1724 case _adapter_opt_collect_4_ref:
1725 case _adapter_opt_collect_5_ref:
1726 case _adapter_opt_filter_S0_ref:
1727 case _adapter_opt_filter_S1_ref:
1728 case _adapter_opt_filter_S2_ref:
1729 case _adapter_opt_filter_S3_ref:
1730 case _adapter_opt_filter_S4_ref:
1731 case _adapter_opt_filter_S5_ref:
1732 case _adapter_opt_collect_2_S0_ref:
1733 case _adapter_opt_collect_2_S1_ref:
1734 case _adapter_opt_collect_2_S2_ref:
1735 case _adapter_opt_collect_2_S3_ref:
1736 case _adapter_opt_collect_2_S4_ref:
1737 case _adapter_opt_collect_2_S5_ref:
1738 case _adapter_opt_fold_ref:
1739 case _adapter_opt_fold_int:
1740 case _adapter_opt_fold_long:
1741 case _adapter_opt_fold_float:
1742 case _adapter_opt_fold_double:
1743 case _adapter_opt_fold_void:
1744 case _adapter_opt_fold_1_ref:
1745 case _adapter_opt_fold_2_ref:
1746 case _adapter_opt_fold_3_ref:
1747 case _adapter_opt_fold_4_ref:
1748 case _adapter_opt_fold_5_ref:
1749 {
1750 // Given a fresh incoming stack frame, build a new ricochet frame.
1751 // On entry, TOS points at a return PC, and RBP is the caller's frame ptr.
1752 // RSI/R13 has the caller's exact stack pointer, which we must also preserve.
1753 // RCX contains an AdapterMethodHandle of the indicated kind.
1754
1755 // Relevant AMH fields:
1756 // amh.vmargslot:
1757 // points to the trailing edge of the arguments
1758 // to filter, collect, or fold. For a boxing operation,
1759 // it points just after the single primitive value.
1760 // amh.argument:
1761 // recursively called MH, on |collect| arguments
1762 // amh.vmtarget:
1763 // final destination MH, on return value, etc.
1764 // amh.conversion.dest:
1765 // tells the type of the return value
1766 // (not needed here, since dest is also derived from ek)
1767 // amh.conversion.vminfo:
1768 // points to the trailing edge of the return value
1769 // when the vmtarget is to be called; this is
1770 // equal to vmargslot + (retained ? |collect| : 0)
1771
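The vminfo relationship above can be checked with a tiny standalone model (plain C++, not HotSpot code; the function name is made up).

#include <cassert>

// Per the comment above: the return value replaces (or is inserted at) the
// slot vmargslot + (retained ? |collect| : 0).
static int return_value_slot(int vmargslot, int collect_count, bool retain_original_args) {
  return vmargslot + (retain_original_args ? collect_count : 0);
}

int main() {
  // collect/filter: the collected arguments are consumed, so the result
  // lands where they used to begin
  assert(return_value_slot(3, 2, false) == 3);
  // fold: every argument is retained, so the result is prepended just
  // above the |collect| run
  assert(return_value_slot(3, 2, true) == 5);
  return 0;
}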
1772 // Pass 0 or more argument slots to the recursive target.
1773 int collect_count_constant = ek_adapter_opt_collect_count(ek);
1774
1775 // The collected arguments are copied from the saved argument list:
1776 int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
1777
1778 assert(ek_orig == _adapter_collect_args ||
1779 ek_orig == _adapter_fold_args, "");
1780 bool retain_original_args = (ek_orig == _adapter_fold_args);
1781
1782 // The return value is replaced (or inserted) at the 'vminfo' argslot.
1783 // Sometimes we can compute this statically.
1784 int dest_slot_constant = -1;
1785 if (!retain_original_args)
1786 dest_slot_constant = collect_slot_constant;
1787 else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
1788 // We are preserving all the arguments, and the return value is prepended,
1789 // so the return slot is to the left (above) the |collect| sequence.
1790 dest_slot_constant = collect_slot_constant + collect_count_constant;
1791
1792 // Replace all those slots by the result of the recursive call.
1793 // The result type can be one of ref, int, long, float, double, void.
1794 // In the case of void, nothing is pushed on the stack after return.
1795 BasicType dest = ek_adapter_opt_collect_type(ek);
1796 assert(dest == type2wfield[dest], "dest is a stack slot type");
1797 int dest_count = type2size[dest];
1798 assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
1799
1800 // Choose a return continuation.
1801 EntryKind ek_ret = _adapter_opt_return_any;
1802 if (dest != T_CONFLICT && OptimizeMethodHandles) {
1803 switch (dest) {
1804 case T_INT : ek_ret = _adapter_opt_return_int; break;
1805 case T_LONG : ek_ret = _adapter_opt_return_long; break;
1806 case T_FLOAT : ek_ret = _adapter_opt_return_float; break;
1807 case T_DOUBLE : ek_ret = _adapter_opt_return_double; break;
1808 case T_OBJECT : ek_ret = _adapter_opt_return_ref; break;
1809 case T_VOID : ek_ret = _adapter_opt_return_void; break;
1810 default : ShouldNotReachHere();
1811 }
1812 if (dest == T_OBJECT && dest_slot_constant >= 0) {
1813 EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
1814 if (ek_try <= _adapter_opt_return_LAST &&
1815 ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
1816 ek_ret = ek_try;
1817 }
1818 }
1819 assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
1820 }
1821
1822 // Already pushed: ... keep1 | collect | keep2 | sender_pc |
1823 // push(sender_pc);
1824
1825 // Compute argument base:
1826 Register rax_argv = rax_argslot;
1827 __ lea(rax_argv, __ argument_address(constant(0)));
1828
1829 // Push a few extra argument words, if we need them to store the return value.
1830 {
1831 int extra_slots = 0;
1832 if (retain_original_args) {
1833 extra_slots = dest_count;
1834 } else if (collect_count_constant == -1) {
1835 extra_slots = dest_count; // collect_count might be zero; be generous
1836 } else if (dest_count > collect_count_constant) {
1837 extra_slots = (dest_count - collect_count_constant);
1838 } else {
1839 // else we know we have enough dead space in |collect| to repurpose for return values
1840 }
1841 DEBUG_ONLY(extra_slots += 1);
1842 if (extra_slots > 0) {
1843 __ pop(rbx_temp); // return value
1844 __ subptr(rsp, (extra_slots * Interpreter::stackElementSize));
1845 // Push guard word #2 in debug mode.
1846 DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2));
1847 __ push(rbx_temp);
1848 }
1849 }
1850
1851 RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv,
1852 entry(ek_ret)->from_interpreted_entry(), rbx_temp);
1853
1854 // Now pushed: ... keep1 | collect | keep2 | RF |
1855 // some handy frame slots:
1856 Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes());
1857 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
1858 Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
1859
1860 #ifdef ASSERT
1861 if (VerifyMethodHandles && dest != T_CONFLICT) {
1862 BLOCK_COMMENT("verify AMH.conv.dest");
1863 load_conversion_dest_type(_masm, rbx_temp, conversion_addr);
1864 Label L_dest_ok;
1865 __ cmpl(rbx_temp, (int) dest);
1866 __ jcc(Assembler::equal, L_dest_ok);
1867 if (dest == T_INT) {
1868 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
1869 if (is_subword_type(BasicType(bt))) {
1870 __ cmpl(rbx_temp, (int) bt);
1871 __ jcc(Assembler::equal, L_dest_ok);
1872 }
1873 }
1874 }
1875 __ stop("bad dest in AMH.conv");
1876 __ BIND(L_dest_ok);
1877 }
1878 #endif //ASSERT
1879
1880 // Find out where the original copy of the recursive argument sequence begins.
1881 Register rax_coll = rax_argv;
1882 {
1883 RegisterOrConstant collect_slot = collect_slot_constant;
1884 if (collect_slot_constant == -1) {
1885 __ movl(rdi_temp, rcx_amh_vmargslot);
1886 collect_slot = rdi_temp;
1887 }
1888 if (collect_slot_constant != 0)
1889 __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale()));
1890 // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2|
1891 }
1892
1893 // Replace the old AMH with the recursive MH. (No going back now.)
1894 // In the case of a boxing call, the recursive call is to a 'boxer' method,
1895 // such as Integer.valueOf or Long.valueOf. In the case of a filter
1896 // or collect call, it will take one or more arguments, transform them,
1897 // and return some result, to store back into argument_base[vminfo].
1898 __ load_heap_oop(rcx_recv, rcx_amh_argument);
1899 if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv);
1900
1901 // Push a space for the recursively called MH first:
1902 __ push((int32_t)NULL_WORD);
1903
1904 // Calculate |collect|, the number of arguments we are collecting.
1905 Register rdi_collect_count = rdi_temp;
1906 RegisterOrConstant collect_count;
1907 if (collect_count_constant >= 0) {
1908 collect_count = collect_count_constant;
1909 } else {
1910 __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp);
1911 collect_count = rdi_collect_count;
1912 }
1913 #ifdef ASSERT
1914 if (VerifyMethodHandles && collect_count_constant >= 0) {
1915 __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
1916 Label L_count_ok;
1917 __ cmpl(rbx_temp, collect_count_constant);
1918 __ jcc(Assembler::equal, L_count_ok);
1919 __ stop("bad vminfo in AMH.conv");
1920 __ BIND(L_count_ok);
1921 }
1922 #endif //ASSERT
1923
1924 // copy |collect| slots directly to TOS:
1925 push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
1926 // Now pushed: ... keep1 | collect | keep2 | RF... | collect |
1927 // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
1928
1929 // If necessary, adjust the saved arguments to make room for the eventual return value.
1930 // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
1931 // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect |
1932 // In the non-retaining case, this might move keep2 either up or down.
1933 // We don't have to copy the whole | RF... collect | complex,
1934 // but we must adjust RF.saved_args_base.
1935 // Also, from now on, we will forget about the original copy of |collect|.
1936 // If we are retaining it, we will treat it as part of |keep2|.
1937 // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
1938
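A minimal standalone sketch of the slot bookkeeping used below (plain C++; the names are illustrative, not HotSpot code).

#include <cassert>

// Illustrative sketch: the argument base shifts by (close_count - open_count)
// slots, where open_count = |+dest+| (slots opened for the return value) and
// close_count = |-collect-| (slots closed because the collected arguments are
// consumed; 0 when they are retained).
static int net_shift_slots(int open_count, int close_count) {
  return close_count - open_count;
}

int main() {
  // typical collect: 3 collected slots closed, 1 slot opened for an int result
  // => |keep3| moves up by 2 slots
  assert(net_shift_slots(1, 3) == 2);
  // fold (arguments retained): nothing closed, 2 slots opened for a long result
  // => |keep3| moves down by 2 slots
  assert(net_shift_slots(2, 0) == -2);
  return 0;
}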
1939 BLOCK_COMMENT("adjust trailing arguments {");
1940 // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
1941 int open_count = dest_count;
1942 RegisterOrConstant close_count = collect_count_constant;
1943 Register rdi_close_count = rdi_collect_count;
1944 if (retain_original_args) {
1945 close_count = constant(0);
1946 } else if (collect_count_constant == -1) {
1947 close_count = rdi_collect_count;
1948 }
1949
1950 // How many slots need moving? This is simply dest_slot (0 => no |keep3|).
1951 RegisterOrConstant keep3_count;
1952 Register rsi_keep3_count = rsi; // can repair from RF.exact_sender_sp
1953 if (dest_slot_constant >= 0) {
1954 keep3_count = dest_slot_constant;
1955 } else {
1956 load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr);
1957 keep3_count = rsi_keep3_count;
1958 }
1959 #ifdef ASSERT
1960 if (VerifyMethodHandles && dest_slot_constant >= 0) {
1961 load_conversion_vminfo(_masm, rbx_temp, conversion_addr);
1962 Label L_vminfo_ok;
1963 __ cmpl(rbx_temp, dest_slot_constant);
1964 __ jcc(Assembler::equal, L_vminfo_ok);
1965 __ stop("bad vminfo in AMH.conv");
1966 __ BIND(L_vminfo_ok);
1967 }
1968 #endif //ASSERT
1969
1970 // tasks remaining:
1971 bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
1972 bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
1973 bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
1974
1975 if (stomp_dest | fix_arg_base) {
1976 // we will probably need an updated rax_argv value
1977 if (collect_slot_constant >= 0) {
1978 // rax_coll already holds the leading edge of |keep2|, so tweak it
1979 assert(rax_coll == rax_argv, "elided a move");
1980 if (collect_slot_constant != 0)
1981 __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
1982 } else {
1983 // Just reload from RF.saved_args_base.
1984 __ movptr(rax_argv, saved_args_base_addr);
1985 }
1986 }
1987
1988 // Old and new argument locations (based at slot 0).
1989 // Net shift (&new_argv - &old_argv) is (close_count - open_count).
1990 bool zero_open_count = (open_count == 0); // remember this bit of info
1991 if (move_keep3 && fix_arg_base) {
1992 // It will be easier to have everything in one register:
1993 if (close_count.is_register()) {
1994 // Deduct open_count from close_count register to get a clean +/- value.
1995 __ subptr(close_count.as_register(), open_count);
1996 } else {
1997 close_count = close_count.as_constant() - open_count;
1998 }
1999 open_count = 0;
2000 }
2001 Address old_argv(rax_argv, 0);
2002 Address new_argv(rax_argv, close_count, Interpreter::stackElementScale(),
2003 - open_count * Interpreter::stackElementSize);
2004
2005 // First decide if any actual data are to be moved.
2006 // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
2007 // (As it happens, all movements involve an argument list size change.)
2008
2009 // If there are variable parameters, use dynamic checks to skip around the whole mess.
2010 Label L_done;
2011 if (!keep3_count.is_constant()) {
2012 __ testl(keep3_count.as_register(), keep3_count.as_register());
2013 __ jcc(Assembler::zero, L_done);
2014 }
2015 if (!close_count.is_constant()) {
2016 __ cmpl(close_count.as_register(), open_count);
2017 __ jcc(Assembler::equal, L_done);
2018 }
2019
2020 if (move_keep3 && fix_arg_base) {
2021 bool emit_move_down = false, emit_move_up = false, emit_guard = false;
2022 if (!close_count.is_constant()) {
2023 emit_move_down = emit_guard = !zero_open_count;
2024 emit_move_up = true;
2025 } else if (open_count != close_count.as_constant()) {
2026 emit_move_down = (open_count > close_count.as_constant());
2027 emit_move_up = !emit_move_down;
2028 }
2029 Label L_move_up;
2030 if (emit_guard) {
2031 __ cmpl(close_count.as_register(), open_count);
2032 __ jcc(Assembler::greater, L_move_up);
2033 }
2034
2035 if (emit_move_down) {
2036 // Move arguments down if |+dest+| > |-collect-|
2037 // (This is rare, except when arguments are retained.)
2038 // This opens space for the return value.
2039 if (keep3_count.is_constant()) {
2040 for (int i = 0; i < keep3_count.as_constant(); i++) {
2041 __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
2042 __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
2043 }
2044 } else {
2045 Register rbx_argv_top = rbx_temp;
2046 __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()));
2047 move_arg_slots_down(_masm,
2048 old_argv, // beginning of old argv
2049 rbx_argv_top, // end of old argv
2050 close_count, // distance to move down (must be negative)
2051 rax_argv, rdx_temp);
2052 // Used argv as an iteration variable; reload from RF.saved_args_base.
2053 __ movptr(rax_argv, saved_args_base_addr);
2054 }
2055 }
2056
2057 if (emit_guard) {
2058 __ jmp(L_done); // assumes emit_move_up is true also
2059 __ BIND(L_move_up);
2060 }
2061
2062 if (emit_move_up) {
2063
2064 // Move arguments up if |+dest+| < |-collect-|
2065 // (This is usual, except when |keep3| is empty.)
2066 // This closes up the space occupied by the now-deleted collect values.
2067 if (keep3_count.is_constant()) {
2068 for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
2069 __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
2070 __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
2071 }
2072 } else {
2073 Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale());
2074 move_arg_slots_up(_masm,
2075 rax_argv, // beginning of old argv
2076 argv_top, // end of old argv
2077 close_count, // distance to move up (must be positive)
2078 rbx_temp, rdx_temp);
2079 }
2080 }
2081 }
2082 __ BIND(L_done);
2083
2084 if (fix_arg_base) {
2085 // adjust RF.saved_args_base by adding (close_count - open_count)
2086 if (!new_argv.is_same_address(Address(rax_argv, 0)))
2087 __ lea(rax_argv, new_argv);
2088 __ movptr(saved_args_base_addr, rax_argv);
2089 }
2090
2091 if (stomp_dest) {
2092 // Stomp the return slot, so it doesn't hold garbage.
2093 // This isn't strictly necessary, but it may help detect bugs.
2094 int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER;
2095 __ movptr(Address(rax_argv, keep3_count, Address::times_ptr),
2096 (int32_t) forty_two);
2097 // uses rsi_keep3_count
2098 }
2099 BLOCK_COMMENT("} adjust trailing arguments");
2100
2101 BLOCK_COMMENT("do_recursive_call");
2102 __ mov(saved_last_sp, rsp); // set rsi/r13 for callee
2103 __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr());
2104 // The globally unique bounce address has two purposes:
2105 // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
2106 // 2. When returned to, it cuts back the stack and redirects control flow
2107 // to the return handler.
2108 // The return handler will further cut back the stack when it takes
2109 // down the RF. Perhaps there is a way to streamline this further.
2110
2111 // State during recursive call:
2112 // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
2113 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
2114
2115 break;
2116 }
2117
2118 case _adapter_opt_return_ref:
2119 case _adapter_opt_return_int:
2120 case _adapter_opt_return_long:
2121 case _adapter_opt_return_float:
2122 case _adapter_opt_return_double:
2123 case _adapter_opt_return_void:
2124 case _adapter_opt_return_S0_ref:
2125 case _adapter_opt_return_S1_ref:
2126 case _adapter_opt_return_S2_ref:
2127 case _adapter_opt_return_S3_ref:
2128 case _adapter_opt_return_S4_ref:
2129 case _adapter_opt_return_S5_ref:
2130 {
2131 BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
2132 int dest_slot_constant = ek_adapter_opt_return_slot(ek);
2133
2134 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2135
2136 if (dest_slot_constant == -1) {
2137 // The current stub is a general handler for this dest_type.
2138 // It can be called from _adapter_opt_return_any below.
2139 // Stash the address in a little table.
2140 assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
2141 address return_handler = __ pc();
2142 _adapter_return_handlers[dest_type_constant] = return_handler;
2143 if (dest_type_constant == T_INT) {
2144 // do the subword types too
2145 for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
2146 if (is_subword_type(BasicType(bt)) &&
2147 _adapter_return_handlers[bt] == NULL) {
2148 _adapter_return_handlers[bt] = return_handler;
2149 }
2150 }
2151 }
2152 }
2153
2154 Register rbx_arg_base = rbx_temp;
2155 assert_different_registers(rax, rdx, // possibly live return value registers
2156 rdi_temp, rbx_arg_base);
2157
2158 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
2159 Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes());
2160
2161 __ movptr(rbx_arg_base, saved_args_base_addr);
2162 RegisterOrConstant dest_slot = dest_slot_constant;
2163 if (dest_slot_constant == -1) {
2164 load_conversion_vminfo(_masm, rdi_temp, conversion_addr);
2165 dest_slot = rdi_temp;
2166 }
2167 // Store the result back into the argslot.
2168 // This code uses the interpreter calling sequence, in which the return value
2169 // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
2170 // There are certain irregularities with floating point values, which can be seen
2171 // in TemplateInterpreterGenerator::generate_return_entry_for.
2172 move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale()));
2173
2174 RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp);
2175 __ push(rdx_temp); // repush the return PC
2176
2177 // Load the final target and go.
2178 if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv);
2179 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
2180 __ hlt(); // --------------------
2181 break;
2182 }
2183
2184 case _adapter_opt_return_any:
2185 {
2186 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
2187 Register rdi_conv = rdi_temp;
2188 assert_different_registers(rax, rdx, // possibly live return value registers
2189 rdi_conv, rbx_temp);
2190
2191 Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes());
2192 load_conversion_dest_type(_masm, rdi_conv, conversion_addr);
2193 __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0]));
2194 __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr));
2195
2196 #ifdef ASSERT
2197 { Label L_badconv;
2198 __ testptr(rbx_temp, rbx_temp);
2199 __ jccb(Assembler::zero, L_badconv);
2200 __ jmp(rbx_temp);
2201 __ bind(L_badconv);
2202 __ stop("bad method handle return");
2203 }
2204 #else //ASSERT
2205 __ jmp(rbx_temp);
2206 #endif //ASSERT
2207 break;
2208 }
2209
1046 case _adapter_opt_spread_0: 2210 case _adapter_opt_spread_0:
1047 case _adapter_opt_spread_1: 2211 case _adapter_opt_spread_1_ref:
1048 case _adapter_opt_spread_more: 2212 case _adapter_opt_spread_2_ref:
2213 case _adapter_opt_spread_3_ref:
2214 case _adapter_opt_spread_4_ref:
2215 case _adapter_opt_spread_5_ref:
2216 case _adapter_opt_spread_ref:
2217 case _adapter_opt_spread_byte:
2218 case _adapter_opt_spread_char:
2219 case _adapter_opt_spread_short:
2220 case _adapter_opt_spread_int:
2221 case _adapter_opt_spread_long:
2222 case _adapter_opt_spread_float:
2223 case _adapter_opt_spread_double:
1049 { 2224 {
1050 // spread an array out into a group of arguments 2225 // spread an array out into a group of arguments
1051 int length_constant = get_ek_adapter_opt_spread_info(ek); 2226 int length_constant = ek_adapter_opt_spread_count(ek);
2227 bool length_can_be_zero = (length_constant == 0);
2228 if (length_constant < 0) {
2229 // some adapters with variable length must handle the zero case
2230 if (!OptimizeMethodHandles ||
2231 ek_adapter_opt_spread_type(ek) != T_OBJECT)
2232 length_can_be_zero = true;
2233 }
1052 2234
1053 // find the address of the array argument 2235 // find the address of the array argument
1054 __ movl(rax_argslot, rcx_amh_vmargslot); 2236 __ movl(rax_argslot, rcx_amh_vmargslot);
1055 __ lea(rax_argslot, __ argument_address(rax_argslot)); 2237 __ lea(rax_argslot, __ argument_address(rax_argslot));
1056 2238
1057 // grab some temps 2239 // grab another temp
1058 { __ push(rsi); __ push(rdi); } 2240 Register rsi_temp = rsi;
1059 // (preceding pushes must be done after argslot address is taken!) 2241 { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
1060 #define UNPUSH_RSI_RDI \ 2242 // (preceding push must be done after argslot address is taken!)
1061 { __ pop(rdi); __ pop(rsi); } 2243 #define UNPUSH_RSI \
2244 { if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
1062 2245
1063 // rax_argslot points both to the array and to the first output arg 2246 // rax_argslot points both to the array and to the first output arg
1064 vmarg = Address(rax_argslot, 0); 2247 vmarg = Address(rax_argslot, 0);
1065 2248
1066 // Get the array value. 2249 // Get the array value.
1067 Register rsi_array = rsi; 2250 Register rsi_array = rsi_temp;
1068 Register rdx_array_klass = rdx_temp; 2251 Register rdx_array_klass = rdx_temp;
1069 BasicType elem_type = T_OBJECT; 2252 BasicType elem_type = ek_adapter_opt_spread_type(ek);
2253 int elem_slots = type2size[elem_type]; // 1 or 2
2254 int array_slots = 1; // array is always a T_OBJECT
1070 int length_offset = arrayOopDesc::length_offset_in_bytes(); 2255 int length_offset = arrayOopDesc::length_offset_in_bytes();
1071 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); 2256 int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
1072 __ movptr(rsi_array, vmarg); 2257 __ movptr(rsi_array, vmarg);
1073 Label skip_array_check; 2258
1074 if (length_constant == 0) { 2259 Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
2260 if (length_can_be_zero) {
2261 // handle the null pointer case, if zero is allowed
2262 Label L_skip;
2263 if (length_constant < 0) {
2264 load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion);
2265 __ testl(rbx_temp, rbx_temp);
2266 __ jcc(Assembler::notZero, L_skip);
2267 }
1075 __ testptr(rsi_array, rsi_array); 2268 __ testptr(rsi_array, rsi_array);
1076 __ jcc(Assembler::zero, skip_array_check); 2269 __ jcc(Assembler::zero, L_array_is_empty);
2270 __ bind(L_skip);
1077 } 2271 }
1078 __ null_check(rsi_array, oopDesc::klass_offset_in_bytes()); 2272 __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
1079 __ load_klass(rdx_array_klass, rsi_array); 2273 __ load_klass(rdx_array_klass, rsi_array);
1080 2274
1081 // Check the array type. 2275 // Check the array type.
1082 Register rbx_klass = rbx_temp; 2276 Register rbx_klass = rbx_temp;
1083 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! 2277 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
1084 __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); 2278 load_klass_from_Class(_masm, rbx_klass);
1085 2279
1086 Label ok_array_klass, bad_array_klass, bad_array_length; 2280 Label ok_array_klass, bad_array_klass, bad_array_length;
1087 __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass); 2281 __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass);
1088 // If we get here, the type check failed! 2282 // If we get here, the type check failed!
1089 __ jmp(bad_array_klass); 2283 __ jmp(bad_array_klass);
1090 __ bind(ok_array_klass); 2284 __ BIND(ok_array_klass);
1091 2285
1092 // Check length. 2286 // Check length.
1093 if (length_constant >= 0) { 2287 if (length_constant >= 0) {
1094 __ cmpl(Address(rsi_array, length_offset), length_constant); 2288 __ cmpl(Address(rsi_array, length_offset), length_constant);
1095 } else { 2289 } else {
1096 Register rbx_vminfo = rbx_temp; 2290 Register rbx_vminfo = rbx_temp;
1097 __ movl(rbx_vminfo, rcx_amh_conversion); 2291 load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
1098 assert(CONV_VMINFO_SHIFT == 0, "preshifted");
1099 __ andl(rbx_vminfo, CONV_VMINFO_MASK);
1100 __ cmpl(rbx_vminfo, Address(rsi_array, length_offset)); 2292 __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
1101 } 2293 }
1102 __ jcc(Assembler::notEqual, bad_array_length); 2294 __ jcc(Assembler::notEqual, bad_array_length);
1103 2295
1104 Register rdx_argslot_limit = rdx_temp; 2296 Register rdx_argslot_limit = rdx_temp;
1106 // Array length checks out. Now insert any required stack slots. 2298 // Array length checks out. Now insert any required stack slots.
1107 if (length_constant == -1) { 2299 if (length_constant == -1) {
1108 // Form a pointer to the end of the affected region. 2300 // Form a pointer to the end of the affected region.
1109 __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize)); 2301 __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
1110 // 'stack_move' is negative number of words to insert 2302 // 'stack_move' is negative number of words to insert
1111 Register rdi_stack_move = rdi; 2303 // This number already accounts for elem_slots.
1112 __ movl2ptr(rdi_stack_move, rcx_amh_conversion); 2304 Register rdi_stack_move = rdi_temp;
1113 __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); 2305 load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
2306 __ cmpptr(rdi_stack_move, 0);
2307 assert(stack_move_unit() < 0, "else change this comparison");
2308 __ jcc(Assembler::less, L_insert_arg_space);
2309 __ jcc(Assembler::equal, L_copy_args);
2310 // single argument case, with no array movement
2311 __ BIND(L_array_is_empty);
2312 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
2313 rax_argslot, rbx_temp, rdx_temp);
2314 __ jmp(L_args_done); // no spreading to do
2315 __ BIND(L_insert_arg_space);
2316 // come here in the usual case, stack_move < 0 (2 or more spread arguments)
1114 Register rsi_temp = rsi_array; // spill this 2317 Register rsi_temp = rsi_array; // spill this
1115 insert_arg_slots(_masm, rdi_stack_move, -1, 2318 insert_arg_slots(_masm, rdi_stack_move,
1116 rax_argslot, rbx_temp, rsi_temp); 2319 rax_argslot, rbx_temp, rsi_temp);
1117 // reload the array (since rsi was killed) 2320 // reload the array since rsi was killed
1118 __ movptr(rsi_array, vmarg); 2321 // reload from rdx_argslot_limit since rax_argslot is now decremented
1119 } else if (length_constant > 1) { 2322 __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
1120 int arg_mask = 0; 2323 } else if (length_constant >= 1) {
1121 int new_slots = (length_constant - 1); 2324 int new_slots = (length_constant * elem_slots) - array_slots;
1122 for (int i = 0; i < new_slots; i++) { 2325 insert_arg_slots(_masm, new_slots * stack_move_unit(),
1123 arg_mask <<= 1;
1124 arg_mask |= _INSERT_REF_MASK;
1125 }
1126 insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
1127 rax_argslot, rbx_temp, rdx_temp); 2326 rax_argslot, rbx_temp, rdx_temp);
1128 } else if (length_constant == 1) {
1129 // no stack resizing required
1130 } else if (length_constant == 0) { 2327 } else if (length_constant == 0) {
1131 remove_arg_slots(_masm, -stack_move_unit(), 2328 __ BIND(L_array_is_empty);
2329 remove_arg_slots(_masm, -stack_move_unit() * array_slots,
1132 rax_argslot, rbx_temp, rdx_temp); 2330 rax_argslot, rbx_temp, rdx_temp);
2331 } else {
2332 ShouldNotReachHere();
1133 } 2333 }
1134 2334
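As a rough standalone check of the slot arithmetic above (new_slots = length * elem_slots - array_slots; plain C++, illustrative names only, not HotSpot code).

#include <cassert>

// Spreading an array consumes its single T_OBJECT slot and produces
// length * elem_slots value slots, so this many extra slots must be
// inserted (negative means slots are removed, as in the empty case).
static int extra_slots_for_spread(int length, int elem_slots /* 1 or 2 */) {
  const int array_slots = 1;
  return length * elem_slots - array_slots;
}

int main() {
  assert(extra_slots_for_spread(3, 1) == 2);   // Object[3]: insert 2 slots
  assert(extra_slots_for_spread(3, 2) == 5);   // long[3]:   insert 5 slots
  assert(extra_slots_for_spread(1, 1) == 0);   // Object[1]: no resize needed
  assert(extra_slots_for_spread(0, 1) == -1);  // empty:     remove the array slot
  return 0;
}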
1135 // Copy from the array to the new slots. 2335 // Copy from the array to the new slots.
1136 // Note: Stack change code preserves integrity of rax_argslot pointer. 2336 // Note: Stack change code preserves integrity of rax_argslot pointer.
1137 // So even after slot insertions, rax_argslot still points to first argument. 2337 // So even after slot insertions, rax_argslot still points to first argument.
2338 // Beware: Arguments that are shallow on the stack are deep in the array,
2339 // and vice versa. So a downward-growing stack (the usual) has to be copied
2340 // elementwise in reverse order from the source array.
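A plain-C++ sketch of that reverse, elementwise copy (illustrative model only; one slot per element is assumed, not HotSpot code).

#include <vector>
#include <cassert>

// Model: slots[0] is the shallowest slot of the spread region.
// Array element [0] must land in the deepest slot, so the fill index walks
// backward while the source index walks forward.
static void spread_into_slots(const std::vector<long>& array, std::vector<long>& slots) {
  assert(array.size() == slots.size());
  size_t fill = slots.size();
  for (size_t i = 0; i < array.size(); i++) {
    slots[--fill] = array[i];     // element [0] ends up at the deep end
  }
}

int main() {
  std::vector<long> arr = {10, 20, 30};
  std::vector<long> slots(3);
  spread_into_slots(arr, slots);
  assert(slots[0] == 30 && slots[2] == 10);  // reversed in stack order
  return 0;
}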
2341 __ BIND(L_copy_args);
1138 if (length_constant == -1) { 2342 if (length_constant == -1) {
1139 // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. 2343 // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
2344 // Array element [0] goes at rdx_argslot_limit[-wordSize].
1140 Register rsi_source = rsi_array; 2345 Register rsi_source = rsi_array;
1141 __ lea(rsi_source, Address(rsi_array, elem0_offset)); 2346 __ lea(rsi_source, Address(rsi_array, elem0_offset));
2347 Register rdx_fill_ptr = rdx_argslot_limit;
1142 Label loop; 2348 Label loop;
1143 __ bind(loop); 2349 __ BIND(loop);
1144 __ movptr(rbx_temp, Address(rsi_source, 0)); 2350 __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
1145 __ movptr(Address(rax_argslot, 0), rbx_temp); 2351 move_typed_arg(_masm, elem_type, true,
2352 Address(rdx_fill_ptr, 0), Address(rsi_source, 0),
2353 rbx_temp, rdi_temp);
1146 __ addptr(rsi_source, type2aelembytes(elem_type)); 2354 __ addptr(rsi_source, type2aelembytes(elem_type));
1147 __ addptr(rax_argslot, Interpreter::stackElementSize); 2355 __ cmpptr(rdx_fill_ptr, rax_argslot);
1148 __ cmpptr(rax_argslot, rdx_argslot_limit); 2356 __ jcc(Assembler::greater, loop);
1149 __ jccb(Assembler::less, loop);
1150 } else if (length_constant == 0) { 2357 } else if (length_constant == 0) {
1151 __ bind(skip_array_check);
1152 // nothing to copy 2358 // nothing to copy
1153 } else { 2359 } else {
1154 int elem_offset = elem0_offset; 2360 int elem_offset = elem0_offset;
1155 int slot_offset = 0; 2361 int slot_offset = length_constant * Interpreter::stackElementSize;
1156 for (int index = 0; index < length_constant; index++) { 2362 for (int index = 0; index < length_constant; index++) {
1157 __ movptr(rbx_temp, Address(rsi_array, elem_offset)); 2363 slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward
1158 __ movptr(Address(rax_argslot, slot_offset), rbx_temp); 2364 move_typed_arg(_masm, elem_type, true,
2365 Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset),
2366 rbx_temp, rdi_temp);
1159 elem_offset += type2aelembytes(elem_type); 2367 elem_offset += type2aelembytes(elem_type);
1160 slot_offset += Interpreter::stackElementSize;
1161 } 2368 }
1162 } 2369 }
2370 __ BIND(L_args_done);
1163 2371
1164 // Arguments are spread. Move to next method handle. 2372 // Arguments are spread. Move to next method handle.
1165 UNPUSH_RSI_RDI; 2373 UNPUSH_RSI;
1166 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 2374 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
1167 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 2375 __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
1168 2376
1169 __ bind(bad_array_klass); 2377 __ bind(bad_array_klass);
1170 UNPUSH_RSI_RDI; 2378 UNPUSH_RSI;
1171 assert(!vmarg.uses(rarg2_required), "must be different registers"); 2379 assert(!vmarg.uses(rarg2_required), "must be different registers");
1172 __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type 2380 __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type
1173 __ movptr(rarg1_actual, vmarg); // bad array 2381 __ movptr( rarg1_actual, vmarg); // bad array
1174 __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? 2382 __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining?
1175 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); 2383 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1176 2384
1177 __ bind(bad_array_length); 2385 __ bind(bad_array_length);
1178 UNPUSH_RSI_RDI; 2386 UNPUSH_RSI;
1179 assert(!vmarg.uses(rarg2_required), "must be different registers"); 2387 assert(!vmarg.uses(rarg2_required), "must be different registers");
1180 __ mov (rarg2_required, rcx_recv); // AMH requiring a certain length 2388 __ mov( rarg2_required, rcx_recv); // AMH requiring a certain length
1181 __ movptr(rarg1_actual, vmarg); // bad array 2389 __ movptr( rarg1_actual, vmarg); // bad array
1182 __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? 2390 __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining?
1183 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); 2391 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
1184 2392 #undef UNPUSH_RSI
1185 #undef UNPUSH_RSI_RDI 2393
1186 } 2394 break;
1187 break; 2395 }
1188 2396
1189 case _adapter_flyby: 2397 default:
1190 case _adapter_ricochet: 2398 // do not require all platforms to recognize all adapter types
1191 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI 2399 __ nop();
1192 break; 2400 return;
1193
1194 default: ShouldNotReachHere();
1195 } 2401 }
1196 __ hlt(); 2402 __ hlt();
1197 2403
1198 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); 2404 address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
1199 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI 2405 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
