
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
|
45 __ align(wordSize); |
|
46 address target = __ pc() + sizeof(Data); |
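  // Fill the reserved area with nops until at least sizeof(Data) bytes have been emitted.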
|
47 while (__ pc() < target) { |
|
48 __ nop(); |
|
49 __ align(wordSize); |
|
50 } |
|
51 |
|
52 MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); |
|
53 me->set_end_address(__ pc()); // set a temporary end_address |
|
54 me->set_from_interpreted_entry(interpreted_entry); |
|
55 me->set_type_checking_entry(NULL); |
|
56 |
|
57 return (address) me; |
|
58 } |
|
59 |
|
60 MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, |
|
61 address start_addr) { |
|
62 MethodHandleEntry* me = (MethodHandleEntry*) start_addr; |
|
63 assert(me->end_address() == start_addr, "valid ME"); |
|
64 |
|
65 // Fill in the real end_address: |
|
66 __ align(wordSize); |
|
67 me->set_end_address(__ pc()); |
|
68 |
|
69 return me; |
|
70 } |
|
71 |
|
72 // stack walking support |
|
73 |
|
74 frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { |
|
75 //RicochetFrame* f = RicochetFrame::from_frame(fr); |
|
76 // Cf. is_interpreted_frame path of frame::sender |
|
77 intptr_t* younger_sp = fr.sp(); |
|
78 intptr_t* sp = fr.sender_sp(); |
|
79 map->make_integer_regs_unsaved(); |
|
80 map->shift_window(sp, younger_sp); |
|
81 bool this_frame_adjusted_stack = true; // I5_savedSP is live in this RF |
|
82 return frame(sp, younger_sp, this_frame_adjusted_stack); |
|
83 } |
|
84 |
|
85 void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { |
|
86 ResourceMark rm; |
|
87 RicochetFrame* f = RicochetFrame::from_frame(fr); |
|
88 |
|
89 // pick up the argument type descriptor: |
|
90 Thread* thread = Thread::current(); |
|
91 Handle cookie(thread, f->compute_saved_args_layout(true, true)); |
|
92 |
|
93 // process fixed part |
|
94 blk->do_oop((oop*)f->saved_target_addr()); |
|
95 blk->do_oop((oop*)f->saved_args_layout_addr()); |
|
96 |
|
97 // process variable arguments: |
|
98 if (cookie.is_null()) return; // no arguments to describe |
|
99 |
|
100 // the cookie is actually the invokeExact method for my target |
|
101 // its argument signature is what I'm interested in |
|
102 assert(cookie->is_method(), ""); |
|
103 methodHandle invoker(thread, methodOop(cookie())); |
|
104 assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); |
|
105 assert(!invoker->is_static(), "must have MH argument"); |
|
106 int slot_count = invoker->size_of_parameters(); |
|
107 assert(slot_count >= 1, "must include 'this'"); |
|
108 intptr_t* base = f->saved_args_base(); |
|
109 intptr_t* retval = NULL; |
|
110 if (f->has_return_value_slot()) |
|
111 retval = f->return_value_slot_addr(); |
|
112 int slot_num = slot_count - 1; |
|
113 intptr_t* loc = &base[slot_num]; |
|
114 //blk->do_oop((oop*) loc); // original target, which is irrelevant |
|
115 int arg_num = 0; |
|
116 for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { |
|
117 if (ss.at_return_type()) continue; |
|
118 BasicType ptype = ss.type(); |
|
119 if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT |
|
120 assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); |
|
121 slot_num -= type2size[ptype]; |
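    // Longs and doubles occupy two interpreter slots (type2size == 2), so step down by the full size.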
|
122 loc = &base[slot_num]; |
|
123 bool is_oop = (ptype == T_OBJECT && loc != retval); |
|
124 if (is_oop) blk->do_oop((oop*)loc); |
|
125 arg_num += 1; |
|
126 } |
|
127 assert(slot_num == 0, "must have processed all the arguments"); |
|
128 } |
|
129 |
|
130 // Ricochet Frames |
|
131 const Register MethodHandles::RicochetFrame::L1_continuation = L1; |
|
132 const Register MethodHandles::RicochetFrame::L2_saved_target = L2; |
|
133 const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3; |
|
134 const Register MethodHandles::RicochetFrame::L4_saved_args_base = L4; // cf. Gargs = G4 |
|
135 const Register MethodHandles::RicochetFrame::L5_conversion = L5; |
|
136 #ifdef ASSERT |
|
137 const Register MethodHandles::RicochetFrame::L0_magic_number_1 = L0; |
|
138 #endif //ASSERT |
|
139 |
|
140 oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) { |
|
141 if (read_cache) { |
|
142 oop cookie = saved_args_layout(); |
|
143 if (cookie != NULL) return cookie; |
|
144 } |
|
145 oop target = saved_target(); |
|
146 oop mtype = java_lang_invoke_MethodHandle::type(target); |
|
147 oop mtform = java_lang_invoke_MethodType::form(mtype); |
|
148 oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); |
|
149 if (write_cache) { |
|
150 (*saved_args_layout_addr()) = cookie; |
|
151 } |
|
152 return cookie; |
|
153 } |
|
154 |
|
155 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, |
|
156 // output params: |
|
157 int* bounce_offset, |
|
158 int* exception_offset, |
|
159 int* frame_size_in_words) { |
|
160 (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; |
|
161 |
|
162 address start = __ pc(); |
|
163 |
|
164 #ifdef ASSERT |
|
165 __ illtrap(0); __ illtrap(0); __ illtrap(0); |
|
166 // here's a hint of something special: |
|
167 __ set(MAGIC_NUMBER_1, G0); |
|
168 __ set(MAGIC_NUMBER_2, G0); |
|
169 #endif //ASSERT |
|
170 __ illtrap(0); // not reached |
|
171 |
|
172 // Return values are in registers. |
|
173 // L1_continuation contains a cleanup continuation we must return |
|
174 // to. |
|
175 |
|
176 (*bounce_offset) = __ pc() - start; |
|
177 BLOCK_COMMENT("ricochet_blob.bounce"); |
|
178 |
|
179 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); |
|
180 trace_method_handle(_masm, "return/ricochet_blob.bounce"); |
|
181 |
|
182 __ JMP(L1_continuation, 0); |
|
183 __ delayed()->nop(); |
|
184 __ illtrap(0); |
|
185 |
|
186 DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0)); |
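  // Writes to G0 are discarded on SPARC, so this 'set' only plants a recognizable marker instruction in the debug code stream.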
|
187 |
|
188 (*exception_offset) = __ pc() - start; |
|
189 BLOCK_COMMENT("ricochet_blob.exception"); |
|
190 |
|
191 // compare this to Interpreter::rethrow_exception_entry, which is parallel code |
|
192 // for example, see TemplateInterpreterGenerator::generate_throw_exception |
|
193 // Live registers in: |
|
194 // Oexception (O0): exception |
|
195 // Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr) |
|
196 __ verify_oop(Oexception); |
|
197 |
|
198 // Take down the frame. |
|
199 |
|
200 // Cf. InterpreterMacroAssembler::remove_activation. |
|
201 leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7); |
|
202 |
|
203 // We are done with this activation frame; find out where to go next. |
|
204 // The continuation point will be an exception handler, which expects |
|
205 // the following registers set up: |
|
206 // |
|
207 // Oexception: exception |
|
208 // Oissuing_pc: the local call that threw exception |
|
209 // Other On: garbage |
|
210 // In/Ln: the contents of the caller's register window |
|
211 // |
|
212 // We do the required restore at the last possible moment, because we |
|
213 // need to preserve some state across a runtime call. |
|
214 // (Remember that the caller activation is unknown--it might not be |
|
215 // interpreted, so things like Lscratch are useless in the caller.) |
|
216 __ mov(Oexception, Oexception ->after_save()); // get exception in I0 so it will be on O0 after restore |
|
217 __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller |
|
218 __ call_VM_leaf(L7_thread_cache, |
|
219 CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), |
|
220 G2_thread, Oissuing_pc->after_save()); |
|
221 |
|
222 // The caller's SP was adjusted upon method entry to accommodate |
|
223 // the callee's non-argument locals. Undo that adjustment. |
|
224 __ JMP(O0, 0); // return exception handler in caller |
|
225 __ delayed()->restore(I5_savedSP, G0, SP); |
|
226 |
|
227 // (same old exception object is already in Oexception; see above) |
|
228 // Note that an "issuing PC" is actually the next PC after the call |
|
229 } |
|
230 |
|
231 void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, |
|
232 Register recv_reg, |
|
233 Register argv_reg, |
|
234 address return_handler) { |
|
235 // does not include the __ save() |
|
236 assert(argv_reg == Gargs, ""); |
|
237 Address G3_mh_vmtarget( recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes()); |
|
238 Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); |
|
239 |
|
240 // Create the RicochetFrame. |
|
241 // Unlike on x86 we can store all required information in local |
|
242 // registers. |
|
243 BLOCK_COMMENT("push RicochetFrame {"); |
|
244 __ set(ExternalAddress(return_handler), L1_continuation); |
|
245 __ load_heap_oop(G3_mh_vmtarget, L2_saved_target); |
|
246 __ mov(G0, L3_saved_args_layout); |
|
247 __ mov(Gargs, L4_saved_args_base); |
|
248 __ lduw(G3_amh_conversion, L5_conversion); // 32-bit field |
|
249 // I5, I6, I7 are already set up |
|
250 DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1, L0_magic_number_1)); |
|
251 BLOCK_COMMENT("} RicochetFrame"); |
|
252 } |
|
253 |
|
254 void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, |
|
255 Register recv_reg, |
|
256 Register new_sp_reg, |
|
257 Register sender_pc_reg) { |
|
258 assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place"); |
|
259 assert(sender_pc_reg == I7, "in a fixed place"); |
|
260 // does not include the __ ret() & __ restore() |
|
261 assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg); |
|
262 // Take down the frame. |
|
263 // Cf. InterpreterMacroAssembler::remove_activation. |
|
264 BLOCK_COMMENT("end_ricochet_frame {"); |
|
265 if (recv_reg->is_valid()) |
|
266 __ mov(L2_saved_target, recv_reg); |
|
267 BLOCK_COMMENT("} end_ricochet_frame"); |
|
268 } |
|
269 |
|
270 // Emit code to verify that FP is pointing at a valid ricochet frame. |
|
271 #ifndef PRODUCT |
|
272 enum { |
|
273 ARG_LIMIT = 255, SLOP = 45, |
|
274 // use this parameter for checking for garbage stack movements: |
|
275 UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) |
|
276 // the slop defends against false alarms due to fencepost errors |
|
277 }; |
|
278 #endif |
|
279 |
|
280 #ifdef ASSERT |
|
281 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { |
|
282 // The stack should look like this: |
|
283 // ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF] |
|
284 // Check various invariants. |
|
285 |
|
286 Register O7_temp = O7, O5_temp = O5; |
|
287 |
|
288 Label L_ok_1, L_ok_2, L_ok_3, L_ok_4; |
|
289 BLOCK_COMMENT("verify_clean {"); |
|
290 // Magic numbers must check out: |
|
291 __ set((int32_t) MAGIC_NUMBER_1, O7_temp); |
|
292 __ cmp_and_br_short(O7_temp, L0_magic_number_1, Assembler::equal, Assembler::pt, L_ok_1); |
|
293 __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found"); |
|
294 |
|
295 __ BIND(L_ok_1); |
|
296 |
|
297 // Arguments pointer must look reasonable: |
|
298 #ifdef _LP64 |
|
299 Register FP_temp = O5_temp; |
|
300 __ add(FP, STACK_BIAS, FP_temp); |
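  // On 64-bit SPARC the frame pointer is biased; adding STACK_BIAS yields the true frame address for the comparison below.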
|
301 #else |
|
302 Register FP_temp = FP; |
|
303 #endif |
|
304 __ cmp_and_brx_short(L4_saved_args_base, FP_temp, Assembler::greaterEqualUnsigned, Assembler::pt, L_ok_2); |
|
305 __ stop("damaged ricochet frame: L4 < FP"); |
|
306 |
|
307 __ BIND(L_ok_2); |
|
308 // Disable until we decide on its fate |
|
309 // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp); |
|
310 // __ cmp(O7_temp, FP_temp); |
|
311 // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3); |
|
312 // __ delayed()->nop(); |
|
313 // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP"); |
|
314 |
|
315 __ BIND(L_ok_3); |
|
316 extract_conversion_dest_type(_masm, L5_conversion, O7_temp); |
|
317 __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4); |
|
318 extract_conversion_vminfo(_masm, L5_conversion, O5_temp); |
|
319 __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp); |
|
320 assert(Assembler::is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13"); |
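  // SPARC compare immediates are 13-bit signed values, so the placeholder must fit in simm13.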
|
321 __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4); |
|
322 __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found"); |
|
323 __ BIND(L_ok_4); |
|
324 BLOCK_COMMENT("} verify_clean"); |
|
325 } |
|
326 #endif //ASSERT |
|
327 |

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
                 "MH argument is a Class");
  __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
}

335 void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) { |
|
336 assert(CONV_VMINFO_SHIFT == 0, "preshifted"); |
|
337 assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load"); |
|
338 __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg); |
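  // SPARC is big-endian: the least significant byte of the 32-bit conversion word sits at offset BytesPerInt - 1, so a single byte load fetches the (unshifted) vminfo field.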
|
339 } |
|
340 |
|
341 void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) { |
|
342 assert(CONV_VMINFO_SHIFT == 0, "preshifted"); |
|
343 __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg); |
|
344 } |
|
345 |
|
346 void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) { |
|
347 __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg); |
|
348 __ and3(reg, 0x0F, reg); |
|
349 } |
|
350 |
|
351 void MethodHandles::load_stack_move(MacroAssembler* _masm, |
|
352 Address G3_amh_conversion, |
|
353 Register stack_move_reg) { |
|
354 BLOCK_COMMENT("load_stack_move {"); |
|
355 __ ldsw(G3_amh_conversion, stack_move_reg); |
|
356 __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg); |
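  // Arithmetic shift keeps the sign: the stack-move field is a signed slot count.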
|
#ifdef ASSERT
  if (VerifyMethodHandles) {
    Label L_ok, L_bad;
    int32_t stack_move_limit = 0x0800;  // extra-large
    __ cmp_and_br_short(stack_move_reg, stack_move_limit, Assembler::greaterEqual, Assembler::pn, L_bad);
    __ cmp(stack_move_reg, -stack_move_limit);
    __ br(Assembler::greater, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ BIND(L_bad);
    __ stop("load_stack_move of garbage value");
    __ BIND(L_ok);
  }
#endif
  BLOCK_COMMENT("} load_stack_move");
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
374 void MethodHandles::RicochetFrame::verify() const { |
|
375 assert(magic_number_1() == MAGIC_NUMBER_1, ""); |
|
376 if (!Universe::heap()->is_gc_active()) { |
|
377 if (saved_args_layout() != NULL) { |
|
378 assert(saved_args_layout()->is_method(), "must be valid oop"); |
|
379 } |
|
380 if (saved_target() != NULL) { |
|
381 assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); |
|
382 } |
|
383 } |
|
384 int conv_op = adapter_conversion_op(conversion()); |
|
385 assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || |
|
386 conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || |
|
387 conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, |
|
388 "must be a sane conversion"); |
|
389 if (has_return_value_slot()) { |
|
390 assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); |
|
391 } |
|
392 } |
|
393 |
|
394 void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) { |
|
395 // Verify that argslot lies within (Gargs, FP]. |
|
396 Label L_ok, L_bad; |
|
397 BLOCK_COMMENT("verify_argslot {"); |
|
398 __ cmp_and_brx_short(Gargs, argslot_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad); |
|
399 __ add(FP, STACK_BIAS, temp_reg); // STACK_BIAS is zero on !_LP64 |
|
400 __ cmp_and_brx_short(argslot_reg, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok); |
|
401 __ BIND(L_bad); |
|
402 __ stop(error_message); |
|
403 __ BIND(L_ok); |
|
404 BLOCK_COMMENT("} verify_argslot"); |
|
405 } |
|
406 |
|
407 void MethodHandles::verify_argslots(MacroAssembler* _masm, |
|
408 RegisterOrConstant arg_slots, |
|
409 Register arg_slot_base_reg, |
|
410 Register temp_reg, |
|
411 Register temp2_reg, |
|
412 bool negate_argslots, |
|
413 const char* error_message) { |
|
414 // Verify that [argslot..argslot+size) lies within (Gargs, FP). |
|
415 Label L_ok, L_bad; |
|
416 BLOCK_COMMENT("verify_argslots {"); |
|
417 if (negate_argslots) { |
|
418 if (arg_slots.is_constant()) { |
|
419 arg_slots = -1 * arg_slots.as_constant(); |
|
420 } else { |
|
421 __ neg(arg_slots.as_register(), temp_reg); |
|
422 arg_slots = temp_reg; |
|
423 } |
|
424 } |
|
425 __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg); |
|
426 __ add(FP, STACK_BIAS, temp2_reg); // STACK_BIAS is zero on !_LP64 |
|
427 __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad); |
|
428 // Gargs points to the first word so adjust by BytesPerWord |
|
429 __ add(arg_slot_base_reg, BytesPerWord, temp_reg); |
|
430 __ cmp_and_brx_short(Gargs, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok); |
|
431 __ BIND(L_bad); |
|
432 __ stop(error_message); |
|
433 __ BIND(L_ok); |
|
434 BLOCK_COMMENT("} verify_argslots"); |
|
435 } |
|
436 |
|
437 // Make sure that arg_slots has the same sign as the given direction. |
|
438 // If (and only if) arg_slots is an assembly-time constant, also allow it to be zero. |
|
439 void MethodHandles::verify_stack_move(MacroAssembler* _masm, |
|
440 RegisterOrConstant arg_slots, int direction) { |
|
441 enum { UNREASONABLE_STACK_MOVE = 256 * 4 }; // limit of 255 arguments |
|
442 bool allow_zero = arg_slots.is_constant(); |
|
443 if (direction == 0) { direction = +1; allow_zero = true; } |
|
444 assert(stack_move_unit() == -1, "else add extra checks here"); |
|
445 if (arg_slots.is_register()) { |
|
446 Label L_ok, L_bad; |
|
447 BLOCK_COMMENT("verify_stack_move {"); |
|
448 // __ btst(-stack_move_unit() - 1, arg_slots.as_register()); // no need |
|
449 // __ br(Assembler::notZero, false, Assembler::pn, L_bad); |
|
450 // __ delayed()->nop(); |
|
451 __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD); |
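    // (int32_t) NULL_WORD is simply zero; the comparison checks the sign of the slot count.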
|
452 if (direction > 0) { |
|
453 __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad); |
|
454 __ delayed()->nop(); |
|
455 __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); |
|
456 __ br(Assembler::less, false, Assembler::pn, L_ok); |
|
457 __ delayed()->nop(); |
|
458 } else { |
|
459 __ br(allow_zero ? Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad); |
|
460 __ delayed()->nop(); |
|
461 __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); |
|
462 __ br(Assembler::greater, false, Assembler::pn, L_ok); |
|
463 __ delayed()->nop(); |
|
464 } |
|
465 __ BIND(L_bad); |
|
466 if (direction > 0) |
|
467 __ stop("assert arg_slots > 0"); |
|
468 else |
|
469 __ stop("assert arg_slots < 0"); |
|
470 __ BIND(L_ok); |
|
471 BLOCK_COMMENT("} verify_stack_move"); |
|
472 } else { |
|
473 intptr_t size = arg_slots.as_constant(); |
|
474 if (direction < 0) size = -size; |
|
475 assert(size >= 0, "correct direction of constant move"); |
|
476 assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); |
|
477 } |
|
478 } |
|
479 |
|
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, KlassHandle klass,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  oop* klass_addr = klass.raw_value();
  assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
         klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
         "must be one of the SystemDictionaryHandles");
  bool did_save = false;
  if (temp_reg == noreg || temp2_reg == noreg) {
    temp_reg  = L1;
    temp2_reg = L2;
    __ save_frame_and_mov(0, obj_reg, L0);
    obj_reg = L0;
    did_save = true;
  }
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  __ br_null_short(obj_reg, Assembler::pn, L_bad);
  __ load_klass(obj_reg, temp_reg);
  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
  __ set(ExternalAddress(klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  __ BIND(L_bad);
  if (did_save)  __ restore();
  __ STOP(error_message);
  __ BIND(L_ok);
  if (did_save)  __ restore();
  BLOCK_COMMENT("} verify_klass");
}
|
101 |
|
102 void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { |
|
103 Label L; |
|
104 BLOCK_COMMENT("verify_ref_kind {"); |
|
105 __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp); |
|
106 __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, temp); |
|
107 __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK, temp); |
|
108 __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L); |
|
109 { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); |
|
110 jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); |
|
111 if (ref_kind == JVM_REF_invokeVirtual || |
|
112 ref_kind == JVM_REF_invokeSpecial) |
|
113 // could do this for all ref_kinds, but would explode assembly code size |
|
114 trace_method_handle(_masm, buf); |
|
115 __ STOP(buf); |
|
116 } |
|
117 BLOCK_COMMENT("} verify_ref_kind"); |
|
118 __ bind(L); |
|
119 } |
|
120 |
#endif // ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                            bool for_compiler_entry) {
  assert(method == G5_method, "interpreter calling convention");
  __ verify_oop(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    __ verify_thread();
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    __ ld(interp_only, temp);
    __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
    __ jmp(target, 0);
    __ delayed()->nop();
    __ BIND(run_compiled_code);
    // Note: we could fill some delay slots here, but
    // it doesn't matter, since this is interpreter code.
  }

  const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() :
                                                     methodOopDesc::from_interpreted_offset();
  __ ld_ptr(G5_method, in_bytes(entry_offset), target);
  __ jmp(target, 0);
  __ delayed()->nop();
}
530 |
151 |
|
152 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, |
|
153 Register recv, Register method_temp, |
|
154 Register temp2, Register temp3, |
|
155 bool for_compiler_entry) { |
|
156 BLOCK_COMMENT("jump_to_lambda_form {"); |
|
157 // This is the initial entry point of a lazy method handle. |
|
158 // After type checking, it picks up the invoker from the LambdaForm. |
|
159 assert_different_registers(recv, method_temp, temp2, temp3); |
|
160 assert(method_temp == G5_method, "required register for loading method"); |
|
161 |
|
162 //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); }); |
|
163 |
|
164 // Load the invoker, as MH -> MH.form -> LF.vmentry |
|
165 __ verify_oop(recv); |
|
166 __ load_heap_oop(Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), method_temp); |
|
167 __ verify_oop(method_temp); |
|
168 __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp); |
|
169 __ verify_oop(method_temp); |
|
170 // the following assumes that a methodOop is normally compressed in the vmtarget field: |
|
171 __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp); |
|
172 __ verify_oop(method_temp); |
|
173 |
|
174 if (VerifyMethodHandles && !for_compiler_entry) { |
|
175 // make sure recv is already on stack |
|
176 __ load_sized_value(Address(method_temp, methodOopDesc::size_of_parameters_offset()), |
|
177 temp2, |
|
178 sizeof(u2), /*is_signed*/ false); |
|
179 // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); |
|
180 Label L; |
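  // Load what should be the receiver from the interpreter's argument area and compare it against 'recv'; stop if they differ.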
|
181 __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2); |
|
182 __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L); |
|
183 __ STOP("receiver not on stack"); |
|
184 __ BIND(L); |
|
185 } |
|
186 |
|
187 jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry); |
|
188 BLOCK_COMMENT("} jump_to_lambda_form"); |
|
189 } |
|
190 |
531 |
191 |
532 // Code generation |
192 // Code generation |
533 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { |
193 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, |
534 // I5_savedSP/O5_savedSP: sender SP (must preserve) |
194 vmIntrinsics::ID iid) { |
|
195 const bool not_for_compiler_entry = false; // this is the interpreter entry |
|
196 assert(is_signature_polymorphic(iid), "expected invoke iid"); |
|
197 if (iid == vmIntrinsics::_invokeGeneric || |
|
198 iid == vmIntrinsics::_compiledLambdaForm) { |
|
199 // Perhaps surprisingly, the symbolic references visible to Java are not directly used. |
|
200 // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. |
|
201 // They all allow an appendix argument. |
|
202 __ should_not_reach_here(); // empty stubs make SG sick |
|
203 return NULL; |
|
204 } |
|
205 |
|
206 // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted) |
|
207 // G5_method: methodOop |
535 // G4 (Gargs): incoming argument list (must preserve) |
208 // G4 (Gargs): incoming argument list (must preserve) |
536 // G5_method: invoke methodOop |
209 // O0: used as temp to hold mh or receiver |
537 // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots]) |
210 // O1, O4: garbage temps, blown away |
538 // O0, O1, O2, O3, O4: garbage temps, blown away |
211 Register O1_scratch = O1; |
539 Register O0_mtype = O0; |
212 Register O4_param_size = O4; // size of parameters |
540 Register O1_scratch = O1; |
213 |
541 Register O2_scratch = O2; |
214 address code_start = __ pc(); |
542 Register O3_scratch = O3; |
|
543 Register O4_argslot = O4; |
|
544 Register O4_argbase = O4; |
|
545 |
|
546 // emit WrongMethodType path first, to enable back-branch from main path |
|
547 Label wrong_method_type; |
|
548 __ bind(wrong_method_type); |
|
549 Label invoke_generic_slow_path; |
|
550 assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), ""); |
|
551 __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); |
|
552 __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact); |
|
553 __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path); |
|
554 __ delayed()->nop(); |
|
555 __ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType |
|
556 __ mov(G3_method_handle, G3_method_handle); // already in this register |
|
557 // O0 will be filled in with JavaThread in stub |
|
558 __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch); |
|
559 __ delayed()->nop(); |
|
560 |
215 |
561 // here's where control starts out: |
216 // here's where control starts out: |
562 __ align(CodeEntryAlignment); |
217 __ align(CodeEntryAlignment); |
563 address entry_point = __ pc(); |
218 address entry_point = __ pc(); |
564 |
219 |
565 // fetch the MethodType from the method handle |
|
566 // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list. |
|
567 // This would simplify several touchy bits of code. |
|
568 // See 6984712: JSR 292 method handle calls need a clean argument base pointer |
|
569 { |
|
570 Register tem = G5_method; |
|
571 for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { |
|
572 __ ld_ptr(Address(tem, *pchase), O0_mtype); |
|
573 tem = O0_mtype; // in case there is another indirection |
|
574 } |
|
575 } |
|
576 |
|
577 // given the MethodType, find out where the MH argument is buried |
|
578 __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot); |
|
579 __ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot); |
|
580 __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase); |
|
581 // Note: argument_address uses its input as a scratch register! |
|
582 Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize); |
|
583 __ ld_ptr(mh_receiver_slot_addr, G3_method_handle); |
|
584 |
|
585 trace_method_handle(_masm, "invokeExact"); |
|
586 |
|
587 __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type); |
|
588 |
|
589 // Nobody uses the MH receiver slot after this. Make sure. |
|
590 DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr)); |
|
591 |
|
592 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
593 |
|
594 // for invokeGeneric (only), apply argument and result conversions on the fly |
|
595 __ bind(invoke_generic_slow_path); |
|
596 #ifdef ASSERT |
|
597 if (VerifyMethodHandles) { |
220 if (VerifyMethodHandles) { |
598 Label L; |
221 Label L; |
|
222 BLOCK_COMMENT("verify_intrinsic_id {"); |
599 __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); |
223 __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); |
600 __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric); |
224 __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L); |
601 __ brx(Assembler::equal, false, Assembler::pt, L); |
225 if (iid == vmIntrinsics::_linkToVirtual || |
602 __ delayed()->nop(); |
226 iid == vmIntrinsics::_linkToSpecial) { |
603 __ stop("bad methodOop::intrinsic_id"); |
227 // could do this for all kinds, but would explode assembly code size |
|
228 trace_method_handle(_masm, "bad methodOop::intrinsic_id"); |
|
229 } |
|
230 __ STOP("bad methodOop::intrinsic_id"); |
604 __ bind(L); |
231 __ bind(L); |
605 } |
232 BLOCK_COMMENT("} verify_intrinsic_id"); |
606 #endif //ASSERT |
233 } |
607 |
234 |
608 // make room on the stack for another pointer: |
235 // First task: Find out how big the argument list is. |
609 insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch); |
236 Address O4_first_arg_addr; |
610 // load up an adapter from the calling type (Java weaves this) |
237 int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); |
611 Register O2_form = O2_scratch; |
238 assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); |
612 Register O3_adapter = O3_scratch; |
239 if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { |
613 __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); |
240 __ load_sized_value(Address(G5_method, methodOopDesc::size_of_parameters_offset()), |
614 __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); |
241 O4_param_size, |
615 __ verify_oop(O3_adapter); |
242 sizeof(u2), /*is_signed*/ false); |
616 __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize)); |
243 // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); |
617 // As a trusted first argument, pass the type being called, so the adapter knows |
244 O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1); |
618 // the actual types of the arguments and return values. |
245 } else { |
619 // (Generic invokers are shared among form-families of method-type.) |
246 DEBUG_ONLY(O4_param_size = noreg); |
620 __ st_ptr(O0_mtype, Address(O4_argbase, 0 * Interpreter::stackElementSize)); |
247 } |
621 // FIXME: assert that O3_adapter is of the right method-type. |
248 |
622 __ mov(O3_adapter, G3_method_handle); |
249 Register O0_mh = noreg; |
623 trace_method_handle(_masm, "invokeGeneric"); |
250 if (!is_signature_polymorphic_static(iid)) { |
624 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
251 __ ld_ptr(O4_first_arg_addr, O0_mh = O0); |
|
252 DEBUG_ONLY(O4_param_size = noreg); |
|
253 } |
|
254 |
|
255 // O4_first_arg_addr is live! |
|
256 |
|
257 if (TraceMethodHandles) { |
|
258 const char* name = vmIntrinsics::name_at(iid); |
|
259 if (*name == '_') name += 1; |
|
260 const size_t len = strlen(name) + 50; |
|
261 char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal); |
|
262 const char* suffix = ""; |
|
263 if (vmIntrinsics::method_for(iid) == NULL || |
|
264 !vmIntrinsics::method_for(iid)->access_flags().is_public()) { |
|
265 if (is_signature_polymorphic_static(iid)) |
|
266 suffix = "/static"; |
|
267 else |
|
268 suffix = "/private"; |
|
269 } |
|
270 jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix); |
|
271 if (O0_mh != noreg) |
|
272 __ mov(O0_mh, G3_method_handle); // make stub happy |
|
273 trace_method_handle(_masm, qname); |
|
274 } |
|
275 |
|
276 if (iid == vmIntrinsics::_invokeBasic) { |
|
277 generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry); |
|
278 |
|
279 } else { |
|
280 // Adjust argument list by popping the trailing MemberName argument. |
|
281 Register O0_recv = noreg; |
|
282 if (MethodHandles::ref_kind_has_receiver(ref_kind)) { |
|
283 // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. |
|
284 __ ld_ptr(O4_first_arg_addr, O0_recv = O0); |
|
285 DEBUG_ONLY(O4_param_size = noreg); |
|
286 } |
|
287 Register G5_member = G5_method; // MemberName ptr; incoming method ptr is dead now |
|
288 __ ld_ptr(__ argument_address(constant(0)), G5_member); |
|
289 __ add(Gargs, Interpreter::stackElementSize, Gargs); |
|
290 generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry); |
|
291 } |
|
292 |
|
293 if (PrintMethodHandleStubs) { |
|
294 address code_end = __ pc(); |
|
295 tty->print_cr("--------"); |
|
296 tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid)); |
|
297 Disassembler::decode(code_start, code_end); |
|
298 tty->cr(); |
|
299 } |
625 |
300 |
626 return entry_point; |
301 return entry_point; |
627 } |
302 } |
628 |
303 |
629 // Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. |
304 void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, |
630 static RegisterOrConstant constant(int value) { |
305 vmIntrinsics::ID iid, |
631 return RegisterOrConstant(value); |
306 Register receiver_reg, |
632 } |
307 Register member_reg, |
633 |
308 bool for_compiler_entry) { |
634 static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) { |
309 assert(is_signature_polymorphic(iid), "expected invoke iid"); |
635 __ ldsw(vmargslot_addr, result); |
310 // temps used in this code are not used in *either* compiled or interpreted calling sequences |
636 } |
311 Register temp1 = (for_compiler_entry ? G1_scratch : O1); |
637 |
312 Register temp2 = (for_compiler_entry ? G4_scratch : O4); |
638 static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm, |
313 Register temp3 = G3_scratch; |
639 RegisterOrConstant arg_slots, |
314 Register temp4 = (for_compiler_entry ? noreg : O2); |
640 Register temp_reg, Register temp2_reg) { |
315 if (for_compiler_entry) { |
641 // Keep the stack pointer 2*wordSize aligned. |
316 assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment"); |
642 const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); |
317 assert_different_registers(temp1, O0, O1, O2, O3, O4, O5); |
643 if (arg_slots.is_constant()) { |
318 assert_different_registers(temp2, O0, O1, O2, O3, O4, O5); |
644 const int offset = arg_slots.as_constant() << LogBytesPerWord; |
319 assert_different_registers(temp3, O0, O1, O2, O3, O4, O5); |
645 const int masked_offset = round_to(offset, 2 * BytesPerWord); |
320 assert_different_registers(temp4, O0, O1, O2, O3, O4, O5); |
646 const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask; |
321 } |
647 assert(masked_offset == masked_offset2, "must agree"); |
322 if (receiver_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg); |
648 __ sub(Gargs, offset, Gargs); |
323 if (member_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, member_reg); |
649 __ sub(SP, masked_offset, SP ); |
324 if (!for_compiler_entry) assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP); // don't trash lastSP |
650 return offset; |
325 |
|
326 if (iid == vmIntrinsics::_invokeBasic) { |
|
327 // indirect through MH.form.vmentry.vmtarget |
|
328 jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry); |
|
329 |
651 } else { |
330 } else { |
652 #ifdef ASSERT |
331 // The method is a member invoker used by direct method handles. |
|
332 if (VerifyMethodHandles) { |
|
333 // make sure the trailing argument really is a MemberName (caller responsibility) |
|
334 verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(), |
|
335 temp1, temp2, |
|
336 "MemberName required for invokeVirtual etc."); |
|
337 } |
|
338 |
|
339 Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes())); |
|
340 Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes())); |
|
341 Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())); |
|
342 |
|
343 Register temp1_recv_klass = temp1; |
|
344 if (iid != vmIntrinsics::_linkToStatic) { |
|
345 __ verify_oop(receiver_reg); |
|
346 if (iid == vmIntrinsics::_linkToSpecial) { |
|
347 // Don't actually load the klass; just null-check the receiver. |
|
348 __ null_check(receiver_reg); |
|
349 } else { |
|
350 // load receiver klass itself |
|
351 __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); |
|
352 __ load_klass(receiver_reg, temp1_recv_klass); |
|
353 __ verify_oop(temp1_recv_klass); |
|
354 } |
|
355 BLOCK_COMMENT("check_receiver {"); |
|
356 // The receiver for the MemberName must be in receiver_reg. |
|
357 // Check the receiver against the MemberName.clazz |
|
358 if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { |
|
359 // Did not load it above... |
|
360 __ load_klass(receiver_reg, temp1_recv_klass); |
|
361 __ verify_oop(temp1_recv_klass); |
|
362 } |
|
363 if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { |
|
364 Label L_ok; |
|
365 Register temp2_defc = temp2; |
|
366 __ load_heap_oop(member_clazz, temp2_defc); |
|
367 load_klass_from_Class(_masm, temp2_defc, temp3, temp4); |
|
368 __ verify_oop(temp2_defc); |
|
369 __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok); |
|
370 // If we get here, the type check failed! |
|
371 __ STOP("receiver class disagrees with MemberName.clazz"); |
|
372 __ bind(L_ok); |
|
373 } |
|
374 BLOCK_COMMENT("} check_receiver"); |
|
375 } |
|
376 if (iid == vmIntrinsics::_linkToSpecial || |
|
377 iid == vmIntrinsics::_linkToStatic) { |
|
378 DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass |
|
379 } |
|
380 |
|
381 // Live registers at this point: |
|
382 // member_reg - MemberName that was the trailing argument |
|
383 // temp1_recv_klass - klass of stacked receiver, if needed |
|
384 // O5_savedSP - interpreter linkage (if interpreted) |
|
385 // O0..O7,G1,G4 - compiler arguments (if compiled) |
|
386 |
|
387 bool method_is_live = false; |
|
388 switch (iid) { |
|
389 case vmIntrinsics::_linkToSpecial: |
|
390 if (VerifyMethodHandles) { |
|
391 verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); |
|
392 } |
|
393 __ load_heap_oop(member_vmtarget, G5_method); |
|
394 method_is_live = true; |
|
395 break; |
|
396 |
|
397 case vmIntrinsics::_linkToStatic: |
|
398 if (VerifyMethodHandles) { |
|
399 verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); |
|
400 } |
|
401 __ load_heap_oop(member_vmtarget, G5_method); |
|
402 method_is_live = true; |
|
403 break; |
|
404 |
|
405 case vmIntrinsics::_linkToVirtual: |
653 { |
406 { |
654 Label L_ok; |
407 // same as TemplateTable::invokevirtual, |
655 __ cmp_and_br_short(arg_slots.as_register(), 0, Assembler::greaterEqual, Assembler::pt, L_ok); |
408 // minus the CP setup and profiling: |
656 __ stop("negative arg_slots"); |
409 |
657 __ bind(L_ok); |
410 if (VerifyMethodHandles) { |
658 } |
411 verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); |
659 #endif |
412 } |
660 __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg); |
413 |
661 __ add( temp_reg, 1*BytesPerWord, temp2_reg); |
414 // pick out the vtable index from the MemberName, and then we can discard it: |
662 __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg); |
415 Register temp2_index = temp2; |
663 __ sub(Gargs, temp_reg, Gargs); |
416 __ ld_ptr(member_vmindex, temp2_index); |
664 __ sub(SP, temp2_reg, SP ); |
417 |
665 return temp_reg; |
418 if (VerifyMethodHandles) { |
666 } |
419 Label L_index_ok; |
667 } |
420 __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok); |
668 |
421 __ STOP("no virtual index"); |
669 static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm, |
422 __ BIND(L_index_ok); |
670 RegisterOrConstant arg_slots, |
423 } |
671 Register temp_reg, Register temp2_reg) { |
424 |
672 // Keep the stack pointer 2*wordSize aligned. |
425 // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget |
673 const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); |
426 // at this point. And VerifyMethodHandles has already checked clazz, if needed. |
674 if (arg_slots.is_constant()) { |
427 |
675 const int offset = arg_slots.as_constant() << LogBytesPerWord; |
428 // get target methodOop & entry point |
676 const int masked_offset = offset & ~TwoWordAlignmentMask; |
429 __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method); |
677 __ add(Gargs, offset, Gargs); |
430 method_is_live = true; |
678 __ add(SP, masked_offset, SP ); |
431 break; |
679 return offset; |
432 } |
680 } else { |
433 |
681 __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg); |
434 case vmIntrinsics::_linkToInterface: |
682 __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg); |
435 { |
683 __ add(Gargs, temp_reg, Gargs); |
436 // same as TemplateTable::invokeinterface |
684 __ add(SP, temp2_reg, SP ); |
437 // (minus the CP setup and profiling, with different argument motion) |
685 return temp_reg; |
438 if (VerifyMethodHandles) { |
686 } |
439 verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); |
687 } |
440 } |
688 |
441 |
689 // Helper to insert argument slots into the stack. |
442 Register temp3_intf = temp3; |
690 // arg_slots must be a multiple of stack_move_unit() and < 0 |
443 __ load_heap_oop(member_clazz, temp3_intf); |
691 // argslot_reg is decremented to point to the new (shifted) location of the argslot |
444 load_klass_from_Class(_masm, temp3_intf, temp2, temp4); |
692 // But, temp_reg ends up holding the original value of argslot_reg. |
445 __ verify_oop(temp3_intf); |
693 void MethodHandles::insert_arg_slots(MacroAssembler* _masm, |
446 |
694 RegisterOrConstant arg_slots, |
447 Register G5_index = G5_method; |
695 Register argslot_reg, |
448 __ ld_ptr(member_vmindex, G5_index); |
696 Register temp_reg, Register temp2_reg, Register temp3_reg) { |
449 if (VerifyMethodHandles) { |
697 // allow constant zero |
450 Label L; |
698 if (arg_slots.is_constant() && arg_slots.as_constant() == 0) |
451 __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L); |
699 return; |
452 __ STOP("invalid vtable index for MH.invokeInterface"); |
700 |
453 __ bind(L); |
701 assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, |
454 } |
702 (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); |
455 |
703 |
456 // given intf, index, and recv klass, dispatch to the implementation method |
704 BLOCK_COMMENT("insert_arg_slots {"); |
457 Label L_no_such_interface; |
705 if (VerifyMethodHandles) |
458 Register no_sethi_temp = noreg; |
706 verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame"); |
459 __ lookup_interface_method(temp1_recv_klass, temp3_intf, |
707 if (VerifyMethodHandles) |
460 // note: next two args must be the same: |
708 verify_stack_move(_masm, arg_slots, -1); |
461 G5_index, G5_method, |
709 |
462 temp2, no_sethi_temp, |
710 // Make space on the stack for the inserted argument(s). |
463 L_no_such_interface); |
711 // Then pull down everything shallower than argslot_reg. |
464 |
712 // The stacked return address gets pulled down with everything else. |
465 __ verify_oop(G5_method); |
713 // That is, copy [sp, argslot) downward by -size words. In pseudo-code: |
466 jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry); |
714 // sp -= size; |
467 |
715 // for (temp = sp + size; temp < argslot; temp++) |
468 __ bind(L_no_such_interface); |
716 // temp[-size] = temp[0] |
469 AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry()); |
717 // argslot -= size; |
470 __ jump_to(icce, temp3); |
718 |
|
719 // offset is temp3_reg in case of arg_slots being a register. |
|
720 RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg); |
|
721 __ sub(Gargs, offset, temp_reg); // source pointer for copy |
|
722 |
|
723 { |
|
724 Label loop; |
|
725 __ BIND(loop); |
|
726 // pull one word down each time through the loop |
|
727 __ ld_ptr( Address(temp_reg, 0 ), temp2_reg); |
|
728 __ st_ptr(temp2_reg, Address(temp_reg, offset) ); |
|
729 __ add(temp_reg, wordSize, temp_reg); |
|
730 __ cmp_and_brx_short(temp_reg, argslot_reg, Assembler::lessUnsigned, Assembler::pt, loop); |
|
731 } |
|
732 |
|
733 // Now move the argslot down, to point to the opened-up space. |
|
734 __ add(argslot_reg, offset, argslot_reg); |
|
735 BLOCK_COMMENT("} insert_arg_slots"); |
|
736 } |
|
737 |
|
738 |
|
739 // Helper to remove argument slots from the stack. |
|
740 // arg_slots must be a multiple of stack_move_unit() and > 0 |
|
741 void MethodHandles::remove_arg_slots(MacroAssembler* _masm, |
|
742 RegisterOrConstant arg_slots, |
|
743 Register argslot_reg, |
|
744 Register temp_reg, Register temp2_reg, Register temp3_reg) { |
|
745 // allow constant zero |
|
746 if (arg_slots.is_constant() && arg_slots.as_constant() == 0) |
|
747 return; |
|
748 assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, |
|
749 (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); |
|
750 |
|
751 BLOCK_COMMENT("remove_arg_slots {"); |
|
752 if (VerifyMethodHandles) |
|
753 verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false, |
|
754 "deleted argument(s) must fall within current frame"); |
|
755 if (VerifyMethodHandles) |
|
756 verify_stack_move(_masm, arg_slots, +1); |
|
757 |
|
758 // Pull up everything shallower than argslot. |
|
759 // Then remove the excess space on the stack. |
|
760 // The stacked return address gets pulled up with everything else. |
|
761 // That is, copy [sp, argslot) upward by size words. In pseudo-code: |
|
762 // for (temp = argslot-1; temp >= sp; --temp) |
|
763 // temp[size] = temp[0] |
|
764 // argslot += size; |
|
765 // sp += size; |
|
766 |
|
767 RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg); |
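  // Convert the slot count into a byte offset (one word per interpreter stack slot).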
|
768 __ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy |
|
769 |
|
770 { |
|
771 Label L_loop; |
|
772 __ BIND(L_loop); |
|
773 // pull one word up each time through the loop |
|
774 __ ld_ptr( Address(temp_reg, 0 ), temp2_reg); |
|
775 __ st_ptr(temp2_reg, Address(temp_reg, offset) ); |
|
776 __ sub(temp_reg, wordSize, temp_reg); |
|
777 __ cmp_and_brx_short(temp_reg, Gargs, Assembler::greaterEqualUnsigned, Assembler::pt, L_loop); |
|
778 } |
|
779 |
|
780 // And adjust the argslot address to point at the deletion point. |
|
781 __ add(argslot_reg, offset, argslot_reg); |
|
782 |
|
783 // We don't need the offset at this point anymore, just adjust SP and Gargs. |
|
784 (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg); |
|
785 |
|
786 BLOCK_COMMENT("} remove_arg_slots"); |
|
787 } |
|
788 |
|
789 // Helper to copy argument slots to the top of the stack. |
|
790 // The sequence starts with argslot_reg and is counted by slot_count |
|
791 // slot_count must be a multiple of stack_move_unit() and >= 0 |
|
792 // This function blows the temps but does not change argslot_reg. |
|
793 void MethodHandles::push_arg_slots(MacroAssembler* _masm, |
|
794 Register argslot_reg, |
|
795 RegisterOrConstant slot_count, |
|
796 Register temp_reg, Register temp2_reg) { |
|
797 // allow constant zero |
|
798 if (slot_count.is_constant() && slot_count.as_constant() == 0) |
|
799 return; |
|
800 assert_different_registers(argslot_reg, temp_reg, temp2_reg, |
|
801 (!slot_count.is_register() ? Gargs : slot_count.as_register()), |
|
802 SP); |
|
803 assert(Interpreter::stackElementSize == wordSize, "else change this code"); |
|
804 |
|
805 BLOCK_COMMENT("push_arg_slots {"); |
|
806 if (VerifyMethodHandles) |
|
807 verify_stack_move(_masm, slot_count, 0); |
|
808 |
|
809 RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg); |
|
810 |
|
811 if (slot_count.is_constant()) { |
|
812 for (int i = slot_count.as_constant() - 1; i >= 0; i--) { |
|
813 __ ld_ptr( Address(argslot_reg, i * wordSize), temp_reg); |
|
814 __ st_ptr(temp_reg, Address(Gargs, i * wordSize)); |
|
815 } |
|
816 } else { |
|
817 Label L_plural, L_loop, L_break; |
|
818 // Emit code to dynamically check for the common cases, zero and one slot. |
|
819 __ cmp(slot_count.as_register(), (int32_t) 1); |
|
820 __ br(Assembler::greater, false, Assembler::pn, L_plural); |
|
821 __ delayed()->nop(); |
|
822 __ br(Assembler::less, false, Assembler::pn, L_break); |
|
823 __ delayed()->nop(); |
|
824 __ ld_ptr( Address(argslot_reg, 0), temp_reg); |
|
825 __ st_ptr(temp_reg, Address(Gargs, 0)); |
|
826 __ ba_short(L_break); |
|
827 __ BIND(L_plural); |
|
828 |
|
829 // Loop for 2 or more: |
|
830 // top = &argslot[slot_count] |
|
831 // while (top > argslot) *(--Gargs) = *(--top) |
|
832 Register top_reg = temp_reg; |
|
833 __ add(argslot_reg, offset, top_reg); |
|
834 __ add(Gargs, offset, Gargs ); // move back up again so we can go down |
|
835 __ BIND(L_loop); |
|
836 __ sub(top_reg, wordSize, top_reg); |
|
837 __ sub(Gargs, wordSize, Gargs ); |
|
838 __ ld_ptr( Address(top_reg, 0), temp2_reg); |
|
839 __ st_ptr(temp2_reg, Address(Gargs, 0)); |
|
840 __ cmp_and_brx_short(top_reg, argslot_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop); |
|
841 __ BIND(L_break); |
|
842 } |
|
843 BLOCK_COMMENT("} push_arg_slots"); |
|
844 } |
|
845 |
|
846 // in-place movement; no change to Gargs |
|
847 // blows temp_reg, temp2_reg |
|
848 void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, |
|
849 Register bottom_reg, // invariant |
|
850 Address top_addr, // can use temp_reg |
|
851 RegisterOrConstant positive_distance_in_slots, // destroyed if register |
|
852 Register temp_reg, Register temp2_reg) { |
|
853 assert_different_registers(bottom_reg, |
|
854 temp_reg, temp2_reg, |
|
855 positive_distance_in_slots.register_or_noreg()); |
|
856 BLOCK_COMMENT("move_arg_slots_up {"); |
|
857 Label L_loop, L_break; |
|
858 Register top_reg = temp_reg; |
|
859 if (!top_addr.is_same_address(Address(top_reg, 0))) { |
|
860 __ add(top_addr, top_reg); |
|
861 } |
|
862 // Detect empty (or broken) loop: |
|
863 #ifdef ASSERT |
|
864 if (VerifyMethodHandles) { |
|
865 // Verify that &bottom < &top (non-empty interval) |
|
866 Label L_ok, L_bad; |
|
867 if (positive_distance_in_slots.is_register()) { |
|
868 __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0); |
|
869 __ br(Assembler::lessEqual, false, Assembler::pn, L_bad); |
|
870 __ delayed()->nop(); |
871     } |

872     __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); |

873     __ BIND(L_bad); |

874     __ stop("valid bounds (copy up)"); |

875     __ BIND(L_ok); |

876   } |

877 #endif |

878   __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break); |

879   // work top down to bottom, copying contiguous data upwards |

880   // In pseudo-code: |

881   //   while (--top >= bottom) *(top + distance) = *(top + 0); |

882   RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg()); |

883   __ BIND(L_loop); |

884   __ sub(top_reg, wordSize, top_reg); |

885   __ ld_ptr(           Address(top_reg, 0     ), temp2_reg); |

886   __ st_ptr(temp2_reg, Address(top_reg, offset)            ); |

887   __ cmp_and_brx_short(top_reg, bottom_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop); |

888   assert(Interpreter::stackElementSize == wordSize, "else change loop"); |
889 __ BIND(L_break); |
|
890 BLOCK_COMMENT("} move_arg_slots_up"); |
|
891 } |
|
892 |
|
893 // in-place movement; no change to SP |
|
894 // blows temp_reg, temp2_reg |
|
895 void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, |
|
896 Address bottom_addr, // can use temp_reg |
|
897 Register top_reg, // invariant |
|
898 RegisterOrConstant negative_distance_in_slots, // destroyed if register |
|
899 Register temp_reg, Register temp2_reg) { |
|
900 assert_different_registers(top_reg, |
|
901 negative_distance_in_slots.register_or_noreg(), |
|
902 temp_reg, temp2_reg); |
|
903 BLOCK_COMMENT("move_arg_slots_down {"); |
|
904 Label L_loop, L_break; |
|
905 Register bottom_reg = temp_reg; |
|
906 if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) { |
|
907 __ add(bottom_addr, bottom_reg); |
|
908 } |
|
909 // Detect empty (or broken) loop: |
|
910 #ifdef ASSERT |
|
911 assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); |
|
912 if (VerifyMethodHandles) { |
|
913 // Verify that &bottom < &top (non-empty interval) |
|
914 Label L_ok, L_bad; |
|
915 if (negative_distance_in_slots.is_register()) { |
|
916 __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0); |
|
917 __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad); |
|
918 __ delayed()->nop(); |
|
919 } |
|
920 __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); |
|
921 __ BIND(L_bad); |
|
922 __ stop("valid bounds (copy down)"); |
|
923 __ BIND(L_ok); |
|
924 } |
|
925 #endif |
|
926 __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break); |
|
927 // work bottom up to top, copying contiguous data downwards |
|
928 // In pseudo-code: |
|
929 // while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++; |
|
930 RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg()); |
|
931 __ BIND(L_loop); |
|
932 __ ld_ptr( Address(bottom_reg, 0 ), temp2_reg); |
|
933 __ st_ptr(temp2_reg, Address(bottom_reg, offset) ); |
|
934 __ add(bottom_reg, wordSize, bottom_reg); |
|
935 __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_loop); |
|
936 assert(Interpreter::stackElementSize == wordSize, "else change loop"); |
|
937 __ BIND(L_break); |
|
938 BLOCK_COMMENT("} move_arg_slots_down"); |
|
939 } |
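// Taken together, move_arg_slots_up and move_arg_slots_down behave like an
// overlapping word-by-word copy (a memmove) over interpreter stack slots: the
// "up" variant walks from the top address down toward bottom_reg so a positive
// shift never clobbers unread words, while the "down" variant walks from
// bottom_addr up toward top_reg for a negative shift. Neither helper touches
// Gargs; callers adjust the argument base separately when required.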
|
940 |
|
941 // Copy from a field or array element to a stacked argument slot. |
|
942 // is_element (ignored) says whether caller is loading an array element instead of an instance field. |
|
943 void MethodHandles::move_typed_arg(MacroAssembler* _masm, |
|
944 BasicType type, bool is_element, |
|
945 Address value_src, Address slot_dest, |
|
946 Register temp_reg) { |
|
947 assert(!slot_dest.uses(temp_reg), "must be different register"); |
|
948 BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)"); |
|
949 if (type == T_OBJECT || type == T_ARRAY) { |
|
950 __ load_heap_oop(value_src, temp_reg); |
|
951 __ verify_oop(temp_reg); |
|
952 __ st_ptr(temp_reg, slot_dest); |
|
953 } else if (type != T_VOID) { |
|
954 int arg_size = type2aelembytes(type); |
|
955 bool arg_is_signed = is_signed_subword_type(type); |
|
956 int slot_size = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size; // store int sub-words as int |
|
957 __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed); |
|
958 __ store_sized_value(temp_reg, slot_dest, slot_size ); |
|
959 } |
|
960 BLOCK_COMMENT("} move_typed_arg"); |
|
961 } |
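// For example, a T_BYTE source is loaded sign-extended into temp_reg by
// load_sized_value and then stored as a full 4-byte int slot (slot_size is
// widened to type2aelembytes(T_INT) for all sub-word types), while T_LONG and
// T_DOUBLE keep their natural 8-byte size.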
|
962 |
|
963 // Cf. TemplateInterpreterGenerator::generate_return_entry_for and |
|
964 // InterpreterMacroAssembler::save_return_value |
|
965 void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, |
|
966 Address return_slot) { |
|
967 BLOCK_COMMENT("move_return_value {"); |
|
968 // Look at the type and pull the value out of the corresponding register. |
|
969 if (type == T_VOID) { |
|
970 // nothing to do |
|
971 } else if (type == T_OBJECT) { |
|
972 __ verify_oop(O0); |
|
973 __ st_ptr(O0, return_slot); |
|
974 } else if (type == T_INT || is_subword_type(type)) { |
|
975 int type_size = type2aelembytes(T_INT); |
|
976 __ store_sized_value(O0, return_slot, type_size); |
|
977 } else if (type == T_LONG) { |
|
978 // store the value by parts |
|
979     // Note: We assume longs are contiguous (if misaligned) on the interpreter stack. |
|
980 #if !defined(_LP64) && defined(COMPILER2) |
|
981 __ stx(G1, return_slot); |
|
982 #else |
|
983 #ifdef _LP64 |
|
984 __ stx(O0, return_slot); |
|
985 #else |
|
986 if (return_slot.has_disp()) { |
|
987 // The displacement is a constant |
|
988 __ st(O0, return_slot); |
|
989 __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize)); |
|
990 } else { |
|
991 __ std(O0, return_slot); |
|
992 } |
|
993 #endif |
|
994 #endif |
|
995 } else if (type == T_FLOAT) { |
|
996 __ stf(FloatRegisterImpl::S, Ftos_f, return_slot); |
|
997 } else if (type == T_DOUBLE) { |
|
998 __ stf(FloatRegisterImpl::D, Ftos_f, return_slot); |
|
999 } else { |
|
1000 ShouldNotReachHere(); |
|
1001 } |
|
1002 BLOCK_COMMENT("} move_return_value"); |
|
1003 } |
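// Summary of the conventions handled above: int/ref results arrive in O0,
// long results in O0 (LP64) or O0:O1 (32-bit; G1 when a 32-bit COMPILER2 build
// returns the long in a single register), and float/double results in Ftos_f.
// The helper spills whichever of these the BasicType selects into return_slot,
// widening sub-word results to a full int slot.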
1004 |

1005 #ifndef PRODUCT |
1006 void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) { |
|
1007 RicochetFrame* rf = new RicochetFrame(*fr); |
|
1008 |
|
1009 // ricochet slots (kept in registers for sparc) |
|
1010 values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no)); |
|
1011 values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no)); |
|
1012 values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no)); |
|
1013 values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no)); |
|
1014 values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no)); |
|
1015 values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no)); |
|
1016 |
|
1017 // relevant ricochet targets (in caller frame) |
|
1018 values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no)); |
|
1019 values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()), err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no)); |
|
1020 } |
|
1021 #endif // PRODUCT |
|
1022 |
|
1023 #ifndef PRODUCT |
|
1024 extern "C" void print_method_handle(oop mh); |
|
1025 void trace_method_handle_stub(const char* adaptername, |
1026                                oopDesc* mh, |

1027                                intptr_t* saved_sp, |

1028                                intptr_t* args, |

1029                                intptr_t* tracing_fp) { |

1030   bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh |

1031 |

1032   tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args); |

1033 |

1034   if (Verbose) { |

1035     // dumping last frame with frame::describe |

1036 |

1037     JavaThread* p = JavaThread::active(); |

1141 |

1142   __ restore(); |

1143   BLOCK_COMMENT("} trace_method_handle"); |

1144 } |

1145 #endif // PRODUCT |
1146 |
|
1147 // which conversion op types are implemented here? |
|
1148 int MethodHandles::adapter_conversion_ops_supported_mask() { |
|
1149 return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY) |
|
1150 |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) |
|
1151 |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) |
|
1152 |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) |
|
1153 |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) |
|
1154 // OP_PRIM_TO_REF is below... |
|
1155 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) |
|
1156 |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) |
|
1157 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) |
|
1158 |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) |
|
1159 // OP_COLLECT_ARGS is below... |
|
1160 |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) |
|
1161 |( |
|
1162 java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 : |
|
1163 ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF) |
|
1164 |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS) |
|
1165 |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS) |
|
1166 ) |
|
1167 ) |
|
1168 ); |
|
1169 } |
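// Note: OP_PRIM_TO_REF, OP_COLLECT_ARGS and OP_FOLD_ARGS are advertised only
// when java_lang_invoke_MethodTypeForm::vmlayout is present, presumably
// because the ricochet-frame based adapters that implement them depend on the
// vmlayout-derived argument descriptors.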
|
1170 |
|
1171 //------------------------------------------------------------------------------ |
|
1172 // MethodHandles::generate_method_handle_stub |
|
1173 // |
|
1174 // Generate an "entry" field for a method handle. |
|
1175 // This determines how the method handle will respond to calls. |
|
1176 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { |
|
1177 MethodHandles::EntryKind ek_orig = ek_original_kind(ek); |
|
1178 |
|
1179 // Here is the register state during an interpreted call, |
|
1180 // as set up by generate_method_handle_interpreter_entry(): |
|
1181 // - G5: garbage temp (was MethodHandle.invoke methodOop, unused) |
|
1182 // - G3: receiver method handle |
|
1183 // - O5_savedSP: sender SP (must preserve) |
|
1184 |
|
1185 const Register O0_scratch = O0; |
|
1186 const Register O1_scratch = O1; |
|
1187 const Register O2_scratch = O2; |
|
1188 const Register O3_scratch = O3; |
|
1189 const Register O4_scratch = O4; |
|
1190 const Register G5_scratch = G5; |
|
1191 |
|
1192 // Often used names: |
|
1193 const Register O0_argslot = O0; |
|
1194 |
|
1195 // Argument registers for _raise_exception: |
|
1196 const Register O0_code = O0; |
|
1197 const Register O1_actual = O1; |
|
1198 const Register O2_required = O2; |
|
1199 |
|
1200 guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); |
|
1201 |
|
1202 // Some handy addresses: |
|
1203 Address G3_mh_vmtarget( G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes()); |
|
1204 |
|
1205 Address G3_dmh_vmindex( G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes()); |
|
1206 |
|
1207 Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes()); |
|
1208 Address G3_bmh_argument( G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes()); |
|
1209 |
|
1210 Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes()); |
|
1211 Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes()); |
|
1212 Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); |
|
1213 |
|
1214 const int java_mirror_offset = in_bytes(Klass::java_mirror_offset()); |
|
1215 |
|
1216 if (have_entry(ek)) { |
|
1217 __ nop(); // empty stubs make SG sick |
|
1218 return; |
|
1219 } |
|
1220 |
|
1221 address interp_entry = __ pc(); |
|
1222 |
|
1223 trace_method_handle(_masm, entry_name(ek)); |
|
1224 |
|
1225 BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek))); |
|
1226 |
|
1227 switch ((int) ek) { |
|
1228 case _raise_exception: |
|
1229 { |
|
1230 // Not a real MH entry, but rather shared code for raising an |
|
1231 // exception. For sharing purposes the arguments are passed into registers |
|
1232       // and then placed in the interpreter calling convention here. |
|
1233 assert(raise_exception_method(), "must be set"); |
|
1234 assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); |
|
1235 |
|
1236 __ set(AddressLiteral((address) &_raise_exception_method), G5_method); |
|
1237 __ ld_ptr(Address(G5_method, 0), G5_method); |
|
1238 |
|
1239 const int jobject_oop_offset = 0; |
|
1240 __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method); |
|
1241 |
|
1242 adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg); |
|
1243 |
|
1244 __ st (O0_code, __ argument_address(constant(2), noreg, 0)); |
|
1245 __ st_ptr(O1_actual, __ argument_address(constant(1), noreg, 0)); |
|
1246 __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0)); |
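      // The three slots just opened by adjust_SP_and_Gargs_down_by_slots now
      // hold the outgoing arguments of the shared exception-raising method:
      // the error code at argument_address(2), the offending value at (1) and
      // the required value at (0), mirroring the O0_code / O1_actual /
      // O2_required register names above.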
|
1247 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); |
|
1248 } |
|
1249 break; |
|
1250 |
|
1251 case _invokestatic_mh: |
|
1252 case _invokespecial_mh: |
|
1253 { |
|
1254 __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop |
|
1255 // Same as TemplateTable::invokestatic or invokespecial, |
|
1256 // minus the CP setup and profiling: |
|
1257 if (ek == _invokespecial_mh) { |
|
1258 // Must load & check the first argument before entering the target method. |
|
1259 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); |
|
1260 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); |
|
1261 __ null_check(G3_method_handle); |
|
1262 __ verify_oop(G3_method_handle); |
|
1263 } |
|
1264 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); |
|
1265 } |
|
1266 break; |
|
1267 |
|
1268 case _invokevirtual_mh: |
|
1269 { |
|
1270 // Same as TemplateTable::invokevirtual, |
|
1271 // minus the CP setup and profiling: |
|
1272 |
|
1273 // Pick out the vtable index and receiver offset from the MH, |
|
1274 // and then we can discard it: |
|
1275 Register O2_index = O2_scratch; |
|
1276 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); |
|
1277 __ ldsw(G3_dmh_vmindex, O2_index); |
|
1278 // Note: The verifier allows us to ignore G3_mh_vmtarget. |
|
1279 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); |
|
1280 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); |
|
1281 |
|
1282 // Get receiver klass: |
|
1283 Register O0_klass = O0_argslot; |
|
1284 __ load_klass(G3_method_handle, O0_klass); |
|
1285 __ verify_oop(O0_klass); |
|
1286 |
|
1287 // Get target methodOop & entry point: |
|
1288 const int base = instanceKlass::vtable_start_offset() * wordSize; |
|
1289 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); |
|
1290 |
|
1291 __ sll_ptr(O2_index, LogBytesPerWord, O2_index); |
|
1292 __ add(O0_klass, O2_index, O0_klass); |
|
1293 Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes()); |
|
1294 __ ld_ptr(vtable_entry_addr, G5_method); |
|
1295 |
|
1296 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); |
|
1297 } |
|
1298 break; |
|
1299 |
|
1300 case _invokeinterface_mh: |
|
1301 { |
|
1302 // Same as TemplateTable::invokeinterface, |
|
1303 // minus the CP setup and profiling: |
|
1304 __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); |
|
1305 Register O1_intf = O1_scratch; |
|
1306 Register G5_index = G5_scratch; |
|
1307 __ load_heap_oop(G3_mh_vmtarget, O1_intf); |
|
1308 __ ldsw(G3_dmh_vmindex, G5_index); |
|
1309 __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); |
|
1310 __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); |
|
1311 |
|
1312 // Get receiver klass: |
|
1313 Register O0_klass = O0_argslot; |
|
1314 __ load_klass(G3_method_handle, O0_klass); |
|
1315 __ verify_oop(O0_klass); |
|
1316 |
|
1317 // Get interface: |
|
1318 Label no_such_interface; |
|
1319 __ verify_oop(O1_intf); |
|
1320 __ lookup_interface_method(O0_klass, O1_intf, |
|
1321 // Note: next two args must be the same: |
|
1322 G5_index, G5_method, |
|
1323 O2_scratch, |
|
1324 O3_scratch, |
|
1325 no_such_interface); |
|
1326 |
|
1327 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); |
|
1328 |
|
1329 __ bind(no_such_interface); |
|
1330 // Throw an exception. |
|
1331 // For historical reasons, it will be IncompatibleClassChangeError. |
|
1332 __ unimplemented("not tested yet"); |
|
1333 __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required); // required interface |
|
1334 __ mov( O0_klass, O1_actual); // bad receiver |
|
1335 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); |
|
1336 __ delayed()->mov(Bytecodes::_invokeinterface, O0_code); // who is complaining? |
|
1337 } |
|
1338 break; |
|
1339 |
|
1340 case _bound_ref_mh: |
|
1341 case _bound_int_mh: |
|
1342 case _bound_long_mh: |
|
1343 case _bound_ref_direct_mh: |
|
1344 case _bound_int_direct_mh: |
|
1345 case _bound_long_direct_mh: |
|
1346 { |
|
1347 const bool direct_to_method = (ek >= _bound_ref_direct_mh); |
|
1348 BasicType arg_type = ek_bound_mh_arg_type(ek); |
|
1349 int arg_slots = type2size[arg_type]; |
|
1350 |
|
1351 // Make room for the new argument: |
|
1352 load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot); |
|
1353 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); |
|
1354 |
|
1355 insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); |
|
1356 |
|
1357 // Store bound argument into the new stack slot: |
|
1358 __ load_heap_oop(G3_bmh_argument, O1_scratch); |
|
1359 if (arg_type == T_OBJECT) { |
|
1360 __ st_ptr(O1_scratch, Address(O0_argslot, 0)); |
|
1361 } else { |
|
1362 Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type)); |
|
1363 move_typed_arg(_masm, arg_type, false, |
|
1364 prim_value_addr, |
|
1365 Address(O0_argslot, 0), |
|
1366 O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3) |
|
1367 } |
|
1368 |
|
1369 if (direct_to_method) { |
|
1370 __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop |
|
1371 jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); |
|
1372 } else { |
|
1373 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); // target is a methodOop |
|
1374 __ verify_oop(G3_method_handle); |
|
1375 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1376 } |
|
1377 } |
|
1378 break; |
|
1379 |
|
1380 case _adapter_opt_profiling: |
|
1381 if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) { |
|
1382 Address G3_mh_vmcount(G3_method_handle, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes()); |
|
1383 __ ld(G3_mh_vmcount, O1_scratch); |
|
1384 __ add(O1_scratch, 1, O1_scratch); |
|
1385 __ st(O1_scratch, G3_mh_vmcount); |
|
1386 } |
|
1387 // fall through |
|
1388 |
|
1389 case _adapter_retype_only: |
|
1390 case _adapter_retype_raw: |
|
1391 // Immediately jump to the next MH layer: |
|
1392 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1393 __ verify_oop(G3_method_handle); |
|
1394 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1395 // This is OK when all parameter types widen. |
|
1396 // It is also OK when a return type narrows. |
|
1397 break; |
|
1398 |
|
1399 case _adapter_check_cast: |
|
1400 { |
|
1401 // Check a reference argument before jumping to the next layer of MH: |
|
1402 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); |
|
1403 Address vmarg = __ argument_address(O0_argslot, O0_argslot); |
|
1404 |
|
1405 // What class are we casting to? |
|
1406 Register O1_klass = O1_scratch; // Interesting AMH data. |
|
1407 __ load_heap_oop(G3_amh_argument, O1_klass); // This is a Class object! |
|
1408 load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch); |
|
1409 |
|
1410 Label L_done; |
|
1411 __ ld_ptr(vmarg, O2_scratch); |
|
1412 __ br_null_short(O2_scratch, Assembler::pn, L_done); // No cast if null. |
|
1413 __ load_klass(O2_scratch, O2_scratch); |
|
1414 |
|
1415 // Live at this point: |
|
1416 // - O0_argslot : argslot index in vmarg; may be required in the failing path |
|
1417 // - O1_klass : klass required by the target method |
|
1418 // - O2_scratch : argument klass to test |
|
1419 // - G3_method_handle: adapter method handle |
|
1420 __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done); |
|
1421 |
|
1422 // If we get here, the type check failed! |
|
1423 __ load_heap_oop(G3_amh_argument, O2_required); // required class |
|
1424 __ ld_ptr( vmarg, O1_actual); // bad object |
|
1425 __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); |
|
1426 __ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining? |
|
1427 |
|
1428 __ BIND(L_done); |
|
1429 // Get the new MH: |
|
1430 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1431 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1432 } |
|
1433 break; |
|
1434 |
|
1435 case _adapter_prim_to_prim: |
|
1436 case _adapter_ref_to_prim: |
|
1437 // Handled completely by optimized cases. |
|
1438 __ stop("init_AdapterMethodHandle should not issue this"); |
|
1439 break; |
|
1440 |
|
1441 case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim |
|
1442 //case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim |
|
1443 case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim |
|
1444 case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim |
|
1445 { |
|
1446 // Perform an in-place conversion to int or an int subword. |
|
1447 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); |
|
1448 Address value; |
|
1449 Address vmarg; |
|
1450 bool value_left_justified = false; |
|
1451 |
|
1452 switch (ek) { |
|
1453 case _adapter_opt_i2i: |
|
1454 value = vmarg = __ argument_address(O0_argslot, O0_argslot); |
|
1455 break; |
|
1456 case _adapter_opt_l2i: |
|
1457 { |
|
1458 // just delete the extra slot |
|
1459 #ifdef _LP64 |
|
1460 // In V9, longs are given 2 64-bit slots in the interpreter, but the |
|
1461 // data is passed in only 1 slot. |
|
1462 // Keep the second slot. |
|
1463 __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot); |
|
1464 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); |
|
1465         value = Address(O0_argslot, 4);  // Get the least-significant 32 bits of the 64-bit value. |
|
1466 vmarg = Address(O0_argslot, Interpreter::stackElementSize); |
|
1467 #else |
|
1468 // Keep the first slot. |
|
1469 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); |
|
1470 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); |
|
1471 value = Address(O0_argslot, 0); |
|
1472 vmarg = value; |
|
1473 #endif |
|
1474 } |
|
1475 break; |
|
1476 case _adapter_opt_unboxi: |
|
1477 { |
|
1478 vmarg = __ argument_address(O0_argslot, O0_argslot); |
|
1479 // Load the value up from the heap. |
|
1480 __ ld_ptr(vmarg, O1_scratch); |
|
1481 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); |
|
1482 #ifdef ASSERT |
|
1483 for (int bt = T_BOOLEAN; bt < T_INT; bt++) { |
|
1484 if (is_subword_type(BasicType(bt))) |
|
1485 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); |
|
1486 } |
|
1487 #endif |
|
1488 __ null_check(O1_scratch, value_offset); |
|
1489 value = Address(O1_scratch, value_offset); |
|
1490 #ifdef _BIG_ENDIAN |
|
1491 // Values stored in objects are packed. |
|
1492 value_left_justified = true; |
|
1493 #endif |
|
1494 } |
|
1495 break; |
|
1496 default: |
|
1497 ShouldNotReachHere(); |
|
1498 } |
|
1499 |
|
1500 // This check is required on _BIG_ENDIAN |
|
1501 Register G5_vminfo = G5_scratch; |
|
1502 __ ldsw(G3_amh_conversion, G5_vminfo); |
|
1503 assert(CONV_VMINFO_SHIFT == 0, "preshifted"); |
|
1504 |
|
1505 // Original 32-bit vmdata word must be of this form: |
|
1506 // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | |
|
1507 __ lduw(value, O1_scratch); |
|
1508 if (!value_left_justified) |
|
1509 __ sll(O1_scratch, G5_vminfo, O1_scratch); |
|
1510 Label zero_extend, done; |
|
1511 __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo); |
|
1512 __ br(Assembler::zero, false, Assembler::pn, zero_extend); |
|
1513 __ delayed()->nop(); |
|
1514 |
|
1515 // this path is taken for int->byte, int->short |
|
1516 __ sra(O1_scratch, G5_vminfo, O1_scratch); |
|
1517 __ ba_short(done); |
|
1518 |
|
1519 __ bind(zero_extend); |
|
1520 // this is taken for int->char |
|
1521 __ srl(O1_scratch, G5_vminfo, O1_scratch); |
|
1522 |
|
1523 __ bind(done); |
|
1524 __ st(O1_scratch, vmarg); |
|
1525 |
|
1526 // Get the new MH: |
|
1527 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1528 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1529 } |
|
1530 break; |
|
1531 |
|
1532 case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim |
|
1533 case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim |
|
1534 { |
|
1535 // Perform an in-place int-to-long or ref-to-long conversion. |
|
1536 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); |
|
1537 |
|
1538       // On big-endian machines we duplicate the slot and store the MSW |
|
1539 // in the first slot. |
|
1540 __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot); |
|
1541 |
|
1542 insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); |
|
1543 |
|
1544 Address arg_lsw(O0_argslot, 0); |
|
1545 Address arg_msw(O0_argslot, -Interpreter::stackElementSize); |
|
1546 |
|
1547 switch (ek) { |
|
1548 case _adapter_opt_i2l: |
|
1549 { |
|
1550 #ifdef _LP64 |
|
1551 __ ldsw(arg_lsw, O2_scratch); // Load LSW sign-extended |
|
1552 #else |
|
1553 __ ldsw(arg_lsw, O3_scratch); // Load LSW sign-extended |
|
1554 __ srlx(O3_scratch, BitsPerInt, O2_scratch); // Move MSW value to lower 32-bits for std |
|
1555 #endif |
|
1556 __ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64 |
|
1557 } |
|
1558 break; |
|
1559 case _adapter_opt_unboxl: |
|
1560 { |
|
1561 // Load the value up from the heap. |
|
1562 __ ld_ptr(arg_lsw, O1_scratch); |
|
1563 int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); |
|
1564 assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); |
|
1565 __ null_check(O1_scratch, value_offset); |
|
1566 __ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64 |
|
1567 __ st_long(O2_scratch, arg_msw); |
|
1568 } |
|
1569 break; |
|
1570 default: |
|
1571 ShouldNotReachHere(); |
|
1572 } |
|
1573 |
|
1574 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1575 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1576 } |
|
1577 break; |
|
1578 |
|
1579 case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim |
|
1580 case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim |
|
1581 { |
|
1582 // perform an in-place floating primitive conversion |
|
1583 __ unimplemented(entry_name(ek)); |
|
1584 } |
|
1585 break; |
|
1586 |
|
1587 case _adapter_prim_to_ref: |
|
1588 __ unimplemented(entry_name(ek)); // %%% FIXME: NYI |
|
1589 break; |
|
1590 |
|
1591 case _adapter_swap_args: |
|
1592 case _adapter_rot_args: |
|
1593 // handled completely by optimized cases |
|
1594 __ stop("init_AdapterMethodHandle should not issue this"); |
|
1595 break; |
|
1596 |
|
1597 case _adapter_opt_swap_1: |
|
1598 case _adapter_opt_swap_2: |
|
1599 case _adapter_opt_rot_1_up: |
|
1600 case _adapter_opt_rot_1_down: |
|
1601 case _adapter_opt_rot_2_up: |
|
1602 case _adapter_opt_rot_2_down: |
|
1603 { |
|
1604 int swap_slots = ek_adapter_opt_swap_slots(ek); |
|
1605 int rotate = ek_adapter_opt_swap_mode(ek); |
|
1606 |
|
1607 // 'argslot' is the position of the first argument to swap. |
|
1608 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); |
|
1609 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); |
|
1610 if (VerifyMethodHandles) |
|
1611 verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame"); |
|
1612 |
|
1613 // 'vminfo' is the second. |
|
1614 Register O1_destslot = O1_scratch; |
|
1615 load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot); |
|
1616 __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot); |
|
1617 if (VerifyMethodHandles) |
|
1618 verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame"); |
|
1619 |
|
1620 assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); |
|
1621 if (!rotate) { |
|
1622 // simple swap |
|
1623 for (int i = 0; i < swap_slots; i++) { |
|
1624 __ ld_ptr( Address(O0_argslot, i * wordSize), O2_scratch); |
|
1625 __ ld_ptr( Address(O1_destslot, i * wordSize), O3_scratch); |
|
1626 __ st_ptr(O3_scratch, Address(O0_argslot, i * wordSize)); |
|
1627 __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize)); |
|
1628 } |
|
1629 } else { |
|
1630         // A rotate is actually a pair of moves, with an "odd slot" (or pair) |
|
1631 // changing place with a series of other slots. |
|
1632 // First, push the "odd slot", which is going to get overwritten |
|
1633 switch (swap_slots) { |
|
1634 case 2 : __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru |
|
1635 case 1 : __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break; |
|
1636 default: ShouldNotReachHere(); |
|
1637 } |
|
1638 if (rotate > 0) { |
|
1639 // Here is rotate > 0: |
|
1640 // (low mem) (high mem) |
|
1641 // | dest: more_slots... | arg: odd_slot :arg+1 | |
|
1642 // => |
|
1643 // | dest: odd_slot | dest+1: more_slots... :arg+1 | |
|
1644 // work argslot down to destslot, copying contiguous data upwards |
|
1645 // pseudo-code: |
|
1646 // argslot = src_addr - swap_bytes |
|
1647 // destslot = dest_addr |
|
1648 // while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--; |
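          // Concretely (reading the diagram above, low memory on the left): if the
          // slots are | X | Y | Z | with Z the odd slot at 'arg' and X at 'dest',
          // a single-slot rotate up yields | Z | X | Y |.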
|
1649 move_arg_slots_up(_masm, |
|
1650 O1_destslot, |
|
1651 Address(O0_argslot, 0), |
|
1652 swap_slots, |
|
1653 O0_argslot, O2_scratch); |
|
1654 } else { |
|
1655 // Here is the other direction, rotate < 0: |
|
1656 // (low mem) (high mem) |
|
1657 // | arg: odd_slot | arg+1: more_slots... :dest+1 | |
|
1658 // => |
|
1659 // | arg: more_slots... | dest: odd_slot :dest+1 | |
|
1660 // work argslot up to destslot, copying contiguous data downwards |
|
1661 // pseudo-code: |
|
1662 // argslot = src_addr + swap_bytes |
|
1663 // destslot = dest_addr |
|
1664 // while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++; |
|
1665 // dest_slot denotes an exclusive upper limit |
|
1666 int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS; |
|
1667 if (limit_bias != 0) |
|
1668 __ add(O1_destslot, - limit_bias * wordSize, O1_destslot); |
|
1669 move_arg_slots_down(_masm, |
|
1670 Address(O0_argslot, swap_slots * wordSize), |
|
1671 O1_destslot, |
|
1672 -swap_slots, |
|
1673 O0_argslot, O2_scratch); |
|
1674 |
|
1675 __ sub(O1_destslot, swap_slots * wordSize, O1_destslot); |
|
1676 } |
|
1677 // pop the original first chunk into the destination slot, now free |
|
1678 switch (swap_slots) { |
|
1679 case 2 : __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru |
|
1680 case 1 : __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break; |
|
1681 default: ShouldNotReachHere(); |
|
1682 } |
|
1683 } |
|
1684 |
|
1685 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1686 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1687 } |
|
1688 break; |
|
1689 |
|
1690 case _adapter_dup_args: |
|
1691 { |
|
1692 // 'argslot' is the position of the first argument to duplicate. |
|
1693 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); |
|
1694 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); |
|
1695 |
|
1696 // 'stack_move' is negative number of words to duplicate. |
|
1697 Register O1_stack_move = O1_scratch; |
|
1698 load_stack_move(_masm, G3_amh_conversion, O1_stack_move); |
|
1699 |
|
1700 if (VerifyMethodHandles) { |
|
1701 verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true, |
|
1702 "copied argument(s) must fall within current frame"); |
|
1703 } |
|
1704 |
|
1705 // insert location is always the bottom of the argument list: |
|
1706 __ neg(O1_stack_move); |
|
1707 push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch); |
|
1708 |
|
1709 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1710 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1711 } |
|
1712 break; |
|
1713 |
|
1714 case _adapter_drop_args: |
|
1715 { |
|
1716 // 'argslot' is the position of the first argument to nuke. |
|
1717 load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); |
|
1718 __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); |
|
1719 |
|
1720 // 'stack_move' is number of words to drop. |
|
1721 Register O1_stack_move = O1_scratch; |
|
1722 load_stack_move(_masm, G3_amh_conversion, O1_stack_move); |
|
1723 |
|
1724 remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch); |
|
1725 |
|
1726 __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); |
|
1727 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
1728 } |
|
1729 break; |
|
1730 |
|
1731 case _adapter_collect_args: |
|
1732 case _adapter_fold_args: |
|
1733 case _adapter_spread_args: |
|
1734 // Handled completely by optimized cases. |
|
1735 __ stop("init_AdapterMethodHandle should not issue this"); |
|
1736 break; |
|
1737 |
|
1738 case _adapter_opt_collect_ref: |
|
1739 case _adapter_opt_collect_int: |
|
1740 case _adapter_opt_collect_long: |
|
1741 case _adapter_opt_collect_float: |
|
1742 case _adapter_opt_collect_double: |
|
1743 case _adapter_opt_collect_void: |
|
1744 case _adapter_opt_collect_0_ref: |
|
1745 case _adapter_opt_collect_1_ref: |
|
1746 case _adapter_opt_collect_2_ref: |
|
1747 case _adapter_opt_collect_3_ref: |
|
1748 case _adapter_opt_collect_4_ref: |
|
1749 case _adapter_opt_collect_5_ref: |
|
1750 case _adapter_opt_filter_S0_ref: |
|
1751 case _adapter_opt_filter_S1_ref: |
|
1752 case _adapter_opt_filter_S2_ref: |
|
1753 case _adapter_opt_filter_S3_ref: |
|
1754 case _adapter_opt_filter_S4_ref: |
|
1755 case _adapter_opt_filter_S5_ref: |
|
1756 case _adapter_opt_collect_2_S0_ref: |
|
1757 case _adapter_opt_collect_2_S1_ref: |
|
1758 case _adapter_opt_collect_2_S2_ref: |
|
1759 case _adapter_opt_collect_2_S3_ref: |
|
1760 case _adapter_opt_collect_2_S4_ref: |
|
1761 case _adapter_opt_collect_2_S5_ref: |
|
1762 case _adapter_opt_fold_ref: |
|
1763 case _adapter_opt_fold_int: |
|
1764 case _adapter_opt_fold_long: |
|
1765 case _adapter_opt_fold_float: |
|
1766 case _adapter_opt_fold_double: |
|
1767 case _adapter_opt_fold_void: |
|
1768 case _adapter_opt_fold_1_ref: |
|
1769 case _adapter_opt_fold_2_ref: |
|
1770 case _adapter_opt_fold_3_ref: |
|
1771 case _adapter_opt_fold_4_ref: |
|
1772 case _adapter_opt_fold_5_ref: |
|
1773 { |
|
1774 // Given a fresh incoming stack frame, build a new ricochet frame. |
|
1775       // On entry, TOS points at a return PC, and FP is the caller's frame ptr.

1776       // O5_savedSP has the caller's exact stack pointer, which we must also preserve.

1777       // G3_method_handle contains an AdapterMethodHandle of the indicated kind.
|
1778 |
|
1779 // Relevant AMH fields: |
|
1780 // amh.vmargslot: |
|
1781 // points to the trailing edge of the arguments |
|
1782 // to filter, collect, or fold. For a boxing operation, |
|
1783 // it points just after the single primitive value. |
|
1784 // amh.argument: |
|
1785 // recursively called MH, on |collect| arguments |
|
1786 // amh.vmtarget: |
|
1787 // final destination MH, on return value, etc. |
|
1788 // amh.conversion.dest: |
|
1789 // tells what is the type of the return value |
|
1790 // (not needed here, since dest is also derived from ek) |
|
1791 // amh.conversion.vminfo: |
|
1792 // points to the trailing edge of the return value |
|
1793 // when the vmtarget is to be called; this is |
|
1794 // equal to vmargslot + (retained ? |collect| : 0) |
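      // For example, collecting two arguments whose trailing edge is at
      // vmargslot 3: a plain collect leaves vminfo == 3, while a fold
      // (which retains the originals) records vminfo == 3 + 2 == 5.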
|
1795 |
|
1796 // Pass 0 or more argument slots to the recursive target. |
|
1797 int collect_count_constant = ek_adapter_opt_collect_count(ek); |
|
1798 |
|
1799 // The collected arguments are copied from the saved argument list: |
|
1800 int collect_slot_constant = ek_adapter_opt_collect_slot(ek); |
|
1801 |
|
1802 assert(ek_orig == _adapter_collect_args || |
|
1803 ek_orig == _adapter_fold_args, ""); |
|
1804 bool retain_original_args = (ek_orig == _adapter_fold_args); |
|
1805 |
|
1806 // The return value is replaced (or inserted) at the 'vminfo' argslot. |
|
1807 // Sometimes we can compute this statically. |
|
1808 int dest_slot_constant = -1; |
|
1809 if (!retain_original_args) |
|
1810 dest_slot_constant = collect_slot_constant; |
|
1811 else if (collect_slot_constant >= 0 && collect_count_constant >= 0) |
|
1812 // We are preserving all the arguments, and the return value is prepended, |
|
1813 // so the return slot is to the left (above) the |collect| sequence. |
|
1814 dest_slot_constant = collect_slot_constant + collect_count_constant; |
|
1815 |
|
1816 // Replace all those slots by the result of the recursive call. |
|
1817 // The result type can be one of ref, int, long, float, double, void. |
|
1818 // In the case of void, nothing is pushed on the stack after return. |
|
1819 BasicType dest = ek_adapter_opt_collect_type(ek); |
|
1820 assert(dest == type2wfield[dest], "dest is a stack slot type"); |
|
1821 int dest_count = type2size[dest]; |
|
1822 assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); |
|
1823 |
|
1824 // Choose a return continuation. |
|
1825 EntryKind ek_ret = _adapter_opt_return_any; |
|
1826 if (dest != T_CONFLICT && OptimizeMethodHandles) { |
|
1827 switch (dest) { |
|
1828 case T_INT : ek_ret = _adapter_opt_return_int; break; |
|
1829 case T_LONG : ek_ret = _adapter_opt_return_long; break; |
|
1830 case T_FLOAT : ek_ret = _adapter_opt_return_float; break; |
|
1831 case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; |
|
1832 case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; |
|
1833 case T_VOID : ek_ret = _adapter_opt_return_void; break; |
|
1834 default : ShouldNotReachHere(); |
|
1835 } |
|
1836 if (dest == T_OBJECT && dest_slot_constant >= 0) { |
|
1837 EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); |
|
1838 if (ek_try <= _adapter_opt_return_LAST && |
|
1839 ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { |
|
1840 ek_ret = ek_try; |
|
1841 } |
|
1842 } |
|
1843 assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); |
|
1844 } |
|
1845 |
|
1846 // Already pushed: ... keep1 | collect | keep2 | |
|
1847 |
|
1848 // Push a few extra argument words, if we need them to store the return value. |
|
1849 { |
|
1850 int extra_slots = 0; |
|
1851 if (retain_original_args) { |
|
1852 extra_slots = dest_count; |
|
1853 } else if (collect_count_constant == -1) { |
|
1854 extra_slots = dest_count; // collect_count might be zero; be generous |
|
1855 } else if (dest_count > collect_count_constant) { |
|
1856 extra_slots = (dest_count - collect_count_constant); |
|
1857 } else { |
|
1858 // else we know we have enough dead space in |collect| to repurpose for return values |
|
1859 } |
|
1860 if (extra_slots != 0) { |
|
1861 __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP); |
|
1862 } |
|
1863 } |
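      // Example: folding two collected argument slots into a long result
      // (dest_count == 2) with retain_original_args true pushes
      // round_to(2, 2) == 2 extra slots here; the rounding to an even count
      // presumably keeps SP doubleword-aligned.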
|
1864 |
|
1865 // Set up Ricochet Frame. |
|
1866 __ mov(SP, O5_savedSP); // record SP for the callee |
|
1867 |
|
1868 // One extra (empty) slot for outgoing target MH (see Gargs computation below). |
|
1869 __ save_frame(2); // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23. |
|
1870 |
|
1871 // Note: Gargs is live throughout the following, until we make our recursive call. |
|
1872 // And the RF saves a copy in L4_saved_args_base. |
|
1873 |
|
1874 RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs, |
|
1875 entry(ek_ret)->from_interpreted_entry()); |
|
1876 |
|
1877 // Compute argument base: |
|
1878 // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above). |
|
1879 __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs); |
|
1880 |
|
1881 // Now pushed: ... keep1 | collect | keep2 | extra | [RF] |
|
1882 |
|
1883 #ifdef ASSERT |
|
1884 if (VerifyMethodHandles && dest != T_CONFLICT) { |
|
1885 BLOCK_COMMENT("verify AMH.conv.dest {"); |
|
1886 extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch); |
|
1887 Label L_dest_ok; |
|
1888 __ cmp(O1_scratch, (int) dest); |
|
1889 __ br(Assembler::equal, false, Assembler::pt, L_dest_ok); |
|
1890 __ delayed()->nop(); |
|
1891 if (dest == T_INT) { |
|
1892 for (int bt = T_BOOLEAN; bt < T_INT; bt++) { |
|
1893 if (is_subword_type(BasicType(bt))) { |
|
1894 __ cmp(O1_scratch, (int) bt); |
|
1895 __ br(Assembler::equal, false, Assembler::pt, L_dest_ok); |
|
1896 __ delayed()->nop(); |
|
1897 } |
|
1898 } |
|
1899 } |
|
1900 __ stop("bad dest in AMH.conv"); |
|
1901 __ BIND(L_dest_ok); |
|
1902 BLOCK_COMMENT("} verify AMH.conv.dest"); |
|
1903 } |
|
1904 #endif //ASSERT |
|
1905 |
|
1906 // Find out where the original copy of the recursive argument sequence begins. |
|
1907 Register O0_coll = O0_scratch; |
|
1908 { |
|
1909 RegisterOrConstant collect_slot = collect_slot_constant; |
|
1910 if (collect_slot_constant == -1) { |
|
1911 load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch); |
|
1912 collect_slot = O1_scratch; |
|
1913 } |
|
1914 // collect_slot might be 0, but we need the move anyway. |
|
1915 __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll); |
|
1916 // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2| |
|
1917 } |
|
1918 |
|
1919 // Replace the old AMH with the recursive MH. (No going back now.) |
|
1920 // In the case of a boxing call, the recursive call is to a 'boxer' method, |
|
1921 // such as Integer.valueOf or Long.valueOf. In the case of a filter |
|
1922 // or collect call, it will take one or more arguments, transform them, |
|
1923 // and return some result, to store back into argument_base[vminfo]. |
|
1924 __ load_heap_oop(G3_amh_argument, G3_method_handle); |
|
1925 if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch); |
|
1926 |
|
1927 // Calculate |collect|, the number of arguments we are collecting. |
|
1928 Register O1_collect_count = O1_scratch; |
|
1929 RegisterOrConstant collect_count; |
|
1930 if (collect_count_constant < 0) { |
|
1931 __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch); |
|
1932 collect_count = O1_collect_count; |
|
1933 } else { |
|
1934 collect_count = collect_count_constant; |
|
1935 #ifdef ASSERT |
|
1936 if (VerifyMethodHandles) { |
|
1937 BLOCK_COMMENT("verify collect_count_constant {"); |
|
1938 __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch); |
|
1939 Label L_count_ok; |
|
1940 __ cmp_and_br_short(O3_scratch, collect_count_constant, Assembler::equal, Assembler::pt, L_count_ok); |
|
1941 __ stop("bad vminfo in AMH.conv"); |
|
1942 __ BIND(L_count_ok); |
|
1943 BLOCK_COMMENT("} verify collect_count_constant"); |
|
1944 } |
|
1945 #endif //ASSERT |
|
1946 } |
|
1947 |
|
1948 // copy |collect| slots directly to TOS: |
|
1949 push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch); |
|
1950 // Now pushed: ... keep1 | collect | keep2 | RF... | collect | |
|
1951 // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2| |
|
1952 |
|
1953 // If necessary, adjust the saved arguments to make room for the eventual return value. |
|
1954 // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect | |
|
1955 // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect | |
|
1956 // In the non-retaining case, this might move keep2 either up or down. |
|
1957 // We don't have to copy the whole | RF... collect | complex, |
|
1958 // but we must adjust RF.saved_args_base. |
|
1959 // Also, from now on, we will forget about the original copy of |collect|. |
|
1960 // If we are retaining it, we will treat it as part of |keep2|. |
|
1961 // For clarity we will define |keep3| = |collect|keep2| or |keep2|. |
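      // Worked example (non-retaining collect): with open_count == dest_count == 1
      // and a constant close_count == collect_count == 3, the net shift is
      // close_count - open_count == +2 slots, so |keep3| is moved up
      // (move_arg_slots_up), closing the space left by the consumed |collect| run.
      // When retaining (fold), close_count is forced to 0, the net shift is
      // -dest_count, and |keep3| is moved down to open room for the return value.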
|
1962 |
|
1963 BLOCK_COMMENT("adjust trailing arguments {"); |
|
1964 // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements. |
|
1965 int open_count = dest_count; |
|
1966 RegisterOrConstant close_count = collect_count_constant; |
|
1967 Register O1_close_count = O1_collect_count; |
|
1968 if (retain_original_args) { |
|
1969 close_count = constant(0); |
|
1970 } else if (collect_count_constant == -1) { |
|
1971 close_count = O1_collect_count; |
|
1972 } |
|
1973 |
|
1974 // How many slots need moving? This is simply dest_slot (0 => no |keep3|). |
|
1975 RegisterOrConstant keep3_count; |
|
1976 Register O2_keep3_count = O2_scratch; |
|
1977 if (dest_slot_constant < 0) { |
|
1978 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count); |
|
1979 keep3_count = O2_keep3_count; |
|
1980 } else { |
|
1981 keep3_count = dest_slot_constant; |
|
1982 #ifdef ASSERT |
|
1983       if (VerifyMethodHandles) {  // dest_slot_constant is known here; check it against vminfo |
|
1984 BLOCK_COMMENT("verify dest_slot_constant {"); |
|
1985 extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch); |
|
1986 Label L_vminfo_ok; |
|
1987 __ cmp_and_br_short(O3_scratch, dest_slot_constant, Assembler::equal, Assembler::pt, L_vminfo_ok); |
|
1988 __ stop("bad vminfo in AMH.conv"); |
|
1989 __ BIND(L_vminfo_ok); |
|
1990 BLOCK_COMMENT("} verify dest_slot_constant"); |
|
1991 } |
|
1992 #endif //ASSERT |
|
1993 } |
|
1994 |
|
1995 // tasks remaining: |
|
1996 bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0); |
|
1997 bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0)); |
|
1998 bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant()); |
|
1999 |
|
2000 // Old and new argument locations (based at slot 0). |
|
2001 // Net shift (&new_argv - &old_argv) is (close_count - open_count). |
|
2002 bool zero_open_count = (open_count == 0); // remember this bit of info |
|
2003 if (move_keep3 && fix_arg_base) { |
|
2004 // It will be easier to have everything in one register: |
|
2005 if (close_count.is_register()) { |
|
2006 // Deduct open_count from close_count register to get a clean +/- value. |
|
2007 __ sub(close_count.as_register(), open_count, close_count.as_register()); |
|
2008 } else { |
|
2009 close_count = close_count.as_constant() - open_count; |
|
2010 } |
|
2011 open_count = 0; |
|
2012 } |
|
2013 Register L4_old_argv = RicochetFrame::L4_saved_args_base; |
|
2014 Register O3_new_argv = O3_scratch; |
|
2015 if (fix_arg_base) { |
|
2016 __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv, |
|
2017 -(open_count * Interpreter::stackElementSize)); |
|
2018 } |
|
2019 |
|
2020 // First decide if any actual data are to be moved. |
|
2021 // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change. |
|
2022 // (As it happens, all movements involve an argument list size change.) |
|
2023 |
|
2024 // If there are variable parameters, use dynamic checks to skip around the whole mess. |
|
2025 Label L_done; |
|
2026 if (keep3_count.is_register()) { |
|
2027 __ cmp_and_br_short(keep3_count.as_register(), 0, Assembler::equal, Assembler::pn, L_done); |
|
2028 } |
|
2029 if (close_count.is_register()) { |
|
2030 __ cmp_and_br_short(close_count.as_register(), open_count, Assembler::equal, Assembler::pn, L_done); |
|
2031 } |
|
2032 |
|
2033 if (move_keep3 && fix_arg_base) { |
|
2034 bool emit_move_down = false, emit_move_up = false, emit_guard = false; |
|
2035 if (!close_count.is_constant()) { |
|
2036 emit_move_down = emit_guard = !zero_open_count; |
|
2037 emit_move_up = true; |
|
2038 } else if (open_count != close_count.as_constant()) { |
|
2039 emit_move_down = (open_count > close_count.as_constant()); |
|
2040 emit_move_up = !emit_move_down; |
|
2041 } |
|
2042 Label L_move_up; |
|
2043 if (emit_guard) { |
|
2044 __ cmp(close_count.as_register(), open_count); |
|
2045 __ br(Assembler::greater, false, Assembler::pn, L_move_up); |
|
2046 __ delayed()->nop(); |
|
2047 } |
|
2048 |
|
2049 if (emit_move_down) { |
|
2050 // Move arguments down if |+dest+| > |-collect-| |
|
2051 // (This is rare, except when arguments are retained.) |
|
2052 // This opens space for the return value. |
|
2053 if (keep3_count.is_constant()) { |
|
2054 for (int i = 0; i < keep3_count.as_constant(); i++) { |
|
2055 __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch); |
|
2056 __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) ); |
|
2057 } |
|
2058 } else { |
|
2059 // Live: O1_close_count, O2_keep3_count, O3_new_argv |
|
2060 Register argv_top = O0_scratch; |
|
2061 __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top); |
|
2062 move_arg_slots_down(_masm, |
|
2063 Address(L4_old_argv, 0), // beginning of old argv |
|
2064 argv_top, // end of old argv |
|
2065 close_count, // distance to move down (must be negative) |
|
2066 O4_scratch, G5_scratch); |
|
2067 } |
|
2068 } |
|
2069 |
|
2070 if (emit_guard) { |
|
2071 __ ba_short(L_done); // assumes emit_move_up is true also |
|
2072 __ BIND(L_move_up); |
|
2073 } |
|
2074 |
|
2075 if (emit_move_up) { |
|
2076 // Move arguments up if |+dest+| < |-collect-| |
|
2077 // (This is usual, except when |keep3| is empty.) |
|
2078 // This closes up the space occupied by the now-deleted collect values. |
|
2079 if (keep3_count.is_constant()) { |
|
2080 for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { |
|
2081 __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch); |
|
2082 __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) ); |
|
2083 } |
|
2084 } else { |
|
2085 Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch)); |
|
2086 // Live: O1_close_count, O2_keep3_count, O3_new_argv |
|
2087 move_arg_slots_up(_masm, |
|
2088 L4_old_argv, // beginning of old argv |
|
2089 argv_top, // end of old argv |
|
2090 close_count, // distance to move up (must be positive) |
|
2091 O4_scratch, G5_scratch); |
|
2092 } |
|
2093 } |
|
2094 } |
|
2095 __ BIND(L_done); |
|
2096 |
|
2097 if (fix_arg_base) { |
|
2098 // adjust RF.saved_args_base |
|
2099 __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base); |
|
2100 } |
|
2101 |
|
2102 if (stomp_dest) { |
|
2103 // Stomp the return slot, so it doesn't hold garbage. |
|
2104 // This isn't strictly necessary, but it may help detect bugs. |
|
2105 __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch); |
|
2106 __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base, |
|
2107 __ argument_offset(keep3_count, keep3_count.register_or_noreg()))); // uses O2_keep3_count |
|
2108 } |
|
2109 BLOCK_COMMENT("} adjust trailing arguments"); |
|
2110 |
|
2111 BLOCK_COMMENT("do_recursive_call"); |
|
2112 __ mov(SP, O5_savedSP); // record SP for the callee |
|
2113 __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7); |
|
2114 // The globally unique bounce address has two purposes: |
|
2115 // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). |
|
2116 // 2. When returned to, it cuts back the stack and redirects control flow |
|
2117 // to the return handler. |
|
2118 // The return handler will further cut back the stack when it takes |
|
2119 // down the RF. Perhaps there is a way to streamline this further. |
|
2120 |
|
2121 // State during recursive call: |
|
2122 // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | |
|
2123 __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); |
|
2124 } |
|
2125 break; |
|
2126 |
|
2127 case _adapter_opt_return_ref: |
|
2128 case _adapter_opt_return_int: |
|
2129 case _adapter_opt_return_long: |
|
2130 case _adapter_opt_return_float: |
|
2131 case _adapter_opt_return_double: |
|
2132 case _adapter_opt_return_void: |
|
2133 case _adapter_opt_return_S0_ref: |
|
2134 case _adapter_opt_return_S1_ref: |
|
2135 case _adapter_opt_return_S2_ref: |
|
2136 case _adapter_opt_return_S3_ref: |
|
2137 case _adapter_opt_return_S4_ref: |
|
2138 case _adapter_opt_return_S5_ref: |
|
2139 { |
|
2140 BasicType dest_type_constant = ek_adapter_opt_return_type(ek); |
|
2141 int dest_slot_constant = ek_adapter_opt_return_slot(ek); |
|
2142 |
|
2143 if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); |
|
2144 |
|
2145 if (dest_slot_constant == -1) { |
|
2146 // The current stub is a general handler for this dest_type. |
|
2147 // It can be called from _adapter_opt_return_any below. |
|
2148 // Stash the address in a little table. |
|
2149 assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); |
|
2150 address return_handler = __ pc(); |
|
2151 _adapter_return_handlers[dest_type_constant] = return_handler; |
|
2152 if (dest_type_constant == T_INT) { |
|
2153 // do the subword types too |
|
2154 for (int bt = T_BOOLEAN; bt < T_INT; bt++) { |
|
2155 if (is_subword_type(BasicType(bt)) && |
|
2156 _adapter_return_handlers[bt] == NULL) { |
|
2157 _adapter_return_handlers[bt] = return_handler; |
|
2158 } |
|
2159 } |
|
2160 } |
|
2161 } |
|
      // On entry to this continuation handler, make Gargs live again.
      __ mov(RicochetFrame::L4_saved_args_base, Gargs);

      Register O7_temp   = O7;
      Register O5_vminfo = O5;

      RegisterOrConstant dest_slot = dest_slot_constant;
      if (dest_slot_constant == -1) {
        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo);
        dest_slot = O5_vminfo;
      }
      // Store the result back into the argslot.
      // This code uses the interpreter calling sequence, in which the return value
      // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
      // There are certain irregularities with floating point values, which can be seen
      // in TemplateInterpreterGenerator::generate_return_entry_for.
      move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp));

      RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7);

      // Load the final target and go.
      if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch);
      __ restore(I5_savedSP, G0, SP);
      __ jump_to_method_handle_entry(G3_method_handle, O0_scratch);
      __ illtrap(0);
    }
    break;

  case _adapter_opt_return_any:
    {
      Register O7_temp      = O7;
      Register O5_dest_type = O5;

      if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
      extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type);
      __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp);
      __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type);
      __ ld_ptr(O7_temp, O5_dest_type, O7_temp);
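      // O7_temp now holds _adapter_return_handlers[dest_type]: the table base
      // plus (dest_type << LogBytesPerWord), i.e. the continuation stub stashed
      // by the matching _adapter_opt_return_* case above.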
|
#ifdef ASSERT
      { Label L_ok;
        __ br_notnull_short(O7_temp, Assembler::pt, L_ok);
        __ stop("bad method handle return");
        __ BIND(L_ok);
      }
#endif //ASSERT
      __ JMP(O7_temp, 0);
      __ delayed()->nop();
    }
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1_ref:
  case _adapter_opt_spread_2_ref:
  case _adapter_opt_spread_3_ref:
  case _adapter_opt_spread_4_ref:
  case _adapter_opt_spread_5_ref:
  case _adapter_opt_spread_ref:
  case _adapter_opt_spread_byte:
  case _adapter_opt_spread_char:
  case _adapter_opt_spread_short:
  case _adapter_opt_spread_int:
  case _adapter_opt_spread_long:
  case _adapter_opt_spread_float:
  case _adapter_opt_spread_double:
    {
      // spread an array out into a group of arguments
      int  length_constant    = ek_adapter_opt_spread_count(ek);
      bool length_can_be_zero = (length_constant == 0);
      if (length_constant < 0) {
        // some adapters with variable length must handle the zero case
        if (!OptimizeMethodHandles ||
            ek_adapter_opt_spread_type(ek) != T_OBJECT)
          length_can_be_zero = true;
      }
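      // Note: length_constant >= 0 means the spread count is baked into this
      // stub; length_constant < 0 means it is variable and is carried in the
      // adapter's conversion vminfo, which the length check below reloads.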
|
      // find the address of the array argument
      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);

      // O0_argslot points both to the array and to the first output arg
      Address vmarg = Address(O0_argslot, 0);

      // Get the array value.
      Register  O1_array       = O1_scratch;
      Register  O2_array_klass = O2_scratch;
      BasicType elem_type      = ek_adapter_opt_spread_type(ek);
      int       elem_slots     = type2size[elem_type];  // 1 or 2
      int       array_slots    = 1;  // array is always a T_OBJECT
      int       length_offset  = arrayOopDesc::length_offset_in_bytes();
      int       elem0_offset   = arrayOopDesc::base_offset_in_bytes(elem_type);
      __ ld_ptr(vmarg, O1_array);

      Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
      if (length_can_be_zero) {
        // handle the null pointer case, if zero is allowed
        Label L_skip;
        if (length_constant < 0) {
          load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
          __ cmp_zero_and_br(Assembler::notZero, O3_scratch, L_skip);
          __ delayed()->nop();  // to avoid back-to-back cbcond instructions
        }
        __ br_null_short(O1_array, Assembler::pn, L_array_is_empty);
        __ BIND(L_skip);
      }
      __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
      __ load_klass(O1_array, O2_array_klass);

      // Check the array type.
      Register O3_klass = O3_scratch;
      __ load_heap_oop(G3_amh_argument, O3_klass);  // this is a Class object!
      load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch);

      Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
      __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
      // If we get here, the type check failed!
      __ ba_short(L_bad_array_klass);
      __ BIND(L_ok_array_klass);

      // Check length.
      if (length_constant >= 0) {
        __ ldsw(Address(O1_array, length_offset), O4_scratch);
        __ cmp(O4_scratch, length_constant);
      } else {
        Register O3_vminfo = O3_scratch;
        load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo);
        __ ldsw(Address(O1_array, length_offset), O4_scratch);
        __ cmp(O3_vminfo, O4_scratch);
      }
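      // In the variable-length case the expected count was just loaded from the
      // adapter's vminfo; either way, a mismatch with the actual array length
      // falls into L_bad_array_length below.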
|
      __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length);
      __ delayed()->nop();

      Register O2_argslot_limit = O2_scratch;

      // Array length checks out.  Now insert any required stack slots.
      if (length_constant == -1) {
        // Form a pointer to the end of the affected region.
        __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit);
        // 'stack_move' is negative number of words to insert
        // This number already accounts for elem_slots.
        Register O3_stack_move = O3_scratch;
        load_stack_move(_masm, G3_amh_conversion, O3_stack_move);
        __ cmp(O3_stack_move, 0);
        assert(stack_move_unit() < 0, "else change this comparison");
        __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space);
        __ delayed()->nop();
        __ br(Assembler::equal, false, Assembler::pn, L_copy_args);
        __ delayed()->nop();
        // single argument case, with no array movement
        __ BIND(L_array_is_empty);
        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
        __ ba_short(L_args_done);  // no spreading to do
        __ BIND(L_insert_arg_space);
        // come here in the usual case, stack_move < 0 (2 or more spread arguments)
        // Live: O1_array, O2_argslot_limit, O3_stack_move
        insert_arg_slots(_masm, O3_stack_move,
                         O0_argslot, O4_scratch, G5_scratch, O1_scratch);
        // reload from O2_argslot_limit since O0_argslot is now decremented
        __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array);
      } else if (length_constant >= 1) {
        int new_slots = (length_constant * elem_slots) - array_slots;
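        // For example, _adapter_opt_spread_3_ref has length_constant = 3 and
        // elem_slots = 1, so new_slots = 3*1 - 1 = 2: two slots are inserted and
        // the slot that held the array reference is reused, giving the three
        // slots the spread arguments need.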
|
        insert_arg_slots(_masm, new_slots * stack_move_unit(),
                         O0_argslot, O2_scratch, O3_scratch, O4_scratch);
      } else if (length_constant == 0) {
        __ BIND(L_array_is_empty);
        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
      } else {
        ShouldNotReachHere();
      }

      // Copy from the array to the new slots.
      // Note: Stack change code preserves integrity of O0_argslot pointer.
      // So even after slot insertions, O0_argslot still points to first argument.
      // Beware:  Arguments that are shallow on the stack are deep in the array,
      // and vice versa.  So a downward-growing stack (the usual) has to be copied
      // elementwise in reverse order from the source array.
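      // For example, with three reference elements a[0..2], a[0] lands in the
      // slot at the highest address of the inserted region and a[2] lands at
      // O0_argslot, so both loops below walk the destination downward through
      // the stack while reading the array forward.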
|
      __ BIND(L_copy_args);
      if (length_constant == -1) {
        // [O0_argslot, O2_argslot_limit) is the area we are inserting into.
        // Array element [0] goes at O2_argslot_limit[-wordSize].
        Register O1_source = O1_array;
        __ add(Address(O1_array, elem0_offset), O1_source);
        Register O4_fill_ptr = O4_scratch;
        __ mov(O2_argslot_limit, O4_fill_ptr);
        Label L_loop;
        __ BIND(L_loop);
        __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr);
        move_typed_arg(_masm, elem_type, true,
                       Address(O1_source, 0), Address(O4_fill_ptr, 0),
                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
        __ add(O1_source, type2aelembytes(elem_type), O1_source);
        __ cmp_and_brx_short(O4_fill_ptr, O0_argslot, Assembler::greaterUnsigned, Assembler::pt, L_loop);
      } else if (length_constant == 0) {
        // nothing to copy
      } else {
        int elem_offset = elem0_offset;
        int slot_offset = length_constant * Interpreter::stackElementSize;
        for (int index = 0; index < length_constant; index++) {
          slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
          move_typed_arg(_masm, elem_type, true,
                         Address(O1_array, elem_offset), Address(O0_argslot, slot_offset),
                         O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
          elem_offset += type2aelembytes(elem_type);
        }
      }
      __ BIND(L_args_done);

      // Arguments are spread.  Move to next method handle.
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
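      // Failure paths.  Both exits below funnel into the shared _raise_exception
      // entry, which (judging by its setup earlier in this file) expects the
      // complaining bytecode in O0_code, the offending value in O1_actual, and
      // the required class or adapter in O2_required.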
|
      __ BIND(L_bad_array_klass);
      assert(!vmarg.uses(O2_required), "must be different registers");
      __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required);  // required class
      __ ld_ptr(vmarg, O1_actual);  // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_aaload, O0_code);  // who is complaining?

      __ bind(L_bad_array_length);
      assert(!vmarg.uses(O2_required), "must be different registers");
      __ mov(G3_method_handle, O2_required);  // the adapter method handle, which requires a certain array length
      __ ld_ptr(vmarg, O1_actual);  // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
      __ delayed()->mov(Bytecodes::_arraylength, O0_code);  // who is complaining?
    }
    break;

  default:
    DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek)));
    ShouldNotReachHere();
  }
  BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek));  // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}
|