Wed, 21 Aug 2013 13:34:45 +0200
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
Summary: Do patching rather than bailing out for unlinked call with appendix
Reviewed-by: twisti, kvn

/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
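
// Pad the patch site with nops until a native call could be inserted over
// it, install the patching stub, and queue the stub for out-of-line
// emission. In debug builds, verify that the bytecode at the patch site
// matches the kind of patch being installed.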
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
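
// JSR 292: an unlinked invokedynamic/invokehandle call site carries an
// appendix argument that must be patched in once the site is linked, so it
// gets load_appendix_id; all other unresolved constants are mirror loads.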
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------

LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}

LIR_Assembler::~LIR_Assembler() {
}

void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}
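
// Bail out of the compilation if the code buffer is nearly full, keeping
// headroom (1K, or 2K on 64-bit) for instructions still to be emitted.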
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}

void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}
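
// Emit the queued out-of-line stubs, checking for code-buffer overflow and
// compile bailout before each one.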
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}

void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}

bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}

address LIR_Assembler::pc() const {
  return _masm->pc();
}
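
// Emit the deferred entry code for every exception handler and record the
// code offset at which each handler can be entered.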
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
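
// Main driver: emit every block of the HIR, then flush pending debug info
// and (in debug builds) check that all branch targets were bound.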
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}
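
// Emit a single basic block: align backward-branch targets, record the
// entry offset of exception handlers, then emit the block's LIR.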
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
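
// Run the peephole optimizer over the list, then emit each LIR operation,
// recording non-safepoint debug info as we go.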
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op since that's too verbose. Print branches
      // since they include block and stub names. Also print patching
      // moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------

void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}

void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
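
// Track the most recent instruction whose state can serve as non-safepoint
// debug info; flush the previously pending entry once a newer one with
// different state supersedes it.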
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
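
// Flush the pending non-safepoint entry: describe every scope, from the
// oldest caller inward, at the recorded PC offset.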
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false /*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}

void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
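
// Emit a Java call: align the site on MP systems so it can be patched
// atomically, then dispatch on the call kind (static/dynamic, optimized
// virtual, inline cache, or vtable).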
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    int i;
    for (i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}

void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}
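
// Dispatch a one-operand LIR instruction to the matching platform-specific
// helper: moves (with optional patching), returns, safepoint polls, FPU
// stack manipulation, null checks, and so on.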
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
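
// Dispatch a zero-operand LIR instruction: entry points, alignment,
// FPU-mode changes, memory barriers, and similar bookkeeping ops.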
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
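
// Dispatch a two-operand LIR instruction: compares, conditional moves,
// shifts, arithmetic, intrinsics, logic ops, throws, and atomics.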
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}

void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
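
// Route a move to the proper helper based on the source and destination
// operand kinds; moves involving memory or constants are the only ones
// that may carry a patch code or CodeEmitInfo.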
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
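
// Debug-only check: with VerifyOopMaps or VerifyOops enabled, walk the oop
// map and verify every recorded oop, whether it lives in a register or in
// a stack slot.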
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
    VerifyOops = v;
  }
#endif
}