Wed, 21 May 2014 11:25:25 +0200
8031475: Missing oopmap in patching stubs
Summary: Add patch test for lir_checkcast in compute_oop_map
Reviewed-by: roland, twisti
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
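

// Note on this changeset (8031475, "Missing oopmap in patching stubs"): per
// the summary above, the fix adds a patch test for lir_checkcast in
// compute_oop_map (in LinearScan) so that patching stubs reached from a
// checkcast also get an oop map; patching_epilog below is where such stubs
// are installed and registered with the code-stub list.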
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that the call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
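  // The padding above guarantees the patch site spans at least
  // NativeCall::instruction_size bytes; patching later places a call over
  // this site, which would otherwise clobber a following instruction.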
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);
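
  // (debug builds only) Sanity-check that the kind of patching stub matches a
  // bytecode that can legitimately require it; the switches below enumerate
  // the expected bytecodes for each PatchingStub id.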
#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
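
// Picks the patching-stub kind for an unresolved constant load: if the raw
// bytecode carries an optional call-site appendix (JSR 292 invokes), the load
// resolves to that appendix; otherwise it is an ordinary mirror load.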
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}
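
// Bail out of the compilation (rather than overflow the CodeBuffer mid-emit)
// when less than 1K (2K on 64-bit) of code space remains.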
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K) LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}


void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't record every op, since that's too verbose. Record branches,
      // since they include block and stub names. Also record patching moves,
      // since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
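
// Deferred recording of non-safepoint debug info, roughly: a pending
// (instruction, pc_offset) pair is carried forward while successive LIR ops
// map back to the same source instruction or the same ValueStack, and is
// only written out (record_non_safepoint_debug_info) once a later op with
// different debug info supersedes it.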
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, via bci_result, the caller_bci for the next-younger state.
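// For example (chain A calls B calls C, with s being C's state): n == 0
// yields A's state, n == 1 B's, n == 2 C's, and n == 3 returns NULL.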
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }
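
  // Call sites may be rewritten while the code is live (e.g., when a static
  // call is resolved); aligning them lets that rewrite be a single atomic
  // store that other CPUs can never observe half-written.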
  // emit the static call stub out of line
  emit_static_call_stub();

  switch (op->code()) {
    case lir_static_call:
    case lir_dynamic_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    case lir_virtual_call:
      vtable_call(op);
      break;
    default:
      fatal(err_msg_res("unexpected op code: %s", op->name()));
      break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    int i;
    for (i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
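      // If the last debug-info record already sits at the current offset,
      // pad with a nop so the safepoint poll gets a pc_offset of its own
      // (two debug-info records cannot share one pc).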
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
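      // Note: OSR_Entry is initialized here to the method start; when an OSR
      // entry is actually present, the lir_osr_entry case below overwrites
      // that value (an observation from this code, not a stated contract).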
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
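
// Under -XX:+VerifyOops, walk the oop map attached to the CodeEmitInfo and
// emit verification code for every location it marks as an oop, whether the
// oop lives in a register or in a stack slot.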
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}