Mon, 04 Apr 2011 19:03:35 -0700
6528013: C1 CTW failure with -XX:+VerifyOops assert(allocates2(pc),"")
Reviewed-by: kvn, iveresov
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
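

// Finish a patching site: pad with nops until the site is at least as large
// as a native call (the runtime later overwrites it with a call to the
// patching stub, so the call must fit), then install the stub and queue it
// for out-of-line emission.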
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that the call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);
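
  // In debug builds, cross-check that the patch kind is consistent with the
  // bytecode that required it: field-access patches may only arise from
  // get/put field/static, and klass patches only from bytecodes that
  // reference a klass constant.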
#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}
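

// Bail out of the compilation when the code section runs low rather than
// overrun the CodeBuffer; 1K of headroom is kept on the assumption that no
// single emission step between checks needs more.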
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}
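

// Emit the out-of-line "exception adapter" blocks: when a handler has
// non-trivial entry code (e.g. resolution moves inserted by the register
// allocator), that code is emitted here and its code offset recorded;
// otherwise the handler entry points directly at its block.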
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
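

// Emit one LIR list: run the platform peephole pass first, then emit each
// operation in order, checking for code-space exhaustion and bailouts
// between operations and recording non-safepoint debug info when the
// recorder asks for it.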
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}


#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}
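
// The debug state for an instruction: StateSplit instructions carry their
// state directly; everything else uses the state before the instruction.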
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
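
// Coalesce non-safepoint debug info: rather than record an entry per LIR op,
// remember the most recent source instruction and its offset, and only flush
// the pending entry once an op with a different debug state is emitted at a
// later offset. Runs of ops sharing a state thus produce a single record.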
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
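
// Flush the pending non-safepoint entry, describing its scopes from the
// oldest inlined caller down to the youngest.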
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
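

// Emit a Java call: the site is aligned on MP systems so the call
// instruction can be patched atomically, the static-call stub is emitted
// out of line, and the call itself is dispatched on the LIR opcode to pick
// the right relocation and dispatch mechanism.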
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
    case lir_static_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
    case lir_dynamic_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    case lir_virtual_call:
      vtable_call(op);
      break;
    default: ShouldNotReachHere();
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up here
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;
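
    // The standard entry emits two entry points: the unverified entry, which
    // performs the inline-cache check for instance methods, and the verified
    // entry, where the frame is built. The OSR_Entry offset is initialized
    // here and set again by lir_osr_entry when an OSR entry exists.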
    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
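

// Generic move: dispatch on the (source kind, destination kind) pair.
// Patching and CodeEmitInfo are only legal on the combinations that can
// touch memory or not-yet-resolved constants, as the asserts below check.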
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
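

// Debug-only sanity check of the oop map at a call site: temporarily force
// VerifyOops on and verify every oop the map claims is live, registers via
// verify_oop and stack slots via verify_stack_oop.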
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
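      // Each verify_oop expands to a sizeable instruction sequence, so
      // re-check the remaining code space on every iteration and bail out
      // cleanly rather than run off the end of the CodeBuffer (the failure
      // mode behind 6528013, where -XX:+VerifyOops tripped
      // assert(allocates2(pc), "")).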
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
    VerifyOops = v;
  }
#endif
}