Tue, 29 Dec 2009 19:08:54 +0100
6986046: C1 valuestack cleanup
Summary: fixes a historical oddity in C1 with inlining, where all of the expression stacks were kept in the topmost ValueStack instead of in their respective ValueStacks.
Reviewed-by: never
Contributed-by: Christian Wimmer <cwimmer@uci.edu>
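
The change can be pictured with a minimal sketch (hypothetical, simplified
types for illustration only, not the actual HotSpot declarations): after the
cleanup, each inlining level keeps its own expression stack in its own
ValueStack, and the per-scope states are linked through caller_state().

    #include <vector>

    struct Value;  // stand-in for a C1 IR value

    struct ValueStack {
      ValueStack*         caller_state;  // state of the inlining caller; NULL in the outermost scope
      int                 bci;           // bytecode index this state belongs to
      std::vector<Value*> expressions;   // this scope's own expression stack,
                                         // no longer folded into the topmost state
    };

    // Walking from the youngest state to the oldest visits one state per
    // inlining level, which is what nth_oldest() below relies on:
    //   for (ValueStack* s = youngest; s != NULL; s = s->caller_state) { ... }
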
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler.cpp.incl"


void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);
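
  // Verify that the kind of patching stub matches the bytecode being
  // patched: field-access stubs only for field bytecodes, klass-load
  // stubs only for bytecodes that need a klass or constant resolved.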
#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}
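

// Bail out of the compilation if fewer than 1K bytes remain in the
// current code section, so later emission cannot overflow the code buffer.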
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}
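

// Emit any deferred exception-handler entry code ("adapter blocks") and
// record the code offset of each handler's entry point for later
// construction of the ExceptionHandlerTable.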
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
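

// Run the platform peephole pass over the list, then emit each LIR
// operation in order, checking for code-buffer overflow and bailout
// between operations.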
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}
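
// Returns the ValueStack used to describe ins in debug info: the
// StateSplit state if ins is a StateSplit, otherwise its state_before().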
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
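
// Called for every emitted LIR op while non-safepoint debug info is being
// recorded. Consecutive ops that share the same source state are coalesced;
// the pending entry is only flushed once an op with a different state is
// seen at a later pc offset.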
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, through bci_result, the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
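
// Flush the pending non-safepoint entry: describe every scope, from the
// oldest inlining level to the youngest, at the recorded pc offset.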
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
  case lir_dynamic_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default: ShouldNotReachHere();
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
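

// Dispatch a generic move to the specific routine for the
// source/destination combination: register, stack slot, constant,
// or memory address.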
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
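

// Debug-only (non-product) check that every oop recorded in the oop map
// is a valid oop at this point, temporarily forcing VerifyOops on.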
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}