Tue, 30 Nov 2010 23:23:40 -0800
6985015: C1 needs to support compressed oops
Summary: This change implements compressed oops in C1 for x64 and sparc. The changes are mostly at the codegen level, with a few IR-level exceptions for accesses to uncompressed oops outside the heap. Compressed oops are now also enabled with tiered compilation.
Reviewed-by: twisti, kvn, never, phh
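For orientation, a minimal sketch of the encode/decode arithmetic that compressed oops rely on; the names (heap_base, oop_shift) are illustrative placeholders, not the identifiers used by this changeset:

  #include <cstdint>

  // A 64-bit oop is stored as a 32-bit offset from the heap base,
  // scaled by the object-alignment shift. With oop_shift == 3, a
  // 32-bit narrow oop can address up to 32 GB of heap.
  static uint64_t heap_base;       // illustrative: base address of the Java heap
  static const int oop_shift = 3;  // illustrative: log2 of object alignment

  inline uint64_t decode_heap_oop(uint32_t narrow) {
    return narrow == 0 ? 0 : heap_base + ((uint64_t)narrow << oop_shift);
  }

  inline uint32_t encode_heap_oop(uint64_t oop) {
    return oop == 0 ? 0 : (uint32_t)((oop - heap_base) >> oop_shift);
  }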
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
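
// Pads the patch site with nops until a native call fits, installs the
// patching stub, and queues it for out-of-line emission. The ASSERT block
// cross-checks the stub kind against the bytecode being patched.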
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}
//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}
LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}
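
// Bail out of the compilation if less than 1K of space remains in the
// code buffer; subsequent emission assumes room is available.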
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}
void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}


void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}
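
// Emits any deferred entry code for exception handlers and records each
// handler's PC offset; a handler without nontrivial entry code reuses the
// offset of its entry block.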
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch or delay slot");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}
#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif
//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
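
// Debug info for non-safepoint ops is recorded lazily: while consecutive
// LIR ops share the same source state, the pending range is extended and
// the info is emitted only once the state changes.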
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}
// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Also returns the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}
void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}
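
// An implicit null check relies on the hardware trap taken when a null
// base is dereferenced at pc_offset; the stub carries the debug info
// consulted when that trap is handled.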
void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}


void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}


void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}
void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites; otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
  case lir_dynamic_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default: ShouldNotReachHere();
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}
void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    default:
      Unimplemented();
      break;
  }
}
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
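
// Under VerifyOopMaps (or VerifyOops), temporarily force oop verification
// and check every oop location recorded in the oop map, whether it lives
// in a register or on the stack.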
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue mv = s.current();  // renamed from 'v' to avoid shadowing the saved flag
      if (mv.is_oop()) {
        VMReg r = mv.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}