Tue, 18 Jun 2013 12:31:07 -0700
8015237: Parallelize string table scanning during strong root processing
Summary: Parallelize the scanning of the interned string table by having each GC worker claim a given number of buckets. Changes were also reviewed by Per Liden <per.liden@oracle.com>.
Reviewed-by: tschatzl, stefank, twisti
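
As context for the change, the scheme in the summary can be sketched as follows: a shared cursor hands out fixed-size chunks of buckets, so no two workers ever scan the same bucket. Below is a minimal, runnable C++ sketch of that bucket-claiming scheme; kTableSize, kClaimChunkSize, scan_bucket, and possibly_parallel_scan are illustrative stand-ins using std::atomic and std::thread, not the HotSpot StringTable API.

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Toy stand-ins for the interned-string table and its buckets; in the VM
// the table is a fixed-size chained hash table scanned during GC.
static const std::size_t kTableSize = 1 << 16;
static const std::size_t kClaimChunkSize = 32;    // buckets claimed per atomic fetch

static std::vector<int> g_buckets(kTableSize, 0);
static std::atomic<std::size_t> g_claimed_idx(0); // shared claim cursor; reset before each scan

static void scan_bucket(std::size_t i) {
  g_buckets[i] += 1;  // stand-in for visiting each entry in the bucket
}

// Each worker loops, atomically claiming the next chunk of buckets until
// the whole table has been handed out; no bucket is scanned twice.
static void possibly_parallel_scan() {
  for (;;) {
    std::size_t start = g_claimed_idx.fetch_add(kClaimChunkSize);
    if (start >= kTableSize) return;  // table exhausted
    std::size_t end = std::min(kTableSize, start + kClaimChunkSize);
    for (std::size_t i = start; i < end; i++) {
      scan_bucket(i);
    }
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int w = 0; w < 8; w++) {
    workers.emplace_back(possibly_parallel_scan);
  }
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}

A real collector would reset the cursor before each scan and drive the loop from its GC worker tasks; claiming a chunk of buckets per atomic add keeps contention on the cursor low compared to claiming one bucket at a time.
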
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif


void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that the call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}
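

// Bail out of the compilation if the code buffer is nearly full; callers
// re-check the bailout flag via CHECK_BAILOUT after calling this.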
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}


void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
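

// Emit each op in the list; the platform-specific peephole pass gets a
// chance to combine adjacent ops first.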
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op, since that's too verbose. Print
      // branches, since they include block and stub names. Also print
      // patching moves, since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}
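
// Track non-safepoint debug info lazily: consecutive ops that share the
// same source instruction (or an equivalent ValueStack) extend a single
// pending record, which is flushed once the pc moves past it.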
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, via bci_result, the caller bci of the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  // step n frames up the caller chain
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  // advance both cursors until t reaches the oldest state;
  // s then trails it by exactly n frames
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
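
// Flush the pending record: describe every scope in the inline chain,
// oldest first, at the pending pc offset.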
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites; otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it
  if (UseSSE < 2) {
    int i;
    for (i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be CodeEmitInfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
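

// Dispatch on the (source kind, destination kind) pair; the asserts
// document which combinations allow patching and CodeEmitInfo.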
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
    VerifyOops = v;
  }
#endif
}