Fri, 13 Oct 2017 11:29:53 +0800
#6126 Fixed ForceEarlyReturn error
call_VM_leaf() handles the stack alignment when the assembly code calls C code.
Modified part of the debugging code of call_VM_base() in macroAssembler_mips.cpp.
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2017, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "asm/assembler.hpp"
28 #include "asm/assembler.inline.hpp"
29 #include "asm/macroAssembler.inline.hpp"
30 #include "compiler/disassembler.hpp"
31 #include "gc_interface/collectedHeap.inline.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "memory/cardTableModRefBS.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "memory/universe.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/biasedLocking.hpp"
38 #include "runtime/interfaceSupport.hpp"
39 #include "runtime/objectMonitor.hpp"
40 #include "runtime/os.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #include "runtime/stubRoutines.hpp"
43 #include "utilities/macros.hpp"
44 #if INCLUDE_ALL_GCS
45 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
46 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
47 #include "gc_implementation/g1/heapRegion.hpp"
48 #endif // INCLUDE_ALL_GCS
50 // Implementation of MacroAssembler
52 intptr_t MacroAssembler::i[32] = {0};
53 float MacroAssembler::f[32] = {0.0};
55 void MacroAssembler::print(outputStream *s) {
56 unsigned int k;
57 for(k=0; k<sizeof(i)/sizeof(i[0]); k++) {
58 s->print_cr("i%d = 0x%.16lx", k, i[k]);
59 }
60 s->cr();
62 for(k=0; k<sizeof(f)/sizeof(f[0]); k++) {
63 s->print_cr("f%d = %f", k, f[k]);
64 }
65 s->cr();
66 }
68 int MacroAssembler::i_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->i[k]; }
69 int MacroAssembler::f_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->f[k]; }
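// Note on the two helpers above: they use the classic &((T*)0)->member
// idiom, but because i[] and f[] are static members the expression actually
// folds to the absolute address of the element, not a structure offset. For
// a non-static member the portable spelling is offsetof; a minimal sketch
// with a hypothetical ScratchExample struct (not part of this file):

#include <stddef.h>

struct ScratchExample {
  intptr_t i[32];
  float    f[32];
};

// byte offset of f[k] within ScratchExample, without the null-pointer idiom
static inline size_t f_slot_offset_example(unsigned k) {
  return offsetof(ScratchExample, f) + k * sizeof(float);
}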
71 void MacroAssembler::save_registers(MacroAssembler *masm) {
72 #define __ masm->
73 for(int k=0; k<32; k++) {
74 __ sw (as_Register(k), A0, i_offset(k));
75 }
77 for(int k=0; k<32; k++) {
78 __ swc1 (as_FloatRegister(k), A0, f_offset(k));
79 }
80 #undef __
81 }
83 void MacroAssembler::restore_registers(MacroAssembler *masm) {
84 #define __ masm->
85 for(int k=0; k<32; k++) {
86 __ lw (as_Register(k), A0, i_offset(k));
87 }
89 for(int k=0; k<32; k++) {
90 __ lwc1 (as_FloatRegister(k), A0, f_offset(k));
91 }
92 #undef __
93 }
96 void MacroAssembler::pd_patch_instruction(address branch, address target) {
97 jint& stub_inst = *(jint*) branch;
99 /*
100 move(AT, RA); // dadd
101 emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));
102 nop();
103 lui(T9, 0); // to be patched
104 ori(T9, T9, 0);
105 daddu(T9, T9, RA);
106 move(RA, AT);
107 jr(T9);
108 */
109 if(special(stub_inst) == dadd_op) {
110 jint *pc = (jint *)branch;
112 assert(opcode(pc[3]) == lui_op
113 && opcode(pc[4]) == ori_op
114 && special(pc[5]) == daddu_op, "Not a branch label patch");
115 if(!(opcode(pc[3]) == lui_op
116 && opcode(pc[4]) == ori_op
117 && special(pc[5]) == daddu_op)) { tty->print_cr("Not a branch label patch"); }
119 int offset = target - branch;
120 if (!is_simm16(offset))
121 {
122 pc[3] = (pc[3] & 0xffff0000) | high16(offset - 12);
123 pc[4] = (pc[4] & 0xffff0000) | low16(offset - 12);
124 }
125 else
126 {
127 /* revert to "beq + nop" */
128 CodeBuffer cb(branch, 4 * 10);
129 MacroAssembler masm(&cb);
130 #define __ masm.
131 __ b(target);
132 __ nop();
133 __ nop();
134 __ nop();
135 __ nop();
136 __ nop();
137 __ nop();
138 __ nop();
139 }
140 return;
141 }
143 #ifndef PRODUCT
144 if (!is_simm16((target - branch - 4) >> 2))
145 {
146 tty->print_cr("Illegal patching: target=0x%lx", target);
147 int *p = (int *)branch;
148 for (int i = -10; i < 10; i++)
149 {
150 tty->print("0x%lx, ", p[i]);
151 }
152 tty->print_cr("");
153 }
154 #endif
156 stub_inst = patched_branch(target - branch, stub_inst, 0);
157 }
159 static inline address first_cache_address() {
160 return CodeCache::low_bound() + sizeof(HeapBlock::Header);
161 }
163 static inline address last_cache_address() {
164 return CodeCache::high_bound() - Assembler::InstructionSize;
165 }
167 int MacroAssembler::call_size(address target, bool far, bool patchable) {
168 if (patchable) return 6 << Assembler::LogInstructionSize;
169 if (!far) return 2 << Assembler::LogInstructionSize; // jal + nop
170 return (insts_for_set64((jlong)target) + 2) << Assembler::LogInstructionSize;
171 }
173 // Can we reach target using jal/j from anywhere
174 // in the code cache (because code can be relocated)?
175 bool MacroAssembler::reachable_from_cache(address target) {
176 address cl = first_cache_address();
177 address ch = last_cache_address();
179 return fit_in_jal(target, cl) && fit_in_jal(target, ch);
180 }
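// A sketch of the segment test fit_in_jal presumably performs (assumed
// semantics; the real helper is defined elsewhere): J-type instructions keep
// the upper bits of the PC and encode only a 26-bit instruction index, so
// the target must lie in the same 256 MB segment as the delay-slot PC.

static inline bool fits_in_jal_example(uintptr_t target, uintptr_t pc) {
  const uintptr_t segment_mask = ~(uintptr_t)0x0fffffff; // bits above the 28-bit reach
  return (target & segment_mask) == ((pc + 4) & segment_mask); // pc + 4 = delay slot
}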
182 void MacroAssembler::general_jump(address target) {
183 if (reachable_from_cache(target)) {
184 j(target);
185 nop();
186 } else {
187 set64(T9, (long)target);
188 jr(T9);
189 nop();
190 }
191 }
193 int MacroAssembler::insts_for_general_jump(address target) {
194 if (reachable_from_cache(target)) {
195 //j(target);
196 //nop();
197 return 2;
198 } else {
199 //set64(T9, (long)target);
200 //jr(T9);
201 //nop();
202 return insts_for_set64((jlong)target) + 2;
203 }
204 }
206 void MacroAssembler::patchable_jump(address target) {
207 if (reachable_from_cache(target)) {
208 nop();
209 nop();
210 nop();
211 nop();
212 j(target);
213 nop();
214 } else {
215 patchable_set48(T9, (long)target);
216 jr(T9);
217 nop();
218 }
219 }
221 int MacroAssembler::insts_for_patchable_jump(address target) {
222 return 6;
223 }
225 void MacroAssembler::general_call(address target) {
226 if (reachable_from_cache(target)) {
227 jal(target);
228 nop();
229 } else {
230 set64(T9, (long)target);
231 jalr(T9);
232 nop();
233 }
234 }
236 int MacroAssembler::insts_for_general_call(address target) {
237 if (reachable_from_cache(target)) {
238 //jal(target);
239 //nop();
240 return 2;
241 } else {
242 //set64(T9, (long)target);
243 //jalr(T9);
244 //nop();
245 return insts_for_set64((jlong)target) + 2;
246 }
247 }
249 void MacroAssembler::patchable_call(address target) {
250 if (reachable_from_cache(target)) {
251 nop();
252 nop();
253 nop();
254 nop();
255 jal(target);
256 nop();
257 } else {
258 patchable_set48(T9, (long)target);
259 jalr(T9);
260 nop();
261 }
262 }
264 int MacroAssembler::insts_for_patchable_call(address target) {
265 return 6;
266 }
268 void MacroAssembler::beq_far(Register rs, Register rt, address entry)
269 {
270 u_char * cur_pc = pc();
272 /* Jin: Near/Far jump */
273 if(is_simm16((entry - pc() - 4) / 4))
274 {
275 Assembler::beq(rs, rt, offset(entry));
276 }
277 else
278 {
279 Label not_jump;
280 bne(rs, rt, not_jump);
281 delayed()->nop();
283 b_far(entry);
284 delayed()->nop();
286 bind(not_jump);
287 has_delay_slot();
288 }
289 }
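// The far forms work by inverting the condition: a MIPS conditional branch
// only reaches about +/-32K instructions, so for a distant target we branch
// around an unconditional long-range jump instead. The emitted shape
// (illustrative):
//
//   bne   rs, rt, not_jump   # inverted condition, short range
//   nop                      # delay slot
//   <b_far entry>            # unconditional far jump
//   nop
// not_jump: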
291 void MacroAssembler::beq_far(Register rs, Register rt, Label& L)
292 {
293 if (L.is_bound()) {
294 beq_far(rs, rt, target(L));
295 } else {
296 u_char * cur_pc = pc();
297 Label not_jump;
298 bne(rs, rt, not_jump);
299 delayed()->nop();
301 b_far(L);
302 delayed()->nop();
304 bind(not_jump);
305 has_delay_slot();
306 }
307 }
309 void MacroAssembler::bne_far(Register rs, Register rt, address entry)
310 {
311 u_char * cur_pc = pc();
313 /* Jin: Near/Far jump */
314 if(is_simm16((entry - pc() - 4) / 4))
315 {
316 Assembler::bne(rs, rt, offset(entry));
317 }
318 else
319 {
320 Label not_jump;
321 beq(rs, rt, not_jump);
322 delayed()->nop();
324 b_far(entry);
325 delayed()->nop();
327 bind(not_jump);
328 has_delay_slot();
329 }
330 }
332 void MacroAssembler::bne_far(Register rs, Register rt, Label& L)
333 {
334 if (L.is_bound()) {
335 bne_far(rs, rt, target(L));
336 } else {
337 u_char * cur_pc = pc();
338 Label not_jump;
339 beq(rs, rt, not_jump);
340 delayed()->nop();
342 b_far(L);
343 delayed()->nop();
345 bind(not_jump);
346 has_delay_slot();
347 }
348 }
350 void MacroAssembler::b_far(Label& L)
351 {
352 if (L.is_bound()) {
353 b_far(target(L));
354 } else {
355 volatile address dest = target(L);
356 /*
357 MacroAssembler::pd_patch_instruction branch=55651ed514, target=55651ef6d8
358 0x00000055651ed514: dadd at, ra, zero
359 0x00000055651ed518: [4110001]bgezal zero, 0x00000055651ed520
361 0x00000055651ed51c: sll zero, zero, 0
362 0x00000055651ed520: lui t9, 0x0
363 0x00000055651ed524: ori t9, t9, 0x21b8
364 0x00000055651ed528: daddu t9, t9, ra
365 0x00000055651ed52c: dadd ra, at, zero
366 0x00000055651ed530: jr t9
367 0x00000055651ed534: sll zero, zero, 0
368 */
369 move(AT, RA);
370 emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));
371 nop();
372 lui(T9, 0); // to be patched
373 ori(T9, T9, 0);
374 daddu(T9, T9, RA);
375 move(RA, AT);
376 jr(T9);
377 }
378 }
380 void MacroAssembler::b_far(address entry)
381 {
382 u_char * cur_pc = pc();
384 /* Jin: Near/Far jump */
385 if(is_simm16((entry - pc() - 4) / 4))
386 {
387 b(offset(entry));
388 }
389 else
390 {
391 /* address must be bounded */
392 move(AT, RA);
393 emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));
394 nop();
395 li32(T9, entry - pc());
396 daddu(T9, T9, RA);
397 move(RA, AT);
398 jr(T9);
399 }
400 }
402 void MacroAssembler::ld_ptr(Register rt, Register offset, Register base) {
403 addu_long(AT, base, offset);
404 ld_ptr(rt, 0, AT);
405 }
407 void MacroAssembler::st_ptr(Register rt, Register offset, Register base) {
408 addu_long(AT, base, offset);
409 st_ptr(rt, 0, AT);
410 }
412 void MacroAssembler::ld_long(Register rt, Register offset, Register base) {
413 addu_long(AT, base, offset);
414 ld_long(rt, 0, AT);
415 }
417 void MacroAssembler::st_long(Register rt, Register offset, Register base) {
418 addu_long(AT, base, offset);
419 st_long(rt, 0, AT);
420 }
422 Address MacroAssembler::as_Address(AddressLiteral adr) {
423 return Address(adr.target(), adr.rspec());
424 }
426 Address MacroAssembler::as_Address(ArrayAddress adr) {
427 return Address::make_array(adr);
428 }
430 // tmp_reg1 and tmp_reg2 should be saved outside of atomic_inc32 (caller saved).
431 void MacroAssembler::atomic_inc32(address counter_addr, int inc, Register tmp_reg1, Register tmp_reg2) {
432 Label again;
434 li(tmp_reg1, counter_addr);
435 bind(again);
436 if(!Use3A2000) sync();
437 ll(tmp_reg2, tmp_reg1, 0);
438 addi(tmp_reg2, tmp_reg2, inc);
439 sc(tmp_reg2, tmp_reg1, 0);
440 beq(tmp_reg2, R0, again);
441 delayed()->nop();
442 }
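// The loop above is a standard LL/SC retry: sc stores tmp_reg2 only if no
// other CPU has touched the line since the ll, and clears tmp_reg2 on
// failure, hence the beq back to 'again'. The same effect at the C level,
// shown with a GCC builtin purely as an illustration (the real code must be
// emitted as machine instructions):

static inline void atomic_inc32_example(volatile int* counter, int inc) {
  __sync_fetch_and_add(counter, inc); // compiles down to an ll/sc loop on MIPS
}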
444 int MacroAssembler::biased_locking_enter(Register lock_reg,
445 Register obj_reg,
446 Register swap_reg,
447 Register tmp_reg,
448 bool swap_reg_contains_mark,
449 Label& done,
450 Label* slow_case,
451 BiasedLockingCounters* counters) {
452 assert(UseBiasedLocking, "why call this otherwise?");
453 bool need_tmp_reg = false;
454 if (tmp_reg == noreg) {
455 need_tmp_reg = true;
456 tmp_reg = T9;
457 }
458 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, AT);
459 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
460 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
461 Address saved_mark_addr(lock_reg, 0);
463 // Biased locking
464 // See whether the lock is currently biased toward our thread and
465 // whether the epoch is still valid
466 // Note that the runtime guarantees sufficient alignment of JavaThread
467 // pointers to allow age to be placed into low bits
468 // First check to see whether biasing is even enabled for this object
469 Label cas_label;
470 int null_check_offset = -1;
471 if (!swap_reg_contains_mark) {
472 null_check_offset = offset();
473 ld_ptr(swap_reg, mark_addr);
474 }
476 if (need_tmp_reg) {
477 push(tmp_reg);
478 }
479 move(tmp_reg, swap_reg);
480 andi(tmp_reg, tmp_reg, markOopDesc::biased_lock_mask_in_place);
481 #ifdef _LP64
482 daddi(AT, R0, markOopDesc::biased_lock_pattern);
483 dsub(AT, AT, tmp_reg);
484 #else
485 addi(AT, R0, markOopDesc::biased_lock_pattern);
486 sub(AT, AT, tmp_reg);
487 #endif
488 if (need_tmp_reg) {
489 pop(tmp_reg);
490 }
492 bne(AT, R0, cas_label);
493 delayed()->nop();
496 // The bias pattern is present in the object's header. Need to check
497 // whether the bias owner and the epoch are both still current.
498 // Note that because there is no current thread register on MIPS we
499 // need to store off the mark word we read out of the object to
500 // avoid reloading it and needing to recheck invariants below. This
501 // store is unfortunate but it makes the overall code shorter and
502 // simpler.
503 st_ptr(swap_reg, saved_mark_addr);
504 if (need_tmp_reg) {
505 push(tmp_reg);
506 }
507 if (swap_reg_contains_mark) {
508 null_check_offset = offset();
509 }
510 load_prototype_header(tmp_reg, obj_reg);
511 xorr(tmp_reg, tmp_reg, swap_reg);
512 get_thread(swap_reg);
513 xorr(swap_reg, swap_reg, tmp_reg);
515 move(AT, ~((int) markOopDesc::age_mask_in_place));
516 andr(swap_reg, swap_reg, AT);
518 if (PrintBiasedLockingStatistics) {
519 Label L;
520 bne(swap_reg, R0, L);
521 delayed()->nop();
522 push(tmp_reg);
523 push(A0);
524 atomic_inc32((address)BiasedLocking::biased_lock_entry_count_addr(), 1, A0, tmp_reg);
525 pop(A0);
526 pop(tmp_reg);
527 bind(L);
528 }
529 if (need_tmp_reg) {
530 pop(tmp_reg);
531 }
532 beq(swap_reg, R0, done);
533 delayed()->nop();
534 Label try_revoke_bias;
535 Label try_rebias;
537 // At this point we know that the header has the bias pattern and
538 // that we are not the bias owner in the current epoch. We need to
539 // figure out more details about the state of the header in order to
540 // know what operations can be legally performed on the object's
541 // header.
543 // If the low three bits in the xor result aren't clear, that means
544 // the prototype header is no longer biased and we have to revoke
545 // the bias on this object.
547 move(AT, markOopDesc::biased_lock_mask_in_place);
548 andr(AT, swap_reg, AT);
549 bne(AT, R0, try_revoke_bias);
550 delayed()->nop();
551 // Biasing is still enabled for this data type. See whether the
552 // epoch of the current bias is still valid, meaning that the epoch
553 // bits of the mark word are equal to the epoch bits of the
554 // prototype header. (Note that the prototype header's epoch bits
555 // only change at a safepoint.) If not, attempt to rebias the object
556 // toward the current thread. Note that we must be absolutely sure
557 // that the current epoch is invalid in order to do this because
558 // otherwise the manipulations it performs on the mark word are
559 // illegal.
561 move(AT, markOopDesc::epoch_mask_in_place);
562 andr(AT,swap_reg, AT);
563 bne(AT, R0, try_rebias);
564 delayed()->nop();
565 // The epoch of the current bias is still valid but we know nothing
566 // about the owner; it might be set or it might be clear. Try to
567 // acquire the bias of the object using an atomic operation. If this
568 // fails we will go in to the runtime to revoke the object's bias.
569 // Note that we first construct the presumed unbiased header so we
570 // don't accidentally blow away another thread's valid bias.
572 ld_ptr(swap_reg, saved_mark_addr);
574 move(AT, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
575 andr(swap_reg, swap_reg, AT);
577 if (need_tmp_reg) {
578 push(tmp_reg);
579 }
580 get_thread(tmp_reg);
581 orr(tmp_reg, tmp_reg, swap_reg);
582 //if (os::is_MP()) {
583 // sync();
584 //}
585 cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg);
586 if (need_tmp_reg) {
587 pop(tmp_reg);
588 }
589 // If the biasing toward our thread failed, this means that
590 // another thread succeeded in biasing it toward itself and we
591 // need to revoke that bias. The revocation will occur in the
592 // interpreter runtime in the slow case.
593 if (PrintBiasedLockingStatistics) {
594 Label L;
595 bne(AT, R0, L);
596 delayed()->nop();
597 push(tmp_reg);
598 push(A0);
599 atomic_inc32((address)BiasedLocking::anonymously_biased_lock_entry_count_addr(), 1, A0, tmp_reg);
600 pop(A0);
601 pop(tmp_reg);
602 bind(L);
603 }
604 if (slow_case != NULL) {
605 beq_far(AT, R0, *slow_case);
606 delayed()->nop();
607 }
608 b(done);
609 delayed()->nop();
611 bind(try_rebias);
612 // At this point we know the epoch has expired, meaning that the
613 // current "bias owner", if any, is actually invalid. Under these
614 // circumstances _only_, we are allowed to use the current header's
615 // value as the comparison value when doing the cas to acquire the
616 // bias in the current epoch. In other words, we allow transfer of
617 // the bias from one thread to another directly in this situation.
618 //
619 // FIXME: due to a lack of registers we currently blow away the age
620 // bits in this situation. Should attempt to preserve them.
621 if (need_tmp_reg) {
622 push(tmp_reg);
623 }
624 load_prototype_header(tmp_reg, obj_reg);
625 get_thread(swap_reg);
626 orr(tmp_reg, tmp_reg, swap_reg);
627 ld_ptr(swap_reg, saved_mark_addr);
629 //if (os::is_MP()) {
630 // sync();
631 //}
632 cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg);
633 if (need_tmp_reg) {
634 pop(tmp_reg);
635 }
636 // If the biasing toward our thread failed, then another thread
637 // succeeded in biasing it toward itself and we need to revoke that
638 // bias. The revocation will occur in the runtime in the slow case.
639 if (PrintBiasedLockingStatistics) {
640 Label L;
641 bne(AT, R0, L);
642 delayed()->nop();
643 push(AT);
644 push(tmp_reg);
645 atomic_inc32((address)BiasedLocking::rebiased_lock_entry_count_addr(), 1, AT, tmp_reg);
646 pop(tmp_reg);
647 pop(AT);
648 bind(L);
649 }
650 if (slow_case != NULL) {
651 beq_far(AT, R0, *slow_case);
652 delayed()->nop();
653 }
655 b(done);
656 delayed()->nop();
657 bind(try_revoke_bias);
658 // The prototype mark in the klass doesn't have the bias bit set any
659 // more, indicating that objects of this data type are not supposed
660 // to be biased any more. We are going to try to reset the mark of
661 // this object to the prototype value and fall through to the
662 // CAS-based locking scheme. Note that if our CAS fails, it means
663 // that another thread raced us for the privilege of revoking the
664 // bias of this particular object, so it's okay to continue in the
665 // normal locking code.
666 //
667 // FIXME: due to a lack of registers we currently blow away the age
668 // bits in this situation. Should attempt to preserve them.
669 ld_ptr(swap_reg, saved_mark_addr);
671 if (need_tmp_reg) {
672 push(tmp_reg);
673 }
674 load_prototype_header(tmp_reg, obj_reg);
675 //if (os::is_MP()) {
676 // lock();
677 //}
678 cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg);
679 if (need_tmp_reg) {
680 pop(tmp_reg);
681 }
682 // Fall through to the normal CAS-based lock, because no matter what
683 // the result of the above CAS, some thread must have succeeded in
684 // removing the bias bit from the object's header.
685 if (PrintBiasedLockingStatistics) {
686 Label L;
687 bne(AT, R0, L);
688 delayed()->nop();
689 push(AT);
690 push(tmp_reg);
691 atomic_inc32((address)BiasedLocking::revoked_lock_entry_count_addr(), 1, AT, tmp_reg);
692 pop(tmp_reg);
693 pop(AT);
694 bind(L);
695 }
697 bind(cas_label);
698 return null_check_offset;
699 }
701 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
702 assert(UseBiasedLocking, "why call this otherwise?");
704 // Check for biased locking unlock case, which is a no-op
705 // Note: we do not have to check the thread ID for two reasons.
706 // First, the interpreter checks for IllegalMonitorStateException at
707 // a higher level. Second, if the bias was revoked while we held the
708 // lock, the object could not be rebiased toward another thread, so
709 // the bias bit would be clear.
710 #ifdef _LP64
711 ld(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
712 andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
713 daddi(AT, R0, markOopDesc::biased_lock_pattern);
714 #else
715 lw(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
716 andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
717 addi(AT, R0, markOopDesc::biased_lock_pattern);
718 #endif
720 beq(AT, temp_reg, done);
721 delayed()->nop();
722 }
724 // NOTE: we don't increment SP after the call like the x86 version does; maybe this is a problem, FIXME.
725 // The stack pointer adjustment is needed; see InterpreterMacroAssembler::super_call_VM_leaf.
726 // This method handles the stack alignment itself, so you no longer need to preserve stack space for the arguments.
727 void MacroAssembler::call_VM_leaf_base(address entry_point,
728 int number_of_arguments) {
729 //call(RuntimeAddress(entry_point));
730 //increment(rsp, number_of_arguments * wordSize);
731 Label L, E;
733 assert(number_of_arguments <= 4, "just check");
735 andi(AT, SP, 0xf);
736 beq(AT, R0, L);
737 delayed()->nop();
738 daddi(SP, SP, -8);
739 call(entry_point, relocInfo::runtime_call_type);
740 delayed()->nop();
741 daddi(SP, SP, 8);
742 b(E);
743 delayed()->nop();
745 bind(L);
746 call(entry_point, relocInfo::runtime_call_type);
747 delayed()->nop();
748 bind(E);
749 }
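// Why one 8-byte pad suffices: this LP64 port keeps SP 8-byte aligned at all
// times, so (SP & 0xf) can only be 0 or 8, and a single daddi of -8 restores
// the 16-byte alignment the native C ABI expects at call boundaries. A
// host-side sketch of that invariant (illustrative helper, not part of the
// patch):

static inline void check_sp_pad_example(uintptr_t sp) {
  assert((sp & 0x7) == 0, "SP must stay 8-byte aligned");
  uintptr_t pad = sp & 0xf; // misalignment against the 16-byte ABI boundary
  assert(pad == 0 || pad == 8, "only a 0- or 8-byte pad is possible");
}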
752 void MacroAssembler::jmp(address entry) {
753 patchable_set48(T9, (long)entry);
754 jr(T9);
755 }
757 void MacroAssembler::jmp(address entry, relocInfo::relocType rtype) {
758 switch (rtype) {
759 case relocInfo::runtime_call_type:
760 case relocInfo::none:
761 jmp(entry);
762 break;
763 default:
764 {
765 InstructionMark im(this);
766 relocate(rtype);
767 patchable_set48(T9, (long)entry);
768 jr(T9);
769 }
770 break;
771 }
772 }
774 void MacroAssembler::call(address entry) {
775 // C/C++ code assumes T9 is the entry point, so we just always move entry to T9.
776 // Maybe there is a more graceful way to handle this. FIXME.
777 // For more info, see class NativeCall.
778 #ifndef _LP64
779 move(T9, (int)entry);
780 #else
781 patchable_set48(T9, (long)entry);
782 #endif
783 jalr(T9);
784 }
786 void MacroAssembler::call(address entry, relocInfo::relocType rtype) {
787 switch (rtype) {
788 case relocInfo::runtime_call_type:
789 case relocInfo::none:
790 call(entry);
791 break;
792 default:
793 {
794 InstructionMark im(this);
795 relocate(rtype);
796 call(entry);
797 }
798 break;
799 }
800 }
802 void MacroAssembler::call(address entry, RelocationHolder& rh)
803 {
804 switch (rh.type()) {
805 case relocInfo::runtime_call_type:
806 case relocInfo::none:
807 call(entry);
808 break;
809 default:
810 {
811 InstructionMark im(this);
812 relocate(rh);
813 call(entry);
814 }
815 break;
816 }
817 }
819 void MacroAssembler::ic_call(address entry) {
820 RelocationHolder rh = virtual_call_Relocation::spec(pc());
821 patchable_set48(IC_Klass, (long)Universe::non_oop_word());
822 assert(entry != NULL, "call most probably wrong");
823 InstructionMark im(this);
824 relocate(rh);
825 patchable_call(entry);
826 }
828 void MacroAssembler::c2bool(Register r) {
829 Label L;
830 Assembler::beq(r, R0, L);
831 delayed()->nop();
832 move(r, 1);
833 bind(L);
834 }
836 #ifndef PRODUCT
837 extern "C" void findpc(intptr_t x);
838 #endif
840 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
841 // In order to get locks to work, we need to fake an in_VM state
842 JavaThread* thread = JavaThread::current();
843 JavaThreadState saved_state = thread->thread_state();
844 thread->set_thread_state(_thread_in_vm);
845 if (ShowMessageBoxOnError) {
846 JavaThread* thread = JavaThread::current();
847 JavaThreadState saved_state = thread->thread_state();
848 thread->set_thread_state(_thread_in_vm);
849 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
850 ttyLocker ttyl;
851 BytecodeCounter::print();
852 }
853 // To see where a verify_oop failed, get $ebx+40/X for this frame.
854 // This is the value of eip which points to where verify_oop will return.
855 if (os::message_box(msg, "Execution stopped, print registers?")) {
856 ttyLocker ttyl;
857 tty->print_cr("eip = 0x%08x", eip);
858 #ifndef PRODUCT
859 tty->cr();
860 findpc(eip);
861 tty->cr();
862 #endif
863 tty->print_cr("rax, = 0x%08x", rax);
864 tty->print_cr("rbx, = 0x%08x", rbx);
865 tty->print_cr("rcx = 0x%08x", rcx);
866 tty->print_cr("rdx = 0x%08x", rdx);
867 tty->print_cr("rdi = 0x%08x", rdi);
868 tty->print_cr("rsi = 0x%08x", rsi);
869 tty->print_cr("rbp, = 0x%08x", rbp);
870 tty->print_cr("rsp = 0x%08x", rsp);
871 BREAKPOINT;
872 }
873 } else {
874 ttyLocker ttyl;
875 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
876 assert(false, "DEBUG MESSAGE");
877 }
878 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
879 }
881 void MacroAssembler::debug(char* msg/*, RegistersForDebugging* regs*/) {
882 if ( ShowMessageBoxOnError ) {
883 JavaThreadState saved_state = JavaThread::current()->thread_state();
884 JavaThread::current()->set_thread_state(_thread_in_vm);
885 {
886 // In order to get locks to work, we need to fake an in_VM state
887 ttyLocker ttyl;
888 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
889 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
890 BytecodeCounter::print();
891 }
893 // if (os::message_box(msg, "Execution stopped, print registers?"))
894 // regs->print(::tty);
895 }
896 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
897 }
898 else
899 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
900 }
903 void MacroAssembler::stop(const char* msg) {
904 li(A0, (long)msg);
905 #ifndef _LP64
906 //reserve space for the argument. added by yjl 7/10/2005
907 addiu(SP, SP, - 1 * wordSize);
908 #endif
909 call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
910 delayed()->nop();
911 #ifndef _LP64
912 //restore space for argument
913 addiu(SP, SP, 1 * wordSize);
914 #endif
915 brk(17);
916 }
918 void MacroAssembler::warn(const char* msg) {
919 #ifdef _LP64
920 pushad();
921 li(A0, (long)msg);
922 push(S2);
923 move(AT, -(StackAlignmentInBytes));
924 move(S2, SP); // use S2 as a sender SP holder
925 andr(SP, SP, AT); // align stack as required by ABI
926 call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
927 delayed()->nop();
928 move(SP, S2); // use S2 as a sender SP holder
929 pop(S2);
930 popad();
931 #else
932 pushad();
933 addi(SP, SP, -4);
934 sw(A0, SP, -1 * wordSize);
935 li(A0, (long)msg);
936 addi(SP, SP, -1 * wordSize);
937 call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
938 delayed()->nop();
939 addi(SP, SP, 1 * wordSize);
940 lw(A0, SP, -1 * wordSize);
941 addi(SP, SP, 4);
942 popad();
943 #endif
944 }
946 void MacroAssembler::print_reg(Register reg) {
947 /*
948 char *s = getenv("PRINT_REG");
949 if (s == NULL)
950 return;
951 if (strcmp(s, "1") != 0)
952 return;
953 */
954 void * cur_pc = pc();
955 pushad();
956 NOT_LP64(push(FP);)
958 li(A0, (long)reg->name());
959 if (reg == SP)
960 addiu(A1, SP, wordSize * 23); //23 registers saved in pushad()
961 else if (reg == A0)
962 ld(A1, SP, wordSize * 19); //A0 has been modified by li(A0, (long)reg->name()). Ugly Code!
963 else
964 move(A1, reg);
965 li(A2, (long)cur_pc);
966 push(S2);
967 move(AT, -(StackAlignmentInBytes));
968 move(S2, SP); // use S2 as a sender SP holder
969 andr(SP, SP, AT); // align stack as required by ABI
970 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_reg_with_pc),relocInfo::runtime_call_type);
971 delayed()->nop();
972 move(SP, S2); // use S2 as a sender SP holder
973 pop(S2);
974 NOT_LP64(pop(FP);)
975 popad();
977 /*
978 pushad();
979 #ifdef _LP64
980 if (reg == SP)
981 addiu(A0, SP, wordSize * 23); //23 registers saved in pushad()
982 else
983 move(A0, reg);
984 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_long),relocInfo::runtime_call_type);
985 delayed()->nop();
986 #else
987 push(FP);
988 move(A0, reg);
989 dsrl32(A1, reg, 0);
990 //call(CAST_FROM_FN_PTR(address, SharedRuntime::print_int),relocInfo::runtime_call_type);
991 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_long),relocInfo::runtime_call_type);
992 delayed()->nop();
993 pop(FP);
994 #endif
995 popad();
996 pushad();
997 NOT_LP64(push(FP);)
998 char b[50];
999 sprintf((char *)b, " pc: %p\n",cur_pc);
1000 li(A0, (long)(char *)b);
1001 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type);
1002 delayed()->nop();
1003 NOT_LP64(pop(FP);)
1004 popad();
1005 */
1006 }
1008 void MacroAssembler::print_reg(FloatRegister reg) {
1009 void * cur_pc = pc();
1010 pushad();
1011 NOT_LP64(push(FP);)
1012 li(A0, (long)reg->name());
1013 push(S2);
1014 move(AT, -(StackAlignmentInBytes));
1015 move(S2, SP); // use S2 as a sender SP holder
1016 andr(SP, SP, AT); // align stack as required by ABI
1017 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type);
1018 delayed()->nop();
1019 move(SP, S2); // use S2 as a sender SP holder
1020 pop(S2);
1021 NOT_LP64(pop(FP);)
1022 popad();
1024 pushad();
1025 NOT_LP64(push(FP);)
1026 #if 1
1027 move(FP, SP);
1028 move(AT, -(StackAlignmentInBytes));
1029 andr(SP , SP , AT);
1030 mov_d(F12, reg);
1031 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_double),relocInfo::runtime_call_type);
1032 delayed()->nop();
1033 move(SP, FP);
1034 #else
1035 mov_s(F12, reg);
1036 //call(CAST_FROM_FN_PTR(address, SharedRuntime::print_float),relocInfo::runtime_call_type);
1037 //delayed()->nop();
1038 #endif
1039 NOT_LP64(pop(FP);)
1040 popad();
1042 #if 0
1043 pushad();
1044 NOT_LP64(push(FP);)
1045 char* b = new char[50];
1046 sprintf(b, " pc: %p\n", cur_pc);
1047 li(A0, (long)b);
1048 call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type);
1049 delayed()->nop();
1050 NOT_LP64(pop(FP);)
1051 popad();
1052 #endif
1053 }
1055 void MacroAssembler::increment(Register reg, int imm) {
1056 if (!imm) return;
1057 if (is_simm16(imm)) {
1058 #ifdef _LP64
1059 daddiu(reg, reg, imm);
1060 #else
1061 addiu(reg, reg, imm);
1062 #endif
1063 } else {
1064 move(AT, imm);
1065 #ifdef _LP64
1066 daddu(reg, reg, AT);
1067 #else
1068 addu(reg, reg, AT);
1069 #endif
1070 }
1071 }
1073 void MacroAssembler::decrement(Register reg, int imm) {
1074 increment(reg, -imm);
1075 }
1078 void MacroAssembler::call_VM(Register oop_result,
1079 address entry_point,
1080 bool check_exceptions) {
1081 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1082 }
1084 void MacroAssembler::call_VM(Register oop_result,
1085 address entry_point,
1086 Register arg_1,
1087 bool check_exceptions) {
1088 if (arg_1!=A1) move(A1, arg_1);
1089 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1090 }
1092 void MacroAssembler::call_VM(Register oop_result,
1093 address entry_point,
1094 Register arg_1,
1095 Register arg_2,
1096 bool check_exceptions) {
1097 if (arg_1!=A1) move(A1, arg_1);
1098 if (arg_2!=A2) move(A2, arg_2);
1099 assert(arg_2 != A1, "smashed argument");
1100 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1101 }
1103 void MacroAssembler::call_VM(Register oop_result,
1104 address entry_point,
1105 Register arg_1,
1106 Register arg_2,
1107 Register arg_3,
1108 bool check_exceptions) {
1109 if (arg_1!=A1) move(A1, arg_1);
1110 if (arg_2!=A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument");
1111 if (arg_3!=A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument");
1112 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1113 }
1115 void MacroAssembler::call_VM(Register oop_result,
1116 Register last_java_sp,
1117 address entry_point,
1118 int number_of_arguments,
1119 bool check_exceptions) {
1120 call_VM_base(oop_result, NOREG, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1121 }
1123 void MacroAssembler::call_VM(Register oop_result,
1124 Register last_java_sp,
1125 address entry_point,
1126 Register arg_1,
1127 bool check_exceptions) {
1128 if (arg_1 != A1) move(A1, arg_1);
1129 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1130 }
1132 void MacroAssembler::call_VM(Register oop_result,
1133 Register last_java_sp,
1134 address entry_point,
1135 Register arg_1,
1136 Register arg_2,
1137 bool check_exceptions) {
1138 if (arg_1 != A1) move(A1, arg_1);
1139 if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument");
1140 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1141 }
1143 void MacroAssembler::call_VM(Register oop_result,
1144 Register last_java_sp,
1145 address entry_point,
1146 Register arg_1,
1147 Register arg_2,
1148 Register arg_3,
1149 bool check_exceptions) {
1150 if (arg_1 != A1) move(A1, arg_1);
1151 if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument");
1152 if (arg_3 != A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument");
1153 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1154 }
1156 void MacroAssembler::call_VM_base(Register oop_result,
1157 Register java_thread,
1158 Register last_java_sp,
1159 address entry_point,
1160 int number_of_arguments,
1161 bool check_exceptions) {
1163 address before_call_pc;
1164 // determine java_thread register
1165 if (!java_thread->is_valid()) {
1166 #ifndef OPT_THREAD
1167 java_thread = T2;
1168 get_thread(java_thread);
1169 #else
1170 java_thread = TREG;
1171 #endif
1172 }
1173 // determine last_java_sp register
1174 if (!last_java_sp->is_valid()) {
1175 last_java_sp = SP;
1176 }
1177 // debugging support
1178 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1179 assert(number_of_arguments <= 4 , "cannot have more than 4 arguments");
1180 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
1181 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
1183 assert(last_java_sp != FP, "this code doesn't work for last_java_sp == fp, which currently can't portably work anyway since C2 doesn't save fp");
1185 // set last Java frame before call
1186 before_call_pc = (address)pc();
1187 set_last_Java_frame(java_thread, last_java_sp, FP, before_call_pc);
1189 // do the call
1190 move(A0, java_thread);
1191 call(entry_point, relocInfo::runtime_call_type);
1192 delayed()->nop();
1194 // restore the thread (cannot use the pushed argument since arguments
1195 // may be overwritten by C code generated by an optimizing compiler);
1196 // however can use the register value directly if it is callee saved.
1197 #ifndef OPT_THREAD
1198 get_thread(java_thread);
1199 #else
1200 #ifdef ASSERT
1201 {
1202 Label L;
1203 get_thread(AT);
1204 beq(java_thread, AT, L);
1205 delayed()->nop();
1206 stop("MacroAssembler::call_VM_base: edi not callee saved?");
1207 bind(L);
1208 }
1209 #endif
1210 #endif
1212 // discard thread and arguments
1213 ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
1214 // reset last Java frame
1215 reset_last_Java_frame(java_thread, false, true);
1217 check_and_handle_popframe(java_thread);
1218 check_and_handle_earlyret(java_thread);
1219 if (check_exceptions) {
1220 // check for pending exceptions (java_thread is set upon return)
1221 Label L;
1222 #ifdef _LP64
1223 ld(AT, java_thread, in_bytes(Thread::pending_exception_offset()));
1224 #else
1225 lw(AT, java_thread, in_bytes(Thread::pending_exception_offset()));
1226 #endif
1227 beq(AT, R0, L);
1228 delayed()->nop();
1229 li(AT, before_call_pc);
1230 push(AT);
1231 jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
1232 delayed()->nop();
1233 bind(L);
1234 }
1236 // get oop result if there is one and reset the value in the thread
1237 if (oop_result->is_valid()) {
1238 #ifdef _LP64
1239 ld(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset()));
1240 sd(R0, java_thread, in_bytes(JavaThread::vm_result_offset()));
1241 #else
1242 lw(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset()));
1243 sw(R0, java_thread, in_bytes(JavaThread::vm_result_offset()));
1244 #endif
1245 verify_oop(oop_result);
1246 }
1247 }
1249 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
1251 move(V0, SP);
1252 //we also reserve space for java_thread here
1253 #ifndef _LP64
1254 addi(SP, SP, (1 + number_of_arguments) * (- wordSize));
1255 #endif
1256 move(AT, -(StackAlignmentInBytes));
1257 andr(SP, SP, AT);
1258 call_VM_base(oop_result, NOREG, V0, entry_point, number_of_arguments, check_exceptions);
1260 }
1262 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1263 call_VM_leaf_base(entry_point, number_of_arguments);
1264 }
1266 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1267 if (arg_0 != A0) move(A0, arg_0);
1268 call_VM_leaf(entry_point, 1);
1269 }
1271 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1272 if (arg_0 != A0) move(A0, arg_0);
1273 if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument");
1274 call_VM_leaf(entry_point, 2);
1275 }
1277 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1278 if (arg_0 != A0) move(A0, arg_0);
1279 if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument");
1280 if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A0 && arg_2 != A1, "smashed argument");
1281 call_VM_leaf(entry_point, 3);
1282 }
1283 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1284 MacroAssembler::call_VM_leaf_base(entry_point, 0);
1285 }
1288 void MacroAssembler::super_call_VM_leaf(address entry_point,
1289 Register arg_1) {
1290 if (arg_1 != A0) move(A0, arg_1);
1291 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1292 }
1295 void MacroAssembler::super_call_VM_leaf(address entry_point,
1296 Register arg_1,
1297 Register arg_2) {
1298 if (arg_1 != A0) move(A0, arg_1);
1299 if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument");
1300 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1301 }
1302 void MacroAssembler::super_call_VM_leaf(address entry_point,
1303 Register arg_1,
1304 Register arg_2,
1305 Register arg_3) {
1306 if (arg_1 != A0) move(A0, arg_1);
1307 if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument");
1308 if (arg_3 != A2) move(A2, arg_3); assert(arg_3 != A0 && arg_3 != A1, "smashed argument");
1309 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1310 }
1312 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
1313 }
1315 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
1316 }
1318 void MacroAssembler::null_check(Register reg, int offset) {
1319 if (needs_explicit_null_check(offset)) {
1320 // provoke OS NULL exception if reg = NULL by
1321 // accessing M[reg] w/o changing any (non-CC) registers
1322 // NOTE: cmpl is plenty here to provoke a segv
1323 lw(AT, reg, 0);
1324 // Note: should probably use testl(rax, Address(reg, 0));
1325 // may be shorter code (however, this version of
1326 // testl needs to be implemented first)
1327 } else {
1328 // nothing to do, (later) access of M[reg + offset]
1329 // will provoke OS NULL exception if reg = NULL
1330 }
1331 }
1333 void MacroAssembler::enter() {
1334 push2(RA, FP);
1335 move(FP, SP);
1336 }
1338 void MacroAssembler::leave() {
1339 #ifndef _LP64
1340 //move(SP, FP);
1341 //pop2(FP, RA);
1342 addi(SP, FP, 2 * wordSize);
1343 lw(RA, SP, - 1 * wordSize);
1344 lw(FP, SP, - 2 * wordSize);
1345 #else
1346 daddi(SP, FP, 2 * wordSize);
1347 ld(RA, SP, - 1 * wordSize);
1348 ld(FP, SP, - 2 * wordSize);
1349 #endif
1350 }
1351 /*
1352 void MacroAssembler::os_breakpoint() {
1353 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
1354 // (e.g., MSVC can't call ps() otherwise)
1355 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
1356 }
1357 */
1358 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
1359 // determine java_thread register
1360 if (!java_thread->is_valid()) {
1361 #ifndef OPT_THREAD
1362 java_thread = T1;
1363 get_thread(java_thread);
1364 #else
1365 java_thread = TREG;
1366 #endif
1367 }
1368 // we must set sp to zero to clear frame
1369 st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
1370 // must clear fp, so that compiled frames are not confused; it is possible
1371 // that we need it only for debugging
1372 if(clear_fp)
1373 st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_fp_offset()));
1375 if (clear_pc)
1376 st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_pc_offset()));
1377 }
1379 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
1380 bool clear_pc) {
1381 Register thread = TREG;
1382 #ifndef OPT_THREAD
1383 get_thread(thread);
1384 #endif
1385 // we must set sp to zero to clear frame
1386 sd(R0, Address(thread, JavaThread::last_Java_sp_offset()));
1387 // must clear fp, so that compiled frames are not confused; it is
1388 // possible that we need it only for debugging
1389 if (clear_fp) {
1390 sd(R0, Address(thread, JavaThread::last_Java_fp_offset()));
1391 }
1393 if (clear_pc) {
1394 sd(R0, Address(thread, JavaThread::last_Java_pc_offset()));
1395 }
1396 }
1398 // Write serialization page so VM thread can do a pseudo remote membar.
1399 // We use the current thread pointer to calculate a thread specific
1400 // offset to write to within the page. This minimizes bus traffic
1401 // due to cache line collision.
1402 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
1403 move(tmp, thread);
1404 srl(tmp, tmp, os::get_serialize_page_shift_count());
1405 move(AT, (os::vm_page_size() - sizeof(int)));
1406 andr(tmp, tmp, AT);
1407 sw(tmp, Address(tmp, (intptr_t)os::get_memory_serialize_page()));
1408 }
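// Sketch of the slot computation above: each thread hashes its thread
// pointer down to a distinct int-sized offset inside the serialization
// page, so concurrent writers rarely collide on a cache line. (Illustrative
// helper; the shift count and page size really come from os::.)

static inline int serialize_offset_example(uintptr_t thread,
                                           int page_shift, int page_size) {
  return (int)((thread >> page_shift) & (uintptr_t)(page_size - (int)sizeof(int)));
}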
1410 // Calls to C land
1411 //
1412 // When entering C land, the FP and SP of the last Java frame have to be recorded
1413 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
1414 // has to be reset to 0. This is required to allow proper stack traversal.
1415 void MacroAssembler::set_last_Java_frame(Register java_thread,
1416 Register last_java_sp,
1417 Register last_java_fp,
1418 address last_java_pc) {
1419 // determine java_thread register
1420 if (!java_thread->is_valid()) {
1421 #ifndef OPT_THREAD
1422 java_thread = T2;
1423 get_thread(java_thread);
1424 #else
1425 java_thread = TREG;
1426 #endif
1427 }
1428 // determine last_java_sp register
1429 if (!last_java_sp->is_valid()) {
1430 last_java_sp = SP;
1431 }
1433 // last_java_fp is optional
1435 if (last_java_fp->is_valid()) {
1436 st_ptr(last_java_fp, java_thread, in_bytes(JavaThread::last_Java_fp_offset()));
1437 }
1439 // last_java_pc is optional
1441 if (last_java_pc != NULL) {
1442 relocate(relocInfo::internal_pc_type);
1443 patchable_set48(AT, (long)last_java_pc);
1444 st_ptr(AT, java_thread, in_bytes(JavaThread::last_Java_pc_offset()));
1445 }
1446 st_ptr(last_java_sp, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
1447 }
1449 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
1450 Register last_java_fp,
1451 address last_java_pc) {
1452 // determine last_java_sp register
1453 if (!last_java_sp->is_valid()) {
1454 last_java_sp = SP;
1455 }
1457 Register thread = TREG;
1458 #ifndef OPT_THREAD
1459 get_thread(thread);
1460 #endif
1461 // last_java_fp is optional
1462 if (last_java_fp->is_valid()) {
1463 sd(last_java_fp, Address(thread, JavaThread::last_Java_fp_offset()));
1464 }
1466 // last_java_pc is optional
1467 if (last_java_pc != NULL) {
1468 Address java_pc(thread,
1469 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
1470 li(AT, (intptr_t)(last_java_pc));
1471 sd(AT, java_pc);
1472 }
1474 sd(last_java_sp, Address(thread, JavaThread::last_Java_sp_offset()));
1475 }
1477 //////////////////////////////////////////////////////////////////////////////////
1478 #if INCLUDE_ALL_GCS
1480 void MacroAssembler::g1_write_barrier_pre(Register obj,
1481 #ifndef _LP64
1482 Register thread,
1483 #endif
1484 Register tmp,
1485 Register tmp2,
1486 bool tosca_live) {
1487 Unimplemented();
1488 }
1490 void MacroAssembler::g1_write_barrier_post(Register store_addr,
1491 Register new_val,
1492 #ifndef _LP64
1493 Register thread,
1494 #endif
1495 Register tmp,
1496 Register tmp2) {
1498 Unimplemented();
1499 }
1501 #endif // INCLUDE_ALL_GCS
1502 //////////////////////////////////////////////////////////////////////////////////
1505 void MacroAssembler::store_check(Register obj) {
1506 // Does a store check for the oop in register obj. The content of
1507 // register obj is destroyed afterwards.
1508 store_check_part_1(obj);
1509 store_check_part_2(obj);
1510 }
1512 void MacroAssembler::store_check(Register obj, Address dst) {
1513 store_check(obj);
1514 }
1517 // split the store check operation so that other instructions can be scheduled in between
1518 void MacroAssembler::store_check_part_1(Register obj) {
1519 BarrierSet* bs = Universe::heap()->barrier_set();
1520 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
1521 #ifdef _LP64
1522 dsrl(obj, obj, CardTableModRefBS::card_shift);
1523 #else
1524 shr(obj, CardTableModRefBS::card_shift);
1525 #endif
1526 }
1528 void MacroAssembler::store_check_part_2(Register obj) {
1529 BarrierSet* bs = Universe::heap()->barrier_set();
1530 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
1531 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1532 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1534 li(AT, (long)ct->byte_map_base);
1535 #ifdef _LP64
1536 dadd(AT, AT, obj);
1537 #else
1538 add(AT, AT, obj);
1539 #endif
1540 sb(R0, AT, 0);
1541 sync();
1542 }
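// Together the two parts form the usual card-table post-barrier: part 1
// turns the oop address into a card index, part 2 dirties the card byte
// (0 is the dirty value here). The whole barrier in plain C as a sketch
// (card_shift is typically 9, i.e. 512-byte cards):

static inline void card_table_post_barrier_example(uintptr_t obj,
                                                   volatile jbyte* byte_map_base,
                                                   int card_shift) {
  byte_map_base[obj >> card_shift] = 0; // dirty the card covering obj
}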
1544 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
1545 void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
1546 Register t1, Register t2, Label& slow_case) {
1547 assert_different_registers(obj, var_size_in_bytes, t1, t2, AT);
1549 Register end = t2;
1550 #ifndef OPT_THREAD
1551 Register thread = t1;
1552 get_thread(thread);
1553 #else
1554 Register thread = TREG;
1555 #endif
1556 verify_tlab(t1, t2);//blows t1&t2
1558 ld_ptr(obj, thread, in_bytes(JavaThread::tlab_top_offset()));
1560 if (var_size_in_bytes == NOREG) {
1561 // I don't think we need to move con_size_in_bytes to a register first.
1562 // by yjl 8/17/2005
1563 assert(is_simm16(con_size_in_bytes), "fixme by moving imm to a register first");
1564 addi(end, obj, con_size_in_bytes);
1565 } else {
1566 add(end, obj, var_size_in_bytes);
1567 }
1569 ld_ptr(AT, thread, in_bytes(JavaThread::tlab_end_offset()));
1570 sltu(AT, AT, end);
1571 bne_far(AT, R0, slow_case);
1572 delayed()->nop();
1575 // update the tlab top pointer
1576 st_ptr(end, thread, in_bytes(JavaThread::tlab_top_offset()));
1578 // recover var_size_in_bytes if necessary
1579 /*if (var_size_in_bytes == end) {
1580 sub(var_size_in_bytes, end, obj);
1581 }*/
1583 verify_tlab(t1, t2);
1584 }
1586 // Defines obj, preserves var_size_in_bytes
1587 void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
1588 Register t1, Register t2, Label& slow_case) {
1589 assert_different_registers(obj, var_size_in_bytes, t1, AT);
1590 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { //by yyq
1591 // No allocation in the shared eden.
1592 b_far(slow_case);
1593 delayed()->nop();
1594 } else {
1596 #ifndef _LP64
1597 Address heap_top(t1, Assembler::split_low((intptr_t)Universe::heap()->top_addr()));
1598 lui(t1, split_high((intptr_t)Universe::heap()->top_addr()));
1599 #else
1600 Address heap_top(t1);
1601 li(t1, (long)Universe::heap()->top_addr());
1602 #endif
1603 ld_ptr(obj, heap_top);
1605 Register end = t2;
1606 Label retry;
1608 bind(retry);
1609 if (var_size_in_bytes == NOREG) {
1610 // I don't think we need to move con_size_in_bytes to a register first.
1611 assert(is_simm16(con_size_in_bytes), "fixme by moving imm to a register first");
1612 addi(end, obj, con_size_in_bytes);
1613 } else {
1614 add(end, obj, var_size_in_bytes);
1615 }
1616 // if end < obj then we wrapped around => object too long => slow case
1617 sltu(AT, end, obj);
1618 bne_far(AT, R0, slow_case);
1619 delayed()->nop();
1621 li(AT, (long)Universe::heap()->end_addr());
1622 sltu(AT, AT, end);
1623 bne_far(AT, R0, slow_case);
1624 delayed()->nop();
1625 // Compare obj with the top addr, and if still equal, store the new top addr
1626 // (end) at the address of the top addr pointer. On MIPS the cmpxchg helper
1627 // leaves the success flag in AT (the x86 original used ZF and a lock prefix).
1628 //if (os::is_MP()) {
1629 // sync();
1630 //}
1632 // if someone beat us on the allocation, try again, otherwise continue
1633 cmpxchg(end, heap_top, obj);
1634 beq_far(AT, R0, retry); //by yyq
1635 delayed()->nop();
1637 }
1638 }
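// The path above is a CAS bump-pointer loop on the shared heap top. Its
// shape, written with std::atomic purely as an illustration (the real loop
// is generated machine code; returning 0 stands for the slow-case exit):

#include <atomic>

static inline uintptr_t eden_allocate_example(std::atomic<uintptr_t>& top,
                                              uintptr_t heap_end, size_t size) {
  uintptr_t obj = top.load();
  for (;;) {
    uintptr_t end = obj + size;
    if (end < obj || end > heap_end) return 0;            // wraparound or heap full
    if (top.compare_exchange_weak(obj, end)) return obj;  // obj is reloaded on failure
  }
}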
1640 // C2 doesn't invoke this one.
1641 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
1642 Register top = T0;
1643 Register t1 = T1;
1644 /* Jin: tlab_refill() is called in
1646 [c1_Runtime1_mips.cpp] Runtime1::generate_code_for(new_type_array_id);
1648 In generate_code_for(), T2 has been assigned as a register(length), which is used
1649 after calling tlab_refill();
1650 Therefore, tlab_refill() should not use T2.
1652 Source:
1654 Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException
1655 at java.lang.System.arraycopy(Native Method)
1656 at java.util.Arrays.copyOf(Arrays.java:2799) <-- alloc_array
1657 at sun.misc.Resource.getBytes(Resource.java:117)
1658 at java.net.URLClassLoader.defineClass(URLClassLoader.java:273)
1659 at java.net.URLClassLoader.findClass(URLClassLoader.java:205)
1660 at java.lang.ClassLoader.loadClass(ClassLoader.java:321)
1661 */
1662 Register t2 = T9;
1663 Register t3 = T3;
1664 Register thread_reg = T8;
1665 Label do_refill, discard_tlab;
1666 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { //by yyq
1667 // No allocation in the shared eden.
1668 b(slow_case);
1669 delayed()->nop();
1670 }
1672 get_thread(thread_reg);
1674 ld_ptr(top, thread_reg, in_bytes(JavaThread::tlab_top_offset()));
1675 ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_end_offset()));
1677 // calculate amount of free space
1678 sub(t1, t1, top);
1679 shr(t1, LogHeapWordSize);
1681 // Retain tlab and allocate object in shared space if
1682 // the amount free in the tlab is too large to discard.
1683 ld_ptr(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
1684 slt(AT, t2, t1);
1685 beq(AT, R0, discard_tlab);
1686 delayed()->nop();
1688 // Retain
1690 #ifndef _LP64
1691 move(AT, ThreadLocalAllocBuffer::refill_waste_limit_increment());
1692 #else
1693 li(AT, ThreadLocalAllocBuffer::refill_waste_limit_increment());
1694 #endif
1695 add(t2, t2, AT);
1696 st_ptr(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
1698 if (TLABStats) {
1699 // increment number of slow_allocations
1700 lw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset()));
1701 addiu(AT, AT, 1);
1702 sw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset()));
1703 }
1704 b(try_eden);
1705 delayed()->nop();
1707 bind(discard_tlab);
1708 if (TLABStats) {
1709 // increment number of refills
1710 lw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset()));
1711 addi(AT, AT, 1);
1712 sw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset()));
1713 // accumulate wastage -- t1 is amount free in tlab
1714 lw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
1715 add(AT, AT, t1);
1716 sw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
1717 }
1719 // if tlab is currently allocated (top or end != null) then
1720 // fill [top, end + alignment_reserve) with array object
1721 beq(top, R0, do_refill);
1722 delayed()->nop();
1724 // set up the mark word
1725 li(AT, (long)markOopDesc::prototype()->copy_set_hash(0x2));
1726 st_ptr(AT, top, oopDesc::mark_offset_in_bytes());
1728 // set the length to the remaining space
1729 addi(t1, t1, - typeArrayOopDesc::header_size(T_INT));
1730 addi(t1, t1, ThreadLocalAllocBuffer::alignment_reserve());
1731 shl(t1, log2_intptr(HeapWordSize/sizeof(jint)));
1732 sw(t1, top, arrayOopDesc::length_offset_in_bytes());
1734 // set klass to intArrayKlass
1735 #ifndef _LP64
1736 lui(AT, split_high((intptr_t)Universe::intArrayKlassObj_addr()));
1737 lw(t1, AT, split_low((intptr_t)Universe::intArrayKlassObj_addr()));
1738 #else
1739 li(AT, (intptr_t)Universe::intArrayKlassObj_addr());
1740 ld_ptr(t1, AT, 0);
1741 #endif
1742 //st_ptr(t1, top, oopDesc::klass_offset_in_bytes());
1743 store_klass(top, t1);
1745 // refill the tlab with an eden allocation
1746 bind(do_refill);
1747 ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_size_offset()));
1748 shl(t1, LogHeapWordSize);
1749 // add object_size ??
1750 eden_allocate(top, t1, 0, t2, t3, slow_case);
1752 // Check that t1 was preserved in eden_allocate.
1753 #ifdef ASSERT
1754 if (UseTLAB) {
1755 Label ok;
1756 assert_different_registers(thread_reg, t1);
1757 ld_ptr(AT, thread_reg, in_bytes(JavaThread::tlab_size_offset()));
1758 shl(AT, LogHeapWordSize);
1759 beq(AT, t1, ok);
1760 delayed()->nop();
1761 stop("assert(t1 != tlab size)");
1762 should_not_reach_here();
1764 bind(ok);
1765 }
1766 #endif
1767 st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_start_offset()));
1768 st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_top_offset()));
1769 add(top, top, t1);
1770 addi(top, top, - ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
1771 st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_end_offset()));
1772 verify_tlab(t1, t2);
1773 b(retry);
1774 delayed()->nop();
1775 }
1777 static const double pi_4 = 0.7853981633974483;
1779 // the x86 version is too clumsy; I don't think we need that fuss. Maybe I'm wrong, FIXME
1780 // must get the argument (a double) in F12/F13
1781 //void MacroAssembler::trigfunc(char trig, bool preserve_cpu_regs, int num_fpu_regs_in_use) {
1782 //We need to preserve the registers which may be modified during the call. @Jerome
1783 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
1784 //save all modified registers here
1785 // if (preserve_cpu_regs) {
1786 // }
1787 //FIXME: in the disassembly of trigfunc, only V0, V1, T9, SP and RA are used, so we only save V0, V1, T9
1788 pushad();
1789 //we should preserve the stack space before we call
1790 addi(SP, SP, -wordSize * 2);
1791 switch (trig){
1792 case 's' :
1793 call( CAST_FROM_FN_PTR(address, SharedRuntime::dsin), relocInfo::runtime_call_type );
1794 delayed()->nop();
1795 break;
1796 case 'c':
1797 call( CAST_FROM_FN_PTR(address, SharedRuntime::dcos), relocInfo::runtime_call_type );
1798 delayed()->nop();
1799 break;
1800 case 't':
1801 call( CAST_FROM_FN_PTR(address, SharedRuntime::dtan), relocInfo::runtime_call_type );
1802 delayed()->nop();
1803 break;
1804 default:assert (false, "bad intrinsic");
1805 break;
1807 }
1809 addi(SP, SP, wordSize * 2);
1810 popad();
1811 // if (preserve_cpu_regs) {
1812 // }
1813 }
1815 #ifdef _LP64
1816 void MacroAssembler::li(Register rd, long imm) {
1817 if (imm <= max_jint && imm >= min_jint) {
1818 li32(rd, (int)imm);
1819 } else if (julong(imm) <= 0xFFFFFFFF) {
1820 assert_not_delayed();
1821 // lui sign-extends, so we can't use that.
1822 ori(rd, R0, julong(imm) >> 16);
1823 dsll(rd, rd, 16);
1824 ori(rd, rd, split_low(imm));
1825 //aoqi_test
1826 //} else if ((imm > 0) && ((imm >> 48) == 0)) {
1827 } else if ((imm > 0) && is_simm16(imm >> 32)) {
1828 /* A 48-bit address */
1829 li48(rd, imm);
1830 } else {
1831 li64(rd, imm);
1832 }
1833 }
1834 #else
1835 void MacroAssembler::li(Register rd, long imm) {
1836 li32(rd, (int)imm);
1837 }
1838 #endif
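// How li() picks an encoding, with illustrative constants:
//   0x0000000000001234  -> li32: fits in 32 bits (one or two instructions)
//   0x00000000F0001234  -> zero-extended 32-bit: ori; dsll 16; ori
//   0x0000123456789ABC  -> li48 (imm > 0 and upper bits fit simm16): 4 instructions
//   0xF000123456789ABC  -> li64: 6 instructions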
1840 void MacroAssembler::li32(Register reg, int imm) {
1841 if (is_simm16(imm)) {
1842 /* Jin: for imm < 0, we should use addi instead of addiu.
1843 *
1844 * java.lang.StringCoding$StringDecoder.decode(jobject, jint, jint)
1845 *
1846 * 78 move [int:-1|I] [a0|I]
1847 * : daddi a0, zero, 0xffffffff (correct)
1848 * : daddiu a0, zero, 0xffffffff (incorrect)
1849 */
1850 if (imm >= 0)
1851 addiu(reg, R0, imm);
1852 else
1853 addi(reg, R0, imm);
1854 } else {
1855 lui(reg, split_low(imm >> 16));
1856 if (split_low(imm))
1857 ori(reg, reg, split_low(imm));
1858 }
1859 }
1861 #ifdef _LP64
1862 void MacroAssembler::set64(Register d, jlong value) {
1863 assert_not_delayed();
1865 int hi = (int)(value >> 32);
1866 int lo = (int)(value & ~0);
1868 if (value == lo) { // 32-bit integer
1869 if (is_simm16(value)) {
1870 daddiu(d, R0, value);
1871 } else {
1872 lui(d, split_low(value >> 16));
1873 if (split_low(value)) {
1874 ori(d, d, split_low(value));
1875 }
1876 }
1877 } else if (hi == 0) { // hardware zero-extends to upper 32
1878 ori(d, R0, julong(value) >> 16);
1879 dsll(d, d, 16);
1880 if (split_low(value)) {
1881 ori(d, d, split_low(value));
1882 }
1883 } else if ((value > 0) && is_simm16(value >> 32)) { // li48
1884 // 4 insts
1885 li48(d, value);
1886 } else { // li64
1887 // 6 insts
1888 li64(d, value);
1889 }
1890 }
1893 int MacroAssembler::insts_for_set64(jlong value) {
1894 int hi = (int)(value >> 32);
1895 int lo = (int)(value & ~0);
1897 int count = 0;
1899 if (value == lo) { // 32-bit integer
1900 if (is_simm16(value)) {
1901 //daddiu(d, R0, value);
1902 count++;
1903 } else {
1904 //lui(d, split_low(value >> 16));
1905 count++;
1906 if (split_low(value)) {
1907 //ori(d, d, split_low(value));
1908 count++;
1909 }
1910 }
1911 } else if (hi == 0) { // hardware zero-extends to upper 32
1912 //ori(d, R0, julong(value) >> 16);
1913 //dsll(d, d, 16);
1914 count += 2;
1915 if (split_low(value)) {
1916 //ori(d, d, split_low(value));
1917 count++;
1918 }
1919 } else if ((value > 0) && is_simm16(value >> 32)) { // li48
1920 // 4 insts
1921 //li48(d, value);
1922 count += 4;
1923 } else { // li64
1924 // 6 insts
1925 //li64(d, value);
1926 count += 6;
1927 }
1929 return count;
1930 }
1932 void MacroAssembler::patchable_set48(Register d, jlong value) {
1933 assert_not_delayed();
1935 int hi = (int)(value >> 32);
1936 int lo = (int)(value & ~0);
1938 int count = 0;
1940 if (value == lo) { // 32-bit integer
1941 if (is_simm16(value)) {
1942 daddiu(d, R0, value);
1943 count += 1;
1944 } else {
1945 lui(d, split_low(value >> 16));
1946 count += 1;
1947 if (split_low(value)) {
1948 ori(d, d, split_low(value));
1949 count += 1;
1950 }
1951 }
1952 } else if (hi == 0) { // hardware zero-extends to upper 32
1953 ori(d, R0, julong(value) >> 16);
1954 dsll(d, d, 16);
1955 count += 2;
1956 if (split_low(value)) {
1957 ori(d, d, split_low(value));
1958 count += 1;
1959 }
1960 } else if ((value > 0) && is_simm16(value >> 32)) { // li48
1961 // 4 insts
1962 li48(d, value);
1963 count += 4;
1964 } else { // li64
1965 tty->print_cr("value = 0x%lx", value);
1966 guarantee(false, "Not supported yet !");
1967 }
1969 for (; count < 4; count++) {
1970 nop();
1971 }
1972 }
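// Note: patchable_set48 pads with nops so every call site occupies exactly
// four instruction slots; a later patch can then rewrite the constant in
// place without changing the code size. A hedged sketch of the invariant:
//   address before = pc();
//   patchable_set48(T9, imm);
//   // pc() is always 4 * BytesPerInstWord later, whatever encoding was chosen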
1974 void MacroAssembler::patchable_set32(Register d, jlong value) {
1975 assert_not_delayed();
1977 int hi = (int)(value >> 32);
1978 int lo = (int)(value & ~0);
1980 int count = 0;
1982 if (value == lo) { // 32-bit integer
1983 if (is_simm16(value)) {
1984 daddiu(d, R0, value);
1985 count += 1;
1986 } else {
1987 lui(d, split_low(value >> 16));
1988 count += 1;
1989 if (split_low(value)) {
1990 ori(d, d, split_low(value));
1991 count += 1;
1992 }
1993 }
1994 } else if (hi == 0) { // hardware zero-extends to upper 32
1995 ori(d, R0, julong(value) >> 16);
1996 dsll(d, d, 16);
1997 count += 2;
1998 if (split_low(value)) {
1999 ori(d, d, split_low(value));
2000 count += 1;
2001 }
2002 } else {
2003 tty->print_cr("value = 0x%lx", value);
2004 guarantee(false, "Not supported yet !");
2005 }
2007 for (; count < 3; count++) {
2008 nop();
2009 }
2010 }
2012 void MacroAssembler::patchable_call32(Register d, jlong value) {
2013 assert_not_delayed();
2015 int hi = (int)(value >> 32);
2016 int lo = (int)(value & ~0);
2018 int count = 0;
2020 if (value == lo) { // 32-bit integer
2021 if (is_simm16(value)) {
2022 daddiu(d, R0, value);
2023 count += 1;
2024 } else {
2025 lui(d, split_low(value >> 16));
2026 count += 1;
2027 if (split_low(value)) {
2028 ori(d, d, split_low(value));
2029 count += 1;
2030 }
2031 }
2032 } else {
2033 tty->print_cr("value = 0x%lx", value);
2034 guarantee(false, "Not supported yet !");
2035 }
2037 for (; count < 2; count++) {
2038 nop();
2039 }
2040 }
2042 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
2043 assert(UseCompressedClassPointers, "should only be used for compressed header");
2044 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
2046 int klass_index = oop_recorder()->find_index(k);
2047 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
2048 long narrowKlass = (long)Klass::encode_klass(k);
2050 relocate(rspec, Assembler::narrow_oop_operand);
2051 patchable_set48(dst, narrowKlass);
2052 }
2055 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
2056 assert(UseCompressedOops, "should only be used for compressed header");
2057 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
2059 int oop_index = oop_recorder()->find_index(obj);
2060 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2062 relocate(rspec, Assembler::narrow_oop_operand);
2063 patchable_set48(dst, oop_index);
2064 }
2066 void MacroAssembler::li64(Register rd, long imm) {
2067 assert_not_delayed();
2068 lui(rd, imm >> 48);
2069 ori(rd, rd, split_low(imm >> 32));
2070 dsll(rd, rd, 16);
2071 ori(rd, rd, split_low(imm >> 16));
2072 dsll(rd, rd, 16);
2073 ori(rd, rd, split_low(imm));
2074 }
2076 void MacroAssembler::li48(Register rd, long imm) {
2077 assert_not_delayed();
2078 assert(is_simm16(imm >> 32), "Not a 48-bit address");
2079 lui(rd, imm >> 32);
2080 ori(rd, rd, split_low(imm >> 16));
2081 dsll(rd, rd, 16);
2082 ori(rd, rd, split_low(imm));
2083 }
2084 #endif
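// Worked example (illustrative): li48(rd, 0x0000123456789ABC) expands to
//   lui  rd, 0x1234        // rd = 0x0000000012340000
//   ori  rd, rd, 0x5678    // rd = 0x0000000012345678
//   dsll rd, rd, 16        // rd = 0x0000123456780000
//   ori  rd, rd, 0x9ABC    // rd = 0x0000123456789ABC
// li64 covers the remaining cases with a lui plus ori/dsll pairs, 6 instructions.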
2085 // NOTE: we do not push an eax equivalent here as i486 does;
2086 // the x86 version saves eax because it uses eax as the jump register.
2087 void MacroAssembler::verify_oop(Register reg, const char* s) {
2088 /*
2089 if (!VerifyOops) return;
2091 // Pass register number to verify_oop_subroutine
2092 char* b = new char[strlen(s) + 50];
2093 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
2094 push(rax); // save rax,
2095 push(reg); // pass register argument
2096 ExternalAddress buffer((address) b);
2097 // avoid using pushptr, as it modifies scratch registers
2098 // and our contract is not to modify anything
2099 movptr(rax, buffer.addr());
2100 push(rax);
2101 // call indirectly to solve generation ordering problem
2102 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2103 call(rax);
2104 */
2105 if (!VerifyOops) return;
2106 const char * b = NULL;
2107 stringStream ss;
2108 ss.print("verify_oop: %s: %s", reg->name(), s);
2109 b = code_string(ss.as_string());
2110 #ifdef _LP64
2111 pushad();
2112 move(A1, reg);
2113 li(A0, (long)b);
2114 li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address());
2115 ld(T9, AT, 0);
2116 jalr(T9);
2117 delayed()->nop();
2118 popad();
2119 #else
2120 // Pass register number to verify_oop_subroutine
2121 sw(T0, SP, - wordSize);
2122 sw(T1, SP, - 2*wordSize);
2123 sw(RA, SP, - 3*wordSize);
2124 sw(A0, SP ,- 4*wordSize);
2125 sw(A1, SP ,- 5*wordSize);
2126 sw(AT, SP ,- 6*wordSize);
2127 sw(T9, SP ,- 7*wordSize);
2128 addiu(SP, SP, - 7 * wordSize);
2129 move(A1, reg);
2130 li(A0, (long)b);
2131 // call indirectly to solve generation ordering problem
2132 li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address());
2133 lw(T9, AT, 0);
2134 jalr(T9);
2135 delayed()->nop();
2136 lw(T0, SP, 6* wordSize);
2137 lw(T1, SP, 5* wordSize);
2138 lw(RA, SP, 4* wordSize);
2139 lw(A0, SP, 3* wordSize);
2140 lw(A1, SP, 2* wordSize);
2141 lw(AT, SP, 1* wordSize);
2142 lw(T9, SP, 0* wordSize);
2143 addiu(SP, SP, 7 * wordSize);
2144 #endif
2145 }
2148 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
2149 if (!VerifyOops) {
2150 nop();
2151 return;
2152 }
2153 // Pass register number to verify_oop_subroutine
2154 const char * b = NULL;
2155 stringStream ss;
2156 ss.print("verify_oop_addr: %s", s);
2157 b = code_string(ss.as_string());
2159 st_ptr(T0, SP, - wordSize);
2160 st_ptr(T1, SP, - 2*wordSize);
2161 st_ptr(RA, SP, - 3*wordSize);
2162 st_ptr(A0, SP, - 4*wordSize);
2163 st_ptr(A1, SP, - 5*wordSize);
2164 st_ptr(AT, SP, - 6*wordSize);
2165 st_ptr(T9, SP, - 7*wordSize);
2166 ld_ptr(A1, addr); // addr may use SP, so load from it before change SP
2167 addiu(SP, SP, - 7 * wordSize);
2169 li(A0, (long)b);
2170 // call indirectly to solve generation ordering problem
2171 li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address());
2172 ld_ptr(T9, AT, 0);
2173 jalr(T9);
2174 delayed()->nop();
2175 ld_ptr(T0, SP, 6* wordSize);
2176 ld_ptr(T1, SP, 5* wordSize);
2177 ld_ptr(RA, SP, 4* wordSize);
2178 ld_ptr(A0, SP, 3* wordSize);
2179 ld_ptr(A1, SP, 2* wordSize);
2180 ld_ptr(AT, SP, 1* wordSize);
2181 ld_ptr(T9, SP, 0* wordSize);
2182 addiu(SP, SP, 7 * wordSize);
2183 }
2185 // used registers : T0, T1
2186 void MacroAssembler::verify_oop_subroutine() {
2187 // RA: ra
2188 // A0: char* error message
2189 // A1: oop object to verify
2191 Label exit, error;
2192 // increment counter
2193 li(T0, (long)StubRoutines::verify_oop_count_addr());
2194 lw(AT, T0, 0);
2195 #ifdef _LP64
2196 daddi(AT, AT, 1);
2197 #else
2198 addi(AT, AT, 1);
2199 #endif
2200 sw(AT, T0, 0);
2202 // make sure object is 'reasonable'
2203 beq(A1, R0, exit); // if obj is NULL it is ok
2204 delayed()->nop();
2206 // Check if the oop is in the right area of memory
2207 //const int oop_mask = Universe::verify_oop_mask();
2208 //const int oop_bits = Universe::verify_oop_bits();
2209 const uintptr_t oop_mask = Universe::verify_oop_mask();
2210 const uintptr_t oop_bits = Universe::verify_oop_bits();
2211 li(AT, oop_mask);
2212 andr(T0, A1, AT);
2213 li(AT, oop_bits);
2214 bne(T0, AT, error);
2215 delayed()->nop();
2217 // make sure klass is 'reasonable'
2218 //add for compressedoops
2219 reinit_heapbase();
2220 //add for compressedoops
2221 load_klass(T0, A1);
2222 beq(T0, R0, error); // if klass is NULL it is broken
2223 delayed()->nop();
2224 #if 0
2225 //FIXME:wuhui.
2226 // Check if the klass is in the right area of memory
2227 //const int klass_mask = Universe::verify_klass_mask();
2228 //const int klass_bits = Universe::verify_klass_bits();
2229 const uintptr_t klass_mask = Universe::verify_klass_mask();
2230 const uintptr_t klass_bits = Universe::verify_klass_bits();
2232 li(AT, klass_mask);
2233 andr(T1, T0, AT);
2234 li(AT, klass_bits);
2235 bne(T1, AT, error);
2236 delayed()->nop();
2237 // make sure klass' klass is 'reasonable'
2238 //add for compressedoops
2239 load_klass(T0, T0);
2240 beq(T0, R0, error); // if klass' klass is NULL it is broken
2241 delayed()->nop();
2243 li(AT, klass_mask);
2244 andr(T1, T0, AT);
2245 li(AT, klass_bits);
2246 bne(T1, AT, error);
2247 delayed()->nop(); // if klass not in right area of memory it is broken too.
2248 #endif
2249 // return if everything seems ok
2250 bind(exit);
2252 jr(RA);
2253 delayed()->nop();
2255 // handle errors
2256 bind(error);
2257 pushad();
2258 #ifndef _LP64
2259 addi(SP, SP, (-1) * wordSize);
2260 #endif
2261 call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
2262 delayed()->nop();
2263 #ifndef _LP64
2264 addiu(SP, SP, 1 * wordSize);
2265 #endif
2266 popad();
2267 jr(RA);
2268 delayed()->nop();
2269 }
2271 void MacroAssembler::verify_tlab(Register t1, Register t2) {
2272 #ifdef ASSERT
2273 assert_different_registers(t1, t2, AT);
2274 if (UseTLAB && VerifyOops) {
2275 Label next, ok;
2277 get_thread(t1);
2279 ld_ptr(t2, t1, in_bytes(JavaThread::tlab_top_offset()));
2280 ld_ptr(AT, t1, in_bytes(JavaThread::tlab_start_offset()));
2281 sltu(AT, t2, AT);
2282 beq(AT, R0, next);
2283 delayed()->nop();
2285 stop("assert(top >= start)");
2287 bind(next);
2288 ld_ptr(AT, t1, in_bytes(JavaThread::tlab_end_offset()));
2289 sltu(AT, AT, t2);
2290 beq(AT, R0, ok);
2291 delayed()->nop();
2293 stop("assert(top <= end)");
2295 bind(ok);
2297 }
2298 #endif
2299 }
2300 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
2301 Register tmp,
2302 int offset) {
2303 intptr_t value = *delayed_value_addr;
2304 if (value != 0)
2305 return RegisterOrConstant(value + offset);
2306 AddressLiteral a(delayed_value_addr);
2307 // load indirectly to solve generation ordering problem
2308 //movptr(tmp, ExternalAddress((address) delayed_value_addr));
2309 //ld(tmp, a);
2310 if (offset != 0)
2311 daddi(tmp,tmp, offset);
2313 return RegisterOrConstant(tmp);
2314 }
2316 void MacroAssembler::hswap(Register reg) {
2317 // byte-swap a signed 16-bit halfword; the result is sign-extended
2318 //andi(reg, reg, 0xffff);
2319 srl(AT, reg, 8);
2320 sll(reg, reg, 24);
2321 sra(reg, reg, 16);
2322 orr(reg, reg, AT);
2323 }
2325 void MacroAssembler::huswap(Register reg) {
2326 #ifdef _LP64
2327 dsrl(AT, reg, 8);
2328 dsll(reg, reg, 24);
2329 dsrl(reg, reg, 16);
2330 orr(reg, reg, AT);
2331 andi(reg, reg, 0xffff);
2332 #else
2333 //andi(reg, reg, 0xffff);
2334 srl(AT, reg, 8);
2335 sll(reg, reg, 24);
2336 srl(reg, reg, 16);
2337 orr(reg, reg, AT);
2338 #endif
2339 }
2341 // Byte-swap a 32-bit value using only one extra register (AT).
2342 // 32 bits
2343 void MacroAssembler::swap(Register reg) {
2344 srl(AT, reg, 8);
2345 sll(reg, reg, 24);
2346 orr(reg, reg, AT);
2347 //reg : 4 1 2 3
2348 srl(AT, AT, 16);
2349 xorr(AT, AT, reg);
2350 andi(AT, AT, 0xff);
2351 //AT : 0 0 0 1^3);
2352 xorr(reg, reg, AT);
2353 //reg : 4 1 2 1
2354 sll(AT, AT, 16);
2355 xorr(reg, reg, AT);
2356 //reg : 4 3 2 1
2357 }
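// Worked example (bytes shown MSB..LSB): reg = 0x01020304
//   srl AT, reg, 8 ; sll reg, reg, 24 ; orr  -> reg = 0x04010203  ("4 1 2 3")
//   srl AT, AT, 16 ; xorr ; andi 0xff        -> AT  = 0x02        (1^3)
//   xorr reg, reg, AT                        -> reg = 0x04010201  ("4 1 2 1")
//   sll AT, AT, 16 ; xorr                    -> reg = 0x04030201  ("4 3 2 1")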
2359 #ifdef _LP64
2361 /* do 32-bit CAS using MIPS64 lld/scd
2363 Jin: cas_int should only compare 32-bits of the memory value.
2364 However, lld/scd will do 64-bit operation, which violates the intention of cas_int.
2365 To simulate a 32-bit atomic operation, the value loaded with LLD should be split into
2366 two halves, and only the low 32 bits are compared. If they are equal, the low 32 bits of newval,
2367 combined with the high 32 bits of the memory value, are stored together with SCD.
2369 Example:
2371 double d = 3.1415926;
2372 System.err.println("hello" + d);
2374 sun.misc.FloatingDecimal$1.<init>()
2375 |
2376 `- java.util.concurrent.atomic.AtomicInteger::compareAndSet()
2378 38 cas_int [a7a7|J] [a0|I] [a6|I]
2379 // a0: 0xffffffffe8ea9f63 pc: 0x55647f3354
2380 // a6: 0x4ab325aa
2382 again:
2383 0x00000055647f3c5c: lld at, 0x0(a7) ; 64-bit load, "0xe8ea9f63"
2385 0x00000055647f3c60: sll t9, at, 0 ; t9: low-32 bits (sign extended)
2386 0x00000055647f3c64: dsrl32 t8, at, 0 ; t8: high-32 bits
2387 0x00000055647f3c68: dsll32 t8, t8, 0
2388 0x00000055647f3c6c: bne t9, a0, 0x00000055647f3c9c ; goto nequal
2389 0x00000055647f3c70: sll zero, zero, 0
2391 0x00000055647f3c74: ori v1, zero, 0xffffffff ; v1: low-32 bits of newval (sign unextended)
2392 0x00000055647f3c78: dsll v1, v1, 16 ; v1 = a6 & 0xFFFFFFFF;
2393 0x00000055647f3c7c: ori v1, v1, 0xffffffff
2394 0x00000055647f3c80: and v1, a6, v1
2395 0x00000055647f3c84: or at, t8, v1
2396 0x00000055647f3c88: scd at, 0x0(a7)
2397 0x00000055647f3c8c: beq at, zero, 0x00000055647f3c5c ; goto again
2398 0x00000055647f3c90: sll zero, zero, 0
2399 0x00000055647f3c94: beq zero, zero, 0x00000055647f45ac ; goto done
2400 0x00000055647f3c98: sll zero, zero, 0
2401 nequal:
2402 0x00000055647f45a4: dadd a0, t9, zero
2403 0x00000055647f45a8: dadd at, zero, zero
2404 done:
2405 */
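/* A hedged C-level sketch of the scheme described above (illustrative
   pseudo-code, not the emitted instructions):

     bool cas32_via_lld_scd(volatile uint64_t* dest, int32_t expected, int32_t newval) {
       for (;;) {
         uint64_t old64 = lld(dest);                    // 64-bit load-linked
         if ((int32_t)old64 != expected) return false;  // compare low half only
         uint64_t new64 = (old64 & 0xFFFFFFFF00000000ULL)
                        | (uint64_t)(uint32_t)newval;   // keep memory's high half
         if (scd(dest, new64)) return true;             // retry if SC fails
       }
     }
*/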
2407 void MacroAssembler::cmpxchg32(Register x_reg, Address dest, Register c_reg) {
2408 /* 2012/11/11 Jin: MIPS64 can use ll/sc for 32-bit atomic memory access */
2409 Label done, again, nequal;
2411 bind(again);
2413 if(!Use3A2000) sync();
2414 ll(AT, dest);
2415 bne(AT, c_reg, nequal);
2416 delayed()->nop();
2418 move(AT, x_reg);
2419 sc(AT, dest);
2420 beq(AT, R0, again);
2421 delayed()->nop();
2422 b(done);
2423 delayed()->nop();
2425 // not xchged
2426 bind(nequal);
2427 sync();
2428 move(c_reg, AT);
2429 move(AT, R0);
2431 bind(done);
2432 }
2433 #endif // _LP64 (cmpxchg32)
2435 void MacroAssembler::cmpxchg(Register x_reg, Address dest, Register c_reg) {
2436 Label done, again, nequal;
2438 bind(again);
2439 #ifdef _LP64
2440 if(!Use3A2000) sync();
2441 lld(AT, dest);
2442 #else
2443 if(!Use3A2000) sync();
2444 ll(AT, dest);
2445 #endif
2446 bne(AT, c_reg, nequal);
2447 delayed()->nop();
2449 move(AT, x_reg);
2450 #ifdef _LP64
2451 scd(AT, dest);
2452 #else
2453 sc(AT, dest);
2454 #endif
2455 beq(AT, R0, again);
2456 delayed()->nop();
2457 b(done);
2458 delayed()->nop();
2460 // not xchged
2461 bind(nequal);
2462 sync();
2463 move(c_reg, AT);
2464 move(AT, R0);
2466 bind(done);
2467 }
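// Semantics of cmpxchg above, as a hedged sketch; AT carries the result flag
// that Fast_Lock/Fast_Unlock consume:
//   if (*dest == c_reg) { *dest = x_reg; AT = 1; }   // exchanged
//   else { c_reg = *dest; AT = 0; }                  // not exchanged, observed value out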
2469 void MacroAssembler::cmpxchg8(Register x_regLo, Register x_regHi, Address dest, Register c_regLo, Register c_regHi) {
2470 Label done, again, nequal;
2472 Register x_reg = x_regLo;
2473 dsll32(x_regHi, x_regHi, 0);
2474 dsll32(x_regLo, x_regLo, 0);
2475 dsrl32(x_regLo, x_regLo, 0);
2476 orr(x_reg, x_regLo, x_regHi);
2478 Register c_reg = c_regLo;
2479 dsll32(c_regHi, c_regHi, 0);
2480 dsll32(c_regLo, c_regLo, 0);
2481 dsrl32(c_regLo, c_regLo, 0);
2482 orr(c_reg, c_regLo, c_regHi);
2484 bind(again);
2486 if(!Use3A2000) sync();
2487 lld(AT, dest);
2488 bne(AT, c_reg, nequal);
2489 delayed()->nop();
2491 //move(AT, x_reg);
2492 dadd(AT, x_reg, R0);
2493 scd(AT, dest);
2494 beq(AT, R0, again);
2495 delayed()->nop();
2496 b(done);
2497 delayed()->nop();
2499 // not xchged
2500 bind(nequal);
2501 sync();
2502 //move(c_reg, AT);
2503 //move(AT, R0);
2504 dadd(c_reg, AT, R0);
2505 dadd(AT, R0, R0);
2506 bind(done);
2507 }
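// The dsll32/dsrl32 prologue packs each (hi, lo) register pair into a single
// 64-bit value before the lld/scd loop, conceptually:
//   x_reg = ((uint64_t)x_regHi << 32) | (uint64_t)(uint32_t)x_regLo;
//   c_reg = ((uint64_t)c_regHi << 32) | (uint64_t)(uint32_t)c_regLo;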
2509 // the three registers must be different
2510 void MacroAssembler::rem_s(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) {
2511 assert_different_registers(tmp, fs, ft);
2512 div_s(tmp, fs, ft);
2513 trunc_l_s(tmp, tmp);
2514 cvt_s_l(tmp, tmp);
2515 mul_s(tmp, tmp, ft);
2516 sub_s(fd, fs, tmp);
2517 }
2519 // the three registers must be different
2520 void MacroAssembler::rem_d(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) {
2521 assert_different_registers(tmp, fs, ft);
2522 div_d(tmp, fs, ft);
2523 trunc_l_d(tmp, tmp);
2524 cvt_d_l(tmp, tmp);
2525 mul_d(tmp, tmp, ft);
2526 sub_d(fd, fs, tmp);
2527 }
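// Both helpers compute a truncated-division remainder, conceptually
//   fd = fs - trunc(fs / ft) * ft
// which matches Java's % on floating-point values in the common case
// (trunc_l_* goes through a 64-bit integer, so enormous quotients are not
// representable this way).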
2529 // Fast_Lock and Fast_Unlock used by C2
2531 // Because the transitions from emitted code to the runtime
2532 // monitorenter/exit helper stubs are so slow it's critical that
2533 // we inline both the stack-locking fast-path and the inflated fast path.
2534 //
2535 // See also: cmpFastLock and cmpFastUnlock.
2536 //
2537 // What follows is a specialized inline transliteration of the code
2538 // in slow_enter() and slow_exit(). If we're concerned about I$ bloat
2539 // another option would be to emit TrySlowEnter and TrySlowExit methods
2540 // at startup-time. These methods would accept arguments as
2541 // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
2542 // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
2543 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
2544 // In practice, however, the # of lock sites is bounded and is usually small.
2545 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
2546 // if the processor uses simple bimodal branch predictors keyed by EIP
2547 // Since the helper routines would be called from multiple synchronization
2548 // sites.
2549 //
2550 // An even better approach would be write "MonitorEnter()" and "MonitorExit()"
2551 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
2552 // to those specialized methods. That'd give us a mostly platform-independent
2553 // implementation that the JITs could optimize and inline at their pleasure.
2554 // Done correctly, the only time we'd need to cross into native code would be
2555 // to park() or unpark() threads. We'd also need a few more unsafe operators
2556 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
2557 // (b) explicit barriers or fence operations.
2558 //
2559 // TODO:
2560 //
2561 // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
2562 // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
2563 // Given TLAB allocation, Self is usually manifested in a register, so passing it into
2564 // the lock operators would typically be faster than reifying Self.
2565 //
2566 // * Ideally I'd define the primitives as:
2567 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
2568 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
2569 // Unfortunately ADLC bugs prevent us from expressing the ideal form.
2570 // Instead, we're stuck with rather awkward and brittle register assignments below.
2571 // Furthermore the register assignments are overconstrained, possibly resulting in
2572 // sub-optimal code near the synchronization site.
2573 //
2574 // * Eliminate the sp-proximity tests and just use "== Self" tests instead.
2575 // Alternately, use a better sp-proximity test.
2576 //
2577 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
2578 // Either one is sufficient to uniquely identify a thread.
2579 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
2580 //
2581 // * Intrinsify notify() and notifyAll() for the common cases where the
2582 // object is locked by the calling thread but the waitlist is empty.
2583 // avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
2584 //
2585 // * use jccb and jmpb instead of jcc and jmp to improve code density.
2586 // But beware of excessive branch density on AMD Opterons.
2587 //
2588 // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
2589 // or failure of the fast-path. If the fast-path fails then we pass
2590 // control to the slow-path, typically in C. In Fast_Lock and
2591 // Fast_Unlock we often branch to DONE_LABEL, just to find that C2
2592 // will emit a conditional branch immediately after the node.
2593 // So we have branches to branches and lots of ICC.ZF games.
2594 // Instead, it might be better to have C2 pass a "FailureLabel"
2595 // into Fast_Lock and Fast_Unlock. In the case of success, control
2596 // will drop through the node. ICC.ZF is undefined at exit.
2597 // In the case of failure, the node will branch directly to the
2598 // FailureLabel
2601 // obj: object to lock
2602 // box: on-stack box address (displaced header location) - KILLED
2603 // rax,: tmp -- KILLED
2604 // scr: tmp -- KILLED
2605 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg) {
2607 // Ensure the register assignments are disjoint
2608 guarantee (objReg != boxReg, "") ;
2609 guarantee (objReg != tmpReg, "") ;
2610 guarantee (objReg != scrReg, "") ;
2611 guarantee (boxReg != tmpReg, "") ;
2612 guarantee (boxReg != scrReg, "") ;
2615 block_comment("FastLock");
2620 if (PrintBiasedLockingStatistics) {
2621 push(tmpReg);
2622 atomic_inc32((address)BiasedLocking::total_entry_count_addr(), 1, AT, tmpReg);
2623 pop(tmpReg);
2624 }
2626 if (EmitSync & 1) {
2627 move(AT, 0x0);
2628 return;
2629 } else
2630 if (EmitSync & 2) {
2631 Label DONE_LABEL ;
2632 if (UseBiasedLocking) {
2633 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
2634 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
2635 }
2637 ld(tmpReg, Address(objReg, 0)) ; // fetch markword
2638 ori(tmpReg, tmpReg, 0x1);
2639 sd(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS
2641 cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg
2642 bne(AT, R0, DONE_LABEL);
2643 delayed()->nop();
2645 // Recursive locking
2646 dsubu(tmpReg, tmpReg, SP);
2647 li(AT, (7 - os::vm_page_size() ));
2648 andr(tmpReg, tmpReg, AT);
2649 sd(tmpReg, Address(boxReg, 0));
2650 bind(DONE_LABEL) ;
2651 } else {
2652 // Possible cases that we'll encounter in fast_lock
2653 // ------------------------------------------------
2654 // * Inflated
2655 // -- unlocked
2656 // -- Locked
2657 // = by self
2658 // = by other
2659 // * biased
2660 // -- by Self
2661 // -- by other
2662 // * neutral
2663 // * stack-locked
2664 // -- by self
2665 // = sp-proximity test hits
2666 // = sp-proximity test generates false-negative
2667 // -- by other
2668 //
2670 Label IsInflated, DONE_LABEL, PopDone ;
2672 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
2673 // order to reduce the number of conditional branches in the most common cases.
2674 // Beware -- there's a subtle invariant that fetch of the markword
2675 // at [FETCH], below, will never observe a biased encoding (*101b).
2676 // If this invariant is not held we risk exclusion (safety) failure.
2677 if (UseBiasedLocking && !UseOptoBiasInlining) {
2678 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
2679 }
2681 ld(tmpReg, Address(objReg, 0)) ; //Fetch the markword of the object.
2682 andi(AT, tmpReg, markOopDesc::monitor_value);
2683 bne(AT, R0, IsInflated); // inflated vs stack-locked|neutral|bias
2684 delayed()->nop();
2686 // Attempt stack-locking ...
2687 ori (tmpReg, tmpReg, markOopDesc::unlocked_value);
2688 sd(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS
2689 //if (os::is_MP()) {
2690 // sync();
2691 //}
2693 cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg
2694 //AT == 1: unlocked
2696 if (PrintBiasedLockingStatistics) {
2697 Label L;
2698 beq(AT, R0, L);
2699 delayed()->nop();
2700 push(T0);
2701 push(T1);
2702 atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1);
2703 pop(T1);
2704 pop(T0);
2705 bind(L);
2706 }
2707 bne(AT, R0, DONE_LABEL);
2708 delayed()->nop();
2710 // Recursive locking
2711 // The object is stack-locked: markword contains stack pointer to BasicLock.
2712 // Locked by current thread if difference with current SP is less than one page.
2713 dsubu(tmpReg, tmpReg, SP);
2714 li(AT, 7 - os::vm_page_size() );
2715 andr(tmpReg, tmpReg, AT);
2716 sd(tmpReg, Address(boxReg, 0));
2717 if (PrintBiasedLockingStatistics) {
2718 Label L;
2719 // tmpReg == 0 => BiasedLocking::_fast_path_entry_count++
2720 bne(tmpReg, R0, L);
2721 delayed()->nop();
2722 push(T0);
2723 push(T1);
2724 atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1);
2725 pop(T1);
2726 pop(T0);
2727 bind(L);
2728 }
2729 sltiu(AT, tmpReg, 1); /* AT = (tmpReg == 0) ? 1 : 0 */
2731 b(DONE_LABEL) ;
2732 delayed()->nop();
2734 bind(IsInflated) ;
2735 // The object's monitor m is unlocked iff m->owner == NULL,
2736 // otherwise m->owner may contain a thread or a stack address.
2738 // TODO: someday avoid the ST-before-CAS penalty by
2739 // relocating (deferring) the following ST.
2740 // We should also think about trying a CAS without having
2741 // fetched _owner. If the CAS is successful we may
2742 // avoid an RTO->RTS upgrade on the $line.
2743 // Without cast to int32_t a movptr will destroy r10 which is typically obj
2744 li(AT, (int32_t)intptr_t(markOopDesc::unused_mark()));
2745 sd(AT, Address(boxReg, 0));
2747 move(boxReg, tmpReg) ;
2748 ld(tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
2749 // if (m->owner != 0) => AT = 0, goto slow path.
2750 move(AT, R0);
2751 bne(tmpReg, R0, DONE_LABEL);
2752 delayed()->nop();
2754 #ifndef OPT_THREAD
2755 get_thread (TREG) ;
2756 #endif
2757 // It's inflated and appears unlocked
2758 //if (os::is_MP()) {
2759 // sync();
2760 //}
2761 cmpxchg(TREG, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), tmpReg) ;
2762 // Intentional fall-through into DONE_LABEL ...
2765 // DONE_LABEL is a hot target - we'd really like to place it at the
2766 // start of cache line by padding with NOPs.
2767 // See the AMD and Intel software optimization manuals for the
2768 // most efficient "long" NOP encodings.
2769 // Unfortunately none of our alignment mechanisms suffice.
2770 bind(DONE_LABEL);
2772 // At DONE_LABEL the AT is set as follows ...
2773 // Fast_Unlock uses the same protocol.
2774 // AT == 1 -> Success
2775 // AT == 0 -> Failure - force control through the slow-path
2777 // Avoid branch-to-branch on AMD processors
2778 // This appears to be superstition.
2779 if (EmitSync & 32) nop() ;
2781 }
2782 }
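// Condensed control flow of the fast path above (hedged pseudo-code; AT is
// the success flag consumed by C2's cmpFastLock node; CAS(addr, expected, newval)):
//   mark = obj->mark();
//   if (mark & monitor_value) {                        // inflated monitor m
//     AT = (m->owner == NULL) && CAS(&m->owner, NULL, Self);
//   } else if (CAS(&obj->mark, mark | unlocked, box)) {
//     AT = 1;                                          // fresh stack-lock
//   } else {
//     AT = (((mark - SP) & (7 - page_size)) == 0);     // recursive stack-lock
//   }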
2784 // obj: object to unlock
2785 // box: box address (displaced header location), killed. Must be EAX.
2786 // rbx,: killed tmp; cannot be obj nor box.
2787 //
2788 // Some commentary on balanced locking:
2789 //
2790 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
2791 // Methods that don't have provably balanced locking are forced to run in the
2792 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
2793 // The interpreter provides two properties:
2794 // I1: At return-time the interpreter automatically and quietly unlocks any
2795 // objects acquired by the current activation (frame). Recall that the
2796 // interpreter maintains an on-stack list of locks currently held by
2797 // a frame.
2798 // I2: If a method attempts to unlock an object that is not held by the
2799 // frame, the interpreter throws IMSX.
2800 //
2801 // Lets say A(), which has provably balanced locking, acquires O and then calls B().
2802 // B() doesn't have provably balanced locking so it runs in the interpreter.
2803 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
2804 // is still locked by A().
2805 //
2806 // The only other source of unbalanced locking would be JNI. The "Java Native Interface:
2807 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
2808 // should not be unlocked by "normal" java-level locking and vice-versa. The specification
2809 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
2811 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
2813 guarantee (objReg != boxReg, "") ;
2814 guarantee (objReg != tmpReg, "") ;
2815 guarantee (boxReg != tmpReg, "") ;
2819 block_comment("FastUnlock");
2822 if (EmitSync & 4) {
2823 // Disable - inhibit all inlining. Force control through the slow-path
2824 move(AT, 0x0);
2825 return;
2826 } else
2827 if (EmitSync & 8) {
2828 Label DONE_LABEL ;
2829 if (UseBiasedLocking) {
2830 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
2831 }
2832 // classic stack-locking code ...
2833 ld(tmpReg, Address(boxReg, 0)) ;
2834 beq(tmpReg, R0, DONE_LABEL) ;
2835 move(AT, 0x1); // delay slot
2837 cmpxchg(tmpReg, Address(objReg, 0), boxReg); // Uses EAX which is box
2838 bind(DONE_LABEL);
2839 } else {
2840 Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
2842 // Critically, the biased locking test must have precedence over
2843 // and appear before the (box->dhw == 0) recursive stack-lock test.
2844 if (UseBiasedLocking && !UseOptoBiasInlining) {
2845 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
2846 }
2848 ld(AT, Address(boxReg, 0)) ; // Examine the displaced header
2849 beq(AT, R0, DONE_LABEL) ; // 0 indicates recursive stack-lock
2850 delayed()->daddiu(AT, R0, 0x1);
2852 ld(tmpReg, Address(objReg, 0)) ; // Examine the object's markword
2853 andi(AT, tmpReg, markOopDesc::monitor_value) ; // Inflated?
2854 beq(AT, R0, Stacked) ; // Inflated?
2855 delayed()->nop();
2857 bind(Inflated) ;
2858 // It's inflated.
2859 // Despite our balanced locking property we still check that m->_owner == Self
2860 // as java routines or native JNI code called by this thread might
2861 // have released the lock.
2862 // Refer to the comments in synchronizer.cpp for how we might encode extra
2863 // state in _succ so we can avoid fetching EntryList|cxq.
2864 //
2865 // I'd like to add more cases in fast_lock() and fast_unlock() --
2866 // such as recursive enter and exit -- but we have to be wary of
2867 // I$ bloat, T$ effects and BP$ effects.
2868 //
2869 // If there's no contention try a 1-0 exit. That is, exit without
2870 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
2871 // we detect and recover from the race that the 1-0 exit admits.
2872 //
2873 // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
2874 // before it STs null into _owner, releasing the lock. Updates
2875 // to data protected by the critical section must be visible before
2876 // we drop the lock (and thus before any other thread could acquire
2877 // the lock and observe the fields protected by the lock).
2878 // IA32's memory-model is SPO, so STs are ordered with respect to
2879 // each other and there's no need for an explicit barrier (fence).
2880 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
2881 #ifndef OPT_THREAD
2882 get_thread (TREG) ;
2883 #endif
2885 // It's inflated
2886 ld(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
2887 xorr(boxReg, boxReg, TREG);
2889 ld(AT, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
2890 orr(boxReg, boxReg, AT);
2892 move(AT, R0);
2893 bne(boxReg, R0, DONE_LABEL);
2894 delayed()->nop();
2896 ld(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
2897 ld(AT, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
2898 orr(boxReg, boxReg, AT);
2900 move(AT, R0);
2901 bne(boxReg, R0, DONE_LABEL);
2902 delayed()->nop();
2904 sync();
2905 sd(R0, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
2906 move(AT, 0x1);
2907 b(DONE_LABEL);
2908 delayed()->nop();
2910 bind (Stacked);
2911 ld(tmpReg, Address(boxReg, 0)) ;
2912 //if (os::is_MP()) { sync(); }
2913 cmpxchg(tmpReg, Address(objReg, 0), boxReg);
2915 if (EmitSync & 65536) {
2916 bind (CheckSucc);
2917 }
2919 bind(DONE_LABEL);
2921 // Avoid branch to branch on AMD processors
2922 if (EmitSync & 32768) { nop() ; }
2923 }
2924 }
2926 void MacroAssembler::align(int modulus) {
2927 while (offset() % modulus != 0) nop();
2928 }
2931 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
2932 //Unimplemented();
2933 }
2935 #ifdef _LP64
2936 Register caller_saved_registers[] = {AT, V0, V1, A0, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T9, GP, RA, FP};
2938 /* FIXME: Jin: In MIPS64, F0~23 are all caller-saved registers */
2939 FloatRegister caller_saved_fpu_registers[] = {F0, F12, F13};
2940 #else
2941 Register caller_saved_registers[] = {AT, V0, V1, A0, A1, A2, A3, T4, T5, T6, T7, T0, T1, T2, T3, T8, T9, GP, RA, FP};
2943 FloatRegister caller_saved_fpu_registers[] = {};
2944 #endif
2946 // We preserve all caller-saved registers
2947 void MacroAssembler::pushad(){
2948 int i;
2950 /* Fixed-point registers */
2951 int len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]);
2952 daddi(SP, SP, -1 * len * wordSize);
2953 for (i = 0; i < len; i++)
2954 {
2955 #ifdef _LP64
2956 sd(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
2957 #else
2958 sw(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
2959 #endif
2960 }
2962 /* Floating-point registers */
2963 len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]);
2964 daddi(SP, SP, -1 * len * wordSize);
2965 for (i = 0; i < len; i++)
2966 {
2967 #ifdef _LP64
2968 sdc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
2969 #else
2970 swc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
2971 #endif
2972 }
2973 };
2975 void MacroAssembler::popad(){
2976 int i;
2978 /* Floating-point registers */
2979 int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]);
2980 for (i = 0; i < len; i++)
2981 {
2982 #ifdef _LP64
2983 ldc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
2984 #else
2985 lwc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
2986 #endif
2987 }
2988 daddi(SP, SP, len * wordSize);
2990 /* Fixed-point registers */
2991 len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]);
2992 for (i = 0; i < len; i++)
2993 {
2994 #ifdef _LP64
2995 ld(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
2996 #else
2997 lw(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
2998 #endif
2999 }
3000 daddi(SP, SP, len * wordSize);
3001 };
3003 void MacroAssembler::push2(Register reg1, Register reg2) {
3004 #ifdef _LP64
3005 daddi(SP, SP, -16);
3006 sd(reg2, SP, 0);
3007 sd(reg1, SP, 8);
3008 #else
3009 addi(SP, SP, -8);
3010 sw(reg2, SP, 0);
3011 sw(reg1, SP, 4);
3012 #endif
3013 }
3015 void MacroAssembler::pop2(Register reg1, Register reg2) {
3016 #ifdef _LP64
3017 ld(reg1, SP, 0);
3018 ld(reg2, SP, 8);
3019 daddi(SP, SP, 16);
3020 #else
3021 lw(reg1, SP, 0);
3022 lw(reg2, SP, 4);
3023 addi(SP, SP, 8);
3024 #endif
3025 }
3027 //for UseCompressedOops Option
3028 void MacroAssembler::load_klass(Register dst, Register src) {
3029 #ifdef _LP64
3030 if(UseCompressedClassPointers){
3031 lwu(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3032 decode_klass_not_null(dst);
3033 } else
3034 #endif
3035 ld(dst, src, oopDesc::klass_offset_in_bytes());
3036 }
3038 void MacroAssembler::store_klass(Register dst, Register src) {
3039 #ifdef _LP64
3040 if (UseCompressedClassPointers) {
3041 encode_klass_not_null(src);
3042 sw(src, dst, oopDesc::klass_offset_in_bytes());
3043 return;
3044 }
3045 #endif
3046 st_ptr(src, dst, oopDesc::klass_offset_in_bytes());
3047 }
3049 void MacroAssembler::load_prototype_header(Register dst, Register src) {
3050 load_klass(dst, src);
3051 ld(dst, Address(dst, Klass::prototype_header_offset()));
3052 }
3054 #ifdef _LP64
3055 void MacroAssembler::store_klass_gap(Register dst, Register src) {
3056 if (UseCompressedClassPointers) {
3057 sw(src, dst, oopDesc::klass_gap_offset_in_bytes());
3058 }
3059 }
3061 void MacroAssembler::load_heap_oop(Register dst, Address src) {
3062 if(UseCompressedOops){
3063 lwu(dst, src);
3064 decode_heap_oop(dst);
3065 } else{
3066 ld(dst, src);
3067 }
3068 }
3070 void MacroAssembler::store_heap_oop(Address dst, Register src){
3071 if(UseCompressedOops){
3072 assert(!dst.uses(src), "not enough registers");
3073 encode_heap_oop(src);
3074 sw(src, dst);
3075 } else{
3076 sd(src, dst);
3077 }
3078 }
3080 #ifdef ASSERT
3081 void MacroAssembler::verify_heapbase(const char* msg) {
3082 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
3083 assert (Universe::heap() != NULL, "java heap should be initialized");
3084 }
3085 #endif
3088 // Algorithm must match oop.inline.hpp encode_heap_oop.
3089 void MacroAssembler::encode_heap_oop(Register r) {
3090 #ifdef ASSERT
3091 verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?");
3092 #endif
3093 verify_oop(r, "broken oop in encode_heap_oop");
3094 if (Universe::narrow_oop_base() == NULL) {
3095 if (Universe::narrow_oop_shift() != 0) {
3096 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3097 shr(r, LogMinObjAlignmentInBytes);
3098 }
3099 return;
3100 }
3102 movz(r, S5_heapbase, r);
3103 dsub(r, r, S5_heapbase);
3104 if (Universe::narrow_oop_shift() != 0) {
3105 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3106 shr(r, LogMinObjAlignmentInBytes);
3107 }
3108 }
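// Hedged sketch of the encoding, mirroring oop.inline.hpp:
//   narrow = (r == NULL) ? 0 : (r - narrow_oop_base) >> narrow_oop_shift;
// The movz substitutes the heap base for a NULL oop first, so the
// subtraction yields 0 without a branch.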
3110 void MacroAssembler::encode_heap_oop(Register dst, Register src) {
3111 #ifdef ASSERT
3112 verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?");
3113 #endif
3114 verify_oop(src, "broken oop in encode_heap_oop");
3115 if (Universe::narrow_oop_base() == NULL) {
3116 if (Universe::narrow_oop_shift() != 0) {
3117 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3118 dsrl(dst, src, LogMinObjAlignmentInBytes);
3119 } else {
3120 if (dst != src) move(dst, src);
3121 }
3122 } else {
3123 if (dst == src) {
3124 movz(dst, S5_heapbase, dst);
3125 dsub(dst, dst, S5_heapbase);
3126 if (Universe::narrow_oop_shift() != 0) {
3127 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3128 shr(dst, LogMinObjAlignmentInBytes);
3129 }
3130 } else {
3131 dsub(dst, src, S5_heapbase);
3132 if (Universe::narrow_oop_shift() != 0) {
3133 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3134 shr(dst, LogMinObjAlignmentInBytes);
3135 }
3136 movz(dst, R0, src);
3137 }
3138 }
3139 }
3141 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3142 assert (UseCompressedOops, "should be compressed");
3143 #ifdef ASSERT
3144 if (CheckCompressedOops) {
3145 Label ok;
3146 bne(r, R0, ok);
3147 delayed()->nop();
3148 stop("null oop passed to encode_heap_oop_not_null");
3149 bind(ok);
3150 }
3151 #endif
3152 verify_oop(r, "broken oop in encode_heap_oop_not_null");
3153 if (Universe::narrow_oop_base() != NULL) {
3154 dsub(r, r, S5_heapbase);
3155 }
3156 if (Universe::narrow_oop_shift() != 0) {
3157 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3158 shr(r, LogMinObjAlignmentInBytes);
3159 }
3161 }
3163 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
3164 assert (UseCompressedOops, "should be compressed");
3165 #ifdef ASSERT
3166 if (CheckCompressedOops) {
3167 Label ok;
3168 bne(src, R0, ok);
3169 delayed()->nop();
3170 stop("null oop passed to encode_heap_oop_not_null2");
3171 bind(ok);
3172 }
3173 #endif
3174 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
3176 if (Universe::narrow_oop_base() != NULL) {
3177 dsub(dst, src, S5_heapbase);
3178 if (Universe::narrow_oop_shift() != 0) {
3179 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3180 shr(dst, LogMinObjAlignmentInBytes);
3181 }
3182 } else {
3183 if (Universe::narrow_oop_shift() != 0) {
3184 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3185 dsrl(dst, src, LogMinObjAlignmentInBytes);
3186 } else {
3187 if (dst != src) move(dst, src);
3188 }
3189 }
3190 }
3192 void MacroAssembler::decode_heap_oop(Register r) {
3193 #ifdef ASSERT
3194 verify_heapbase("MacroAssembler::decode_heap_oop corrupted?");
3195 #endif
3196 if (Universe::narrow_oop_base() == NULL) {
3197 if (Universe::narrow_oop_shift() != 0) {
3198 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3199 shl(r, LogMinObjAlignmentInBytes);
3200 }
3201 } else {
3202 move(AT, r);
3203 if (Universe::narrow_oop_shift() != 0) {
3204 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3205 shl(r, LogMinObjAlignmentInBytes);
3206 }
3207 dadd(r, r, S5_heapbase);
3208 movz(r, R0, AT);
3209 }
3210 verify_oop(r, "broken oop in decode_heap_oop");
3211 }
3213 void MacroAssembler::decode_heap_oop(Register dst, Register src) {
3214 #ifdef ASSERT
3215 verify_heapbase("MacroAssembler::decode_heap_oop corrupted?");
3216 #endif
3217 if (Universe::narrow_oop_base() == NULL) {
3218 if (Universe::narrow_oop_shift() != 0) {
3219 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3220 if (dst != src) nop(); // DON'T DELETE THIS GUY.
3221 dsll(dst, src, LogMinObjAlignmentInBytes);
3222 } else {
3223 if (dst != src) move(dst, src);
3224 }
3225 } else {
3226 if (dst == src) {
3227 move(AT, dst);
3228 if (Universe::narrow_oop_shift() != 0) {
3229 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3230 shl(dst, LogMinObjAlignmentInBytes);
3231 }
3232 dadd(dst, dst, S5_heapbase);
3233 movz(dst, R0, AT);
3234 } else {
3235 if (Universe::narrow_oop_shift() != 0) {
3236 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3237 dsll(dst, src, LogMinObjAlignmentInBytes);
3238 daddu(dst, dst, S5_heapbase);
3239 } else {
3240 daddu(dst, src, S5_heapbase);
3241 }
3242 movz(dst, R0, src);
3243 }
3244 }
3245 verify_oop(dst, "broken oop in decode_heap_oop");
3246 }
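// Hedged sketch of the decoding (the inverse of encode_heap_oop):
//   oop = (narrow == 0) ? NULL : narrow_oop_base + ((uintptr_t)narrow << shift);
// Again, the movz restores NULL for a zero input after the unconditional
// shift-and-add.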
3248 void MacroAssembler::decode_heap_oop_not_null(Register r) {
3249 // Note: it will change flags
3250 assert (UseCompressedOops, "should only be used for compressed headers");
3251 assert (Universe::heap() != NULL, "java heap should be initialized");
3252 // Cannot assert, unverified entry point counts instructions (see .ad file)
3253 // vtableStubs also counts instructions in pd_code_size_limit.
3254 // Also do not verify_oop as this is called by verify_oop.
3255 if (Universe::narrow_oop_shift() != 0) {
3256 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3257 shl(r, LogMinObjAlignmentInBytes);
3258 if (Universe::narrow_oop_base() != NULL) {
3259 daddu(r, r, S5_heapbase);
3260 }
3261 } else {
3262 assert (Universe::narrow_oop_base() == NULL, "sanity");
3263 }
3264 }
3266 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
3267 assert (UseCompressedOops, "should only be used for compressed headers");
3268 assert (Universe::heap() != NULL, "java heap should be initialized");
3270 // Cannot assert, unverified entry point counts instructions (see .ad file)
3271 // vtableStubs also counts instructions in pd_code_size_limit.
3272 // Also do not verify_oop as this is called by verify_oop.
3273 //lea(dst, Address(S5_heapbase, src, Address::times_8, 0));
3274 if (Universe::narrow_oop_shift() != 0) {
3275 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3276 if (LogMinObjAlignmentInBytes == Address::times_8) {
3277 dsll(dst, src, LogMinObjAlignmentInBytes);
3278 daddu(dst, dst, S5_heapbase);
3279 } else {
3280 dsll(dst, src, LogMinObjAlignmentInBytes);
3281 if (Universe::narrow_oop_base() != NULL) {
3282 daddu(dst, dst, S5_heapbase);
3283 }
3284 }
3285 } else {
3286 assert (Universe::narrow_oop_base() == NULL, "sanity");
3287 if (dst != src) {
3288 move(dst, src);
3289 }
3290 }
3291 }
3293 void MacroAssembler::encode_klass_not_null(Register r) {
3294 if (Universe::narrow_klass_base() != NULL) {
3295 assert(r != AT, "Encoding a klass in AT");
3296 set64(AT, (int64_t)Universe::narrow_klass_base());
3297 dsub(r, r, AT);
3298 }
3299 if (Universe::narrow_klass_shift() != 0) {
3300 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3301 shr(r, LogKlassAlignmentInBytes);
3302 }
3303 // Not necessary for MIPS at all.
3304 //if (Universe::narrow_klass_base() != NULL) {
3305 // reinit_heapbase();
3306 //}
3307 }
3309 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3310 if (dst == src) {
3311 encode_klass_not_null(src);
3312 } else {
3313 if (Universe::narrow_klass_base() != NULL) {
3314 set64(dst, (int64_t)Universe::narrow_klass_base());
3315 dsub(dst, src, dst);
3316 if (Universe::narrow_klass_shift() != 0) {
3317 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3318 shr(dst, LogKlassAlignmentInBytes);
3319 }
3320 } else {
3321 if (Universe::narrow_klass_shift() != 0) {
3322 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3323 dsrl(dst, src, LogKlassAlignmentInBytes);
3324 } else {
3325 move(dst, src);
3326 }
3327 }
3328 }
3329 }
3331 // Function instr_size_for_decode_klass_not_null() counts the instructions
3332 // generated by decode_klass_not_null(register r) and reinit_heapbase(),
3333 // when (Universe::heap() != NULL). Hence, if the instructions they
3334 // generate change, then this method needs to be updated.
3335 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3336 assert (UseCompressedClassPointers, "only for compressed klass ptrs");
3337 if (Universe::narrow_klass_base() != NULL) {
3338 // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
3339 return (Universe::narrow_klass_shift() == 0 ? 4 * 9 : 4 * 10);
3340 } else {
3341 // longest load decode klass function, mov64, leaq
3342 return (Universe::narrow_klass_shift() == 0 ? 4 * 0 : 4 * 1);
3343 }
3344 }
3346 void MacroAssembler::decode_klass_not_null(Register r) {
3347 assert (UseCompressedClassPointers, "should only be used for compressed headers");
3348 assert(r != AT, "Decoding a klass in AT");
3349 // Cannot assert, unverified entry point counts instructions (see .ad file)
3350 // vtableStubs also counts instructions in pd_code_size_limit.
3351 // Also do not verify_oop as this is called by verify_oop.
3352 if (Universe::narrow_klass_shift() != 0) {
3353 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3354 shl(r, LogKlassAlignmentInBytes);
3355 }
3356 if (Universe::narrow_klass_base() != NULL) {
3357 set64(AT, (int64_t)Universe::narrow_klass_base());
3358 daddu(r, r, AT);
3359 //Not necessary for MIPS at all.
3360 //reinit_heapbase();
3361 }
3362 }
3364 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3365 assert (UseCompressedClassPointers, "should only be used for compressed headers");
3367 if (dst == src) {
3368 decode_klass_not_null(dst);
3369 } else {
3370 // Cannot assert, unverified entry point counts instructions (see .ad file)
3371 // vtableStubs also counts instructions in pd_code_size_limit.
3372 // Also do not verify_oop as this is called by verify_oop.
3373 set64(dst, (int64_t)Universe::narrow_klass_base());
3374 if (Universe::narrow_klass_shift() != 0) {
3375 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3376 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
3377 dsll(AT, src, Address::times_8);
3378 daddu(dst, dst, AT);
3379 } else {
3380 daddu(dst, src, dst);
3381 }
3382 }
3383 }
3385 void MacroAssembler::incrementl(Register reg, int value) {
3386 if (value == min_jint) {
3387 move(AT, value);
3388 LP64_ONLY(addu32(reg, reg, AT)) NOT_LP64(addu(reg, reg, AT));
3389 return;
3390 }
3391 if (value < 0) { decrementl(reg, -value); return; }
3392 if (value == 0) return;
3394 if (Assembler::is_simm16(value)) {
3395 NOT_LP64(addiu(reg, reg, value));
3396 LP64_ONLY(move(AT, value); addu32(reg, reg, AT));
3397 } else {
3398 move(AT, value);
3399 LP64_ONLY(addu32(reg, reg, AT)) NOT_LP64(addu(reg, reg, AT));
3400 }
3401 }
3403 void MacroAssembler::decrementl(Register reg, int value) {
3404 if (value == min_jint) {
3405 move(AT, value);
3406 LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT));
3407 return;
3408 }
3409 if (value < 0) { incrementl(reg, -value); return; }
3410 if (value == 0) return;
3412 if (Assembler::is_simm16(value)) {
3413 NOT_LP64(addiu(reg, reg, -value));
3414 LP64_ONLY(move(AT, value); subu32(reg, reg, AT));
3415 } else {
3416 move(AT, value);
3417 LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT));
3418 }
3419 }
3421 void MacroAssembler::reinit_heapbase() {
3422 if (UseCompressedOops || UseCompressedClassPointers) {
3423 if (Universe::heap() != NULL) {
3424 if (Universe::narrow_oop_base() == NULL) {
3425 move(S5_heapbase, R0);
3426 } else {
3427 set64(S5_heapbase, (int64_t)Universe::narrow_ptrs_base());
3428 }
3429 } else {
3430 set64(S5_heapbase, (intptr_t)Universe::narrow_ptrs_base_addr());
3431 ld(S5_heapbase, S5_heapbase, 0);
3432 }
3433 }
3434 }
3435 #endif // _LP64
3437 void MacroAssembler::check_klass_subtype(Register sub_klass,
3438 Register super_klass,
3439 Register temp_reg,
3440 Label& L_success) {
3441 // implemented as in gen_subtype_check
3442 Label L_failure;
3443 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
3444 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
3445 bind(L_failure);
3446 }
3448 SkipIfEqual::SkipIfEqual(
3449 MacroAssembler* masm, const bool* flag_addr, bool value) {
3450 _masm = masm;
3451 _masm->li(AT, (address)flag_addr);
3452 _masm->lb(AT, AT, 0);
3453 _masm->addi(AT, AT, -value);
3454 _masm->beq(AT, R0, _label);
3455 _masm->delayed()->nop();
3456 }
3457 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
3458 Register super_klass,
3459 Register temp_reg,
3460 Label* L_success,
3461 Label* L_failure,
3462 Label* L_slow_path,
3463 RegisterOrConstant super_check_offset) {
3464 assert_different_registers(sub_klass, super_klass, temp_reg);
3465 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
3466 if (super_check_offset.is_register()) {
3467 assert_different_registers(sub_klass, super_klass,
3468 super_check_offset.as_register());
3469 } else if (must_load_sco) {
3470 assert(temp_reg != noreg, "supply either a temp or a register offset");
3471 }
3473 Label L_fallthrough;
3474 int label_nulls = 0;
3475 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3476 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3477 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
3478 assert(label_nulls <= 1, "at most one NULL in the batch");
3480 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3481 int sco_offset = in_bytes(Klass::super_check_offset_offset());
3482 // If the pointers are equal, we are done (e.g., String[] elements).
3483 // This self-check enables sharing of secondary supertype arrays among
3484 // non-primary types such as array-of-interface. Otherwise, each such
3485 // type would need its own customized SSA.
3486 // We move this check to the front of the fast path because many
3487 // type checks are in fact trivially successful in this manner,
3488 // so we get a nicely predicted branch right at the start of the check.
3489 //cmpptr(sub_klass, super_klass);
3490 //local_jcc(Assembler::equal, *L_success);
3491 beq(sub_klass, super_klass, *L_success);
3492 delayed()->nop();
3493 // Check the supertype display:
3494 if (must_load_sco) {
3495 // Positive movl does right thing on LP64.
3496 lwu(temp_reg, super_klass, sco_offset);
3497 super_check_offset = RegisterOrConstant(temp_reg);
3498 }
3499 dsll(AT, super_check_offset.register_or_noreg(), Address::times_1);
3500 daddu(AT, sub_klass, AT);
3501 ld(AT, AT, super_check_offset.constant_or_zero()*Address::times_1);
3503 // This check has worked decisively for primary supers.
3504 // Secondary supers are sought in the super_cache ('super_cache_addr').
3505 // (Secondary supers are interfaces and very deeply nested subtypes.)
3506 // This works in the same check above because of a tricky aliasing
3507 // between the super_cache and the primary super display elements.
3508 // (The 'super_check_addr' can address either, as the case requires.)
3509 // Note that the cache is updated below if it does not help us find
3510 // what we need immediately.
3511 // So if it was a primary super, we can just fail immediately.
3512 // Otherwise, it's the slow path for us (no success at this point).
3514 if (super_check_offset.is_register()) {
3515 beq(super_klass, AT, *L_success);
3516 delayed()->nop();
3517 addi(AT, super_check_offset.as_register(), -sc_offset);
3518 if (L_failure == &L_fallthrough) {
3519 beq(AT, R0, *L_slow_path);
3520 delayed()->nop();
3521 } else {
3522 bne(AT, R0, *L_failure);
3523 delayed()->nop();
3524 b(*L_slow_path);
3525 delayed()->nop();
3526 }
3527 } else if (super_check_offset.as_constant() == sc_offset) {
3528 // Need a slow path; fast failure is impossible.
3529 if (L_slow_path == &L_fallthrough) {
3530 beq(super_klass, AT, *L_success);
3531 delayed()->nop();
3532 } else {
3533 bne(super_klass, AT, *L_slow_path);
3534 delayed()->nop();
3535 b(*L_success);
3536 delayed()->nop();
3537 }
3538 } else {
3539 // No slow path; it's a fast decision.
3540 if (L_failure == &L_fallthrough) {
3541 beq(super_klass, AT, *L_success);
3542 delayed()->nop();
3543 } else {
3544 bne(super_klass, AT, *L_failure);
3545 delayed()->nop();
3546 b(*L_success);
3547 delayed()->nop();
3548 }
3549 }
3551 bind(L_fallthrough);
3553 }
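// Hedged summary of the fast path just emitted:
//   if (sub_klass == super_klass)                             goto success;
//   sco = super_klass->super_check_offset();                  // maybe a constant
//   if (*(Klass**)((address)sub_klass + sco) == super_klass)  goto success;
//   if (sco != secondary_super_cache_offset)                  goto failure;
//   /* otherwise fall through to the slow-path scan */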
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
  else
    temp2_reg = T9;
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // A couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // (x86 uses repne_scan here, which needs fixed registers to be spilled;
  // on MIPS we emit an explicit loop instead, so no spilling is needed.)

#if 0
  assert(sub_klass != T9, "killed reg"); // killed by mov(rax, super)
  assert(sub_klass != T1, "killed reg"); // killed by lea(rcx, &pst_counter)
#endif

  // In debug builds, count partial subtype checks in
  // SharedRuntime::_partial_subtype_ctr.
#ifndef PRODUCT
  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  ExternalAddress pst_counter_addr((address) pst_counter);
  NOT_LP64( incrementl(pst_counter_addr) );
  //LP64_ONLY( lea(rcx, pst_counter_addr) );
  //LP64_ONLY( incrementl(Address(rcx, 0)) );
#endif //PRODUCT

  // We will consult the secondary-super array.
  ld(temp_reg, secondary_supers_addr);
  // Load the array length.  (The length is a non-negative int,
  // so lw's sign extension is harmless on LP64.)
  lw(temp2_reg, Address(temp_reg, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  daddiu(temp_reg, temp_reg, Array<Klass*>::base_offset_in_bytes());

  // Scan temp2_reg words at [temp_reg] for an occurrence of super_klass:
  // temp_reg walks the secondary-super array while temp2_reg counts down
  // the remaining elements; reaching zero means super_klass was not found.
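  // Equivalent C-style sketch of the loop below (illustrative only):
  //   while (temp2_reg != 0) {
  //     if (*(Klass**)temp_reg == super_klass) goto subtype;
  //     temp_reg  += wordSize;   // actually done in the beq delay slot
  //     temp2_reg -= 1;          // actually done in the b delay slot
  //   }
  //   goto *L_failure;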
  /* 2013/4/3 Jin: OpenJDK 8 never compresses klass pointers in the secondary-super array. */
  Label Loop, subtype;
  bind(Loop);
  beq(temp2_reg, R0, *L_failure);
  delayed()->nop();
  ld(AT, temp_reg, 0);
  beq(AT, super_klass, subtype);
  delayed()->daddi(temp_reg, temp_reg, 1 * wordSize);
  b(Loop);
  delayed()->daddi(temp2_reg, temp2_reg, -1);

  bind(subtype);
  // Success.  Cache the super we found and proceed in triumph.
  sd(super_klass, super_cache_addr);
  if (L_success != &L_fallthrough) {
    b(*L_success);
    delayed()->nop();
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}
void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ld(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  sd(R0, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ld(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  sd(R0, Address(java_thread, JavaThread::vm_result_2_offset()));
}
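// Both helpers implement the same hand-off (sketch): the runtime stores its
// result into the JavaThread, and the code above fetches it and immediately
// clears the slot, so a dead reference is not kept alive through the thread:
//   result = thread->_vm_result;  thread->_vm_result = NULL;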
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  Register scale_reg = NOREG;
  Address::ScaleFactor scale_factor = Address::no_scale;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
  } else {
    scale_reg = arg_slot.as_register();
    scale_factor = Address::times_8;
  }
  // 2014/07/31 Fu: We don't push RA on the stack in prepare_invoke.
  // offset += wordSize; // return PC is on stack
  if (scale_reg == NOREG) {
    return Address(SP, offset);
  } else {
    dsll(scale_reg, scale_reg, scale_factor);
    daddu(scale_reg, SP, scale_reg);
    return Address(scale_reg, offset);
  }
}
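// Usage sketch (hypothetical values, for illustration): with
// extra_slot_offset == 0, argument_address(2, 0) resolves to
//   Address(SP, Interpreter::expr_offset_in_bytes(0) + 2 * stackElementSize)
// i.e. the interpreter expression-stack slot two elements up from the base;
// a register arg_slot instead scales the register by times_8 and indexes off SP.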
SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(dst2 != noreg, "second dest register required");
    lw(dst,  src);
    lw(dst2, src.plus_disp(BytesPerInt));
    break;
#else
  case 8: ld(dst, src); break;
#endif
  case 4: lw(dst, src); break;
  case 2: is_signed ? lh(dst, src) : lhu(dst, src); break;
  case 1: is_signed ? lb(dst, src) : lbu(dst, src); break;
  default: ShouldNotReachHere();
  }
}
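// Usage sketch (illustrative): loading a Java short field sign-extended into
// T0, given its address in A0:
//   load_sized_value(T0, Address(A0, 0), sizeof(jshort), true, noreg);  // emits lh
// Passing is_signed == false would emit lhu and zero-extend instead.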
void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(src2 != noreg, "second source register required");
    sw(src,  dst);
    sw(src2, dst.plus_disp(BytesPerInt));
    break;
#else
  case 8: sd(src, dst); break;
#endif
  case 4: sw(src, dst); break;
  case 2: sh(src, dst); break;
  case 1: sb(src, dst); break;
  default: ShouldNotReachHere();
  }
}
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable).
  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size() * wordSize;
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  lw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  dsll(scan_temp, scan_temp, times_vte_scale);
  daddu(scan_temp, recv_klass, scan_temp);
  daddiu(scan_temp, scan_temp, vtable_base);
  if (HeapWordsPerLong > 1) {
    // Round up to align_object_offset boundary;
    // see the code for InstanceKlass::start_of_itable().
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  if (itable_index.is_constant()) {
    set64(AT, itable_index.as_constant());
    dsll(AT, AT, (int)Address::times_ptr);
  } else {
    dsll(AT, itable_index.as_register(), (int)Address::times_ptr);
  }
  daddu(AT, AT, recv_klass);
  daddiu(recv_klass, AT, itentry_off);

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ld(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));

    if (peel) {
      beq(intf_klass, method_result, found_method);
      delayed()->nop();
    } else {
      bne(intf_klass, method_result, search);
      delayed()->nop();
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null.  A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    beq(method_result, R0, L_no_such_interface);
    delayed()->nop();
    daddiu(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  lw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  //ld(method_result, Address(recv_klass, scan_temp, Address::times_1));
  if (UseLoongsonISA) {
    gsldx(method_result, recv_klass, scan_temp, 0);
  } else {
    daddu(AT, recv_klass, scan_temp);
    ld(method_result, AT, 0);
  }
}
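// For orientation, the itable region scanned above is laid out as follows
// (sketch; see InstanceKlass::start_of_itable() for the authoritative layout):
//   recv_klass
//     [ vtable: vtable_length entries ]
//     [ itableOffsetEntry { interface, offset } ]*  <- scan_temp walks these
//     [ null-interface terminator ]
//     [ itableMethodEntry { Method* } ]*            <- reached via offset + scaled itable_index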
// Virtual method calling.
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  Register tmp = GP;
  push(tmp);

  if (vtable_index.is_constant()) {
    assert_different_registers(recv_klass, method_result, tmp);
  } else {
    assert_different_registers(recv_klass, method_result, vtable_index.as_register(), tmp);
  }
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  // Address vtable_entry_addr(recv_klass,
  //                           vtable_index, Address::times_ptr,
  //                           base + vtableEntry::method_offset_in_bytes());
  if (vtable_index.is_constant()) {
    set64(AT, vtable_index.as_constant());
    dsll(AT, AT, (int)Address::times_ptr);
  } else {
    dsll(AT, vtable_index.as_register(), (int)Address::times_ptr);
  }
  set64(tmp, base + vtableEntry::method_offset_in_bytes());
  daddu(tmp, tmp, AT);
  daddu(tmp, tmp, recv_klass);
  ld(method_result, tmp, 0);

  pop(tmp);
}
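// In effect the sequence above computes (sketch, illustrative only):
//   method_result = *(Method**)(recv_klass
//                               + InstanceKlass::vtable_start_offset() * wordSize
//                               + vtable_index * wordSize
//                               + vtableEntry::method_offset_in_bytes());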