Fri, 27 Jul 2018 15:17:45 +0800
#7376 Implement MacroAssembler::incr_allocated_bytes
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2018, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
// Implementation of MacroAssembler
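// Static scratch areas used by save_registers()/restore_registers() below to
// dump and reload the full integer and floating-point register files for
// debugging; print() formats their contents.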
intptr_t MacroAssembler::i[32] = {0};
float MacroAssembler::f[32] = {0.0};

void MacroAssembler::print(outputStream *s) {
  unsigned int k;
  for (k = 0; k < sizeof(i) / sizeof(i[0]); k++) {
    s->print_cr("i%d = 0x%.16lx", k, i[k]);
  }
  s->cr();

  for (k = 0; k < sizeof(f) / sizeof(f[0]); k++) {
    s->print_cr("f%d = %f", k, f[k]);
  }
  s->cr();
}

int MacroAssembler::i_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->i[k]; }
int MacroAssembler::f_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->f[k]; }
void MacroAssembler::save_registers(MacroAssembler *masm) {
#define __ masm->
  for (int k = 0; k < 32; k++) {
    __ sw(as_Register(k), A0, i_offset(k));
  }

  for (int k = 0; k < 32; k++) {
    __ swc1(as_FloatRegister(k), A0, f_offset(k));
  }
#undef __
}

void MacroAssembler::restore_registers(MacroAssembler *masm) {
#define __ masm->
  for (int k = 0; k < 32; k++) {
    __ lw(as_Register(k), A0, i_offset(k));
  }

  for (int k = 0; k < 32; k++) {
    __ lwc1(as_FloatRegister(k), A0, f_offset(k));
  }
#undef __
}
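// Patches the instruction(s) at 'branch' to transfer control to 'target'
// once a label is bound.  Three shapes are recognized below: the b_far
// sequence (dadd/bgezal/lui/ori/daddu, patched via its lui/ori pair), the
// jmp_far sequence (patchable_set48 + jr T9, re-emitted with the real
// address), and an ordinary short branch whose 16-bit displacement is
// rewritten in place.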
void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*)branch;
  jint* pc = (jint*)branch;

  if ((opcode(stub_inst) == special_op) && (special(stub_inst) == dadd_op)) {
    //b_far:
    //  move(AT, RA); // dadd
    //  emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));
    //  nop();
    //  lui(T9, 0); // to be patched
    //  ori(T9, 0);
    //  daddu(T9, T9, RA);
    //  move(RA, AT);
    //  jr(T9);

    assert(opcode(pc[3]) == lui_op
        && opcode(pc[4]) == ori_op
        && special(pc[5]) == daddu_op, "Not a branch label patch");
    if (!(opcode(pc[3]) == lui_op
        && opcode(pc[4]) == ori_op
        && special(pc[5]) == daddu_op)) { tty->print_cr("Not a branch label patch"); }

    int offset = target - branch;
    if (!is_simm16(offset)) {
      pc[3] = (pc[3] & 0xffff0000) | high16(offset - 12);
      pc[4] = (pc[4] & 0xffff0000) | low16(offset - 12);
    } else {
      /* revert to "beq + nop" */
      CodeBuffer cb(branch, 4 * 10);
      MacroAssembler masm(&cb);
#define __ masm.
      __ b(target);
      __ delayed()->nop();
      __ nop();
      __ nop();
      __ nop();
      __ nop();
      __ nop();
      __ nop();
    }
    return;
  } else if (special(pc[4]) == jr_op
      && opcode(pc[4]) == special_op
      && (((opcode(pc[0]) == lui_op) || opcode(pc[0]) == daddiu_op) || (opcode(pc[0]) == ori_op))) {
    //jmp_far:
    //  patchable_set48(T9, target);
    //  jr(T9);
    //  nop();

    CodeBuffer cb(branch, 4 * 4);
    MacroAssembler masm(&cb);
    masm.patchable_set48(T9, (long)(target));
    return;
  }

#ifndef PRODUCT
  if (!is_simm16((target - branch - 4) >> 2)) {
    tty->print_cr("Illegal patching: branch = 0x%lx, target = 0x%lx", branch, target);
    tty->print_cr("======= Start decoding at branch = 0x%lx =======", branch);
    Disassembler::decode(branch - 4 * 16, branch + 4 * 16, tty);
    tty->print_cr("======= End of decoding =======");
  }
#endif

  stub_inst = patched_branch(target - branch, stub_inst, 0);
}
static inline address first_cache_address() {
  return CodeCache::low_bound() + sizeof(HeapBlock::Header);
}

static inline address last_cache_address() {
  return CodeCache::high_bound() - Assembler::InstructionSize;
}

int MacroAssembler::call_size(address target, bool far, bool patchable) {
  if (patchable) return 6 << Assembler::LogInstructionSize;
  if (!far) return 2 << Assembler::LogInstructionSize; // jal + nop
  return (insts_for_set64((jlong)target) + 2) << Assembler::LogInstructionSize;
}
// Can we reach target using jal/j from anywhere
// in the code cache (because code can be relocated)?
bool MacroAssembler::reachable_from_cache(address target) {
  address cl = first_cache_address();
  address ch = last_cache_address();

  return (cl <= target) && (target <= ch) && fit_in_jal(cl, ch);
}
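// The helpers below emit jumps and calls in two flavors.  The general_*
// forms pick the shortest encoding that reaches the target; the patchable_*
// forms always occupy a fixed six-instruction footprint (padding with nops
// when a short j/jal suffices) so the site can later be repatched with a
// full 48-bit target address.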
void MacroAssembler::general_jump(address target) {
  if (reachable_from_cache(target)) {
    j(target);
    delayed()->nop();
  } else {
    set64(T9, (long)target);
    jr(T9);
    delayed()->nop();
  }
}

int MacroAssembler::insts_for_general_jump(address target) {
  if (reachable_from_cache(target)) {
    //j(target);
    //nop();
    return 2;
  } else {
    //set64(T9, (long)target);
    //jr(T9);
    //nop();
    return insts_for_set64((jlong)target) + 2;
  }
}

void MacroAssembler::patchable_jump(address target) {
  if (reachable_from_cache(target)) {
    nop();
    nop();
    nop();
    nop();
    j(target);
    delayed()->nop();
  } else {
    patchable_set48(T9, (long)target);
    jr(T9);
    delayed()->nop();
  }
}

int MacroAssembler::insts_for_patchable_jump(address target) {
  return 6;
}

void MacroAssembler::general_call(address target) {
  if (reachable_from_cache(target)) {
    jal(target);
    delayed()->nop();
  } else {
    set64(T9, (long)target);
    jalr(T9);
    delayed()->nop();
  }
}

int MacroAssembler::insts_for_general_call(address target) {
  if (reachable_from_cache(target)) {
    //jal(target);
    //nop();
    return 2;
  } else {
    //set64(T9, (long)target);
    //jalr(T9);
    //nop();
    return insts_for_set64((jlong)target) + 2;
  }
}

void MacroAssembler::patchable_call(address target) {
  if (reachable_from_cache(target)) {
    nop();
    nop();
    nop();
    nop();
    jal(target);
    delayed()->nop();
  } else {
    patchable_set48(T9, (long)target);
    jalr(T9);
    delayed()->nop();
  }
}

int MacroAssembler::insts_for_patchable_call(address target) {
  return 6;
}
void MacroAssembler::beq_far(Register rs, Register rt, address entry) {
  u_char* cur_pc = pc();

  /* Jin: Near/Far jump */
  if (is_simm16((entry - pc() - 4) / 4)) {
    Assembler::beq(rs, rt, offset(entry));
  } else {
    Label not_jump;
    bne(rs, rt, not_jump);
    delayed()->nop();

    b_far(entry);
    delayed()->nop();

    bind(not_jump);
    has_delay_slot();
  }
}

void MacroAssembler::beq_far(Register rs, Register rt, Label& L) {
  if (L.is_bound()) {
    beq_far(rs, rt, target(L));
  } else {
    u_char* cur_pc = pc();
    Label not_jump;
    bne(rs, rt, not_jump);
    delayed()->nop();

    b_far(L);
    delayed()->nop();

    bind(not_jump);
    has_delay_slot();
  }
}

void MacroAssembler::bne_far(Register rs, Register rt, address entry) {
  u_char* cur_pc = pc();

  /* Jin: Near/Far jump */
  if (is_simm16((entry - pc() - 4) / 4)) {
    Assembler::bne(rs, rt, offset(entry));
  } else {
    Label not_jump;
    beq(rs, rt, not_jump);
    delayed()->nop();

    b_far(entry);
    delayed()->nop();

    bind(not_jump);
    has_delay_slot();
  }
}

void MacroAssembler::bne_far(Register rs, Register rt, Label& L) {
  if (L.is_bound()) {
    bne_far(rs, rt, target(L));
  } else {
    u_char* cur_pc = pc();
    Label not_jump;
    beq(rs, rt, not_jump);
    delayed()->nop();

    b_far(L);
    delayed()->nop();

    bind(not_jump);
    has_delay_slot();
  }
}
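// The *_long variants invert the condition to skip over a jmp_far, so the
// taken path can reach any address representable by patchable_set48 rather
// than being limited to a 16-bit branch displacement.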
void MacroAssembler::beq_long(Register rs, Register rt, Label& L) {
  Label not_taken;

  bne(rs, rt, not_taken);
  delayed()->nop();

  jmp_far(L);

  bind(not_taken);
}

void MacroAssembler::bne_long(Register rs, Register rt, Label& L) {
  Label not_taken;

  beq(rs, rt, not_taken);
  delayed()->nop();

  jmp_far(L);

  bind(not_taken);
}

void MacroAssembler::bc1t_long(Label& L) {
  Label not_taken;

  bc1f(not_taken);
  delayed()->nop();

  jmp_far(L);

  bind(not_taken);
}

void MacroAssembler::bc1f_long(Label& L) {
  Label not_taken;

  bc1t(not_taken);
  delayed()->nop();

  jmp_far(L);

  bind(not_taken);
}
void MacroAssembler::b_far(Label& L) {
  if (L.is_bound()) {
    b_far(target(L));
  } else {
    volatile address dest = target(L);
    /*
    MacroAssembler::pd_patch_instruction branch=55651ed514, target=55651ef6d8
       0x00000055651ed514: dadd at, ra, zero
       0x00000055651ed518: [4110001]bgezal zero, 0x00000055651ed520

       0x00000055651ed51c: sll zero, zero, 0
       0x00000055651ed520: lui t9, 0x0
       0x00000055651ed524: ori t9, t9, 0x21b8
       0x00000055651ed528: daddu t9, t9, ra
       0x00000055651ed52c: dadd ra, at, zero
       0x00000055651ed530: jr t9
       0x00000055651ed534: sll zero, zero, 0
    */
    move(AT, RA);
    emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));
    nop();
    lui(T9, 0); // to be patched
    ori(T9, T9, 0);
    daddu(T9, T9, RA);
    move(RA, AT);
    jr(T9);
  }
}

void MacroAssembler::b_far(address entry) {
  u_char* cur_pc = pc();

  /* Jin: Near/Far jump */
  if (is_simm16((entry - pc() - 4) / 4)) {
    b(offset(entry));
  } else {
    /* address must be bounded */
    move(AT, RA);
    emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));
    nop();
    li32(T9, entry - pc());
    daddu(T9, T9, RA);
    move(RA, AT);
    jr(T9);
  }
}
void MacroAssembler::ld_ptr(Register rt, Register offset, Register base) {
  addu_long(AT, base, offset);
  ld_ptr(rt, 0, AT);
}

void MacroAssembler::st_ptr(Register rt, Register offset, Register base) {
  addu_long(AT, base, offset);
  st_ptr(rt, 0, AT);
}

void MacroAssembler::ld_long(Register rt, Register offset, Register base) {
  addu_long(AT, base, offset);
  ld_long(rt, 0, AT);
}

void MacroAssembler::st_long(Register rt, Register offset, Register base) {
  addu_long(AT, base, offset);
  st_long(rt, 0, AT);
}

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}
// tmp_reg1 and tmp_reg2 should be saved outside of atomic_inc32 (caller saved).
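// The increment is a classic MIPS ll/sc retry loop: sc stores the updated
// value only if no other CPU wrote the location since the ll, and leaves 0
// in tmp_reg2 on failure, in which case we branch back and retry.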
void MacroAssembler::atomic_inc32(address counter_addr, int inc, Register tmp_reg1, Register tmp_reg2) {
  Label again;

  li(tmp_reg1, counter_addr);
  bind(again);
  if (UseSyncLevel >= 3000 || UseSyncLevel < 2000) sync();
  ll(tmp_reg2, tmp_reg1, 0);
  addi(tmp_reg2, tmp_reg2, inc);
  sc(tmp_reg2, tmp_reg1, 0);
  beq(tmp_reg2, R0, again);
  delayed()->nop();
}
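// Emits the biased-locking fast path: verify that the mark word carries the
// bias pattern for the current thread and epoch, and CAS the header when a
// (re)bias or revocation is needed.  Returns the code offset of the first
// instruction that may take an implicit null check on obj_reg.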
int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    need_tmp_reg = true;
    tmp_reg = T9;
  }
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, AT);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ld_ptr(swap_reg, mark_addr);
  }

  if (need_tmp_reg) {
    push(tmp_reg);
  }
  move(tmp_reg, swap_reg);
  andi(tmp_reg, tmp_reg, markOopDesc::biased_lock_mask_in_place);
#ifdef _LP64
  daddi(AT, R0, markOopDesc::biased_lock_pattern);
  dsub(AT, AT, tmp_reg);
#else
  addi(AT, R0, markOopDesc::biased_lock_pattern);
  sub(AT, AT, tmp_reg);
#endif
  if (need_tmp_reg) {
    pop(tmp_reg);
  }

  bne(AT, R0, cas_label);
  delayed()->nop();

  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  // Note that because there is no current thread register on MIPS we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  st_ptr(swap_reg, saved_mark_addr);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  load_prototype_header(tmp_reg, obj_reg);
  xorr(tmp_reg, tmp_reg, swap_reg);
  get_thread(swap_reg);
  xorr(swap_reg, swap_reg, tmp_reg);

  move(AT, ~((int) markOopDesc::age_mask_in_place));
  andr(swap_reg, swap_reg, AT);

  if (PrintBiasedLockingStatistics) {
    Label L;
    bne(swap_reg, R0, L);
    delayed()->nop();
    push(tmp_reg);
    push(A0);
    atomic_inc32((address)BiasedLocking::biased_lock_entry_count_addr(), 1, A0, tmp_reg);
    pop(A0);
    pop(tmp_reg);
    bind(L);
  }
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  beq(swap_reg, R0, done);
  delayed()->nop();
  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.

  move(AT, markOopDesc::biased_lock_mask_in_place);
  andr(AT, swap_reg, AT);
  bne(AT, R0, try_revoke_bias);
  delayed()->nop();
  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.

  move(AT, markOopDesc::epoch_mask_in_place);
  andr(AT, swap_reg, AT);
  bne(AT, R0, try_rebias);
  delayed()->nop();
  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.

  ld_ptr(swap_reg, saved_mark_addr);

  move(AT, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  andr(swap_reg, swap_reg, AT);

  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  orr(tmp_reg, tmp_reg, swap_reg);
  //if (os::is_MP()) {
  //  sync();
  //}
  cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (PrintBiasedLockingStatistics) {
    Label L;
    bne(AT, R0, L);
    delayed()->nop();
    push(tmp_reg);
    push(A0);
    atomic_inc32((address)BiasedLocking::anonymously_biased_lock_entry_count_addr(), 1, A0, tmp_reg);
    pop(A0);
    pop(tmp_reg);
    bind(L);
  }
  if (slow_case != NULL) {
    beq_far(AT, R0, *slow_case);
    delayed()->nop();
  }
  b(done);
  delayed()->nop();

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  load_prototype_header(tmp_reg, obj_reg);
  get_thread(swap_reg);
  orr(tmp_reg, tmp_reg, swap_reg);
  ld_ptr(swap_reg, saved_mark_addr);

  //if (os::is_MP()) {
  //  sync();
  //}
  cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (PrintBiasedLockingStatistics) {
    Label L;
    bne(AT, R0, L);
    delayed()->nop();
    push(AT);
    push(tmp_reg);
    atomic_inc32((address)BiasedLocking::rebiased_lock_entry_count_addr(), 1, AT, tmp_reg);
    pop(tmp_reg);
    pop(AT);
    bind(L);
  }
  if (slow_case != NULL) {
    beq_far(AT, R0, *slow_case);
    delayed()->nop();
  }

  b(done);
  delayed()->nop();
  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  ld_ptr(swap_reg, saved_mark_addr);

  if (need_tmp_reg) {
    push(tmp_reg);
  }
  load_prototype_header(tmp_reg, obj_reg);
  //if (os::is_MP()) {
  //  lock();
  //}
  cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (PrintBiasedLockingStatistics) {
    Label L;
    bne(AT, R0, L);
    delayed()->nop();
    push(AT);
    push(tmp_reg);
    atomic_inc32((address)BiasedLocking::revoked_lock_entry_count_addr(), 1, AT, tmp_reg);
    pop(tmp_reg);
    pop(AT);
    bind(L);
  }

  bind(cas_label);
  return null_check_offset;
}
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
#ifdef _LP64
  ld(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  daddi(AT, R0, markOopDesc::biased_lock_pattern);
#else
  lw(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  addi(AT, R0, markOopDesc::biased_lock_pattern);
#endif

  beq(AT, temp_reg, done);
  delayed()->nop();
}
// A stack pointer adjustment is needed here; see
// InterpreterMacroAssembler::super_call_VM_leaf.  This method keeps SP
// 16-byte aligned across the call, so callers need not reserve stack space
// for the arguments themselves.
void MacroAssembler::call_VM_leaf_base(address entry_point, int number_of_arguments) {
  Label L, E;

  assert(number_of_arguments <= 4, "just check");

  andi(AT, SP, 0xf);
  beq(AT, R0, L);
  delayed()->nop();
  daddi(SP, SP, -8);
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  daddi(SP, SP, 8);
  b(E);
  delayed()->nop();

  bind(L);
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  bind(E);
}
void MacroAssembler::jmp(address entry) {
  patchable_set48(T9, (long)entry);
  jr(T9);
}

void MacroAssembler::jmp(address entry, relocInfo::relocType rtype) {
  switch (rtype) {
    case relocInfo::runtime_call_type:
    case relocInfo::none:
      jmp(entry);
      break;
    default:
      {
        InstructionMark im(this);
        relocate(rtype);
        patchable_set48(T9, (long)entry);
        jr(T9);
      }
      break;
  }
}

void MacroAssembler::jmp_far(Label& L) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);

    relocate(relocInfo::internal_word_type);
    patchable_set48(T9, (long)entry);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());

    relocate(relocInfo::internal_word_type);
    patchable_set48(T9, (long)pc());
  }

  jr(T9);
  delayed()->nop();
}
void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  int oop_index;
  if (obj) {
    oop_index = oop_recorder()->find_index(obj);
  } else {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  }
  relocate(metadata_Relocation::spec(oop_index));
  patchable_set48(AT, (long)obj);
  sd(AT, dst);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  int oop_index;
  if (obj) {
    oop_index = oop_recorder()->find_index(obj);
  } else {
    oop_index = oop_recorder()->allocate_metadata_index(obj);
  }
  relocate(metadata_Relocation::spec(oop_index));
  patchable_set48(dst, (long)obj);
}
void MacroAssembler::call(address entry) {
  // C/C++ code assumes T9 is the entry point, so we always move the entry
  // address into T9 first.  Maybe there is a more graceful way to handle
  // this. FIXME
  // For more info, see class NativeCall.
#ifndef _LP64
  move(T9, (int)entry);
#else
  patchable_set48(T9, (long)entry);
#endif
  jalr(T9);
}
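// Calling through T9 matches the MIPS PIC calling convention, in which a
// position-independent callee expects its own entry address in $t9 (e.g. to
// compute $gp in its prologue).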
void MacroAssembler::call(address entry, relocInfo::relocType rtype) {
  switch (rtype) {
    case relocInfo::runtime_call_type:
    case relocInfo::none:
      call(entry);
      break;
    default:
      {
        InstructionMark im(this);
        relocate(rtype);
        call(entry);
      }
      break;
  }
}

void MacroAssembler::call(address entry, RelocationHolder& rh)
{
  switch (rh.type()) {
    case relocInfo::runtime_call_type:
    case relocInfo::none:
      call(entry);
      break;
    default:
      {
        InstructionMark im(this);
        relocate(rh);
        call(entry);
      }
      break;
  }
}
void MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  patchable_set48(IC_Klass, (long)Universe::non_oop_word());
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  relocate(rh);
  patchable_call(entry);
}
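// Universe::non_oop_word() is only a placeholder in the inline-cache
// register; the IC machinery later patches the site with the real cached
// klass once the call resolves.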
void MacroAssembler::c2bool(Register r) {
  Label L;
  Assembler::beq(r, R0, L);
  delayed()->nop();
  move(r, 1);
  bind(L);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif
void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
      tty->cr();
      findpc(eip);
      tty->cr();
#endif
      tty->print_cr("rax, = 0x%08x", rax);
      tty->print_cr("rbx, = 0x%08x", rbx);
      tty->print_cr("rcx = 0x%08x", rcx);
      tty->print_cr("rdx = 0x%08x", rdx);
      tty->print_cr("rdi = 0x%08x", rdi);
      tty->print_cr("rsi = 0x%08x", rsi);
      tty->print_cr("rbp, = 0x%08x", rbp);
      tty->print_cr("rsp = 0x%08x", rsp);
      BREAKPOINT;
    }
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
    assert(false, "DEBUG MESSAGE");
  }
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}
void MacroAssembler::debug(char* msg/*, RegistersForDebugging* regs*/) {
  if (ShowMessageBoxOnError) {
    JavaThreadState saved_state = JavaThread::current()->thread_state();
    JavaThread::current()->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }

      //if (os::message_box(msg, "Execution stopped, print registers?"))
      //  regs->print(::tty);
    }
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  } else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
}
void MacroAssembler::stop(const char* msg) {
  li(A0, (long)msg);
#ifndef _LP64
  // reserve space for the argument (added by yjl 7/10/2005)
  addiu(SP, SP, -1 * wordSize);
#endif
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();
#ifndef _LP64
  // restore space for the argument
  addiu(SP, SP, 1 * wordSize);
#endif
  brk(17);
}
void MacroAssembler::warn(const char* msg) {
#ifdef _LP64
  pushad();
  li(A0, (long)msg);
  push(S2);
  move(AT, -(StackAlignmentInBytes));
  move(S2, SP);     // use S2 as a sender SP holder
  andr(SP, SP, AT); // align stack as required by ABI
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();
  move(SP, S2);     // use S2 as a sender SP holder
  pop(S2);
  popad();
#else
  pushad();
  addi(SP, SP, -4);
  sw(A0, SP, -1 * wordSize);
  li(A0, (long)msg);
  addi(SP, SP, -1 * wordSize);
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();
  addi(SP, SP, 1 * wordSize);
  lw(A0, SP, -1 * wordSize);
  addi(SP, SP, 4);
  popad();
#endif
}
void MacroAssembler::print_reg(Register reg) {
  /*
  char *s = getenv("PRINT_REG");
  if (s == NULL)
    return;
  if (strcmp(s, "1") != 0)
    return;
  */
  void* cur_pc = pc();
  pushad();
  NOT_LP64(push(FP);)

  li(A0, (long)reg->name());
  if (reg == SP)
    addiu(A1, SP, wordSize * 23); // 23 registers saved in pushad()
  else if (reg == A0)
    ld(A1, SP, wordSize * 19);    // A0 has been modified by li(A0, (long)reg->name()). Ugly Code!
  else
    move(A1, reg);
  li(A2, (long)cur_pc);
  push(S2);
  move(AT, -(StackAlignmentInBytes));
  move(S2, SP);     // use S2 as a sender SP holder
  andr(SP, SP, AT); // align stack as required by ABI
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_reg_with_pc), relocInfo::runtime_call_type);
  delayed()->nop();
  move(SP, S2);     // use S2 as a sender SP holder
  pop(S2);
  NOT_LP64(pop(FP);)
  popad();

  /*
  pushad();
#ifdef _LP64
  if (reg == SP)
    addiu(A0, SP, wordSize * 23); // 23 registers saved in pushad()
  else
    move(A0, reg);
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_long), relocInfo::runtime_call_type);
  delayed()->nop();
#else
  push(FP);
  move(A0, reg);
  dsrl32(A1, reg, 0);
  //call(CAST_FROM_FN_PTR(address, SharedRuntime::print_int), relocInfo::runtime_call_type);
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_long), relocInfo::runtime_call_type);
  delayed()->nop();
  pop(FP);
#endif
  popad();
  pushad();
  NOT_LP64(push(FP);)
  char b[50];
  sprintf((char *)b, " pc: %p\n", cur_pc);
  li(A0, (long)(char *)b);
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str), relocInfo::runtime_call_type);
  delayed()->nop();
  NOT_LP64(pop(FP);)
  popad();
  */
}

void MacroAssembler::print_reg(FloatRegister reg) {
  void* cur_pc = pc();
  pushad();
  NOT_LP64(push(FP);)
  li(A0, (long)reg->name());
  push(S2);
  move(AT, -(StackAlignmentInBytes));
  move(S2, SP);     // use S2 as a sender SP holder
  andr(SP, SP, AT); // align stack as required by ABI
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str), relocInfo::runtime_call_type);
  delayed()->nop();
  move(SP, S2);     // use S2 as a sender SP holder
  pop(S2);
  NOT_LP64(pop(FP);)
  popad();

  pushad();
  NOT_LP64(push(FP);)
#if 1
  move(FP, SP);
  move(AT, -(StackAlignmentInBytes));
  andr(SP, SP, AT);
  mov_d(F12, reg);
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_double), relocInfo::runtime_call_type);
  delayed()->nop();
  move(SP, FP);
#else
  mov_s(F12, reg);
  //call(CAST_FROM_FN_PTR(address, SharedRuntime::print_float), relocInfo::runtime_call_type);
  //delayed()->nop();
#endif
  NOT_LP64(pop(FP);)
  popad();

#if 0
  pushad();
  NOT_LP64(push(FP);)
  char* b = new char[50];
  sprintf(b, " pc: %p\n", cur_pc);
  li(A0, (long)b);
  call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str), relocInfo::runtime_call_type);
  delayed()->nop();
  NOT_LP64(pop(FP);)
  popad();
#endif
}
void MacroAssembler::increment(Register reg, int imm) {
  if (!imm) return;
  if (is_simm16(imm)) {
#ifdef _LP64
    daddiu(reg, reg, imm);
#else
    addiu(reg, reg, imm);
#endif
  } else {
    move(AT, imm);
#ifdef _LP64
    daddu(reg, reg, AT);
#else
    addu(reg, reg, AT);
#endif
  }
}

void MacroAssembler::decrement(Register reg, int imm) {
  increment(reg, -imm);
}
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  if (arg_1 != A1) move(A1, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  if (arg_1 != A1) move(A1, arg_1);
  if (arg_2 != A2) move(A2, arg_2);
  assert(arg_2 != A1, "smashed argument");
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  if (arg_1 != A1) move(A1, arg_1);
  if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument");
  if (arg_3 != A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument");
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, NOREG, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  if (arg_1 != A1) move(A1, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  if (arg_1 != A1) move(A1, arg_1);
  if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  if (arg_1 != A1) move(A1, arg_1);
  if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument");
  if (arg_3 != A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {

  address before_call_pc;
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifndef OPT_THREAD
    java_thread = T2;
    get_thread(java_thread);
#else
    java_thread = TREG;
#endif
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(number_of_arguments <= 4, "cannot have more than 4 arguments");
  assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  assert(last_java_sp != FP, "this code doesn't work for last_java_sp == fp, which currently can't portably work anyway since C2 doesn't save ebp");

  // set last Java frame before call
  before_call_pc = (address)pc();
  set_last_Java_frame(java_thread, last_java_sp, FP, before_call_pc);

  // do the call
  move(A0, java_thread);
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
#ifndef OPT_THREAD
  get_thread(java_thread);
#else
#ifdef ASSERT
  {
    Label L;
    get_thread(AT);
    beq(java_thread, AT, L);
    delayed()->nop();
    stop("MacroAssembler::call_VM_base: TREG not callee saved?");
    bind(L);
  }
#endif
#endif

  // discard thread and arguments
  ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
  // reset last Java frame
  reset_last_Java_frame(java_thread, false);

  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);
  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    Label L;
#ifdef _LP64
    ld(AT, java_thread, in_bytes(Thread::pending_exception_offset()));
#else
    lw(AT, java_thread, in_bytes(Thread::pending_exception_offset()));
#endif
    beq(AT, R0, L);
    delayed()->nop();
    li(AT, before_call_pc);
    push(AT);
    jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    delayed()->nop();
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
#ifdef _LP64
    ld(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset()));
    sd(R0, java_thread, in_bytes(JavaThread::vm_result_offset()));
#else
    lw(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset()));
    sw(R0, java_thread, in_bytes(JavaThread::vm_result_offset()));
#endif
    verify_oop(oop_result);
  }
}
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  move(V0, SP);
  // we also reserve space for java_thread here
#ifndef _LP64
  daddi(SP, SP, (1 + number_of_arguments) * (-wordSize));
#endif
  move(AT, -(StackAlignmentInBytes));
  andr(SP, SP, AT);
  call_VM_base(oop_result, NOREG, V0, entry_point, number_of_arguments, check_exceptions);

}
void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  if (arg_0 != A0) move(A0, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  if (arg_0 != A0) move(A0, arg_0);
  if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument");
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  if (arg_0 != A0) move(A0, arg_0);
  if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument");
  if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A0 && arg_2 != A1, "smashed argument");
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::super_call_VM_leaf(address entry_point,
                                        Register arg_1) {
  if (arg_1 != A0) move(A0, arg_1);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point,
                                        Register arg_1,
                                        Register arg_2) {
  if (arg_1 != A0) move(A0, arg_1);
  if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument");
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point,
                                        Register arg_1,
                                        Register arg_2,
                                        Register arg_3) {
  if (arg_1 != A0) move(A0, arg_1);
  if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument");
  if (arg_3 != A2) move(A2, arg_3); assert(arg_3 != A0 && arg_3 != A1, "smashed argument");
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    lw(AT, reg, 0);
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
void MacroAssembler::enter() {
  push2(RA, FP);
  move(FP, SP);
}

void MacroAssembler::leave() {
#ifndef _LP64
  //move(SP, FP);
  //pop2(FP, RA);
  addi(SP, FP, 2 * wordSize);
  lw(RA, SP, -1 * wordSize);
  lw(FP, SP, -2 * wordSize);
#else
  daddi(SP, FP, 2 * wordSize);
  ld(RA, SP, -1 * wordSize);
  ld(FP, SP, -2 * wordSize);
#endif
}

/*
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
*/
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifndef OPT_THREAD
    java_thread = T1;
    get_thread(java_thread);
#else
    java_thread = TREG;
#endif
  }
  // we must set sp to zero to clear frame
  st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is possible
  // that we need it only for debugging
  if (clear_fp) {
    st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_pc_offset()));
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  Register thread = TREG;
#ifndef OPT_THREAD
  get_thread(thread);
#endif
  // we must set sp to zero to clear frame
  sd(R0, Address(thread, JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    sd(R0, Address(thread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  sd(R0, Address(thread, JavaThread::last_Java_pc_offset()));
}
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  move(tmp, thread);
  srl(tmp, tmp, os::get_serialize_page_shift_count());
  move(AT, (os::vm_page_size() - sizeof(int)));
  andr(tmp, tmp, AT);
  sw(tmp, Address(tmp, (intptr_t)os::get_memory_serialize_page()));
}
// Calls to C land
//
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifndef OPT_THREAD
    java_thread = T2;
    get_thread(java_thread);
#else
    java_thread = TREG;
#endif
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    st_ptr(last_java_fp, java_thread, in_bytes(JavaThread::last_Java_fp_offset()));
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    relocate(relocInfo::internal_word_type);
    patchable_set48(AT, (long)last_java_pc);
    st_ptr(AT, java_thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
  }
  st_ptr(last_java_sp, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }

  Register thread = TREG;
#ifndef OPT_THREAD
  get_thread(thread);
#endif
  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    sd(last_java_fp, Address(thread, JavaThread::last_Java_fp_offset()));
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    relocate(relocInfo::internal_word_type);
    patchable_set48(AT, (long)last_java_pc);
    st_ptr(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
  }

  sd(last_java_sp, Address(thread, JavaThread::last_Java_sp_offset()));
}
//////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
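// G1 SATB pre-barrier: while concurrent marking is active, the value about
// to be overwritten is recorded into the thread-local SATB buffer so the
// marker still sees the old object graph; when the buffer index reaches
// zero we fall into the runtime to flush it.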
void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register pre_val,
                                          Register thread,
                                          Register tmp,
                                          bool tosca_live,
                                          bool expand_call) {

  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == TREG, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != V0, "check this code");
  }

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    lw(AT, in_progress);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    lb(AT, in_progress);
  }
  beq(AT, R0, done);
  delayed()->nop();

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  beq(pre_val, R0, done);
  delayed()->nop();

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  ld(tmp, index);
  beq(tmp, R0, runtime);
  delayed()->nop();

  daddiu(tmp, tmp, -1 * wordSize);
  sd(tmp, index);
  ld(AT, buffer);
  daddu(tmp, tmp, AT);

  // Record the previous value
  sd(pre_val, tmp, 0);
  beq(R0, R0, done);
  delayed()->nop();

  bind(runtime);
  // save the live input values
  if (tosca_live) push(V0);

  if (obj != noreg && obj != V0) push(obj);

  if (pre_val != V0) push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( push(thread); )

  if (expand_call) {
    LP64_ONLY( assert(pre_val != A1, "smashed arg"); )
    if (thread != A1) move(A1, thread);
    if (pre_val != A0) move(A0, pre_val);
    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
  } else {
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
  }

  NOT_LP64( pop(thread); )

  // restore the live input values
  if (pre_val != V0)
    pop(pre_val);

  if (obj != noreg && obj != V0)
    pop(obj);

  if (tosca_live) pop(V0);

  bind(done);
}
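// G1 post-barrier: filter out same-region stores, null stores, and stores
// whose card is already young or dirty; otherwise dirty the card and
// enqueue its address in the thread-local dirty-card queue, calling into
// the runtime when the queue is full.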
void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
                                           Register thread,
                                           Register tmp,
                                           Register tmp2) {
  assert(tmp != AT, "must be");
  assert(tmp2 != AT, "must be");
#ifdef _LP64
  assert(thread == TREG, "must be");
#endif // _LP64

  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  Label done;
  Label runtime;

  // Does store cross heap regions?
  xorr(AT, store_addr, new_val);
  dsrl(AT, AT, HeapRegion::LogOfHRGrainBytes);
  beq(AT, R0, done);
  delayed()->nop();

  // crosses regions, storing NULL?
  beq(new_val, R0, done);
  delayed()->nop();

  // storing region crossing non-NULL, is card already dirty?
  const Register card_addr = tmp;
  const Register cardtable = tmp2;

  move(card_addr, store_addr);
  dsrl(card_addr, card_addr, CardTableModRefBS::card_shift);
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  set64(cardtable, (intptr_t)ct->byte_map_base);
  daddu(card_addr, card_addr, cardtable);

  lb(AT, card_addr, 0);
  daddiu(AT, AT, -1 * (int)G1SATBCardTableModRefBS::g1_young_card_val());
  beq(AT, R0, done);
  delayed()->nop();

  sync();
  lb(AT, card_addr, 0);
  daddiu(AT, AT, -1 * (int)CardTableModRefBS::dirty_card_val());
  beq(AT, R0, done);
  delayed()->nop();

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.
  move(AT, (int)CardTableModRefBS::dirty_card_val());
  sb(AT, card_addr, 0);

  lw(AT, queue_index);
  beq(AT, R0, runtime);
  delayed()->nop();
  daddiu(AT, AT, -1 * wordSize);
  sw(AT, queue_index);
  ld(tmp2, buffer);
#ifdef _LP64
  ld(AT, queue_index);
  daddu(tmp2, tmp2, AT);
  sd(card_addr, tmp2, 0);
#else
  lw(AT, queue_index);
  addu32(tmp2, tmp2, AT);
  sw(card_addr, tmp2, 0);
#endif
  beq(R0, R0, done);
  delayed()->nop();

  bind(runtime);
  // save the live input values
  push(store_addr);
  push(new_val);
#ifdef _LP64
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, TREG);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(thread);
#endif
  pop(new_val);
  pop(store_addr);

  bind(done);
}
#endif // INCLUDE_ALL_GCS
//////////////////////////////////////////////////////////////////////////////////
void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  store_check_part_1(obj);
  store_check_part_2(obj);
}

void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
// split the store check operation so that other instructions can be scheduled in between
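// Card marking maps the oop's address to one byte in the card table and
// zeroes it: with the usual 512-byte cards (card_shift == 9) the byte lives
// at byte_map_base + (addr >> 9).  The exact shift is whatever
// CardTableModRefBS::card_shift says.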
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
#ifdef _LP64
  dsrl(obj, obj, CardTableModRefBS::card_shift);
#else
  shr(obj, CardTableModRefBS::card_shift);
#endif
}

void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  set64(AT, (long)ct->byte_map_base);
#ifdef _LP64
  dadd(AT, AT, obj);
#else
  add(AT, AT, obj);
#endif
  if (UseConcMarkSweepGC) sync();
  sb(R0, AT, 0);
}
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
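// TLAB allocation is a simple bump-pointer: obj gets the current tlab_top,
// end = obj + size is checked against tlab_end, and tlab_top is advanced.
// No atomics are needed since the TLAB is private to the current thread;
// on overflow we branch far to slow_case.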
void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
                                   Register t1, Register t2, Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1, t2, AT);

  Register end = t2;
#ifndef OPT_THREAD
  Register thread = t1;
  get_thread(thread);
#else
  Register thread = TREG;
#endif
  verify_tlab(t1, t2); // blows t1&t2

  ld_ptr(obj, thread, in_bytes(JavaThread::tlab_top_offset()));

  if (var_size_in_bytes == NOREG) {
    // we don't need to move con_size_in_bytes to a register first
    // (by yjl 8/17/2005)
    assert(is_simm16(con_size_in_bytes), "fixme by moving imm to a register first");
    addi(end, obj, con_size_in_bytes);
  } else {
    add(end, obj, var_size_in_bytes);
  }

  ld_ptr(AT, thread, in_bytes(JavaThread::tlab_end_offset()));
  sltu(AT, AT, end);
  bne_far(AT, R0, slow_case);
  delayed()->nop();

  // update the tlab top pointer
  st_ptr(end, thread, in_bytes(JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  /*if (var_size_in_bytes == end) {
    sub(var_size_in_bytes, end, obj);
  }*/

  verify_tlab(t1, t2);
}
// Defines obj, preserves var_size_in_bytes
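// Unlike the TLAB path, eden is shared by all threads, so the bump of the
// global top pointer must go through a cmpxchg retry loop: if another
// thread advanced top between our load and the CAS, we loop and try again.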
1845 void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
1846 Register t1, Register t2, Label& slow_case) {
1847 assert_different_registers(obj, var_size_in_bytes, t1, AT);
1848 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { //by yyq
1849 // No allocation in the shared eden.
1850 b_far(slow_case);
1851 delayed()->nop();
1852 } else {
1854 #ifndef _LP64
1855 Address heap_top(t1, Assembler::split_low((intptr_t)Universe::heap()->top_addr()));
1856 lui(t1, split_high((intptr_t)Universe::heap()->top_addr()));
1857 #else
1858 Address heap_top(t1);
1859 li(t1, (long)Universe::heap()->top_addr());
1860 #endif
1861 ld_ptr(obj, heap_top);
1863 Register end = t2;
1864 Label retry;
1866 bind(retry);
1867 if (var_size_in_bytes == NOREG) {
1868 // As in tlab_allocate, con_size_in_bytes must fit in a simm16 immediate.
1869 assert(is_simm16(con_size_in_bytes), "fixme by moving imm to a register first");
1870 addi(end, obj, con_size_in_bytes);
1871 } else {
1872 add(end, obj, var_size_in_bytes);
1873 }
1874 // if end < obj then we wrapped around => object too long => slow case
1875 sltu(AT, end, obj);
1876 bne_far(AT, R0, slow_case);
1877 delayed()->nop();
1879 li(AT, (long)Universe::heap()->end_addr());
1880 ld_ptr(AT, AT, 0);
1881 sltu(AT, AT, end);
1882 bne_far(AT, R0, slow_case);
1883 delayed()->nop();
1884 // Compare obj with the current top; if they are still equal, store end as
1885 // the new top through the top-addr pointer. cmpxchg sets AT to 1 on success
1886 // and 0 on failure; LL/SC supplies the atomicity (no x86 lock prefix needed).
1887 //if (os::is_MP()) {
1888 // sync();
1889 //}
1891 // if someone beat us on the allocation, try again, otherwise continue
1892 cmpxchg(end, heap_top, obj);
1893 beq_far(AT, R0, retry); //by yyq
1894 delayed()->nop();
1895 }
1896 }
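// Shared-eden allocation is the same bump-pointer idea, but the top pointer
// is global, so it is advanced with a CAS retry loop. Roughly (illustration
// only):
//
//   obj = *heap_top;
//   retry:
//     end = obj + size;
//     if (end < obj) goto slow_case;                       // wrapped: too large
//     if (*Universe::heap()->end_addr() < end) goto slow_case;
//     if (!CAS(heap_top, /*expected*/ obj, /*new*/ end)) goto retry;
//
// On CAS failure cmpxchg leaves the freshly observed top in obj, so the
// retry recomputes end from the new value without an extra load.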
1898 // C2 doesn't invoke this one.
1899 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
1900 Register top = T0;
1901 Register t1 = T1;
1902 Register t2 = T9;
1903 Register t3 = T3;
1904 Register thread_reg = T8;
1905 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ T2, A4);
1906 Label do_refill, discard_tlab;
1908 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { //by yyq
1909 // No allocation in the shared eden.
1910 b(slow_case);
1911 delayed()->nop();
1912 }
1914 get_thread(thread_reg);
1916 ld_ptr(top, thread_reg, in_bytes(JavaThread::tlab_top_offset()));
1917 ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_end_offset()));
1919 // calculate amount of free space
1920 sub(t1, t1, top);
1921 shr(t1, LogHeapWordSize);
1923 // Retain tlab and allocate object in shared space if
1924 // the amount free in the tlab is too large to discard.
1925 ld_ptr(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
1926 slt(AT, t2, t1);
1927 beq(AT, R0, discard_tlab);
1928 delayed()->nop();
1930 // Retain
1931 #ifndef _LP64
1932 move(AT, ThreadLocalAllocBuffer::refill_waste_limit_increment());
1933 #else
1934 li(AT, ThreadLocalAllocBuffer::refill_waste_limit_increment());
1935 #endif
1936 add(t2, t2, AT);
1937 st_ptr(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
1939 if (TLABStats) {
1940 // increment number of slow_allocations
1941 lw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset()));
1942 addiu(AT, AT, 1);
1943 sw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset()));
1944 }
1945 b(try_eden);
1946 delayed()->nop();
1948 bind(discard_tlab);
1949 if (TLABStats) {
1950 // increment number of refills
1951 lw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset()));
1952 addi(AT, AT, 1);
1953 sw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset()));
1954 // accumulate wastage -- t1 is amount free in tlab
1955 lw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
1956 add(AT, AT, t1);
1957 sw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
1958 }
1960 // if tlab is currently allocated (top or end != null) then
1961 // fill [top, end + alignment_reserve) with array object
1962 beq(top, R0, do_refill);
1963 delayed()->nop();
1965 // set up the mark word
1966 li(AT, (long)markOopDesc::prototype()->copy_set_hash(0x2));
1967 st_ptr(AT, top, oopDesc::mark_offset_in_bytes());
1969 // set the length to the remaining space
1970 addi(t1, t1, - typeArrayOopDesc::header_size(T_INT));
1971 addi(t1, t1, ThreadLocalAllocBuffer::alignment_reserve());
1972 shl(t1, log2_intptr(HeapWordSize/sizeof(jint)));
1973 sw(t1, top, arrayOopDesc::length_offset_in_bytes());
1975 // set klass to intArrayKlass
1976 #ifndef _LP64
1977 lui(AT, split_high((intptr_t)Universe::intArrayKlassObj_addr()));
1978 lw(t1, AT, split_low((intptr_t)Universe::intArrayKlassObj_addr()));
1979 #else
1980 li(AT, (intptr_t)Universe::intArrayKlassObj_addr());
1981 ld_ptr(t1, AT, 0);
1982 #endif
1983 //st_ptr(t1, top, oopDesc::klass_offset_in_bytes());
1984 store_klass(top, t1);
1986 ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_start_offset()));
1987 subu(t1, top, t1);
1988 incr_allocated_bytes(thread_reg, t1, 0);
1990 // refill the tlab with an eden allocation
1991 bind(do_refill);
1992 ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_size_offset()));
1993 shl(t1, LogHeapWordSize);
1994 // add object_size ??
1995 eden_allocate(top, t1, 0, t2, t3, slow_case);
1997 // Check that t1 was preserved in eden_allocate.
1998 #ifdef ASSERT
1999 if (UseTLAB) {
2000 Label ok;
2001 assert_different_registers(thread_reg, t1);
2002 ld_ptr(AT, thread_reg, in_bytes(JavaThread::tlab_size_offset()));
2003 shl(AT, LogHeapWordSize);
2004 beq(AT, t1, ok);
2005 delayed()->nop();
2006 stop("assert(t1 != tlab size)");
2007 should_not_reach_here();
2009 bind(ok);
2010 }
2011 #endif
2012 st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_start_offset()));
2013 st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_top_offset()));
2014 add(top, top, t1);
2015 addi(top, top, - ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
2016 st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_end_offset()));
2017 verify_tlab(t1, t2);
2018 b(retry);
2019 delayed()->nop();
2020 }
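// A sketch of the discard path above (illustration only): the dead TLAB tail
// is formatted as an int[] filler so heap walkers can still parse the region.
// With t1 holding the free space in HeapWords, the array length is
//
//   length = (free + ThreadLocalAllocBuffer::alignment_reserve()
//                  - typeArrayOopDesc::header_size(T_INT))
//            * (HeapWordSize / sizeof(jint));   // length counted in jints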
2022 void MacroAssembler::incr_allocated_bytes(Register thread,
2023 Register var_size_in_bytes,
2024 int con_size_in_bytes,
2025 Register t1) {
2026 if (!thread->is_valid()) {
2027 #ifndef OPT_THREAD
2028 assert(t1->is_valid(), "need temp reg");
2029 thread = t1;
2030 get_thread(thread);
2031 #else
2032 thread = TREG;
2033 #endif
2034 }
2036 ld_ptr(AT, thread, in_bytes(JavaThread::allocated_bytes_offset()));
2037 if (var_size_in_bytes->is_valid()) {
2038 addu(AT, AT, var_size_in_bytes);
2039 } else {
2040 addiu(AT, AT, con_size_in_bytes);
2041 }
2042 st_ptr(AT, thread, in_bytes(JavaThread::allocated_bytes_offset()));
2043 }
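// In effect (illustration only):
//
//   thread->allocated_bytes += size;   // size = var_size_in_bytes if that
//                                      // register is valid, else con_size_in_bytes
//
// No atomic update is needed: the counter is per-thread, so the plain
// load/add/store through AT above is sufficient.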
2045 static const double pi_4 = 0.7853981633974483;
2047 // The x86 version is too elaborate; we don't seem to need that fuss here. FIXME if wrong.
2048 // must get argument(a double) in F12/F13
2049 //void MacroAssembler::trigfunc(char trig, bool preserve_cpu_regs, int num_fpu_regs_in_use) {
2050 // We need to preserve any registers that may be modified during the call. @Jerome
2051 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
2052 // Save all modified registers here; pushad() is conservative.
2054 // FIXME: the disassembly of trigfunc only uses V0, V1, T9, SP and RA,
2055 // so saving just V0, V1 and T9 would suffice.
2056 pushad();
2057 // Reserve stack space for the callee before the call.
2058 addi(SP, SP, -wordSize * 2);
2059 switch (trig){
2060 case 's' :
2061 call( CAST_FROM_FN_PTR(address, SharedRuntime::dsin), relocInfo::runtime_call_type );
2062 delayed()->nop();
2063 break;
2064 case 'c':
2065 call( CAST_FROM_FN_PTR(address, SharedRuntime::dcos), relocInfo::runtime_call_type );
2066 delayed()->nop();
2067 break;
2068 case 't':
2069 call( CAST_FROM_FN_PTR(address, SharedRuntime::dtan), relocInfo::runtime_call_type );
2070 delayed()->nop();
2071 break;
2072 default: assert(false, "bad intrinsic");
2073 break;
2075 }
2077 addi(SP, SP, wordSize * 2);
2078 popad();
2081 }
2083 #ifdef _LP64
2084 void MacroAssembler::li(Register rd, long imm) {
2085 if (imm <= max_jint && imm >= min_jint) {
2086 li32(rd, (int)imm);
2087 } else if (julong(imm) <= 0xFFFFFFFF) {
2088 assert_not_delayed();
2089 // lui sign-extends, so we can't use that.
2090 ori(rd, R0, julong(imm) >> 16);
2091 dsll(rd, rd, 16);
2092 ori(rd, rd, split_low(imm));
2095 } else if ((imm > 0) && is_simm16(imm >> 32)) {
2096 /* A 48-bit address */
2097 li48(rd, imm);
2098 } else {
2099 li64(rd, imm);
2100 }
2101 }
2102 #else
2103 void MacroAssembler::li(Register rd, long imm) {
2104 li32(rd, (int)imm);
2105 }
2106 #endif
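// Materialization tiers chosen by li() above (LP64; counts are illustrative):
//
//   simm16                       -> addiu/addi            1 inst
//   other signed 32-bit          -> lui (+ ori)           1-2 insts
//   unsigned 32-bit (bit 31 set) -> ori + dsll + ori      3 insts
//   positive 48-bit              -> li48                  4 insts
//   anything else                -> li64                  6 insts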
2108 void MacroAssembler::li32(Register reg, int imm) {
2109 if (is_simm16(imm)) {
2110 /* Jin: for imm < 0, we should use addi instead of addiu.
2111 *
2112 * java.lang.StringCoding$StringDecoder.decode(jobject, jint, jint)
2113 *
2114 * 78 move [int:-1|I] [a0|I]
2115 * : daddi a0, zero, 0xffffffff (correct)
2116 * : daddiu a0, zero, 0xffffffff (incorrect)
2117 */
2118 if (imm >= 0)
2119 addiu(reg, R0, imm);
2120 else
2121 addi(reg, R0, imm);
2122 } else {
2123 lui(reg, split_low(imm >> 16));
2124 if (split_low(imm))
2125 ori(reg, reg, split_low(imm));
2126 }
2127 }
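// Example (illustration only): li32(reg, 0x12345678) emits
//   lui reg, 0x1234       # reg = 0x12340000
//   ori reg, reg, 0x5678  # reg = 0x12345678
// while a simm16 value such as -1 emits a single addi reg, zero, -1.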
2129 #ifdef _LP64
2130 void MacroAssembler::set64(Register d, jlong value) {
2131 assert_not_delayed();
2133 int hi = (int)(value >> 32);
2134 int lo = (int)value; // low 32 bits
2136 if (value == lo) { // 32-bit integer
2137 if (is_simm16(value)) {
2138 daddiu(d, R0, value);
2139 } else {
2140 lui(d, split_low(value >> 16));
2141 if (split_low(value)) {
2142 ori(d, d, split_low(value));
2143 }
2144 }
2145 } else if (hi == 0) { // hardware zero-extends to upper 32
2146 ori(d, R0, julong(value) >> 16);
2147 dsll(d, d, 16);
2148 if (split_low(value)) {
2149 ori(d, d, split_low(value));
2150 }
2151 } else if ((value > 0) && is_simm16(value >> 32)) { // li48
2152 // 4 insts
2153 li48(d, value);
2154 } else { // li64
2155 // 6 insts
2156 li64(d, value);
2157 }
2158 }
2161 int MacroAssembler::insts_for_set64(jlong value) {
2162 int hi = (int)(value >> 32);
2163 int lo = (int)value; // low 32 bits
2165 int count = 0;
2167 if (value == lo) { // 32-bit integer
2168 if (is_simm16(value)) {
2169 //daddiu(d, R0, value);
2170 count++;
2171 } else {
2172 //lui(d, split_low(value >> 16));
2173 count++;
2174 if (split_low(value)) {
2175 //ori(d, d, split_low(value));
2176 count++;
2177 }
2178 }
2179 } else if (hi == 0) { // hardware zero-extends to upper 32
2180 //ori(d, R0, julong(value) >> 16);
2181 //dsll(d, d, 16);
2182 count += 2;
2183 if (split_low(value)) {
2184 //ori(d, d, split_low(value));
2185 count++;
2186 }
2187 } else if ((value > 0) && is_simm16(value >> 32)) { // li48
2188 // 4 insts
2189 //li48(d, value);
2190 count += 4;
2191 } else { // li64
2192 // 6 insts
2193 //li64(d, value);
2194 count += 6;
2195 }
2197 return count;
2198 }
2200 void MacroAssembler::patchable_set48(Register d, jlong value) {
2201 assert_not_delayed();
2203 int hi = (int)(value >> 32);
2204 int lo = (int)value; // low 32 bits
2206 int count = 0;
2208 if (value == lo) { // 32-bit integer
2209 if (is_simm16(value)) {
2210 daddiu(d, R0, value);
2211 count += 1;
2212 } else {
2213 lui(d, split_low(value >> 16));
2214 count += 1;
2215 if (split_low(value)) {
2216 ori(d, d, split_low(value));
2217 count += 1;
2218 }
2219 }
2220 } else if (hi == 0) { // hardware zero-extends to upper 32
2221 ori(d, R0, julong(value) >> 16);
2222 dsll(d, d, 16);
2223 count += 2;
2224 if (split_low(value)) {
2225 ori(d, d, split_low(value));
2226 count += 1;
2227 }
2228 } else if ((value > 0) && is_simm16(value >> 32)) { // li48
2229 // 4 insts
2230 li48(d, value);
2231 count += 4;
2232 } else { // li64
2233 tty->print_cr("value = 0x%lx", value); // value is a jlong: use a 64-bit format
2234 guarantee(false, "Not supported yet!");
2235 }
2237 for (; count < 4; count++) {
2238 nop();
2239 }
2240 }
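// The trailing nops above are deliberate: a patchable sequence must always
// occupy exactly 4 instruction slots so the constant can later be rewritten
// in place without moving any code. E.g. a simm16 value still fills 4 slots
// (illustration only):
//   daddiu d, zero, imm16
//   nop
//   nop
//   nop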
2242 void MacroAssembler::patchable_set32(Register d, jlong value) {
2243 assert_not_delayed();
2245 int hi = (int)(value >> 32);
2246 int lo = (int)value; // low 32 bits
2248 int count = 0;
2250 if (value == lo) { // 32-bit integer
2251 if (is_simm16(value)) {
2252 daddiu(d, R0, value);
2253 count += 1;
2254 } else {
2255 lui(d, split_low(value >> 16));
2256 count += 1;
2257 if (split_low(value)) {
2258 ori(d, d, split_low(value));
2259 count += 1;
2260 }
2261 }
2262 } else if (hi == 0) { // hardware zero-extends to upper 32
2263 ori(d, R0, julong(value) >> 16);
2264 dsll(d, d, 16);
2265 count += 2;
2266 if (split_low(value)) {
2267 ori(d, d, split_low(value));
2268 count += 1;
2269 }
2270 } else {
2271 tty->print_cr("value = 0x%lx", value); // value is a jlong: use a 64-bit format
2272 guarantee(false, "Not supported yet!");
2273 }
2275 for (; count < 3; count++) {
2276 nop();
2277 }
2278 }
2280 void MacroAssembler::patchable_call32(Register d, jlong value) {
2281 assert_not_delayed();
2283 int hi = (int)(value >> 32);
2284 int lo = (int)value; // low 32 bits
2286 int count = 0;
2288 if (value == lo) { // 32-bit integer
2289 if (is_simm16(value)) {
2290 daddiu(d, R0, value);
2291 count += 1;
2292 } else {
2293 lui(d, split_low(value >> 16));
2294 count += 1;
2295 if (split_low(value)) {
2296 ori(d, d, split_low(value));
2297 count += 1;
2298 }
2299 }
2300 } else {
2301 tty->print_cr("value = 0x%lx", value); // value is a jlong: use a 64-bit format
2302 guarantee(false, "Not supported yet!");
2303 }
2305 for (; count < 2; count++) {
2306 nop();
2307 }
2308 }
2310 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
2311 assert(UseCompressedClassPointers, "should only be used for compressed header");
2312 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
2314 int klass_index = oop_recorder()->find_index(k);
2315 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
2316 long narrowKlass = (long)Klass::encode_klass(k);
2318 relocate(rspec, Assembler::narrow_oop_operand);
2319 patchable_set48(dst, narrowKlass);
2320 }
2323 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
2324 assert(UseCompressedOops, "should only be used for compressed header");
2325 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
2327 int oop_index = oop_recorder()->find_index(obj);
2328 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2330 relocate(rspec, Assembler::narrow_oop_operand);
2331 patchable_set48(dst, oop_index);
2332 }
2334 void MacroAssembler::li64(Register rd, long imm) {
2335 assert_not_delayed();
2336 lui(rd, imm >> 48);
2337 ori(rd, rd, split_low(imm >> 32));
2338 dsll(rd, rd, 16);
2339 ori(rd, rd, split_low(imm >> 16));
2340 dsll(rd, rd, 16);
2341 ori(rd, rd, split_low(imm));
2342 }
2344 void MacroAssembler::li48(Register rd, long imm) {
2345 assert_not_delayed();
2346 assert(is_simm16(imm >> 32), "Not a 48-bit address");
2347 lui(rd, imm >> 32);
2348 ori(rd, rd, split_low(imm >> 16));
2349 dsll(rd, rd, 16);
2350 ori(rd, rd, split_low(imm));
2351 }
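// Worked example (illustration only): li48(rd, 0x00005566778899aa) emits
//   lui  rd, 0x5566        # rd = 0x0000000055660000
//   ori  rd, rd, 0x7788    # rd = 0x0000000055667788
//   dsll rd, rd, 16        # rd = 0x0000556677880000
//   ori  rd, rd, 0x99aa    # rd = 0x00005566778899aa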
2352 #endif
2353 // NOTE: unlike i486, we do not push EAX here;
2354 // x86 saves EAX because it uses EAX as the jump register.
2355 void MacroAssembler::verify_oop(Register reg, const char* s) {
2356 /*
2357 if (!VerifyOops) return;
2359 // Pass register number to verify_oop_subroutine
2360 char* b = new char[strlen(s) + 50];
2361 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
2362 push(rax); // save rax,
2363 push(reg); // pass register argument
2364 ExternalAddress buffer((address) b);
2365 // avoid using pushptr, as it modifies scratch registers
2366 // and our contract is not to modify anything
2367 movptr(rax, buffer.addr());
2368 push(rax);
2369 // call indirectly to solve generation ordering problem
2370 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
2371 call(rax);
2372 */
2373 if (!VerifyOops) return;
2374 const char * b = NULL;
2375 stringStream ss;
2376 ss.print("verify_oop: %s: %s", reg->name(), s);
2377 b = code_string(ss.as_string());
2378 #ifdef _LP64
2379 pushad();
2380 move(A1, reg);
2381 li(A0, (long)b);
2382 li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address());
2383 ld(T9, AT, 0);
2384 jalr(T9);
2385 delayed()->nop();
2386 popad();
2387 #else
2388 // Pass register number to verify_oop_subroutine
2389 sw(T0, SP, - wordSize);
2390 sw(T1, SP, - 2*wordSize);
2391 sw(RA, SP, - 3*wordSize);
2392 sw(A0, SP ,- 4*wordSize);
2393 sw(A1, SP ,- 5*wordSize);
2394 sw(AT, SP ,- 6*wordSize);
2395 sw(T9, SP ,- 7*wordSize);
2396 addiu(SP, SP, - 7 * wordSize);
2397 move(A1, reg);
2398 li(A0, (long)b);
2399 // call indirectly to solve generation ordering problem
2400 li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address());
2401 lw(T9, AT, 0);
2402 jalr(T9);
2403 delayed()->nop();
2404 lw(T0, SP, 6* wordSize);
2405 lw(T1, SP, 5* wordSize);
2406 lw(RA, SP, 4* wordSize);
2407 lw(A0, SP, 3* wordSize);
2408 lw(A1, SP, 2* wordSize);
2409 lw(AT, SP, 1* wordSize);
2410 lw(T9, SP, 0* wordSize);
2411 addiu(SP, SP, 7 * wordSize);
2412 #endif
2413 }
2416 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
2417 if (!VerifyOops) {
2418 nop();
2419 return;
2420 }
2421 // Pass register number to verify_oop_subroutine
2422 const char * b = NULL;
2423 stringStream ss;
2424 ss.print("verify_oop_addr: %s", s);
2425 b = code_string(ss.as_string());
2427 st_ptr(T0, SP, - wordSize);
2428 st_ptr(T1, SP, - 2*wordSize);
2429 st_ptr(RA, SP, - 3*wordSize);
2430 st_ptr(A0, SP, - 4*wordSize);
2431 st_ptr(A1, SP, - 5*wordSize);
2432 st_ptr(AT, SP, - 6*wordSize);
2433 st_ptr(T9, SP, - 7*wordSize);
2434 ld_ptr(A1, addr); // addr may use SP, so load from it before changing SP
2435 addiu(SP, SP, - 7 * wordSize);
2437 li(A0, (long)b);
2438 // call indirectly to solve generation ordering problem
2439 li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address());
2440 ld_ptr(T9, AT, 0);
2441 jalr(T9);
2442 delayed()->nop();
2443 ld_ptr(T0, SP, 6* wordSize);
2444 ld_ptr(T1, SP, 5* wordSize);
2445 ld_ptr(RA, SP, 4* wordSize);
2446 ld_ptr(A0, SP, 3* wordSize);
2447 ld_ptr(A1, SP, 2* wordSize);
2448 ld_ptr(AT, SP, 1* wordSize);
2449 ld_ptr(T9, SP, 0* wordSize);
2450 addiu(SP, SP, 7 * wordSize);
2451 }
2453 // used registers : T0, T1
2454 void MacroAssembler::verify_oop_subroutine() {
2455 // RA: ra
2456 // A0: char* error message
2457 // A1: oop object to verify
2459 Label exit, error;
2460 // increment counter
2461 li(T0, (long)StubRoutines::verify_oop_count_addr());
2462 lw(AT, T0, 0);
2463 #ifdef _LP64
2464 daddi(AT, AT, 1);
2465 #else
2466 addi(AT, AT, 1);
2467 #endif
2468 sw(AT, T0, 0);
2470 // make sure object is 'reasonable'
2471 beq(A1, R0, exit); // if obj is NULL it is ok
2472 delayed()->nop();
2474 // Check if the oop is in the right area of memory
2477 const uintptr_t oop_mask = Universe::verify_oop_mask();
2478 const uintptr_t oop_bits = Universe::verify_oop_bits();
2479 li(AT, oop_mask);
2480 andr(T0, A1, AT);
2481 li(AT, oop_bits);
2482 bne(T0, AT, error);
2483 delayed()->nop();
2485 // make sure klass is 'reasonable'
2486 // added for compressed oops
2487 reinit_heapbase();
2489 load_klass(T0, A1);
2490 beq(T0, R0, error); // if klass is NULL it is broken
2491 delayed()->nop();
2492 #if 0
2493 //FIXME:wuhui.
2494 // Check if the klass is in the right area of memory
2495 //const int klass_mask = Universe::verify_klass_mask();
2496 //const int klass_bits = Universe::verify_klass_bits();
2497 const uintptr_t klass_mask = Universe::verify_klass_mask();
2498 const uintptr_t klass_bits = Universe::verify_klass_bits();
2500 li(AT, klass_mask);
2501 andr(T1, T0, AT);
2502 li(AT, klass_bits);
2503 bne(T1, AT, error);
2504 delayed()->nop();
2505 // make sure klass' klass is 'reasonable'
2506 //add for compressedoops
2507 load_klass(T0, T0);
2508 beq(T0, R0, error); // if klass' klass is NULL it is broken
2509 delayed()->nop();
2511 li(AT, klass_mask);
2512 andr(T1, T0, AT);
2513 li(AT, klass_bits);
2514 bne(T1, AT, error);
2515 delayed()->nop(); // if klass not in right area of memory it is broken too.
2516 #endif
2517 // return if everything seems ok
2518 bind(exit);
2520 jr(RA);
2521 delayed()->nop();
2523 // handle errors
2524 bind(error);
2525 pushad();
2526 #ifndef _LP64
2527 addi(SP, SP, (-1) * wordSize);
2528 #endif
2529 call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
2530 delayed()->nop();
2531 #ifndef _LP64
2532 addiu(SP, SP, 1 * wordSize);
2533 #endif
2534 popad();
2535 jr(RA);
2536 delayed()->nop();
2537 }
2539 void MacroAssembler::verify_tlab(Register t1, Register t2) {
2540 #ifdef ASSERT
2541 assert_different_registers(t1, t2, AT);
2542 if (UseTLAB && VerifyOops) {
2543 Label next, ok;
2545 get_thread(t1);
2547 ld_ptr(t2, t1, in_bytes(JavaThread::tlab_top_offset()));
2548 ld_ptr(AT, t1, in_bytes(JavaThread::tlab_start_offset()));
2549 sltu(AT, t2, AT);
2550 beq(AT, R0, next);
2551 delayed()->nop();
2553 stop("assert(top >= start)");
2555 bind(next);
2556 ld_ptr(AT, t1, in_bytes(JavaThread::tlab_end_offset()));
2557 sltu(AT, AT, t2);
2558 beq(AT, R0, ok);
2559 delayed()->nop();
2561 stop("assert(top <= end)");
2563 bind(ok);
2565 }
2566 #endif
2567 }
2568 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
2569 Register tmp,
2570 int offset) {
2571 intptr_t value = *delayed_value_addr;
2572 if (value != 0)
2573 return RegisterOrConstant(value + offset);
2574 AddressLiteral a(delayed_value_addr);
2575 // load indirectly to solve generation ordering problem
2576 //movptr(tmp, ExternalAddress((address) delayed_value_addr));
2577 //ld(tmp, a); // FIXME: with this load commented out, tmp is used below uninitialized
2578 if (offset != 0)
2579 daddi(tmp,tmp, offset);
2581 return RegisterOrConstant(tmp);
2582 }
2584 void MacroAssembler::hswap(Register reg) {
2585 // Byte-swap a 16-bit (signed) halfword.
2586 //andi(reg, reg, 0xffff);
2587 srl(AT, reg, 8);
2588 sll(reg, reg, 24);
2589 sra(reg, reg, 16);
2590 orr(reg, reg, AT);
2591 }
2593 void MacroAssembler::huswap(Register reg) {
2594 #ifdef _LP64
2595 dsrl(AT, reg, 8);
2596 dsll(reg, reg, 24);
2597 dsrl(reg, reg, 16);
2598 orr(reg, reg, AT);
2599 andi(reg, reg, 0xffff);
2600 #else
2601 //andi(reg, reg, 0xffff);
2602 srl(AT, reg, 8);
2603 sll(reg, reg, 24);
2604 srl(reg, reg, 16);
2605 orr(reg, reg, AT);
2606 #endif
2607 }
2609 // Byte-swap a 32-bit word; the trick below needs only one extra register (AT).
2611 void MacroAssembler::swap(Register reg) {
2612 srl(AT, reg, 8);
2613 sll(reg, reg, 24);
2614 orr(reg, reg, AT);
2615 //reg : 4 1 2 3
2616 srl(AT, AT, 16);
2617 xorr(AT, AT, reg);
2618 andi(AT, AT, 0xff);
2619 //AT : 0 0 0 (1^3)
2620 xorr(reg, reg, AT);
2621 //reg : 4 1 2 1
2622 sll(AT, AT, 16);
2623 xorr(reg, reg, AT);
2624 //reg : 4 3 2 1
2625 }
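// Worked example (illustration only) with reg = 0x01020304:
//   srl  AT, reg, 8        AT  = 0x00010203
//   sll  reg, reg, 24      reg = 0x04000000
//   orr  reg, reg, AT      reg = 0x04010203   // bytes 4 1 2 3
//   srl  AT, AT, 16        AT  = 0x00000001
//   xorr + andi            AT  = 0x00000002   // low byte = byte1 ^ byte3
//   xorr reg, reg, AT      reg = 0x04010201   // bytes 4 1 2 1
//   sll  AT, 16; xorr      reg = 0x04030201   // bytes 4 3 2 1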
2627 #ifdef _LP64
2629 /* do 32-bit CAS using MIPS64 lld/scd
2631 Jin: cas_int should only compare the low 32 bits of the memory value.
2632 However, lld/scd operate on 64 bits, which does not match the intent of cas_int.
2633 To simulate a 32-bit atomic operation, the value loaded with LLD is split into
2634 two halves, and only the low 32 bits are compared. If they are equal, the low 32
2635 bits of newval, combined with the high 32 bits of the memory value, are stored back with SCD.
2637 Example:
2639 double d = 3.1415926;
2640 System.err.println("hello" + d);
2642 sun.misc.FloatingDecimal$1.<init>()
2643 |
2644 `- java.util.concurrent.atomic.AtomicInteger::compareAndSet()
2646 38 cas_int [a7a7|J] [a0|I] [a6|I]
2647 // a0: 0xffffffffe8ea9f63 pc: 0x55647f3354
2648 // a6: 0x4ab325aa
2650 again:
2651 0x00000055647f3c5c: lld at, 0x0(a7) ; 64-bit load, "0xe8ea9f63"
2653 0x00000055647f3c60: sll t9, at, 0 ; t9: low-32 bits (sign extended)
2654 0x00000055647f3c64: dsrl32 t8, at, 0 ; t8: high-32 bits
2655 0x00000055647f3c68: dsll32 t8, t8, 0
2656 0x00000055647f3c6c: bne t9, a0, 0x00000055647f3c9c ; goto nequal
2657 0x00000055647f3c70: sll zero, zero, 0
2659 0x00000055647f3c74: ori v1, zero, 0xffffffff ; v1: low-32 bits of newval (sign unextended)
2660 0x00000055647f3c78: dsll v1, v1, 16 ; v1 = a6 & 0xFFFFFFFF;
2661 0x00000055647f3c7c: ori v1, v1, 0xffffffff
2662 0x00000055647f3c80: and v1, a6, v1
2663 0x00000055647f3c84: or at, t8, v1
2664 0x00000055647f3c88: scd at, 0x0(a7)
2665 0x00000055647f3c8c: beq at, zero, 0x00000055647f3c5c ; goto again
2666 0x00000055647f3c90: sll zero, zero, 0
2667 0x00000055647f3c94: beq zero, zero, 0x00000055647f45ac ; goto done
2668 0x00000055647f3c98: sll zero, zero, 0
2669 nequal:
2670 0x00000055647f45a4: dadd a0, t9, zero
2671 0x00000055647f45a8: dadd at, zero, zero
2672 done:
2673 */
2675 void MacroAssembler::cmpxchg32(Register x_reg, Address dest, Register c_reg) {
2676 /* 2012/11/11 Jin: MIPS64 can use ll/sc for 32-bit atomic memory access */
2677 Label done, again, nequal;
2679 bind(again);
2681 if(UseSyncLevel >= 3000 || UseSyncLevel < 2000) sync();
2682 ll(AT, dest);
2683 bne(AT, c_reg, nequal);
2684 delayed()->nop();
2686 move(AT, x_reg);
2687 sc(AT, dest);
2688 beq(AT, R0, again);
2689 delayed()->nop();
2690 b(done);
2691 delayed()->nop();
2693 // not xchged
2694 bind(nequal);
2695 sync();
2696 move(c_reg, AT);
2697 move(AT, R0);
2699 bind(done);
2700 }
2701 #endif // cmpxchg32
2703 void MacroAssembler::cmpxchg(Register x_reg, Address dest, Register c_reg) {
2704 Label done, again, nequal;
2706 bind(again);
2707 if(UseSyncLevel >= 3000 || UseSyncLevel < 2000) sync();
2708 #ifdef _LP64
2709 lld(AT, dest);
2710 #else
2711 ll(AT, dest);
2712 #endif
2713 bne(AT, c_reg, nequal);
2714 delayed()->nop();
2716 move(AT, x_reg);
2717 #ifdef _LP64
2718 scd(AT, dest);
2719 #else
2720 sc(AT, dest);
2721 #endif
2722 beq(AT, R0, again);
2723 delayed()->nop();
2724 b(done);
2725 delayed()->nop();
2727 // not xchged
2728 bind(nequal);
2729 sync();
2730 move(c_reg, AT);
2731 move(AT, R0);
2733 bind(done);
2734 }
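// All the cmpxchg variants implement the same LL/SC compare-and-swap;
// roughly (illustration only):
//
//   do {
//     AT = load_linked(dest);                           // ll / lld
//     if (AT != c_reg) { c_reg = AT; AT = 0; break; }   // fail: report current value
//     AT = x_reg;
//   } while (store_conditional(dest, AT) == 0);         // sc / scd; retry if it fails
//   // afterwards AT == 1 on success, AT == 0 on failure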
2736 void MacroAssembler::cmpxchg8(Register x_regLo, Register x_regHi, Address dest, Register c_regLo, Register c_regHi) {
2737 Label done, again, nequal;
2739 Register x_reg = x_regLo;
2740 dsll32(x_regHi, x_regHi, 0);
2741 dsll32(x_regLo, x_regLo, 0);
2742 dsrl32(x_regLo, x_regLo, 0);
2743 orr(x_reg, x_regLo, x_regHi);
2745 Register c_reg = c_regLo;
2746 dsll32(c_regHi, c_regHi, 0);
2747 dsll32(c_regLo, c_regLo, 0);
2748 dsrl32(c_regLo, c_regLo, 0);
2749 orr(c_reg, c_regLo, c_regHi);
2751 bind(again);
2753 if(UseSyncLevel >= 3000 || UseSyncLevel < 2000) sync();
2754 lld(AT, dest);
2755 bne(AT, c_reg, nequal);
2756 delayed()->nop();
2758 //move(AT, x_reg);
2759 dadd(AT, x_reg, R0);
2760 scd(AT, dest);
2761 beq(AT, R0, again);
2762 delayed()->nop();
2763 b(done);
2764 delayed()->nop();
2766 // not xchged
2767 bind(nequal);
2768 sync();
2769 //move(c_reg, AT);
2770 //move(AT, R0);
2771 dadd(c_reg, AT, R0);
2772 dadd(AT, R0, R0);
2773 bind(done);
2774 }
2776 // The three registers must be different.
2777 void MacroAssembler::rem_s(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) {
2778 assert_different_registers(tmp, fs, ft);
2779 div_s(tmp, fs, ft);
2780 trunc_l_s(tmp, tmp);
2781 cvt_s_l(tmp, tmp);
2782 mul_s(tmp, tmp, ft);
2783 sub_s(fd, fs, tmp);
2784 }
2786 // The three registers must be different.
2787 void MacroAssembler::rem_d(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) {
2788 assert_different_registers(tmp, fs, ft);
2789 div_d(tmp, fs, ft);
2790 trunc_l_d(tmp, tmp);
2791 cvt_d_l(tmp, tmp);
2792 mul_d(tmp, tmp, ft);
2793 sub_d(fd, fs, tmp);
2794 }
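// Both rem_s and rem_d compute a truncated remainder (illustration only):
//
//   fd = fs - trunc(fs / ft) * ft;
//
// This roughly matches C's fmod() sign behavior (the result takes the sign
// of fs), but the quotient is truncated through a 64-bit integer, so huge
// quotients (|fs/ft| >= 2^63) are outside the scheme's range.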
2796 // Fast_Lock and Fast_Unlock used by C2
2798 // Because the transitions from emitted code to the runtime
2799 // monitorenter/exit helper stubs are so slow it's critical that
2800 // we inline both the stack-locking fast-path and the inflated fast path.
2801 //
2802 // See also: cmpFastLock and cmpFastUnlock.
2803 //
2804 // What follows is a specialized inline transliteration of the code
2805 // in slow_enter() and slow_exit(). If we're concerned about I$ bloat
2806 // another option would be to emit TrySlowEnter and TrySlowExit methods
2807 // at startup-time. These methods would accept arguments as
2808 // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
2809 // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
2810 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
2811 // In practice, however, the # of lock sites is bounded and is usually small.
2812 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
2813 // if the processor uses simple bimodal branch predictors keyed by EIP
2814 // Since the helper routines would be called from multiple synchronization
2815 // sites.
2816 //
2817 // An even better approach would be write "MonitorEnter()" and "MonitorExit()"
2818 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
2819 // to those specialized methods. That'd give us a mostly platform-independent
2820 // implementation that the JITs could optimize and inline at their pleasure.
2821 // Done correctly, the only time we'd need to cross to native code would be
2822 // to park() or unpark() threads. We'd also need a few more unsafe operators
2823 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
2824 // (b) explicit barriers or fence operations.
2825 //
2826 // TODO:
2827 //
2828 // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
2829 // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
2830 // Given TLAB allocation, Self is usually manifested in a register, so passing it into
2831 // the lock operators would typically be faster than reifying Self.
2832 //
2833 // * Ideally I'd define the primitives as:
2834 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
2835 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
2836 // Unfortunately ADLC bugs prevent us from expressing the ideal form.
2837 // Instead, we're stuck with rather awkward and brittle register assignments below.
2838 // Furthermore the register assignments are overconstrained, possibly resulting in
2839 // sub-optimal code near the synchronization site.
2840 //
2841 // * Eliminate the sp-proximity tests and just use "== Self" tests instead.
2842 // Alternately, use a better sp-proximity test.
2843 //
2844 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
2845 // Either one is sufficient to uniquely identify a thread.
2846 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
2847 //
2848 // * Intrinsify notify() and notifyAll() for the common cases where the
2849 // object is locked by the calling thread but the waitlist is empty.
2850 // avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
2851 //
2852 // * use jccb and jmpb instead of jcc and jmp to improve code density.
2853 // But beware of excessive branch density on AMD Opterons.
2854 //
2855 // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
2856 // or failure of the fast-path. If the fast-path fails then we pass
2857 // control to the slow-path, typically in C. In Fast_Lock and
2858 // Fast_Unlock we often branch to DONE_LABEL, just to find that C2
2859 // will emit a conditional branch immediately after the node.
2860 // So we have branches to branches and lots of ICC.ZF games.
2861 // Instead, it might be better to have C2 pass a "FailureLabel"
2862 // into Fast_Lock and Fast_Unlock. In the case of success, control
2863 // will drop through the node. ICC.ZF is undefined at exit.
2864 // In the case of failure, the node will branch directly to the
2865 // FailureLabel
2868 // obj: object to lock
2869 // box: on-stack box address (displaced header location) - KILLED
2870 // tmp: scratch -- KILLED (rax on x86)
2871 // scr: scratch -- KILLED
2872 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg) {
2874 // Ensure the register assignments are disjoint
2875 guarantee (objReg != boxReg, "") ;
2876 guarantee (objReg != tmpReg, "") ;
2877 guarantee (objReg != scrReg, "") ;
2878 guarantee (boxReg != tmpReg, "") ;
2879 guarantee (boxReg != scrReg, "") ;
2882 block_comment("FastLock");
2887 if (PrintBiasedLockingStatistics) {
2888 push(tmpReg);
2889 atomic_inc32((address)BiasedLocking::total_entry_count_addr(), 1, AT, tmpReg);
2890 pop(tmpReg);
2891 }
2893 if (EmitSync & 1) {
2894 move(AT, 0x0);
2895 return;
2896 } else
2897 if (EmitSync & 2) {
2898 Label DONE_LABEL ;
2899 if (UseBiasedLocking) {
2900 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
2901 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
2902 }
2904 ld(tmpReg, Address(objReg, 0)) ; // fetch markword
2905 ori(tmpReg, tmpReg, 0x1);
2906 sd(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS
2908 cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg
2909 bne(AT, R0, DONE_LABEL);
2910 delayed()->nop();
2912 // Recursive locking
2913 dsubu(tmpReg, tmpReg, SP);
2914 li(AT, (7 - os::vm_page_size() ));
2915 andr(tmpReg, tmpReg, AT);
2916 sd(tmpReg, Address(boxReg, 0));
2917 bind(DONE_LABEL) ;
2918 } else {
2919 // Possible cases that we'll encounter in fast_lock
2920 // ------------------------------------------------
2921 // * Inflated
2922 // -- unlocked
2923 // -- Locked
2924 // = by self
2925 // = by other
2926 // * biased
2927 // -- by Self
2928 // -- by other
2929 // * neutral
2930 // * stack-locked
2931 // -- by self
2932 // = sp-proximity test hits
2933 // = sp-proximity test generates false-negative
2934 // -- by other
2935 //
2937 Label IsInflated, DONE_LABEL, PopDone ;
2939 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
2940 // order to reduce the number of conditional branches in the most common cases.
2941 // Beware -- there's a subtle invariant that fetch of the markword
2942 // at [FETCH], below, will never observe a biased encoding (*101b).
2943 // If this invariant is not held we risk exclusion (safety) failure.
2944 if (UseBiasedLocking && !UseOptoBiasInlining) {
2945 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL);
2946 }
2948 ld(tmpReg, Address(objReg, 0)) ; //Fetch the markword of the object.
2949 andi(AT, tmpReg, markOopDesc::monitor_value);
2950 bne(AT, R0, IsInflated); // inflated vs stack-locked|neutral|bias
2951 delayed()->nop();
2953 // Attempt stack-locking ...
2954 ori (tmpReg, tmpReg, markOopDesc::unlocked_value);
2955 sd(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS
2956 //if (os::is_MP()) {
2957 // sync();
2958 //}
2960 cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg
2961 //AT == 1: unlocked
2963 if (PrintBiasedLockingStatistics) {
2964 Label L;
2965 beq(AT, R0, L);
2966 delayed()->nop();
2967 push(T0);
2968 push(T1);
2969 atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1);
2970 pop(T1);
2971 pop(T0);
2972 bind(L);
2973 }
2974 bne(AT, R0, DONE_LABEL);
2975 delayed()->nop();
2977 // Recursive locking
2978 // The object is stack-locked: markword contains stack pointer to BasicLock.
2979 // Locked by current thread if difference with current SP is less than one page.
2980 dsubu(tmpReg, tmpReg, SP);
2981 li(AT, 7 - os::vm_page_size() );
2982 andr(tmpReg, tmpReg, AT);
2983 sd(tmpReg, Address(boxReg, 0));
2984 if (PrintBiasedLockingStatistics) {
2985 Label L;
2986 // tmpReg == 0 => BiasedLocking::_fast_path_entry_count++
2987 bne(tmpReg, R0, L);
2988 delayed()->nop();
2989 push(T0);
2990 push(T1);
2991 atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1);
2992 pop(T1);
2993 pop(T0);
2994 bind(L);
2995 }
2996 sltiu(AT, tmpReg, 1); /* AT = (tmpReg == 0) ? 1 : 0 */
2998 b(DONE_LABEL) ;
2999 delayed()->nop();
3001 bind(IsInflated) ;
3002 // The object's monitor m is unlocked iff m->owner == NULL,
3003 // otherwise m->owner may contain a thread or a stack address.
3005 // TODO: someday avoid the ST-before-CAS penalty by
3006 // relocating (deferring) the following ST.
3007 // We should also think about trying a CAS without having
3008 // fetched _owner. If the CAS is successful we may
3009 // avoid an RTO->RTS upgrade on the $line.
3010 // Without cast to int32_t a movptr will destroy r10 which is typically obj
3011 li(AT, (int32_t)intptr_t(markOopDesc::unused_mark()));
3012 sd(AT, Address(boxReg, 0));
3014 move(boxReg, tmpReg) ;
3015 ld(tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3016 // if (m->owner != 0) => AT = 0, goto slow path.
3017 move(AT, R0);
3018 bne(tmpReg, R0, DONE_LABEL);
3019 delayed()->nop();
3021 #ifndef OPT_THREAD
3022 get_thread (TREG) ;
3023 #endif
3024 // It's inflated and appears unlocked
3025 //if (os::is_MP()) {
3026 // sync();
3027 //}
3028 cmpxchg(TREG, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), tmpReg) ;
3029 // Intentional fall-through into DONE_LABEL ...
3032 // DONE_LABEL is a hot target - we'd really like to place it at the
3033 // start of cache line by padding with NOPs.
3034 // See the AMD and Intel software optimization manuals for the
3035 // most efficient "long" NOP encodings.
3036 // Unfortunately none of our alignment mechanisms suffice.
3037 bind(DONE_LABEL);
3039 // At DONE_LABEL the AT is set as follows ...
3040 // Fast_Unlock uses the same protocol.
3041 // AT == 1 -> Success
3042 // AT == 0 -> Failure - force control through the slow-path
3044 // Avoid branch-to-branch on AMD processors
3045 // This appears to be superstition.
3046 if (EmitSync & 32) nop() ;
3048 }
3049 }
3051 // obj: object to unlock
3052 // box: on-stack box address (displaced header location) -- KILLED (must be EAX on x86)
3053 // tmp: scratch -- KILLED; cannot be obj nor box
3054 //
3055 // Some commentary on balanced locking:
3056 //
3057 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
3058 // Methods that don't have provably balanced locking are forced to run in the
3059 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
3060 // The interpreter provides two properties:
3061 // I1: At return-time the interpreter automatically and quietly unlocks any
3062 // objects acquired by the current activation (frame). Recall that the
3063 // interpreter maintains an on-stack list of locks currently held by
3064 // a frame.
3065 // I2: If a method attempts to unlock an object that is not held by
3066 // the frame, the interpreter throws IMSX.
3067 //
3068 // Lets say A(), which has provably balanced locking, acquires O and then calls B().
3069 // B() doesn't have provably balanced locking so it runs in the interpreter.
3070 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
3071 // is still locked by A().
3072 //
3073 // The only other source of unbalanced locking would be JNI. The "Java Native Interface:
3074 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
3075 // should not be unlocked by "normal" java-level locking and vice-versa. The specification
3076 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
3078 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
3080 guarantee (objReg != boxReg, "") ;
3081 guarantee (objReg != tmpReg, "") ;
3082 guarantee (boxReg != tmpReg, "") ;
3086 block_comment("FastUnlock");
3089 if (EmitSync & 4) {
3090 // Disable - inhibit all inlining. Force control through the slow-path
3091 move(AT, 0x0);
3092 return;
3093 } else
3094 if (EmitSync & 8) {
3095 Label DONE_LABEL ;
3096 if (UseBiasedLocking) {
3097 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3098 }
3099 // classic stack-locking code ...
3100 ld(tmpReg, Address(boxReg, 0)) ;
3101 beq(tmpReg, R0, DONE_LABEL) ;
3102 move(AT, 0x1); // delay slot
3104 cmpxchg(tmpReg, Address(objReg, 0), boxReg); // Uses EAX which is box
3105 bind(DONE_LABEL);
3106 } else {
3107 Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
3109 // Critically, the biased locking test must have precedence over
3110 // and appear before the (box->dhw == 0) recursive stack-lock test.
3111 if (UseBiasedLocking && !UseOptoBiasInlining) {
3112 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3113 }
3115 ld(AT, Address(boxReg, 0)) ; // Examine the displaced header
3116 beq(AT, R0, DONE_LABEL) ; // 0 indicates recursive stack-lock
3117 delayed()->daddiu(AT, R0, 0x1);
3119 ld(tmpReg, Address(objReg, 0)) ; // Examine the object's markword
3120 andi(AT, tmpReg, markOopDesc::monitor_value) ; // Inflated?
3121 beq(AT, R0, Stacked) ; // Inflated?
3122 delayed()->nop();
3124 bind(Inflated) ;
3125 // It's inflated.
3126 // Despite our balanced locking property we still check that m->_owner == Self
3127 // as java routines or native JNI code called by this thread might
3128 // have released the lock.
3129 // Refer to the comments in synchronizer.cpp for how we might encode extra
3130 // state in _succ so we can avoid fetching EntryList|cxq.
3131 //
3132 // I'd like to add more cases in fast_lock() and fast_unlock() --
3133 // such as recursive enter and exit -- but we have to be wary of
3134 // I$ bloat, T$ effects and BP$ effects.
3135 //
3136 // If there's no contention try a 1-0 exit. That is, exit without
3137 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
3138 // we detect and recover from the race that the 1-0 exit admits.
3139 //
3140 // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
3141 // before it STs null into _owner, releasing the lock. Updates
3142 // to data protected by the critical section must be visible before
3143 // we drop the lock (and thus before any other thread could acquire
3144 // the lock and observe the fields protected by the lock).
3145 // IA32's memory-model is SPO, so STs are ordered with respect to
3146 // each other and there's no need for an explicit barrier (fence).
3147 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
3148 #ifndef OPT_THREAD
3149 get_thread (TREG) ;
3150 #endif
3152 // It's inflated
3153 ld(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3154 xorr(boxReg, boxReg, TREG);
3156 ld(AT, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
3157 orr(boxReg, boxReg, AT);
3159 move(AT, R0);
3160 bne(boxReg, R0, DONE_LABEL);
3161 delayed()->nop();
3163 ld(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
3164 ld(AT, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
3165 orr(boxReg, boxReg, AT);
3167 move(AT, R0);
3168 bne(boxReg, R0, DONE_LABEL);
3169 delayed()->nop();
3171 sync();
3172 sd(R0, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3173 move(AT, 0x1);
3174 b(DONE_LABEL);
3175 delayed()->nop();
3177 bind (Stacked);
3178 ld(tmpReg, Address(boxReg, 0)) ;
3179 //if (os::is_MP()) { sync(); }
3180 cmpxchg(tmpReg, Address(objReg, 0), boxReg);
3182 if (EmitSync & 65536) {
3183 bind (CheckSucc);
3184 }
3186 bind(DONE_LABEL);
3188 // Avoid branch to branch on AMD processors
3189 if (EmitSync & 32768) { nop() ; }
3190 }
3191 }
3193 void MacroAssembler::align(int modulus) {
3194 while (offset() % modulus != 0) nop();
3195 }
3198 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
3199 //Unimplemented();
3200 }
3202 #ifdef _LP64
3203 Register caller_saved_registers[] = {AT, V0, V1, A0, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T9, GP, RA, FP};
3204 Register caller_saved_registers_except_v0[] = {AT, V1, A0, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T9, GP, RA, FP};
3206 /* FIXME: Jin: In MIPS64, F0~23 are all caller-saved registers */
3207 FloatRegister caller_saved_fpu_registers[] = {F0, F12, F13};
3208 #else
3209 Register caller_saved_registers[] = {AT, V0, V1, A0, A1, A2, A3, T4, T5, T6, T7, T0, T1, T2, T3, T8, T9, GP, RA, FP};
3210 Register caller_saved_registers_except_v0[] = {AT, V1, A0, A1, A2, A3, T4, T5, T6, T7, T0, T1, T2, T3, T8, T9, GP, RA, FP};
3212 Register caller_saved_fpu_registers[] = {};
3213 #endif
3215 // We preserve all caller-saved registers.
3216 void MacroAssembler::pushad(){
3217 int i;
3219 /* Fixed-point registers */
3220 int len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]);
3221 daddi(SP, SP, -1 * len * wordSize);
3222 for (i = 0; i < len; i++)
3223 {
3224 #ifdef _LP64
3225 sd(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
3226 #else
3227 sw(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
3228 #endif
3229 }
3231 /* Floating-point registers */
3232 len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]);
3233 daddi(SP, SP, -1 * len * wordSize);
3234 for (i = 0; i < len; i++)
3235 {
3236 #ifdef _LP64
3237 sdc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3238 #else
3239 swc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3240 #endif
3241 }
3242 }
3244 void MacroAssembler::popad(){
3245 int i;
3247 /* Floating-point registers */
3248 int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]);
3249 for (i = 0; i < len; i++)
3250 {
3251 #ifdef _LP64
3252 ldc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3253 #else
3254 lwc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3255 #endif
3256 }
3257 daddi(SP, SP, len * wordSize);
3259 /* Fixed-point registers */
3260 len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]);
3261 for (i = 0; i < len; i++)
3262 {
3263 #ifdef _LP64
3264 ld(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
3265 #else
3266 lw(caller_saved_registers[i], SP, (len - i - 1) * wordSize);
3267 #endif
3268 }
3269 daddi(SP, SP, len * wordSize);
3270 }
3272 // We preserve all caller-saved registers except V0.
3273 void MacroAssembler::pushad_except_v0() {
3274 int i;
3276 /* Fixed-point registers */
3277 int len = sizeof(caller_saved_registers_except_v0) / sizeof(caller_saved_registers_except_v0[0]);
3278 daddi(SP, SP, -1 * len * wordSize);
3279 for (i = 0; i < len; i++) {
3280 #ifdef _LP64
3281 sd(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize);
3282 #else
3283 sw(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize);
3284 #endif
3285 }
3287 /* Floating-point registers */
3288 len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]);
3289 daddi(SP, SP, -1 * len * wordSize);
3290 for (i = 0; i < len; i++) {
3291 #ifdef _LP64
3292 sdc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3293 #else
3294 swc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3295 #endif
3296 }
3297 }
3299 void MacroAssembler::popad_except_v0() {
3300 int i;
3302 /* Floating-point registers */
3303 int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]);
3304 for (i = 0; i < len; i++) {
3305 #ifdef _LP64
3306 ldc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3307 #else
3308 lwc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize);
3309 #endif
3310 }
3311 daddi(SP, SP, len * wordSize);
3313 /* Fixed-point registers */
3314 len = sizeof(caller_saved_registers_except_v0) / sizeof(caller_saved_registers_except_v0[0]);
3315 for (i = 0; i < len; i++) {
3316 #ifdef _LP64
3317 ld(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize);
3318 #else
3319 lw(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize);
3320 #endif
3321 }
3322 daddi(SP, SP, len * wordSize);
3323 }
3325 void MacroAssembler::push2(Register reg1, Register reg2) {
3326 #ifdef _LP64
3327 daddi(SP, SP, -16);
3328 sd(reg2, SP, 0);
3329 sd(reg1, SP, 8);
3330 #else
3331 addi(SP, SP, -8);
3332 sw(reg2, SP, 0);
3333 sw(reg1, SP, 4);
3334 #endif
3335 }
3337 void MacroAssembler::pop2(Register reg1, Register reg2) {
3338 #ifdef _LP64
3339 ld(reg1, SP, 0);
3340 ld(reg2, SP, 8);
3341 daddi(SP, SP, 16);
3342 #else
3343 lw(reg1, SP, 0);
3344 lw(reg2, SP, 4);
3345 addi(SP, SP, 8);
3346 #endif
3347 }
3349 //for UseCompressedOops Option
3350 void MacroAssembler::load_klass(Register dst, Register src) {
3351 #ifdef _LP64
3352 if(UseCompressedClassPointers){
3353 lwu(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3354 decode_klass_not_null(dst);
3355 } else
3356 #endif
3357 ld(dst, src, oopDesc::klass_offset_in_bytes());
3358 }
3360 void MacroAssembler::store_klass(Register dst, Register src) {
3361 #ifdef _LP64
3362 if(UseCompressedClassPointers){
3363 encode_klass_not_null(src);
3364 sw(src, dst, oopDesc::klass_offset_in_bytes());
3365 } else {
3366 #endif
3367 sd(src, dst, oopDesc::klass_offset_in_bytes());
3368 }
3369 }
3371 void MacroAssembler::load_prototype_header(Register dst, Register src) {
3372 load_klass(dst, src);
3373 ld(dst, Address(dst, Klass::prototype_header_offset()));
3374 }
3376 #ifdef _LP64
3377 void MacroAssembler::store_klass_gap(Register dst, Register src) {
3378 if (UseCompressedClassPointers) {
3379 sw(src, dst, oopDesc::klass_gap_offset_in_bytes());
3380 }
3381 }
3383 void MacroAssembler::load_heap_oop(Register dst, Address src) {
3384 if(UseCompressedOops){
3385 lwu(dst, src);
3386 decode_heap_oop(dst);
3387 } else {
3388 ld(dst, src);
3389 }
3390 }
3392 void MacroAssembler::store_heap_oop(Address dst, Register src){
3393 if(UseCompressedOops){
3394 assert(!dst.uses(src), "not enough registers");
3395 encode_heap_oop(src);
3396 sw(src, dst);
3397 } else {
3398 sd(src, dst);
3399 }
3400 }
3402 void MacroAssembler::store_heap_oop_null(Address dst){
3403 if(UseCompressedOops){
3404 sw(R0, dst);
3405 } else {
3406 sd(R0, dst);
3407 }
3408 }
3410 #ifdef ASSERT
3411 void MacroAssembler::verify_heapbase(const char* msg) {
3412 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
3413 assert (Universe::heap() != NULL, "java heap should be initialized");
3414 }
3415 #endif
3418 // Algorithm must match oop.inline.hpp encode_heap_oop.
3419 void MacroAssembler::encode_heap_oop(Register r) {
3420 #ifdef ASSERT
3421 verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?");
3422 #endif
3423 verify_oop(r, "broken oop in encode_heap_oop");
3424 if (Universe::narrow_oop_base() == NULL) {
3425 if (Universe::narrow_oop_shift() != 0) {
3426 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3427 shr(r, LogMinObjAlignmentInBytes);
3428 }
3429 return;
3430 }
3432 movz(r, S5_heapbase, r);
3433 dsub(r, r, S5_heapbase);
3434 if (Universe::narrow_oop_shift() != 0) {
3435 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3436 shr(r, LogMinObjAlignmentInBytes);
3437 }
3438 }
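// Compressed-oop encoding is, in effect (illustration only):
//
//   narrow = (oop == NULL) ? 0 : (oop - narrow_oop_base) >> narrow_oop_shift;
//
// The movz above makes the NULL check branchless: when r == 0 the heap base
// is substituted first, so the subtraction yields 0 again.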
3440 void MacroAssembler::encode_heap_oop(Register dst, Register src) {
3441 #ifdef ASSERT
3442 verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?");
3443 #endif
3444 verify_oop(src, "broken oop in encode_heap_oop");
3445 if (Universe::narrow_oop_base() == NULL) {
3446 if (Universe::narrow_oop_shift() != 0) {
3447 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3448 dsrl(dst, src, LogMinObjAlignmentInBytes);
3449 } else {
3450 if (dst != src) move(dst, src);
3451 }
3452 } else {
3453 if (dst == src) {
3454 movz(dst, S5_heapbase, dst);
3455 dsub(dst, dst, S5_heapbase);
3456 if (Universe::narrow_oop_shift() != 0) {
3457 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3458 shr(dst, LogMinObjAlignmentInBytes);
3459 }
3460 } else {
3461 dsub(dst, src, S5_heapbase);
3462 if (Universe::narrow_oop_shift() != 0) {
3463 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3464 shr(dst, LogMinObjAlignmentInBytes);
3465 }
3466 movz(dst, R0, src);
3467 }
3468 }
3469 }
3471 void MacroAssembler::encode_heap_oop_not_null(Register r) {
3472 assert (UseCompressedOops, "should be compressed");
3473 #ifdef ASSERT
3474 if (CheckCompressedOops) {
3475 Label ok;
3476 bne(r, R0, ok);
3477 delayed()->nop();
3478 stop("null oop passed to encode_heap_oop_not_null");
3479 bind(ok);
3480 }
3481 #endif
3482 verify_oop(r, "broken oop in encode_heap_oop_not_null");
3483 if (Universe::narrow_oop_base() != NULL) {
3484 dsub(r, r, S5_heapbase);
3485 }
3486 if (Universe::narrow_oop_shift() != 0) {
3487 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3488 shr(r, LogMinObjAlignmentInBytes);
3489 }
3491 }
3493 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
3494 assert (UseCompressedOops, "should be compressed");
3495 #ifdef ASSERT
3496 if (CheckCompressedOops) {
3497 Label ok;
3498 bne(src, R0, ok);
3499 delayed()->nop();
3500 stop("null oop passed to encode_heap_oop_not_null2");
3501 bind(ok);
3502 }
3503 #endif
3504 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
3506 if (Universe::narrow_oop_base() != NULL) {
3507 dsub(dst, src, S5_heapbase);
3508 if (Universe::narrow_oop_shift() != 0) {
3509 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3510 shr(dst, LogMinObjAlignmentInBytes);
3511 }
3512 } else {
3513 if (Universe::narrow_oop_shift() != 0) {
3514 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3515 dsrl(dst, src, LogMinObjAlignmentInBytes);
3516 } else {
3517 if (dst != src) move(dst, src);
3518 }
3519 }
3520 }
3522 void MacroAssembler::decode_heap_oop(Register r) {
3523 #ifdef ASSERT
3524 verify_heapbase("MacroAssembler::decode_heap_oop corrupted?");
3525 #endif
3526 if (Universe::narrow_oop_base() == NULL) {
3527 if (Universe::narrow_oop_shift() != 0) {
3528 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3529 shl(r, LogMinObjAlignmentInBytes);
3530 }
3531 } else {
3532 move(AT, r);
3533 if (Universe::narrow_oop_shift() != 0) {
3534 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3535 shl(r, LogMinObjAlignmentInBytes);
3536 }
3537 dadd(r, r, S5_heapbase);
3538 movz(r, R0, AT);
3539 }
3540 verify_oop(r, "broken oop in decode_heap_oop");
3541 }
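// Decoding reverses the transform (illustration only):
//
//   oop = (narrow == 0) ? NULL : (narrow << narrow_oop_shift) + narrow_oop_base;
//
// The original value is saved in AT, and movz(r, R0, AT) forces the result
// back to NULL when the incoming narrow oop was 0.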
3543 void MacroAssembler::decode_heap_oop(Register dst, Register src) {
3544 #ifdef ASSERT
3545 verify_heapbase("MacroAssembler::decode_heap_oop corrupted?");
3546 #endif
3547 if (Universe::narrow_oop_base() == NULL) {
3548 if (Universe::narrow_oop_shift() != 0) {
3549 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3550 if (dst != src) nop(); // DON'T DELETE THIS GUY.
3551 dsll(dst, src, LogMinObjAlignmentInBytes);
3552 } else {
3553 if (dst != src) move(dst, src);
3554 }
3555 } else {
3556 if (dst == src) {
3557 move(AT, dst);
3558 if (Universe::narrow_oop_shift() != 0) {
3559 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3560 shl(dst, LogMinObjAlignmentInBytes);
3561 }
3562 dadd(dst, dst, S5_heapbase);
3563 movz(dst, R0, AT);
3564 } else {
3565 if (Universe::narrow_oop_shift() != 0) {
3566 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3567 dsll(dst, src, LogMinObjAlignmentInBytes);
3568 daddu(dst, dst, S5_heapbase);
3569 } else {
3570 daddu(dst, src, S5_heapbase);
3571 }
3572 movz(dst, R0, src);
3573 }
3574 }
3575 verify_oop(dst, "broken oop in decode_heap_oop");
3576 }
3578 void MacroAssembler::decode_heap_oop_not_null(Register r) {
3579 // Note: it will change flags
3580 assert (UseCompressedOops, "should only be used for compressed headers");
3581 assert (Universe::heap() != NULL, "java heap should be initialized");
3582 // Cannot assert, unverified entry point counts instructions (see .ad file)
3583 // vtableStubs also counts instructions in pd_code_size_limit.
3584 // Also do not verify_oop as this is called by verify_oop.
3585 if (Universe::narrow_oop_shift() != 0) {
3586 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3587 shl(r, LogMinObjAlignmentInBytes);
3588 if (Universe::narrow_oop_base() != NULL) {
3589 daddu(r, r, S5_heapbase);
3590 }
3591 } else {
3592 assert (Universe::narrow_oop_base() == NULL, "sanity");
3593 }
3594 }
3596 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
3597 assert (UseCompressedOops, "should only be used for compressed headers");
3598 assert (Universe::heap() != NULL, "java heap should be initialized");
3600 // Cannot assert, unverified entry point counts instructions (see .ad file)
3601 // vtableStubs also counts instructions in pd_code_size_limit.
3602 // Also do not verify_oop as this is called by verify_oop.
3603 //lea(dst, Address(S5_heapbase, src, Address::times_8, 0));
3604 if (Universe::narrow_oop_shift() != 0) {
3605 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3606 if (LogMinObjAlignmentInBytes == Address::times_8) {
3607 dsll(dst, src, LogMinObjAlignmentInBytes);
3608 daddu(dst, dst, S5_heapbase);
3609 } else {
3610 dsll(dst, src, LogMinObjAlignmentInBytes);
3611 if (Universe::narrow_oop_base() != NULL) {
3612 daddu(dst, dst, S5_heapbase);
3613 }
3614 }
3615 } else {
3616 assert (Universe::narrow_oop_base() == NULL, "sanity");
3617 if (dst != src) {
3618 move(dst, src);
3619 }
3620 }
3621 }
3623 void MacroAssembler::encode_klass_not_null(Register r) {
3624 if (Universe::narrow_klass_base() != NULL) {
3625 assert(r != AT, "Encoding a klass in AT");
3626 set64(AT, (int64_t)Universe::narrow_klass_base());
3627 dsub(r, r, AT);
3628 }
3629 if (Universe::narrow_klass_shift() != 0) {
3630 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3631 shr(r, LogKlassAlignmentInBytes);
3632 }
3633 }
3635 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3636 if (dst == src) {
3637 encode_klass_not_null(src);
3638 } else {
3639 if (Universe::narrow_klass_base() != NULL) {
3640 set64(dst, (int64_t)Universe::narrow_klass_base());
3641 dsub(dst, src, dst);
3642 if (Universe::narrow_klass_shift() != 0) {
3643 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3644 shr(dst, LogKlassAlignmentInBytes);
3645 }
3646 } else {
3647 if (Universe::narrow_klass_shift() != 0) {
3648 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3649 dsrl(dst, src, LogKlassAlignmentInBytes);
3650 } else {
3651 move(dst, src);
3652 }
3653 }
3654 }
3655 }
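// Encoding is the inverse of decode_klass_not_null:
//   narrow_klass = (klass - narrow_klass_base) >> narrow_klass_shift
// Illustrative values only: with a base of 0x7c0000000 and a shift of 3,
// a Klass* at 0x7c0000040 encodes to 0x8.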
3657 // instr_size_for_decode_klass_not_null() counts the instructions
3658 // generated by decode_klass_not_null(Register r) and reinit_heapbase()
3659 // when Universe::heap() != NULL. Hence, if the instructions they
3660 // generate change, this method needs to be updated as well.
3661 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3662 assert (UseCompressedClassPointers, "only for compressed klass ptrs");
3663 if (Universe::narrow_klass_base() != NULL) {
3664 // set64 + daddu (+ shl), plus set64 for reinit_heapbase().
3665 return (Universe::narrow_klass_shift() == 0 ? 4 * 9 : 4 * 10);
3666 } else {
3667 // No base to add: at most a single shl.
3668 return (Universe::narrow_klass_shift() == 0 ? 4 * 0 : 4 * 1);
3669 }
3670 }
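// Each MIPS instruction is four bytes, so the returns above are byte
// counts: the 9-or-10 figure is set64 + daddu (+ shl) for the decode
// plus a second set64 for reinit_heapbase(), with set64 counted as
// four instructions.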
3672 void MacroAssembler::decode_klass_not_null(Register r) {
3673 assert (UseCompressedClassPointers, "should only be used for compressed headers");
3674 assert(r != AT, "Decoding a klass in AT");
3675 // Cannot assert, unverified entry point counts instructions (see .ad file)
3676 // vtableStubs also counts instructions in pd_code_size_limit.
3677 // Also do not verify_oop as this is called by verify_oop.
3678 if (Universe::narrow_klass_shift() != 0) {
3679 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3680 shl(r, LogKlassAlignmentInBytes);
3681 }
3682 if (Universe::narrow_klass_base() != NULL) {
3683 set64(AT, (int64_t)Universe::narrow_klass_base());
3684 daddu(r, r, AT);
3685 // Not necessary on MIPS.
3686 //reinit_heapbase();
3687 }
3688 }
3690 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3691 assert (UseCompressedClassPointers, "should only be used for compressed headers");
3693 if (dst == src) {
3694 decode_klass_not_null(dst);
3695 } else {
3696 // Cannot assert, unverified entry point counts instructions (see .ad file)
3697 // vtableStubs also counts instructions in pd_code_size_limit.
3698 // Also do not verify_oop as this is called by verify_oop.
3699 set64(dst, (int64_t)Universe::narrow_klass_base());
3700 if (Universe::narrow_klass_shift() != 0) {
3701 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
3702 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
3703 dsll(AT, src, Address::times_8);
3704 daddu(dst, dst, AT);
3705 } else {
3706 daddu(dst, src, dst);
3707 }
3708 }
3709 }
3711 void MacroAssembler::incrementl(Register reg, int value) {
3712 if (value == min_jint) {
3713 move(AT, value);
3714 LP64_ONLY(addu32(reg, reg, AT)) NOT_LP64(addu(reg, reg, AT));
3715 return;
3716 }
3717 if (value < 0) { decrementl(reg, -value); return; }
3718 if (value == 0) { return; }
3720 if (Assembler::is_simm16(value)) {
3721 NOT_LP64(addiu(reg, reg, value));
3722 LP64_ONLY(move(AT, value); addu32(reg, reg, AT));
3723 } else {
3724 move(AT, value);
3725 LP64_ONLY(addu32(reg, reg, AT)) NOT_LP64(addu(reg, reg, AT));
3726 }
3727 }
3729 void MacroAssembler::decrementl(Register reg, int value) {
3730 if (value == min_jint) {
3731 move(AT, value);
3732 LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT));
3733 return;
3734 }
3735 if (value < 0) { incrementl(reg, -value); return; }
3736 if (value == 0) { return; }
3738 if (Assembler::is_simm16(value)) {
3739 NOT_LP64(addiu(reg, reg, -value));
3740 LP64_ONLY(move(AT, value); subu32(reg, reg, AT));
3741 } else {
3742 move(AT, value);
3743 LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT));
3744 }
3745 }
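// min_jint is special-cased in both helpers because negating it
// overflows a 32-bit int: delegating to the sibling routine via -value
// would be undefined, so the constant is materialized in AT and
// added (or subtracted) directly.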
3747 void MacroAssembler::reinit_heapbase() {
3748 if (UseCompressedOops || UseCompressedClassPointers) {
3749 if (Universe::heap() != NULL) {
3750 if (Universe::narrow_oop_base() == NULL) {
3751 move(S5_heapbase, R0);
3752 } else {
3753 set64(S5_heapbase, (int64_t)Universe::narrow_ptrs_base());
3754 }
3755 } else {
3756 set64(S5_heapbase, (intptr_t)Universe::narrow_ptrs_base_addr());
3757 ld(S5_heapbase, S5_heapbase, 0);
3758 }
3759 }
3760 }
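// reinit_heapbase() restores S5_heapbase after code that may clobber it.
// Before the heap is initialized the final base is unknown, so it is
// reloaded indirectly through narrow_ptrs_base_addr() instead of being
// materialized as a constant.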
3761 #endif // _LP64
3763 void MacroAssembler::check_klass_subtype(Register sub_klass,
3764 Register super_klass,
3765 Register temp_reg,
3766 Label& L_success) {
3767 // Combined fast/slow subtype check (cf. gen_subtype_check).
3768 Label L_failure;
3769 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
3770 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
3771 bind(L_failure);
3772 }
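// How the two halves compose: the fast path settles the trivial cases
// (exact match, primary supers) and only falls into the slow path when
// the secondary-supers array must be scanned; any failing path lands on
// the local L_failure bound just above.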
3774 SkipIfEqual::SkipIfEqual(
3775 MacroAssembler* masm, const bool* flag_addr, bool value) {
3776 _masm = masm;
3777 _masm->li(AT, (address)flag_addr);
3778 _masm->lb(AT, AT, 0);
3779 _masm->addi(AT, AT, -value);
3780 _masm->beq(AT, R0, _label);
3781 _masm->delayed()->nop();
3782 }
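// SkipIfEqual brackets a code region: this constructor branches over the
// region when *flag_addr == value, and the destructor (later in this
// file) binds the branch target label.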
3783 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
3784 Register super_klass,
3785 Register temp_reg,
3786 Label* L_success,
3787 Label* L_failure,
3788 Label* L_slow_path,
3789 RegisterOrConstant super_check_offset) {
3790 assert_different_registers(sub_klass, super_klass, temp_reg);
3791 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
3792 if (super_check_offset.is_register()) {
3793 assert_different_registers(sub_klass, super_klass,
3794 super_check_offset.as_register());
3795 } else if (must_load_sco) {
3796 assert(temp_reg != noreg, "supply either a temp or a register offset");
3797 }
3799 Label L_fallthrough;
3800 int label_nulls = 0;
3801 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3802 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3803 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
3804 assert(label_nulls <= 1, "at most one NULL in the batch");
3806 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3807 int sco_offset = in_bytes(Klass::super_check_offset_offset());
3808 // If the pointers are equal, we are done (e.g., String[] elements).
3809 // This self-check enables sharing of secondary supertype arrays among
3810 // non-primary types such as array-of-interface. Otherwise, each such
3811 // type would need its own customized SSA.
3812 // We move this check to the front of the fast path because many
3813 // type checks are in fact trivially successful in this manner,
3814 // so we get a nicely predicted branch right at the start of the check.
3815 beq(sub_klass, super_klass, *L_success);
3816 delayed()->nop();
3817 // Check the supertype display:
3818 if (must_load_sco) {
3819 // lwu zero-extends the 32-bit offset, which does the right thing on LP64.
3820 lwu(temp_reg, super_klass, sco_offset);
3821 super_check_offset = RegisterOrConstant(temp_reg);
3822 }
3823 dsll(AT, super_check_offset.register_or_noreg(), Address::times_1);
3824 daddu(AT, sub_klass, AT);
3825 ld(AT, AT, super_check_offset.constant_or_zero()*Address::times_1);
3827 // This check has worked decisively for primary supers.
3828 // Secondary supers are sought in the super_cache ('super_cache_addr').
3829 // (Secondary supers are interfaces and very deeply nested subtypes.)
3830 // This works in the same check above because of a tricky aliasing
3831 // between the super_cache and the primary super display elements.
3832 // (The 'super_check_addr' can address either, as the case requires.)
3833 // Note that the cache is updated below if it does not help us find
3834 // what we need immediately.
3835 // So if it was a primary super, we can just fail immediately.
3836 // Otherwise, it's the slow path for us (no success at this point).
3838 if (super_check_offset.is_register()) {
3839 beq(super_klass, AT, *L_success);
3840 delayed()->nop();
3841 addi(AT, super_check_offset.as_register(), -sc_offset);
3842 if (L_failure == &L_fallthrough) {
3843 beq(AT, R0, *L_slow_path);
3844 delayed()->nop();
3845 } else {
3846 bne_far(AT, R0, *L_failure);
3847 delayed()->nop();
3848 b(*L_slow_path);
3849 delayed()->nop();
3850 }
3851 } else if (super_check_offset.as_constant() == sc_offset) {
3852 // Need a slow path; fast failure is impossible.
3853 if (L_slow_path == &L_fallthrough) {
3854 beq(super_klass, AT, *L_success);
3855 delayed()->nop();
3856 } else {
3857 bne(super_klass, AT, *L_slow_path);
3858 delayed()->nop();
3859 b(*L_success);
3860 delayed()->nop();
3861 }
3862 } else {
3863 // No slow path; it's a fast decision.
3864 if (L_failure == &L_fallthrough) {
3865 beq(super_klass, AT, *L_success);
3866 delayed()->nop();
3867 } else {
3868 bne_far(super_klass, AT, *L_failure);
3869 delayed()->nop();
3870 b(*L_success);
3871 delayed()->nop();
3872 }
3873 }
3875 bind(L_fallthrough);
3877 }
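// The single load at [sub_klass + super_check_offset] works for both
// tests because of the Klass layout: the primary-super display and the
// secondary_super_cache sit at fixed offsets, so the same addressing
// answers either the primary-super probe or the cache probe.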
3880 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
3881 Register super_klass,
3882 Register temp_reg,
3883 Register temp2_reg,
3884 Label* L_success,
3885 Label* L_failure,
3886 bool set_cond_codes) {
3887 assert_different_registers(sub_klass, super_klass, temp_reg);
3888 if (temp2_reg != noreg)
3889 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
3890 else
3891 temp2_reg = T9;
3892 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
3894 Label L_fallthrough;
3895 int label_nulls = 0;
3896 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3897 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3898 assert(label_nulls <= 1, "at most one NULL in the batch");
3900 // a couple of useful fields in sub_klass:
3901 int ss_offset = in_bytes(Klass::secondary_supers_offset());
3902 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3903 Address secondary_supers_addr(sub_klass, ss_offset);
3904 Address super_cache_addr( sub_klass, sc_offset);
3906 // Do a linear scan of the secondary super-klass chain.
3907 // This code is rarely used, so simplicity is a virtue here.
3908 // (The x86 version uses repne scan, which needs fixed registers; the
3909 // explicit loop below has no such constraint, so nothing is spilled.)
3911 // super_klass stays in its incoming register throughout the scan.
3912 #ifndef PRODUCT
3913 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
3914 ExternalAddress pst_counter_addr((address) pst_counter);
3915 NOT_LP64( incrementl(pst_counter_addr) );
3916 #endif //PRODUCT
3918 // We will consult the secondary-super array.
3919 ld(temp_reg, secondary_supers_addr);
3920 // Load the array length; it is non-negative, so lw's sign extension is harmless.
3921 lw(temp2_reg, Address(temp_reg, Array<Klass*>::length_offset_in_bytes()));
3922 // Skip to start of data.
3923 daddiu(temp_reg, temp_reg, Array<Klass*>::base_offset_in_bytes());
3925 // Scan temp2_reg words starting at [temp_reg] for an occurrence of
3926 // super_klass. The loop below counts temp2_reg down and advances
3927 // temp_reg by one word per iteration (in the branch delay slots),
3928 // exiting to L_failure when the count reaches zero and to 'subtype'
3929 // on a match.
3931 /* 2013/4/3 Jin: OpenJDK8 never compresses klass pointers in secondary-super array. */
3932 Label Loop, subtype;
3933 bind(Loop);
3934 beq(temp2_reg, R0, *L_failure);
3935 delayed()->nop();
3936 ld(AT, temp_reg, 0);
3937 beq(AT, super_klass, subtype);
3938 delayed()->daddi(temp_reg, temp_reg, 1 * wordSize);
3939 b(Loop);
3940 delayed()->daddi(temp2_reg, temp2_reg, -1);
3942 bind(subtype);
3943 sd(super_klass, super_cache_addr);
3944 if (L_success != &L_fallthrough) {
3945 b(*L_success);
3946 delayed()->nop();
3947 }
3949 // Success: the super was cached at 'subtype' above before proceeding to L_success.
3950 #undef IS_A_TEMP
3952 bind(L_fallthrough);
3953 }
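// The scan is O(n) in the number of secondary supers, with n taken from
// the Array<Klass*> length word. A hit is stored into
// secondary_super_cache, so the next fast-path probe for the same
// (sub, super) pair succeeds without rescanning.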
3955 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
3956 ld(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
3957 sd(R0, Address(java_thread, JavaThread::vm_result_offset()));
3958 verify_oop(oop_result, "broken oop in call_VM_base");
3959 }
3961 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
3962 ld(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
3963 sd(R0, Address(java_thread, JavaThread::vm_result_2_offset()));
3964 }
3966 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
3967 int extra_slot_offset) {
3968 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
3969 int stackElementSize = Interpreter::stackElementSize;
3970 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
3971 #ifdef ASSERT
3972 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
3973 assert(offset1 - offset == stackElementSize, "correct arithmetic");
3974 #endif
3975 Register scale_reg = NOREG;
3976 Address::ScaleFactor scale_factor = Address::no_scale;
3977 if (arg_slot.is_constant()) {
3978 offset += arg_slot.as_constant() * stackElementSize;
3979 } else {
3980 scale_reg = arg_slot.as_register();
3981 scale_factor = Address::times_8;
3982 }
3983 // 2014/07/31 Fu: We don't push RA on stack in prepare_invoke.
3984 // offset += wordSize; // return PC is on stack
3985 if (scale_reg == NOREG) return Address(SP, offset);
3986 else {
3987 dsll(scale_reg, scale_reg, scale_factor);
3988 daddu(scale_reg, SP, scale_reg);
3989 return Address(scale_reg, offset);
3990 }
3991 }
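// Illustration only: with Interpreter::stackElementSize == 8 and a
// constant arg_slot of 2, this returns SP + expr_offset + 16. With a
// register slot, the scaling is done destructively in arg_slot's own
// register, so callers must treat that register as killed.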
3993 SkipIfEqual::~SkipIfEqual() {
3994 _masm->bind(_label);
3995 }
3997 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3998 switch (size_in_bytes) {
3999 #ifndef _LP64
4000 case 8:
4001 assert(dst2 != noreg, "second dest register required");
4002 lw(dst, src);
4003 lw(dst2, src.plus_disp(BytesPerInt));
4004 break;
4005 #else
4006 case 8: ld(dst, src); break;
4007 #endif
4008 case 4: lw(dst, src); break;
4009 case 2: is_signed ? lh(dst, src) : lhu(dst, src); break;
4010 case 1: is_signed ? lb( dst, src) : lbu( dst, src); break;
4011 default: ShouldNotReachHere();
4012 }
4013 }
4015 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
4016 switch (size_in_bytes) {
4017 #ifndef _LP64
4018 case 8:
4019 assert(src2 != noreg, "second source register required");
4020 sw(src, dst);
4021 sw(src2, dst.plus_disp(BytesPerInt));
4022 break;
4023 #else
4024 case 8: sd(src, dst); break;
4025 #endif
4026 case 4: sw(src, dst); break;
4027 case 2: sh(src, dst); break;
4028 case 1: sb(src, dst); break;
4029 default: ShouldNotReachHere();
4030 }
4031 }
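// Both sized accessors expect size_in_bytes to be 1, 2, 4 or 8; on
// 32-bit builds an 8-byte access is split into two 32-bit accesses,
// with the second register covering the word at offset BytesPerInt.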
4033 // Look up the method for a megamorphic invokeinterface call.
4034 // The target method is determined by <intf_klass, itable_index>.
4035 // The receiver klass is in recv_klass.
4036 // On success, the result will be in method_result, and execution falls through.
4037 // On failure, execution transfers to the given label.
4038 void MacroAssembler::lookup_interface_method(Register recv_klass,
4039 Register intf_klass,
4040 RegisterOrConstant itable_index,
4041 Register method_result,
4042 Register scan_temp,
4043 Label& L_no_such_interface,
4044 bool return_method) {
4045 assert_different_registers(recv_klass, intf_klass, scan_temp, AT);
4046 assert_different_registers(method_result, intf_klass, scan_temp, AT);
4047 assert(recv_klass != method_result || !return_method,
4048 "recv_klass can be destroyed when method isn't needed");
4050 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4051 "caller must use same register for non-constant itable index as for method");
4053 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
4054 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
4055 int itentry_off = itableMethodEntry::method_offset_in_bytes();
4056 int scan_step = itableOffsetEntry::size() * wordSize;
4057 int vte_size = vtableEntry::size() * wordSize;
4058 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4059 assert(vte_size == wordSize, "else adjust times_vte_scale");
4061 lw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
4063 // %%% Could store the aligned, prescaled offset in the klassoop.
4064 dsll(scan_temp, scan_temp, times_vte_scale);
4065 daddu(scan_temp, recv_klass, scan_temp);
4066 daddiu(scan_temp, scan_temp, vtable_base);
4067 if (HeapWordsPerLong > 1) {
4068 // Round up to align_object_offset boundary
4069 // see code for InstanceKlass::start_of_itable!
4070 round_to(scan_temp, BytesPerLong);
4071 }
4073 if (return_method) {
4074 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
4075 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4076 if (itable_index.is_constant()) {
4077 set64(AT, (int)itable_index.as_constant());
4078 dsll(AT, AT, (int)Address::times_ptr);
4079 } else {
4080 dsll(AT, itable_index.as_register(), (int)Address::times_ptr);
4081 }
4082 daddu(AT, AT, recv_klass);
4083 daddiu(recv_klass, AT, itentry_off);
4084 }
4086 Label search, found_method;
4088 for (int peel = 1; peel >= 0; peel--) {
4089 ld(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
4091 if (peel) {
4092 beq(intf_klass, method_result, found_method);
4093 delayed()->nop();
4094 } else {
4095 bne(intf_klass, method_result, search);
4096 delayed()->nop();
4097 // (invert the test to fall through to found_method...)
4098 }
4100 if (!peel) break;
4102 bind(search);
4104 // Check that the previous entry is non-null. A null entry means that
4105 // the receiver class doesn't implement the interface, and wasn't the
4106 // same as when the caller was compiled.
4107 beq(method_result, R0, L_no_such_interface);
4108 delayed()->nop();
4109 daddiu(scan_temp, scan_temp, scan_step);
4110 }
4112 bind(found_method);
4114 if (return_method) {
4115 // Got a hit.
4116 lw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
4117 if (UseLoongsonISA) {
4118 gsldx(method_result, recv_klass, scan_temp, 0);
4119 } else {
4120 daddu(AT, recv_klass, scan_temp);
4121 ld(method_result, AT);
4122 }
4123 }
4124 }
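// Sketch of the itable walk above: past the vtable lie itableOffsetEntry
// records (interface klass + offset), terminated by a NULL interface.
// The matching entry's offset field locates that interface's
// itableMethodEntry array inside the receiver klass, which is then
// indexed by itable_index to load the Method*.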
4126 // virtual method calling
4127 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4128 RegisterOrConstant vtable_index,
4129 Register method_result) {
4130 Register tmp = GP;
4131 push(tmp);
4133 if (vtable_index.is_constant()) {
4134 assert_different_registers(recv_klass, method_result, tmp);
4135 } else {
4136 assert_different_registers(recv_klass, method_result, vtable_index.as_register(), tmp);
4137 }
4138 const int base = InstanceKlass::vtable_start_offset() * wordSize;
4139 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4140 /* Equivalent single-Address form (kept for reference):
4141 Address vtable_entry_addr(recv_klass,
4142 vtable_index, Address::times_ptr,
4143 base + vtableEntry::method_offset_in_bytes());
4144 */
4145 if (vtable_index.is_constant()) {
4146 set64(AT, vtable_index.as_constant());
4147 dsll(AT, AT, (int)Address::times_ptr);
4148 } else {
4149 dsll(AT, vtable_index.as_register(), (int)Address::times_ptr);
4150 }
4151 set64(tmp, base + vtableEntry::method_offset_in_bytes());
4152 daddu(tmp, tmp, AT);
4153 daddu(tmp, tmp, recv_klass);
4154 ld(method_result, tmp, 0);
4156 pop(tmp);
4157 }
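// The address computed above is
//   recv_klass + vtable_start + vtable_index * wordSize + method_offset
// GP is borrowed as a scratch register and preserved with push/pop so
// that callers never observe it clobbered.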