115 if(!(opcode(pc[3]) == lui_op |
115 if(!(opcode(pc[3]) == lui_op |
116 && opcode(pc[4]) == ori_op |
116 && opcode(pc[4]) == ori_op |
117 && special(pc[5]) == daddu_op)) { tty->print_cr("Not a branch label patch"); } |
117 && special(pc[5]) == daddu_op)) { tty->print_cr("Not a branch label patch"); } |
118 |
118 |
119 int offset = target - branch; |
119 int offset = target - branch; |
120 if (!is_simm16(offset)) |
120 if (!is_simm16(offset)) { |
121 { |
|
122 pc[3] = (pc[3] & 0xffff0000) | high16(offset - 12); |
121 pc[3] = (pc[3] & 0xffff0000) | high16(offset - 12); |
123 pc[4] = (pc[4] & 0xffff0000) | low16(offset - 12); |
122 pc[4] = (pc[4] & 0xffff0000) | low16(offset - 12); |
124 } |
123 } else { |
125 else |
|
126 { |
|
127 /* revert to "beq + nop" */ |
124 /* revert to "beq + nop" */ |
128 CodeBuffer cb(branch, 4 * 10); |
125 CodeBuffer cb(branch, 4 * 10); |
129 MacroAssembler masm(&cb); |
126 MacroAssembler masm(&cb); |
130 #define __ masm. |
127 #define __ masm. |
131 __ b(target); |
128 __ b(target); |
139 } |
136 } |
140 return; |
137 return; |
141 } |
138 } |
142 |
139 |
143 #ifndef PRODUCT |
140 #ifndef PRODUCT |
144 if (!is_simm16((target - branch - 4) >> 2)) |
141 if (!is_simm16((target - branch - 4) >> 2)) { |
145 { |
|
146 tty->print_cr("Illegal patching: target=0x%lx", target); |
142 tty->print_cr("Illegal patching: target=0x%lx", target); |
147 int *p = (int *)branch; |
143 int *p = (int *)branch; |
148 for (int i = -10; i < 10; i++) |
144 for (int i = -10; i < 10; i++) { |
149 { |
|
150 tty->print("0x%lx, ", p[i]); |
145 tty->print("0x%lx, ", p[i]); |
151 } |
146 } |
152 tty->print_cr(""); |
147 tty->print_cr(""); |
153 } |
148 } |
154 #endif |
149 #endif |
263 |
258 |
// Number of instruction words a patchable call to 'target' occupies.
// Must stay in sync with the sequence emitted by the patchable-call
// emitter (not visible here -- presumably li64 + jalr + delay slot,
// 6 words total; confirm against the emitter). The count is fixed,
// so 'target' itself is unused.
int MacroAssembler::insts_for_patchable_call(address target) {
  return 6;
}
267 |
262 |
268 void MacroAssembler::beq_far(Register rs, Register rt, address entry) |
263 void MacroAssembler::beq_far(Register rs, Register rt, address entry) { |
269 { |
|
270 u_char * cur_pc = pc(); |
264 u_char * cur_pc = pc(); |
271 |
265 |
272 /* Jin: Near/Far jump */ |
266 /* Jin: Near/Far jump */ |
273 if(is_simm16((entry - pc() - 4) / 4)) |
267 if(is_simm16((entry - pc() - 4) / 4)) { |
274 { |
|
275 Assembler::beq(rs, rt, offset(entry)); |
268 Assembler::beq(rs, rt, offset(entry)); |
276 } |
269 } else { |
277 else |
|
278 { |
|
279 Label not_jump; |
270 Label not_jump; |
280 bne(rs, rt, not_jump); |
271 bne(rs, rt, not_jump); |
281 delayed()->nop(); |
272 delayed()->nop(); |
282 |
273 |
283 b_far(entry); |
274 b_far(entry); |
304 bind(not_jump); |
294 bind(not_jump); |
305 has_delay_slot(); |
295 has_delay_slot(); |
306 } |
296 } |
307 } |
297 } |
308 |
298 |
309 void MacroAssembler::bne_far(Register rs, Register rt, address entry) |
299 void MacroAssembler::bne_far(Register rs, Register rt, address entry) { |
310 { |
|
311 u_char * cur_pc = pc(); |
300 u_char * cur_pc = pc(); |
312 |
301 |
313 /* Jin: Near/Far jump */ |
302 /* Jin: Near/Far jump */ |
314 if(is_simm16((entry - pc() - 4) / 4)) |
303 if(is_simm16((entry - pc() - 4) / 4)) { |
315 { |
|
316 Assembler::bne(rs, rt, offset(entry)); |
304 Assembler::bne(rs, rt, offset(entry)); |
317 } |
305 } else { |
318 else |
|
319 { |
|
320 Label not_jump; |
306 Label not_jump; |
321 beq(rs, rt, not_jump); |
307 beq(rs, rt, not_jump); |
322 delayed()->nop(); |
308 delayed()->nop(); |
323 |
309 |
324 b_far(entry); |
310 b_far(entry); |
345 bind(not_jump); |
330 bind(not_jump); |
346 has_delay_slot(); |
331 has_delay_slot(); |
347 } |
332 } |
348 } |
333 } |
349 |
334 |
// Branch far to label L.
// If L is already bound, delegate to b_far(address), which picks a near or
// far form. If L is not yet bound, emit a fixed-length, PC-relative sequence
// whose lui/ori immediates are filled in later by the patching code, so the
// site can reach any 32-bit displacement once the label binds.
void MacroAssembler::b_far(Label& L) {
  if (L.is_bound()) {
    b_far(target(L));
  } else {
    // NOTE(review): 'dest' is never read below; target(L) on an unbound
    // label is presumably evaluated for its side effect ('volatile' keeps
    // the call from being optimized away) -- confirm against Label/target().
    volatile address dest = target(L);
/*
MacroAssembler::pd_patch_instruction branch=55651ed514, target=55651ef6d8
   0x00000055651ed514: dadd at, ra, zero
   0x00000055651ed518: [4110001]bgezal zero, 0x00000055651ed520

   0x00000055651ed528: daddu t9, t9, ra
   0x00000055651ed52c: dadd ra, at, zero
   0x00000055651ed530: jr t9
   0x00000055651ed534: sll zero, zero, 0
*/
    move(AT, RA);                                      // save RA; the bgezal below clobbers it
    emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1));  // bgezal zero, +1: captures PC into RA
    nop();                                             // branch delay slot
    lui(T9, 0);                                        // to be patched: hi16 of target offset
    ori(T9, T9, 0);                                    // to be patched: lo16 of target offset
    daddu(T9, T9, RA);                                 // T9 = RA + patched offset = absolute target
    move(RA, AT);                                      // restore caller's RA
    jr(T9);
  }
}
379 |
363 |
380 void MacroAssembler::b_far(address entry) |
364 void MacroAssembler::b_far(address entry) { |
381 { |
|
382 u_char * cur_pc = pc(); |
365 u_char * cur_pc = pc(); |
383 |
366 |
384 /* Jin: Near/Far jump */ |
367 /* Jin: Near/Far jump */ |
385 if(is_simm16((entry - pc() - 4) / 4)) |
368 if(is_simm16((entry - pc() - 4) / 4)) { |
386 { |
|
387 b(offset(entry)); |
369 b(offset(entry)); |
388 } |
370 } else { |
389 else |
|
390 { |
|
391 /* address must be bounded */ |
371 /* address must be bounded */ |
392 move(AT, RA); |
372 move(AT, RA); |
393 emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1)); |
373 emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1)); |
394 nop(); |
374 nop(); |
395 li32(T9, entry - pc()); |
375 li32(T9, entry - pc()); |
396 daddu(T9, T9, RA); |
376 daddu(T9, T9, RA); |
397 move(RA, AT); |
377 move(RA, AT); |
398 jr(T9); |
378 jr(T9); |
719 |
699 |
720 beq(AT, temp_reg, done); |
700 beq(AT, temp_reg, done); |
721 delayed()->nop(); |
701 delayed()->nop(); |
722 } |
702 } |
723 |
703 |
724 // NOTE: we dont increment the SP after call like the x86 version, maybe this is a problem, FIXME. |
|
725 // the stack pointer adjustment is needed. see InterpreterMacroAssembler::super_call_VM_leaf |
704 // the stack pointer adjustment is needed. see InterpreterMacroAssembler::super_call_VM_leaf |
726 // this method will handle the stack problem, you need not to preserve the stack space for the argument now |
705 // this method will handle the stack problem, you need not to preserve the stack space for the argument now |
727 void MacroAssembler::call_VM_leaf_base(address entry_point, |
706 void MacroAssembler::call_VM_leaf_base(address entry_point, int number_of_arguments) { |
728 int number_of_arguments) { |
|
729 //call(RuntimeAddress(entry_point)); |
|
730 //increment(rsp, number_of_arguments * wordSize); |
|
731 Label L, E; |
707 Label L, E; |
732 |
708 |
733 assert(number_of_arguments <= 4, "just check"); |
709 assert(number_of_arguments <= 4, "just check"); |
734 |
710 |
735 andi(AT, SP, 0xf); |
711 andi(AT, SP, 0xf); |
1156 void MacroAssembler::call_VM_base(Register oop_result, |
1132 void MacroAssembler::call_VM_base(Register oop_result, |
1157 Register java_thread, |
1133 Register java_thread, |
1158 Register last_java_sp, |
1134 Register last_java_sp, |
1159 address entry_point, |
1135 address entry_point, |
1160 int number_of_arguments, |
1136 int number_of_arguments, |
1161 bool check_exceptions) { |
1137 bool check_exceptions) { |
1162 |
1138 |
1163 address before_call_pc; |
1139 address before_call_pc; |
1164 // determine java_thread register |
1140 // determine java_thread register |
1165 if (!java_thread->is_valid()) { |
1141 if (!java_thread->is_valid()) { |
1166 #ifndef OPT_THREAD |
1142 #ifndef OPT_THREAD |
1188 |
1164 |
1189 // do the call |
1165 // do the call |
1190 move(A0, java_thread); |
1166 move(A0, java_thread); |
1191 call(entry_point, relocInfo::runtime_call_type); |
1167 call(entry_point, relocInfo::runtime_call_type); |
1192 delayed()->nop(); |
1168 delayed()->nop(); |
|
1169 //MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); |
1193 |
1170 |
1194 // restore the thread (cannot use the pushed argument since arguments |
1171 // restore the thread (cannot use the pushed argument since arguments |
1195 // may be overwritten by C code generated by an optimizing compiler); |
1172 // may be overwritten by C code generated by an optimizing compiler); |
1196 // however can use the register value directly if it is callee saved. |
1173 // however can use the register value directly if it is callee saved. |
1197 #ifndef OPT_THREAD |
1174 #ifndef OPT_THREAD |
1619 // Does store cross heap regions? |
1595 // Does store cross heap regions? |
1620 xorr(AT, store_addr, new_val); |
1596 xorr(AT, store_addr, new_val); |
1621 dsrl(AT, AT, HeapRegion::LogOfHRGrainBytes); |
1597 dsrl(AT, AT, HeapRegion::LogOfHRGrainBytes); |
1622 beq(AT, R0, done); |
1598 beq(AT, R0, done); |
1623 nop(); |
1599 nop(); |
1624 |
1600 |
1625 |
1601 |
1626 // crosses regions, storing NULL? |
1602 // crosses regions, storing NULL? |
1627 beq(new_val, R0, done); |
1603 beq(new_val, R0, done); |
1628 nop(); |
1604 nop(); |
1629 |
1605 |
1650 nop(); |
1626 nop(); |
1651 |
1627 |
1652 |
1628 |
1653 // storing a region crossing, non-NULL oop, card is clean. |
1629 // storing a region crossing, non-NULL oop, card is clean. |
1654 // dirty card and log. |
1630 // dirty card and log. |
1655 move(AT, (int)CardTableModRefBS::dirty_card_val()); |
1631 move(AT, (int)CardTableModRefBS::dirty_card_val()); |
1656 sb(AT, card_addr, 0); |
1632 sb(AT, card_addr, 0); |
1657 |
1633 |
1658 lw(AT, queue_index); |
1634 lw(AT, queue_index); |
1659 beq(AT, R0, runtime); |
1635 beq(AT, R0, runtime); |
1660 nop(); |
1636 nop(); |
3217 } |
3193 } |
3218 |
3194 |
3219 //for UseCompressedOops Option |
3195 //for UseCompressedOops Option |
3220 void MacroAssembler::load_klass(Register dst, Register src) { |
3196 void MacroAssembler::load_klass(Register dst, Register src) { |
3221 #ifdef _LP64 |
3197 #ifdef _LP64 |
3222 if(UseCompressedClassPointers){ |
3198 if(UseCompressedClassPointers){ |
3223 lwu(dst, Address(src, oopDesc::klass_offset_in_bytes())); |
3199 lwu(dst, Address(src, oopDesc::klass_offset_in_bytes())); |
3224 decode_klass_not_null(dst); |
3200 decode_klass_not_null(dst); |
3225 } else |
3201 } else |
3226 #endif |
3202 #endif |
3227 ld(dst, src, oopDesc::klass_offset_in_bytes()); |
3203 ld(dst, src, oopDesc::klass_offset_in_bytes()); |
3228 } |
3204 } |
3229 |
3205 |
3230 void MacroAssembler::store_klass(Register dst, Register src) { |
3206 void MacroAssembler::store_klass(Register dst, Register src) { |
3231 #ifdef _LP64 |
3207 #ifdef _LP64 |
3232 if(UseCompressedClassPointers){ |
3208 if(UseCompressedClassPointers){ |
3233 encode_klass_not_null(src); |
3209 encode_klass_not_null(src); |
3234 sw(src, dst, oopDesc::klass_offset_in_bytes()); |
3210 sw(src, dst, oopDesc::klass_offset_in_bytes()); |
3235 } else { |
3211 } else { |
3236 #endif |
3212 #endif |
3237 sd(src, dst, oopDesc::klass_offset_in_bytes()); |
3213 sd(src, dst, oopDesc::klass_offset_in_bytes()); |
3238 } |
3214 } |
3239 } |
3215 } |
3240 |
3216 |
3241 void MacroAssembler::load_prototype_header(Register dst, Register src) { |
3217 void MacroAssembler::load_prototype_header(Register dst, Register src) { |
3242 load_klass(dst, src); |
3218 load_klass(dst, src); |
3243 ld(dst, Address(dst, Klass::prototype_header_offset())); |
3219 ld(dst, Address(dst, Klass::prototype_header_offset())); |
3249 sw(src, dst, oopDesc::klass_gap_offset_in_bytes()); |
3225 sw(src, dst, oopDesc::klass_gap_offset_in_bytes()); |
3250 } |
3226 } |
3251 } |
3227 } |
3252 |
3228 |
3253 void MacroAssembler::load_heap_oop(Register dst, Address src) { |
3229 void MacroAssembler::load_heap_oop(Register dst, Address src) { |
3254 if(UseCompressedOops){ |
3230 if(UseCompressedOops){ |
3255 lwu(dst, src); |
3231 lwu(dst, src); |
3256 decode_heap_oop(dst); |
3232 decode_heap_oop(dst); |
3257 } else{ |
3233 } else { |
3258 ld(dst, src); |
3234 ld(dst, src); |
3259 } |
3235 } |
3260 } |
3236 } |
3261 |
3237 |
3262 void MacroAssembler::store_heap_oop(Address dst, Register src){ |
3238 void MacroAssembler::store_heap_oop(Address dst, Register src){ |
3263 if(UseCompressedOops){ |
3239 if(UseCompressedOops){ |
3264 assert(!dst.uses(src), "not enough registers"); |
3240 assert(!dst.uses(src), "not enough registers"); |
3265 encode_heap_oop(src); |
3241 encode_heap_oop(src); |
3266 sw(src, dst); |
3242 sw(src, dst); |
3267 } else{ |
3243 } else { |
3268 sd(src, dst); |
3244 sd(src, dst); |
3269 } |
3245 } |
3270 } |
3246 } |
3271 |
3247 |
3272 void MacroAssembler::store_heap_oop_null(Address dst){ |
3248 void MacroAssembler::store_heap_oop_null(Address dst){ |
3273 if(UseCompressedOops){ |
3249 if(UseCompressedOops){ |
3274 sw(R0, dst); |
3250 sw(R0, dst); |
3275 } else{ |
3251 } else { |
3276 sd(R0, dst); |
3252 sd(R0, dst); |
3277 } |
3253 } |
3278 } |
3254 } |
3279 |
3255 |
3280 #ifdef ASSERT |
3256 #ifdef ASSERT |
3281 void MacroAssembler::verify_heapbase(const char* msg) { |
3257 void MacroAssembler::verify_heapbase(const char* msg) { |
3282 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); |
3258 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); |
3297 shr(r, LogMinObjAlignmentInBytes); |
3273 shr(r, LogMinObjAlignmentInBytes); |
3298 } |
3274 } |
3299 return; |
3275 return; |
3300 } |
3276 } |
3301 |
3277 |
3302 movz(r, S5_heapbase, r); |
3278 movz(r, S5_heapbase, r); |
3303 dsub(r, r, S5_heapbase); |
3279 dsub(r, r, S5_heapbase); |
3304 if (Universe::narrow_oop_shift() != 0) { |
3280 if (Universe::narrow_oop_shift() != 0) { |
3305 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
3281 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
3306 shr(r, LogMinObjAlignmentInBytes); |
3282 shr(r, LogMinObjAlignmentInBytes); |
3307 } |
3283 } |
3308 } |
3284 } |
3309 |
3285 |
3310 void MacroAssembler::encode_heap_oop(Register dst, Register src) { |
3286 void MacroAssembler::encode_heap_oop(Register dst, Register src) { |
3311 #ifdef ASSERT |
3287 #ifdef ASSERT |
3312 verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?"); |
3288 verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?"); |
3337 } |
3313 } |
3338 } |
3314 } |
3339 } |
3315 } |
3340 |
3316 |
3341 void MacroAssembler::encode_heap_oop_not_null(Register r) { |
3317 void MacroAssembler::encode_heap_oop_not_null(Register r) { |
3342 assert (UseCompressedOops, "should be compressed"); |
3318 assert (UseCompressedOops, "should be compressed"); |
3343 #ifdef ASSERT |
3319 #ifdef ASSERT |
3344 if (CheckCompressedOops) { |
3320 if (CheckCompressedOops) { |
3345 Label ok; |
3321 Label ok; |
3346 bne(r, R0, ok); |
3322 bne(r, R0, ok); |
3347 delayed()->nop(); |
3323 delayed()->nop(); |
3348 stop("null oop passed to encode_heap_oop_not_null"); |
3324 stop("null oop passed to encode_heap_oop_not_null"); |
3349 bind(ok); |
3325 bind(ok); |
3350 } |
3326 } |
3351 #endif |
3327 #endif |
3352 verify_oop(r, "broken oop in encode_heap_oop_not_null"); |
3328 verify_oop(r, "broken oop in encode_heap_oop_not_null"); |
3353 if (Universe::narrow_oop_base() != NULL) { |
3329 if (Universe::narrow_oop_base() != NULL) { |
3354 dsub(r, r, S5_heapbase); |
3330 dsub(r, r, S5_heapbase); |
3355 } |
3331 } |
3359 } |
3335 } |
3360 |
3336 |
3361 } |
3337 } |
3362 |
3338 |
// Compress the known-non-null heap oop in 'src' into 'dst':
//   dst = (src - narrow_oop_base) >> narrow_oop_shift
// with the base subtraction and/or shift omitted when the JVM's narrow-oop
// mode does not use them. 'src' is left unmodified (unlike the in-place
// single-register overload).
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  // Debug build: trap a null oop passed in violation of the contract.
  if (CheckCompressedOops) {
    Label ok;
    bne(src, R0, ok);
    delayed()->nop();
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");

  if (Universe::narrow_oop_base() != NULL) {
    // Base is non-zero: subtract it, then shift if alignment requires.
    dsub(dst, src, S5_heapbase);
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shr(dst, LogMinObjAlignmentInBytes);
    }
  } else {
    // Zero base: shift-only encoding, or a plain register move when the
    // shift is zero too (unscaled mode).
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      dsrl(dst, src, LogMinObjAlignmentInBytes);
    } else {
      if (dst != src) move(dst, src);
    }
  }
}
3391 |
3367 |
3392 void MacroAssembler::decode_heap_oop(Register r) { |
3368 void MacroAssembler::decode_heap_oop(Register r) { |
3393 #ifdef ASSERT |
3369 #ifdef ASSERT |
3394 verify_heapbase("MacroAssembler::decode_heap_oop corrupted?"); |
3370 verify_heapbase("MacroAssembler::decode_heap_oop corrupted?"); |
3498 } |
3474 } |
3499 if (Universe::narrow_klass_shift() != 0) { |
3475 if (Universe::narrow_klass_shift() != 0) { |
3500 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
3476 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
3501 shr(r, LogKlassAlignmentInBytes); |
3477 shr(r, LogKlassAlignmentInBytes); |
3502 } |
3478 } |
3503 // Not neccessary for MIPS at all. |
|
3504 //if (Universe::narrow_klass_base() != NULL) { |
|
3505 // reinit_heapbase(); |
|
3506 //} |
|
3507 } |
3479 } |
3508 |
3480 |
3509 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { |
3481 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { |
3510 if (dst == src) { |
3482 if (dst == src) { |
3511 encode_klass_not_null(src); |
3483 encode_klass_not_null(src); |
3607 return; |
3579 return; |
3608 } |
3580 } |
3609 if (value < 0) { incrementl(reg, -value); return; } |
3581 if (value < 0) { incrementl(reg, -value); return; } |
3610 if (value == 0) { ; return; } |
3582 if (value == 0) { ; return; } |
3611 |
3583 |
3612 if(Assembler::is_simm16(value)) { |
3584 if (Assembler::is_simm16(value)) { |
3613 NOT_LP64(addiu(reg, reg, -value)); |
3585 NOT_LP64(addiu(reg, reg, -value)); |
3614 LP64_ONLY(move(AT, value); subu32(reg, reg, AT)); |
3586 LP64_ONLY(move(AT, value); subu32(reg, reg, AT)); |
3615 } else { |
3587 } else { |
3616 move(AT, value); |
3588 move(AT, value); |
3617 LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT)); |
3589 LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT)); |
3684 // non-primary types such as array-of-interface. Otherwise, each such |
3656 // non-primary types such as array-of-interface. Otherwise, each such |
3685 // type would need its own customized SSA. |
3657 // type would need its own customized SSA. |
3686 // We move this check to the front of the fast path because many |
3658 // We move this check to the front of the fast path because many |
3687 // type checks are in fact trivially successful in this manner, |
3659 // type checks are in fact trivially successful in this manner, |
3688 // so we get a nicely predicted branch right at the start of the check. |
3660 // so we get a nicely predicted branch right at the start of the check. |
3689 //cmpptr(sub_klass, super_klass); |
|
3690 //local_jcc(Assembler::equal, *L_success); |
|
3691 beq(sub_klass, super_klass, *L_success); |
3661 beq(sub_klass, super_klass, *L_success); |
3692 delayed()->nop(); |
3662 delayed()->nop(); |
3693 // Check the supertype display: |
3663 // Check the supertype display: |
3694 if (must_load_sco) { |
3664 if (must_load_sco) { |
3695 // Positive movl does right thing on LP64. |
3665 // Positive movl does right thing on LP64. |
3696 lwu(temp_reg, super_klass, sco_offset); |
3666 lwu(temp_reg, super_klass, sco_offset); |
3697 super_check_offset = RegisterOrConstant(temp_reg); |
3667 super_check_offset = RegisterOrConstant(temp_reg); |
3698 } |
3668 } |
3699 dsll(AT, super_check_offset.register_or_noreg(), Address::times_1); |
3669 dsll(AT, super_check_offset.register_or_noreg(), Address::times_1); |
3700 daddu(AT, sub_klass, AT); |
3670 daddu(AT, sub_klass, AT); |
3701 ld(AT, AT, super_check_offset.constant_or_zero()*Address::times_1); |
3671 ld(AT, AT, super_check_offset.constant_or_zero()*Address::times_1); |
3710 // what we need immediately. |
3680 // what we need immediately. |
3711 // So if it was a primary super, we can just fail immediately. |
3681 // So if it was a primary super, we can just fail immediately. |
3712 // Otherwise, it's the slow path for us (no success at this point). |
3682 // Otherwise, it's the slow path for us (no success at this point). |
3713 |
3683 |
3714 if (super_check_offset.is_register()) { |
3684 if (super_check_offset.is_register()) { |
3715 beq(super_klass, AT, *L_success); |
3685 beq(super_klass, AT, *L_success); |
3716 delayed()->nop(); |
3686 delayed()->nop(); |
3717 addi(AT, super_check_offset.as_register(), -sc_offset); |
3687 addi(AT, super_check_offset.as_register(), -sc_offset); |
3718 if (L_failure == &L_fallthrough) { |
3688 if (L_failure == &L_fallthrough) { |
3719 beq(AT, R0, *L_slow_path); |
3689 beq(AT, R0, *L_slow_path); |
3720 delayed()->nop(); |
3690 delayed()->nop(); |
3721 } else { |
3691 } else { |
3722 bne(AT, R0, *L_failure); |
3692 bne(AT, R0, *L_failure); |
3723 delayed()->nop(); |
3693 delayed()->nop(); |
3724 b(*L_slow_path); |
3694 b(*L_slow_path); |
3725 delayed()->nop(); |
3695 delayed()->nop(); |
3726 } |
3696 } |
3727 } else if (super_check_offset.as_constant() == sc_offset) { |
3697 } else if (super_check_offset.as_constant() == sc_offset) { |
3728 // Need a slow path; fast failure is impossible. |
3698 // Need a slow path; fast failure is impossible. |
3729 if (L_slow_path == &L_fallthrough) { |
3699 if (L_slow_path == &L_fallthrough) { |
3730 beq(super_klass, AT, *L_success); |
3700 beq(super_klass, AT, *L_success); |
3731 delayed()->nop(); |
3701 delayed()->nop(); |
3732 } else { |
3702 } else { |
3733 bne(super_klass, AT, *L_slow_path); |
3703 bne(super_klass, AT, *L_slow_path); |
3734 delayed()->nop(); |
3704 delayed()->nop(); |
3735 b(*L_success); |
3705 b(*L_success); |
3736 delayed()->nop(); |
3706 delayed()->nop(); |
3737 } |
3707 } |
3738 } else { |
3708 } else { |
3739 // No slow path; it's a fast decision. |
3709 // No slow path; it's a fast decision. |
3740 if (L_failure == &L_fallthrough) { |
3710 if (L_failure == &L_fallthrough) { |
3741 beq(super_klass, AT, *L_success); |
3711 beq(super_klass, AT, *L_success); |
3742 delayed()->nop(); |
3712 delayed()->nop(); |
3743 } else { |
3713 } else { |
3744 bne(super_klass, AT, *L_failure); |
3714 bne(super_klass, AT, *L_failure); |
3745 delayed()->nop(); |
3715 delayed()->nop(); |
3746 b(*L_success); |
3716 b(*L_success); |
3747 delayed()->nop(); |
3717 delayed()->nop(); |
3748 } |
3718 } |
3749 } |
3719 } |
3750 |
3720 |
3751 bind(L_fallthrough); |
3721 bind(L_fallthrough); |
3752 |
3722 |
3782 // Do a linear scan of the secondary super-klass chain. |
3752 // Do a linear scan of the secondary super-klass chain. |
3783 // This code is rarely used, so simplicity is a virtue here. |
3753 // This code is rarely used, so simplicity is a virtue here. |
3784 // The repne_scan instruction uses fixed registers, which we must spill. |
3754 // The repne_scan instruction uses fixed registers, which we must spill. |
3785 // Don't worry too much about pre-existing connections with the input regs. |
3755 // Don't worry too much about pre-existing connections with the input regs. |
3786 |
3756 |
3787 #if 0 |
|
3788 assert(sub_klass != T9, "killed reg"); // killed by mov(rax, super) |
|
3789 assert(sub_klass != T1, "killed reg"); // killed by lea(rcx, &pst_counter) |
|
3790 #endif |
|
3791 |
|
3792 // Get super_klass value into rax (even if it was in rdi or rcx). |
3757 // Get super_klass value into rax (even if it was in rdi or rcx). |
3793 #ifndef PRODUCT |
3758 #ifndef PRODUCT |
3794 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; |
3759 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; |
3795 ExternalAddress pst_counter_addr((address) pst_counter); |
3760 ExternalAddress pst_counter_addr((address) pst_counter); |
3796 NOT_LP64( incrementl(pst_counter_addr) ); |
3761 NOT_LP64( incrementl(pst_counter_addr) ); |
3797 //LP64_ONLY( lea(rcx, pst_counter_addr) ); |
|
3798 //LP64_ONLY( incrementl(Address(rcx, 0)) ); |
|
3799 #endif //PRODUCT |
3762 #endif //PRODUCT |
3800 |
3763 |
3801 // We will consult the secondary-super array. |
3764 // We will consult the secondary-super array. |
3802 ld(temp_reg, secondary_supers_addr); |
3765 ld(temp_reg, secondary_supers_addr); |
3803 // Load the array length. (Positive movl does right thing on LP64.) |
3766 // Load the array length. (Positive movl does right thing on LP64.) |
3832 // Success. Cache the super we found and proceed in triumph. |
3795 // Success. Cache the super we found and proceed in triumph. |
3833 #undef IS_A_TEMP |
3796 #undef IS_A_TEMP |
3834 |
3797 |
3835 bind(L_fallthrough); |
3798 bind(L_fallthrough); |
3836 } |
3799 } |
|
3800 |
// Fetch the oop result the VM left in the current thread's vm_result slot
// and clear the slot afterwards (the load must happen before the clear;
// clearing presumably keeps the slot from being treated as a live root --
// confirm against JavaThread::vm_result handling).
void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ld(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  sd(R0, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}
3947 round_to(scan_temp, BytesPerLong); |
3911 round_to(scan_temp, BytesPerLong); |
3948 } |
3912 } |
3949 |
3913 |
3950 // Adjust recv_klass by scaled itable_index, so we can free itable_index. |
3914 // Adjust recv_klass by scaled itable_index, so we can free itable_index. |
3951 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); |
3915 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); |
3952 // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); |
|
3953 if (itable_index.is_constant()) { |
3916 if (itable_index.is_constant()) { |
3954 set64(AT, (int)itable_index.is_constant()); |
3917 set64(AT, (int)itable_index.is_constant()); |
3955 dsll(AT, AT, (int)Address::times_ptr); |
3918 dsll(AT, AT, (int)Address::times_ptr); |
3956 } else { |
3919 } else { |
3957 dsll(AT, itable_index.as_register(), (int)Address::times_ptr); |
3920 dsll(AT, itable_index.as_register(), (int)Address::times_ptr); |
3958 } |
3921 } |
3959 daddu(AT, AT, recv_klass); |
3922 daddu(AT, AT, recv_klass); |
3960 daddiu(recv_klass, AT, itentry_off); |
3923 daddiu(recv_klass, AT, itentry_off); |
3961 |
3924 |
3962 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { |
|
3963 // if (scan->interface() == intf) { |
|
3964 // result = (klass + scan->offset() + itable_index); |
|
3965 // } |
|
3966 // } |
|
3967 Label search, found_method; |
3925 Label search, found_method; |
3968 |
3926 |
3969 for (int peel = 1; peel >= 0; peel--) { |
3927 for (int peel = 1; peel >= 0; peel--) { |
3970 ld(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); |
3928 ld(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); |
3971 |
3929 |
3992 |
3950 |
3993 bind(found_method); |
3951 bind(found_method); |
3994 |
3952 |
3995 // Got a hit. |
3953 // Got a hit. |
3996 lw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); |
3954 lw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); |
3997 //ld(method_result, Address(recv_klass, scan_temp, Address::times_1)); |
|
3998 if(UseLoongsonISA) { |
3955 if(UseLoongsonISA) { |
3999 gsldx(method_result, recv_klass, scan_temp, 0); |
3956 gsldx(method_result, recv_klass, scan_temp, 0); |
4000 } else { |
3957 } else { |
4001 daddu(AT, recv_klass, scan_temp); |
3958 daddu(AT, recv_klass, scan_temp); |
4002 ld(method_result, AT); |
3959 ld(method_result, AT); |
4003 } |
3960 } |
4004 } |
3961 } |
4005 |
|
4006 |
3962 |
4007 // virtual method calling |
3963 // virtual method calling |
4008 void MacroAssembler::lookup_virtual_method(Register recv_klass, |
3964 void MacroAssembler::lookup_virtual_method(Register recv_klass, |
4009 RegisterOrConstant vtable_index, |
3965 RegisterOrConstant vtable_index, |
4010 Register method_result) { |
3966 Register method_result) { |