Tue, 24 Oct 2017 15:06:31 +0800
Refine the oop store.
aoqi@6880 | 1 | /* |
aoqi@6880 | 2 | * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
aoqi@6880 | 3 | * Copyright (c) 2017, Loongson Technology. All rights reserved. |
aoqi@6880 | 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
aoqi@6880 | 5 | * |
aoqi@6880 | 6 | * This code is free software; you can redistribute it and/or modify it |
aoqi@6880 | 7 | * under the terms of the GNU General Public License version 2 only, as |
aoqi@6880 | 8 | * published by the Free Software Foundation. |
aoqi@6880 | 9 | * |
aoqi@6880 | 10 | * This code is distributed in the hope that it will be useful, but WITHOUT |
aoqi@6880 | 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
aoqi@6880 | 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
aoqi@6880 | 13 | * version 2 for more details (a copy is included in the LICENSE file that |
aoqi@6880 | 14 | * accompanied this code). |
aoqi@6880 | 15 | * |
aoqi@6880 | 16 | * You should have received a copy of the GNU General Public License version |
aoqi@6880 | 17 | * 2 along with this work; if not, write to the Free Software Foundation, |
aoqi@6880 | 18 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
aoqi@6880 | 19 | * |
aoqi@6880 | 20 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
aoqi@6880 | 21 | * or visit www.oracle.com if you need additional information or have any |
aoqi@6880 | 22 | * questions. |
aoqi@6880 | 23 | * |
aoqi@6880 | 24 | */ |
aoqi@6880 | 25 | |
aoqi@6880 | 26 | #include "precompiled.hpp" |
aoqi@6880 | 27 | #include "asm/assembler.hpp" |
aoqi@6880 | 28 | #include "asm/assembler.inline.hpp" |
aoqi@6880 | 29 | #include "asm/macroAssembler.inline.hpp" |
aoqi@6880 | 30 | #include "compiler/disassembler.hpp" |
aoqi@6880 | 31 | #include "gc_interface/collectedHeap.inline.hpp" |
aoqi@6880 | 32 | #include "interpreter/interpreter.hpp" |
aoqi@6880 | 33 | #include "memory/cardTableModRefBS.hpp" |
aoqi@6880 | 34 | #include "memory/resourceArea.hpp" |
aoqi@6880 | 35 | #include "memory/universe.hpp" |
aoqi@6880 | 36 | #include "prims/methodHandles.hpp" |
aoqi@6880 | 37 | #include "runtime/biasedLocking.hpp" |
aoqi@6880 | 38 | #include "runtime/interfaceSupport.hpp" |
aoqi@6880 | 39 | #include "runtime/objectMonitor.hpp" |
aoqi@6880 | 40 | #include "runtime/os.hpp" |
aoqi@6880 | 41 | #include "runtime/sharedRuntime.hpp" |
aoqi@6880 | 42 | #include "runtime/stubRoutines.hpp" |
aoqi@6880 | 43 | #include "utilities/macros.hpp" |
aoqi@6880 | 44 | #if INCLUDE_ALL_GCS |
aoqi@6880 | 45 | #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
aoqi@6880 | 46 | #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" |
aoqi@6880 | 47 | #include "gc_implementation/g1/heapRegion.hpp" |
aoqi@6880 | 48 | #endif // INCLUDE_ALL_GCS |
aoqi@6880 | 49 | |
aoqi@6880 | 50 | // Implementation of MacroAssembler |
aoqi@6880 | 51 | |
// Static scratch buffers used by save_registers()/restore_registers() to
// spill all 32 integer and 32 float registers (debugging aid; dumped by print()).
aoqi@6880 | 52 | intptr_t MacroAssembler::i[32] = {0}; |
aoqi@6880 | 53 | float MacroAssembler::f[32] = {0.0}; |
aoqi@6880 | 54 | |
// Dump the register snapshot captured by save_registers(): the 32 integer
// registers (hex) followed by the 32 float registers, one per line.
aoqi@6880 | 55 | void MacroAssembler::print(outputStream *s) { |
aoqi@6880 | 56 | unsigned int k; |
aoqi@6880 | 57 | for(k=0; k<sizeof(i)/sizeof(i[0]); k++) { |
aoqi@6880 | 58 | s->print_cr("i%d = 0x%.16lx", k, i[k]); |
aoqi@6880 | 59 | } |
aoqi@6880 | 60 | s->cr(); |
aoqi@6880 | 61 | |
aoqi@6880 | 62 | for(k=0; k<sizeof(f)/sizeof(f[0]); k++) { |
aoqi@6880 | 63 | s->print_cr("f%d = %f", k, f[k]); |
aoqi@6880 | 64 | } |
aoqi@6880 | 65 | s->cr(); |
aoqi@6880 | 66 | } |
aoqi@6880 | 67 | |
// Address of element k of the static spill buffer 'i', as an int.
// NOTE(review): since 'i' is a static member, &((MacroAssembler*)0)->i[k] is
// the ABSOLUTE address of i[k] (truncated to int on LP64), not a struct
// offset — confirm callers (save/restore_registers via A0) expect this.
aoqi@6880 | 68 | int MacroAssembler::i_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->i[k]; } |
// Address of element k of the static spill buffer 'f', as an int.
// NOTE(review): 'f' is a static member, so this is an absolute address
// truncated to int on LP64, not a struct offset — verify against callers.
aoqi@6880 | 69 | int MacroAssembler::f_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->f[k]; } |
aoqi@6880 | 70 | |
// Emit code (into *masm) that stores all 32 integer registers and all 32
// float registers to memory addressed relative to A0, at the i_offset()/
// f_offset() displacements used by restore_registers() and print().
aoqi@6880 | 71 | void MacroAssembler::save_registers(MacroAssembler *masm) { |
aoqi@6880 | 72 | #define __ masm-> |
aoqi@6880 | 73 | for(int k=0; k<32; k++) { |
aoqi@6880 | 74 | __ sw (as_Register(k), A0, i_offset(k)); |
aoqi@6880 | 75 | } |
aoqi@6880 | 76 | |
aoqi@6880 | 77 | for(int k=0; k<32; k++) { |
aoqi@6880 | 78 | __ swc1 (as_FloatRegister(k), A0, f_offset(k)); |
aoqi@6880 | 79 | } |
aoqi@6880 | 80 | #undef __ |
aoqi@6880 | 81 | } |
aoqi@6880 | 82 | |
// Emit code (into *masm) that reloads all 32 integer and 32 float registers
// from the A0-relative slots written by save_registers().
aoqi@6880 | 83 | void MacroAssembler::restore_registers(MacroAssembler *masm) { |
aoqi@6880 | 84 | #define __ masm-> |
aoqi@6880 | 85 | for(int k=0; k<32; k++) { |
aoqi@6880 | 86 | __ lw (as_Register(k), A0, i_offset(k)); |
aoqi@6880 | 87 | } |
aoqi@6880 | 88 | |
aoqi@6880 | 89 | for(int k=0; k<32; k++) { |
aoqi@6880 | 90 | __ lwc1 (as_FloatRegister(k), A0, f_offset(k)); |
aoqi@6880 | 91 | } |
aoqi@6880 | 92 | #undef __ |
aoqi@6880 | 93 | } |
aoqi@6880 | 94 | |
aoqi@6880 | 95 | |
// Retarget a previously-emitted branch at 'branch' to jump to 'target'.
// Two shapes are handled:
//  1) the 8-instruction far-branch stub emitted by b_far() (recognized by
//     its leading 'dadd', see the listing in the comment below): its
//     lui/ori pair is rewritten with the 32-bit RA-relative offset, or the
//     whole stub is overwritten with "b target" + nops if the offset now
//     fits in simm16;
//  2) an ordinary 16-bit-offset branch: its immediate field is rewritten
//     via patched_branch().
aoqi@6880 | 96 | void MacroAssembler::pd_patch_instruction(address branch, address target) { |
aoqi@6880 | 97 | jint& stub_inst = *(jint*) branch; |
aoqi@6880 | 98 | |
aoqi@6880 | 99 | /* * |
aoqi@6880 | 100 | move(AT, RA); // dadd |
aoqi@6880 | 101 | emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1)); |
aoqi@6880 | 102 | nop(); |
aoqi@6880 | 103 | lui(T9, 0); // to be patched |
aoqi@6880 | 104 | ori(T9, 0); |
aoqi@6880 | 105 | daddu(T9, T9, RA); |
aoqi@6880 | 106 | move(RA, AT); |
aoqi@6880 | 107 | jr(T9); |
aoqi@6880 | 108 | */ |
aoqi@6880 | 109 | if(special(stub_inst) == dadd_op) { |
aoqi@6880 | 110 | jint *pc = (jint *)branch; |
aoqi@6880 | 111 | |
// Sanity-check the expected stub shape; the duplicated runtime 'if' below
// repeats the assert's condition so PRODUCT builds still log a diagnostic.
aoqi@6880 | 112 | assert(opcode(pc[3]) == lui_op |
aoqi@6880 | 113 | && opcode(pc[4]) == ori_op |
aoqi@6880 | 114 | && special(pc[5]) == daddu_op, "Not a branch label patch"); |
aoqi@6880 | 115 | if(!(opcode(pc[3]) == lui_op |
aoqi@6880 | 116 | && opcode(pc[4]) == ori_op |
aoqi@6880 | 117 | && special(pc[5]) == daddu_op)) { tty->print_cr("Not a branch label patch"); } |
aoqi@6880 | 118 | |
// The bgezal at pc[1] deposits pc[3]'s address (branch + 12) into RA, hence
// the "offset - 12" correction when patching the lui/ori pair.
aoqi@6880 | 119 | int offset = target - branch; |
aoqi@6880 | 120 | if (!is_simm16(offset)) |
aoqi@6880 | 121 | { |
aoqi@6880 | 122 | pc[3] = (pc[3] & 0xffff0000) | high16(offset - 12); |
aoqi@6880 | 123 | pc[4] = (pc[4] & 0xffff0000) | low16(offset - 12); |
aoqi@6880 | 124 | } |
aoqi@6880 | 125 | else |
aoqi@6880 | 126 | { |
aoqi@6880 | 127 | /* revert to "beq + nop" */ |
aoqi@6880 | 128 | CodeBuffer cb(branch, 4 * 10); |
aoqi@6880 | 129 | MacroAssembler masm(&cb); |
aoqi@6880 | 130 | #define __ masm. |
aoqi@6880 | 131 | __ b(target); |
aoqi@6880 | 132 | __ nop(); |
aoqi@6880 | 133 | __ nop(); |
aoqi@6880 | 134 | __ nop(); |
aoqi@6880 | 135 | __ nop(); |
aoqi@6880 | 136 | __ nop(); |
aoqi@6880 | 137 | __ nop(); |
aoqi@6880 | 138 | __ nop(); |
aoqi@6880 | 139 | } |
aoqi@6880 | 140 | return; |
aoqi@6880 | 141 | } |
aoqi@6880 | 142 | |
// Debug-only: dump surrounding instructions if the target no longer fits the
// 16-bit branch displacement.
// NOTE(review): "0x%lx" is paired with an 'int' (p[i]) and a pointer (target)
// — format/argument mismatch on LP64; p2i()/INTPTR_FORMAT would be correct.
aoqi@6880 | 143 | #ifndef PRODUCT |
aoqi@6880 | 144 | if (!is_simm16((target - branch - 4) >> 2)) |
aoqi@6880 | 145 | { |
aoqi@6880 | 146 | tty->print_cr("Illegal patching: target=0x%lx", target); |
aoqi@6880 | 147 | int *p = (int *)branch; |
aoqi@6880 | 148 | for (int i = -10; i < 10; i++) |
aoqi@6880 | 149 | { |
aoqi@6880 | 150 | tty->print("0x%lx, ", p[i]); |
aoqi@6880 | 151 | } |
aoqi@6880 | 152 | tty->print_cr(""); |
aoqi@6880 | 153 | } |
aoqi@6880 | 154 | #endif |
aoqi@6880 | 155 | |
aoqi@6880 | 156 | stub_inst = patched_branch(target - branch, stub_inst, 0); |
aoqi@6880 | 157 | } |
aoqi@6880 | 158 | |
// Lowest address code can occupy in the code cache (skips the first heap
// block header); used to bound jal/j reachability checks.
aoqi@6880 | 159 | static inline address first_cache_address() { |
aoqi@6880 | 160 | return CodeCache::low_bound() + sizeof(HeapBlock::Header); |
aoqi@6880 | 161 | } |
aoqi@6880 | 162 | |
// Highest address a code-cache instruction can start at (one instruction
// below the upper bound); used to bound jal/j reachability checks.
aoqi@6880 | 163 | static inline address last_cache_address() { |
aoqi@6880 | 164 | return CodeCache::high_bound() - Assembler::InstructionSize; |
aoqi@6880 | 165 | } |
aoqi@6880 | 166 | |
// Size in bytes of the call sequence that would be emitted for 'target':
// patchable calls are always 6 instructions (matching patchable_call),
// near calls are jal + delay-slot nop, far calls are set64 + jalr + nop.
aoqi@6880 | 167 | int MacroAssembler::call_size(address target, bool far, bool patchable) { |
aoqi@6880 | 168 | if (patchable) return 6 << Assembler::LogInstructionSize; |
aoqi@6880 | 169 | if (!far) return 2 << Assembler::LogInstructionSize; // jal + nop |
aoqi@6880 | 170 | return (insts_for_set64((jlong)target) + 2) << Assembler::LogInstructionSize; |
aoqi@6880 | 171 | } |
aoqi@6880 | 172 | |
aoqi@6880 | 173 | // Can we reach target using jal/j from anywhere |
aoqi@6880 | 174 | // in the code cache (because code can be relocated)? |
// True if 'target' is reachable by a jal/j instruction from BOTH extremes of
// the code cache — i.e. still reachable after the caller is relocated
// anywhere within it.
aoqi@6880 | 175 | bool MacroAssembler::reachable_from_cache(address target) { |
aoqi@6880 | 176 | address cl = first_cache_address(); |
aoqi@6880 | 177 | address ch = last_cache_address(); |
aoqi@6880 | 178 | |
aoqi@6880 | 179 | return fit_in_jal(target, cl) && fit_in_jal(target, ch); |
aoqi@6880 | 180 | } |
aoqi@6880 | 181 | |
// Unconditional jump to 'target': a j + delay-slot nop when reachable from
// anywhere in the code cache, otherwise a full 64-bit load into T9 + jr.
// Clobbers T9 on the far path.
aoqi@6880 | 182 | void MacroAssembler::general_jump(address target) { |
aoqi@6880 | 183 | if (reachable_from_cache(target)) { |
aoqi@6880 | 184 | j(target); |
aoqi@6880 | 185 | nop(); |
aoqi@6880 | 186 | } else { |
aoqi@6880 | 187 | set64(T9, (long)target); |
aoqi@6880 | 188 | jr(T9); |
aoqi@6880 | 189 | nop(); |
aoqi@6880 | 190 | } |
aoqi@6880 | 191 | } |
aoqi@6880 | 192 | |
// Instruction count of the sequence general_jump(target) would emit; must be
// kept in sync with general_jump() (the commented lines mirror it).
aoqi@6880 | 193 | int MacroAssembler::insts_for_general_jump(address target) { |
aoqi@6880 | 194 | if (reachable_from_cache(target)) { |
aoqi@6880 | 195 | //j(target); |
aoqi@6880 | 196 | //nop(); |
aoqi@6880 | 197 | return 2; |
aoqi@6880 | 198 | } else { |
aoqi@6880 | 199 | //set64(T9, (long)target); |
aoqi@6880 | 200 | //jr(T9); |
aoqi@6880 | 201 | //nop(); |
aoqi@6880 | 202 | return insts_for_set64((jlong)target) + 2; |
aoqi@6880 | 203 | } |
aoqi@6880 | 204 | } |
aoqi@6880 | 205 | |
// Unconditional jump that always occupies exactly 6 instruction slots so it
// can later be repatched in place to either form: the near case pads with 4
// nops before j, the far case uses patchable_set48 (fixed-length load of
// 'target' into T9) + jr + nop.
aoqi@6880 | 206 | void MacroAssembler::patchable_jump(address target) { |
aoqi@6880 | 207 | if (reachable_from_cache(target)) { |
aoqi@6880 | 208 | nop(); |
aoqi@6880 | 209 | nop(); |
aoqi@6880 | 210 | nop(); |
aoqi@6880 | 211 | nop(); |
aoqi@6880 | 212 | j(target); |
aoqi@6880 | 213 | nop(); |
aoqi@6880 | 214 | } else { |
aoqi@6880 | 215 | patchable_set48(T9, (long)target); |
aoqi@6880 | 216 | jr(T9); |
aoqi@6880 | 217 | nop(); |
aoqi@6880 | 218 | } |
aoqi@6880 | 219 | } |
aoqi@6880 | 220 | |
// patchable_jump() emits exactly 6 instructions on both paths, independent
// of 'target'.
aoqi@6880 | 221 | int MacroAssembler::insts_for_patchable_jump(address target) { |
aoqi@6880 | 222 | return 6; |
aoqi@6880 | 223 | } |
aoqi@6880 | 224 | |
// Call 'target': jal + delay-slot nop when reachable from anywhere in the
// code cache, otherwise a full 64-bit load into T9 + jalr. Clobbers T9 on
// the far path; RA receives the return address in both.
aoqi@6880 | 225 | void MacroAssembler::general_call(address target) { |
aoqi@6880 | 226 | if (reachable_from_cache(target)) { |
aoqi@6880 | 227 | jal(target); |
aoqi@6880 | 228 | nop(); |
aoqi@6880 | 229 | } else { |
aoqi@6880 | 230 | set64(T9, (long)target); |
aoqi@6880 | 231 | jalr(T9); |
aoqi@6880 | 232 | nop(); |
aoqi@6880 | 233 | } |
aoqi@6880 | 234 | } |
aoqi@6880 | 235 | |
// Instruction count of the sequence general_call(target) would emit; must be
// kept in sync with general_call() (the commented lines mirror it).
aoqi@6880 | 236 | int MacroAssembler::insts_for_general_call(address target) { |
aoqi@6880 | 237 | if (reachable_from_cache(target)) { |
aoqi@6880 | 238 | //jal(target); |
aoqi@6880 | 239 | //nop(); |
aoqi@6880 | 240 | return 2; |
aoqi@6880 | 241 | } else { |
aoqi@6880 | 242 | //set64(T9, (long)target); |
aoqi@6880 | 243 | //jalr(T9); |
aoqi@6880 | 244 | //nop(); |
aoqi@6880 | 245 | return insts_for_set64((jlong)target) + 2; |
aoqi@6880 | 246 | } |
aoqi@6880 | 247 | } |
aoqi@6880 | 248 | |
// Call that always occupies exactly 6 instruction slots so it can later be
// repatched in place to either form: 4 pad nops + jal + nop when near,
// patchable_set48(T9) + jalr + nop when far.
aoqi@6880 | 249 | void MacroAssembler::patchable_call(address target) { |
aoqi@6880 | 250 | if (reachable_from_cache(target)) { |
aoqi@6880 | 251 | nop(); |
aoqi@6880 | 252 | nop(); |
aoqi@6880 | 253 | nop(); |
aoqi@6880 | 254 | nop(); |
aoqi@6880 | 255 | jal(target); |
aoqi@6880 | 256 | nop(); |
aoqi@6880 | 257 | } else { |
aoqi@6880 | 258 | patchable_set48(T9, (long)target); |
aoqi@6880 | 259 | jalr(T9); |
aoqi@6880 | 260 | nop(); |
aoqi@6880 | 261 | } |
aoqi@6880 | 262 | } |
aoqi@6880 | 263 | |
// patchable_call() emits exactly 6 instructions on both paths, independent
// of 'target'.
aoqi@6880 | 264 | int MacroAssembler::insts_for_patchable_call(address target) { |
aoqi@6880 | 265 | return 6; |
aoqi@6880 | 266 | } |
aoqi@6880 | 267 | |
// beq that works beyond the +/-128KB simm16 branch range: when 'entry' is
// near, emit a plain beq; otherwise invert the condition (bne hops over a
// b_far to 'entry').  The caller still supplies the delay slot for the near
// form, hence the has_delay_slot() after bind on the far path.
// NOTE(review): local 'cur_pc' is unused — presumably debugging leftover.
aoqi@6880 | 268 | void MacroAssembler::beq_far(Register rs, Register rt, address entry) |
aoqi@6880 | 269 | { |
aoqi@6880 | 270 | u_char * cur_pc = pc(); |
aoqi@6880 | 271 | |
aoqi@6880 | 272 | /* Jin: Near/Far jump */ |
aoqi@6880 | 273 | if(is_simm16((entry - pc() - 4) / 4)) |
aoqi@6880 | 274 | { |
aoqi@6880 | 275 | Assembler::beq(rs, rt, offset(entry)); |
aoqi@6880 | 276 | } |
aoqi@6880 | 277 | else |
aoqi@6880 | 278 | { |
aoqi@6880 | 279 | Label not_jump; |
aoqi@6880 | 280 | bne(rs, rt, not_jump); |
aoqi@6880 | 281 | delayed()->nop(); |
aoqi@6880 | 282 | |
aoqi@6880 | 283 | b_far(entry); |
aoqi@6880 | 284 | delayed()->nop(); |
aoqi@6880 | 285 | |
aoqi@6880 | 286 | bind(not_jump); |
aoqi@6880 | 287 | has_delay_slot(); |
aoqi@6880 | 288 | } |
aoqi@6880 | 289 | } |
aoqi@6880 | 290 | |
// Label form of beq_far: a bound label delegates to the address form; an
// unbound (forward) label conservatively emits the inverted-bne-over-b_far
// sequence, since the final distance is unknown.
// NOTE(review): local 'cur_pc' is unused — presumably debugging leftover.
aoqi@6880 | 291 | void MacroAssembler::beq_far(Register rs, Register rt, Label& L) |
aoqi@6880 | 292 | { |
aoqi@6880 | 293 | if (L.is_bound()) { |
aoqi@6880 | 294 | beq_far(rs, rt, target(L)); |
aoqi@6880 | 295 | } else { |
aoqi@6880 | 296 | u_char * cur_pc = pc(); |
aoqi@6880 | 297 | Label not_jump; |
aoqi@6880 | 298 | bne(rs, rt, not_jump); |
aoqi@6880 | 299 | delayed()->nop(); |
aoqi@6880 | 300 | |
aoqi@6880 | 301 | b_far(L); |
aoqi@6880 | 302 | delayed()->nop(); |
aoqi@6880 | 303 | |
aoqi@6880 | 304 | bind(not_jump); |
aoqi@6880 | 305 | has_delay_slot(); |
aoqi@6880 | 306 | } |
aoqi@6880 | 307 | } |
aoqi@6880 | 308 | |
// bne that works beyond the +/-128KB simm16 branch range: near targets get a
// plain bne; far targets invert the condition (beq hops over a b_far).
// Mirror image of beq_far(Register, Register, address).
// NOTE(review): local 'cur_pc' is unused — presumably debugging leftover.
aoqi@6880 | 309 | void MacroAssembler::bne_far(Register rs, Register rt, address entry) |
aoqi@6880 | 310 | { |
aoqi@6880 | 311 | u_char * cur_pc = pc(); |
aoqi@6880 | 312 | |
aoqi@6880 | 313 | /* Jin: Near/Far jump */ |
aoqi@6880 | 314 | if(is_simm16((entry - pc() - 4) / 4)) |
aoqi@6880 | 315 | { |
aoqi@6880 | 316 | Assembler::bne(rs, rt, offset(entry)); |
aoqi@6880 | 317 | } |
aoqi@6880 | 318 | else |
aoqi@6880 | 319 | { |
aoqi@6880 | 320 | Label not_jump; |
aoqi@6880 | 321 | beq(rs, rt, not_jump); |
aoqi@6880 | 322 | delayed()->nop(); |
aoqi@6880 | 323 | |
aoqi@6880 | 324 | b_far(entry); |
aoqi@6880 | 325 | delayed()->nop(); |
aoqi@6880 | 326 | |
aoqi@6880 | 327 | bind(not_jump); |
aoqi@6880 | 328 | has_delay_slot(); |
aoqi@6880 | 329 | } |
aoqi@6880 | 330 | } |
aoqi@6880 | 331 | |
// Label form of bne_far: a bound label delegates to the address form; an
// unbound (forward) label conservatively emits the inverted-beq-over-b_far
// sequence, since the final distance is unknown.
// NOTE(review): local 'cur_pc' is unused — presumably debugging leftover.
aoqi@6880 | 332 | void MacroAssembler::bne_far(Register rs, Register rt, Label& L) |
aoqi@6880 | 333 | { |
aoqi@6880 | 334 | if (L.is_bound()) { |
aoqi@6880 | 335 | bne_far(rs, rt, target(L)); |
aoqi@6880 | 336 | } else { |
aoqi@6880 | 337 | u_char * cur_pc = pc(); |
aoqi@6880 | 338 | Label not_jump; |
aoqi@6880 | 339 | beq(rs, rt, not_jump); |
aoqi@6880 | 340 | delayed()->nop(); |
aoqi@6880 | 341 | |
aoqi@6880 | 342 | b_far(L); |
aoqi@6880 | 343 | delayed()->nop(); |
aoqi@6880 | 344 | |
aoqi@6880 | 345 | bind(not_jump); |
aoqi@6880 | 346 | has_delay_slot(); |
aoqi@6880 | 347 | } |
aoqi@6880 | 348 | } |
aoqi@6880 | 349 | |
// Far unconditional branch to a label.  A bound label goes through the
// address form; an unbound one emits the 8-instruction position-independent
// stub (shown disassembled in the comment below): bgezal deposits the pc in
// RA, lui/ori hold a 32-bit delta to be filled in later by
// pd_patch_instruction(), AT preserves the caller's RA across the sequence.
// NOTE(review): 'dest' calls target() on an unbound label and is never used
// — presumably a debugging aid; verify target() is safe here.
aoqi@6880 | 350 | void MacroAssembler::b_far(Label& L) |
aoqi@6880 | 351 | { |
aoqi@6880 | 352 | if (L.is_bound()) { |
aoqi@6880 | 353 | b_far(target(L)); |
aoqi@6880 | 354 | } else { |
aoqi@6880 | 355 | volatile address dest = target(L); |
aoqi@6880 | 356 | /* |
aoqi@6880 | 357 | MacroAssembler::pd_patch_instruction branch=55651ed514, target=55651ef6d8 |
aoqi@6880 | 358 | 0x00000055651ed514: dadd at, ra, zero |
aoqi@6880 | 359 | 0x00000055651ed518: [4110001]bgezal zero, 0x00000055651ed520 |
aoqi@6880 | 360 | |
aoqi@6880 | 361 | 0x00000055651ed51c: sll zero, zero, 0 |
aoqi@6880 | 362 | 0x00000055651ed520: lui t9, 0x0 |
aoqi@6880 | 363 | 0x00000055651ed524: ori t9, t9, 0x21b8 |
aoqi@6880 | 364 | 0x00000055651ed528: daddu t9, t9, ra |
aoqi@6880 | 365 | 0x00000055651ed52c: dadd ra, at, zero |
aoqi@6880 | 366 | 0x00000055651ed530: jr t9 |
aoqi@6880 | 367 | 0x00000055651ed534: sll zero, zero, 0 |
aoqi@6880 | 368 | */ |
aoqi@6880 | 369 | move(AT, RA); |
aoqi@6880 | 370 | emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1)); |
aoqi@6880 | 371 | nop(); |
aoqi@6880 | 372 | lui(T9, 0); // to be patched |
aoqi@6880 | 373 | ori(T9, T9, 0); |
aoqi@6880 | 374 | daddu(T9, T9, RA); |
aoqi@6880 | 375 | move(RA, AT); |
aoqi@6880 | 376 | jr(T9); |
aoqi@6880 | 377 | } |
aoqi@6880 | 378 | } |
aoqi@6880 | 379 | |
// Far unconditional branch to a known address: a plain b when the target is
// within simm16 range, otherwise the position-independent stub (bgezal puts
// pc in RA, li32 loads the pc-relative delta into T9, AT preserves RA).
// NOTE(review): local 'cur_pc' is unused — presumably debugging leftover.
aoqi@6880 | 380 | void MacroAssembler::b_far(address entry) |
aoqi@6880 | 381 | { |
aoqi@6880 | 382 | u_char * cur_pc = pc(); |
aoqi@6880 | 383 | |
aoqi@6880 | 384 | /* Jin: Near/Far jump */ |
aoqi@6880 | 385 | if(is_simm16((entry - pc() - 4) / 4)) |
aoqi@6880 | 386 | { |
aoqi@6880 | 387 | b(offset(entry)); |
aoqi@6880 | 388 | } |
aoqi@6880 | 389 | else |
aoqi@6880 | 390 | { |
aoqi@6880 | 391 | /* address must be bounded */ |
aoqi@6880 | 392 | move(AT, RA); |
aoqi@6880 | 393 | emit_long(insn_ORRI(regimm_op, 0, bgezal_op, 1)); |
aoqi@6880 | 394 | nop(); |
aoqi@6880 | 395 | li32(T9, entry - pc()); |
aoqi@6880 | 396 | daddu(T9, T9, RA); |
aoqi@6880 | 397 | move(RA, AT); |
aoqi@6880 | 398 | jr(T9); |
aoqi@6880 | 399 | } |
aoqi@6880 | 400 | } |
aoqi@6880 | 401 | |
// Load a pointer-sized value from base + offset (register index) into rt.
// Clobbers AT with the effective address.
aoqi@6880 | 402 | void MacroAssembler::ld_ptr(Register rt, Register offset, Register base) { |
aoqi@6880 | 403 | addu_long(AT, base, offset); |
aoqi@6880 | 404 | ld_ptr(rt, 0, AT); |
aoqi@6880 | 405 | } |
aoqi@6880 | 406 | |
// Store pointer-sized rt to base + offset (register index).
// Clobbers AT with the effective address.
aoqi@6880 | 407 | void MacroAssembler::st_ptr(Register rt, Register offset, Register base) { |
aoqi@6880 | 408 | addu_long(AT, base, offset); |
aoqi@6880 | 409 | st_ptr(rt, 0, AT); |
aoqi@6880 | 410 | } |
aoqi@6880 | 411 | |
// Load a long from base + offset (register index) into rt.
// Clobbers AT with the effective address.
aoqi@6880 | 412 | void MacroAssembler::ld_long(Register rt, Register offset, Register base) { |
aoqi@6880 | 413 | addu_long(AT, base, offset); |
aoqi@6880 | 414 | ld_long(rt, 0, AT); |
aoqi@6880 | 415 | } |
aoqi@6880 | 416 | |
// Store long rt to base + offset (register index).
// Clobbers AT with the effective address.
aoqi@6880 | 417 | void MacroAssembler::st_long(Register rt, Register offset, Register base) { |
aoqi@6880 | 418 | addu_long(AT, base, offset); |
aoqi@6880 | 419 | st_long(rt, 0, AT); |
aoqi@6880 | 420 | } |
aoqi@6880 | 421 | |
// Convert an AddressLiteral to an Address, preserving its relocation spec.
aoqi@6880 | 422 | Address MacroAssembler::as_Address(AddressLiteral adr) { |
aoqi@6880 | 423 | return Address(adr.target(), adr.rspec()); |
aoqi@6880 | 424 | } |
aoqi@6880 | 425 | |
// Convert an ArrayAddress to an Address via the Address factory.
aoqi@6880 | 426 | Address MacroAssembler::as_Address(ArrayAddress adr) { |
aoqi@6880 | 427 | return Address::make_array(adr); |
aoqi@6880 | 428 | } |
aoqi@6880 | 429 | |
// Atomically add 'inc' to the 32-bit counter at 'counter_addr' using an
// LL/SC retry loop: tmp_reg1 holds the address, tmp_reg2 the working value
// (sc leaves 0 in it on failure, forcing a retry).  The sync() before ll is
// emitted only when !Use3A2000 — presumably a barrier needed on earlier
// Loongson cores; confirm against the CPU errata.
aoqi@6880 | 430 | // tmp_reg1 and tmp_reg2 should be saved outside of atomic_inc32 (caller saved). |
aoqi@6880 | 431 | void MacroAssembler::atomic_inc32(address counter_addr, int inc, Register tmp_reg1, Register tmp_reg2) { |
aoqi@6880 | 432 | Label again; |
aoqi@6880 | 433 | |
aoqi@6880 | 434 | li(tmp_reg1, counter_addr); |
aoqi@6880 | 435 | bind(again); |
aoqi@6880 | 436 | if(!Use3A2000) sync(); |
aoqi@6880 | 437 | ll(tmp_reg2, tmp_reg1, 0); |
aoqi@6880 | 438 | addi(tmp_reg2, tmp_reg2, inc); |
aoqi@6880 | 439 | sc(tmp_reg2, tmp_reg1, 0); |
aoqi@6880 | 440 | beq(tmp_reg2, R0, again); |
aoqi@6880 | 441 | delayed()->nop(); |
aoqi@6880 | 442 | } |
aoqi@6880 | 443 | |
// Biased-locking fast path (MIPS port of the standard HotSpot algorithm).
// Attempts to acquire/keep the bias of obj_reg's header for the current
// thread; falls through to 'cas_label' for the normal CAS-based lock, jumps
// to 'done' on success, and (if provided) to '*slow_case' when a CAS loses
// the race.  Returns the code offset of the first header load, usable as an
// implicit null-check offset.  swap_reg and AT are clobbered; if tmp_reg is
// noreg, T9 is used and preserved via push/pop around each use.
aoqi@6880 | 444 | int MacroAssembler::biased_locking_enter(Register lock_reg, |
aoqi@6880 | 445 | Register obj_reg, |
aoqi@6880 | 446 | Register swap_reg, |
aoqi@6880 | 447 | Register tmp_reg, |
aoqi@6880 | 448 | bool swap_reg_contains_mark, |
aoqi@6880 | 449 | Label& done, |
aoqi@6880 | 450 | Label* slow_case, |
aoqi@6880 | 451 | BiasedLockingCounters* counters) { |
aoqi@6880 | 452 | assert(UseBiasedLocking, "why call this otherwise?"); |
aoqi@6880 | 453 | bool need_tmp_reg = false; |
aoqi@6880 | 454 | if (tmp_reg == noreg) { |
aoqi@6880 | 455 | need_tmp_reg = true; |
aoqi@6880 | 456 | tmp_reg = T9; |
aoqi@6880 | 457 | } |
aoqi@6880 | 458 | assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, AT); |
aoqi@6880 | 459 | assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); |
aoqi@6880 | 460 | Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); |
aoqi@6880 | 461 | Address saved_mark_addr(lock_reg, 0); |
aoqi@6880 | 462 | |
aoqi@6880 | 463 | // Biased locking |
aoqi@6880 | 464 | // See whether the lock is currently biased toward our thread and |
aoqi@6880 | 465 | // whether the epoch is still valid |
aoqi@6880 | 466 | // Note that the runtime guarantees sufficient alignment of JavaThread |
aoqi@6880 | 467 | // pointers to allow age to be placed into low bits |
aoqi@6880 | 468 | // First check to see whether biasing is even enabled for this object |
aoqi@6880 | 469 | Label cas_label; |
aoqi@6880 | 470 | int null_check_offset = -1; |
aoqi@6880 | 471 | if (!swap_reg_contains_mark) { |
aoqi@6880 | 472 | null_check_offset = offset(); |
aoqi@6880 | 473 | ld_ptr(swap_reg, mark_addr); |
aoqi@6880 | 474 | } |
aoqi@6880 | 475 | |
// Check the low lock bits for the biased pattern; if absent, go straight to
// the plain CAS lock at cas_label.
aoqi@6880 | 476 | if (need_tmp_reg) { |
aoqi@6880 | 477 | push(tmp_reg); |
aoqi@6880 | 478 | } |
aoqi@6880 | 479 | move(tmp_reg, swap_reg); |
aoqi@6880 | 480 | andi(tmp_reg, tmp_reg, markOopDesc::biased_lock_mask_in_place); |
aoqi@6880 | 481 | #ifdef _LP64 |
aoqi@6880 | 482 | daddi(AT, R0, markOopDesc::biased_lock_pattern); |
aoqi@6880 | 483 | dsub(AT, AT, tmp_reg); |
aoqi@6880 | 484 | #else |
aoqi@6880 | 485 | addi(AT, R0, markOopDesc::biased_lock_pattern); |
aoqi@6880 | 486 | sub(AT, AT, tmp_reg); |
aoqi@6880 | 487 | #endif |
aoqi@6880 | 488 | if (need_tmp_reg) { |
aoqi@6880 | 489 | pop(tmp_reg); |
aoqi@6880 | 490 | } |
aoqi@6880 | 491 | |
aoqi@6880 | 492 | bne(AT, R0, cas_label); |
aoqi@6880 | 493 | delayed()->nop(); |
aoqi@6880 | 494 | |
aoqi@6880 | 495 | |
aoqi@6880 | 496 | // The bias pattern is present in the object's header. Need to check |
aoqi@6880 | 497 | // whether the bias owner and the epoch are both still current. |
aoqi@6880 | 498 | // Note that because there is no current thread register on MIPS we |
aoqi@6880 | 499 | // need to store off the mark word we read out of the object to |
aoqi@6880 | 500 | // avoid reloading it and needing to recheck invariants below. This |
aoqi@6880 | 501 | // store is unfortunate but it makes the overall code shorter and |
aoqi@6880 | 502 | // simpler. |
aoqi@6880 | 503 | st_ptr(swap_reg, saved_mark_addr); |
aoqi@6880 | 504 | if (need_tmp_reg) { |
aoqi@6880 | 505 | push(tmp_reg); |
aoqi@6880 | 506 | } |
aoqi@6880 | 507 | if (swap_reg_contains_mark) { |
aoqi@6880 | 508 | null_check_offset = offset(); |
aoqi@6880 | 509 | } |
// swap_reg = (prototype ^ mark ^ thread) with the age bits masked out: zero
// iff the bias owner is this thread and the epoch is current.
aoqi@6880 | 510 | load_prototype_header(tmp_reg, obj_reg); |
aoqi@6880 | 511 | xorr(tmp_reg, tmp_reg, swap_reg); |
aoqi@6880 | 512 | get_thread(swap_reg); |
aoqi@6880 | 513 | xorr(swap_reg, swap_reg, tmp_reg); |
aoqi@6880 | 514 | |
aoqi@6880 | 515 | move(AT, ~((int) markOopDesc::age_mask_in_place)); |
aoqi@6880 | 516 | andr(swap_reg, swap_reg, AT); |
aoqi@6880 | 517 | |
aoqi@6880 | 518 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 519 | Label L; |
aoqi@6880 | 520 | bne(swap_reg, R0, L); |
aoqi@6880 | 521 | delayed()->nop(); |
aoqi@6880 | 522 | push(tmp_reg); |
aoqi@6880 | 523 | push(A0); |
aoqi@6880 | 524 | atomic_inc32((address)BiasedLocking::biased_lock_entry_count_addr(), 1, A0, tmp_reg); |
aoqi@6880 | 525 | pop(A0); |
aoqi@6880 | 526 | pop(tmp_reg); |
aoqi@6880 | 527 | bind(L); |
aoqi@6880 | 528 | } |
aoqi@6880 | 529 | if (need_tmp_reg) { |
aoqi@6880 | 530 | pop(tmp_reg); |
aoqi@6880 | 531 | } |
aoqi@6880 | 532 | beq(swap_reg, R0, done); |
aoqi@6880 | 533 | delayed()->nop(); |
aoqi@6880 | 534 | Label try_revoke_bias; |
aoqi@6880 | 535 | Label try_rebias; |
aoqi@6880 | 536 | |
aoqi@6880 | 537 | // At this point we know that the header has the bias pattern and |
aoqi@6880 | 538 | // that we are not the bias owner in the current epoch. We need to |
aoqi@6880 | 539 | // figure out more details about the state of the header in order to |
aoqi@6880 | 540 | // know what operations can be legally performed on the object's |
aoqi@6880 | 541 | // header. |
aoqi@6880 | 542 | |
aoqi@6880 | 543 | // If the low three bits in the xor result aren't clear, that means |
aoqi@6880 | 544 | // the prototype header is no longer biased and we have to revoke |
aoqi@6880 | 545 | // the bias on this object. |
aoqi@6880 | 546 | |
aoqi@6880 | 547 | move(AT, markOopDesc::biased_lock_mask_in_place); |
aoqi@6880 | 548 | andr(AT, swap_reg, AT); |
aoqi@6880 | 549 | bne(AT, R0, try_revoke_bias); |
aoqi@6880 | 550 | delayed()->nop(); |
aoqi@6880 | 551 | // Biasing is still enabled for this data type. See whether the |
aoqi@6880 | 552 | // epoch of the current bias is still valid, meaning that the epoch |
aoqi@6880 | 553 | // bits of the mark word are equal to the epoch bits of the |
aoqi@6880 | 554 | // prototype header. (Note that the prototype header's epoch bits |
aoqi@6880 | 555 | // only change at a safepoint.) If not, attempt to rebias the object |
aoqi@6880 | 556 | // toward the current thread. Note that we must be absolutely sure |
aoqi@6880 | 557 | // that the current epoch is invalid in order to do this because |
aoqi@6880 | 558 | // otherwise the manipulations it performs on the mark word are |
aoqi@6880 | 559 | // illegal. |
aoqi@6880 | 560 | |
aoqi@6880 | 561 | move(AT, markOopDesc::epoch_mask_in_place); |
aoqi@6880 | 562 | andr(AT,swap_reg, AT); |
aoqi@6880 | 563 | bne(AT, R0, try_rebias); |
aoqi@6880 | 564 | delayed()->nop(); |
aoqi@6880 | 565 | // The epoch of the current bias is still valid but we know nothing |
aoqi@6880 | 566 | // about the owner; it might be set or it might be clear. Try to |
aoqi@6880 | 567 | // acquire the bias of the object using an atomic operation. If this |
aoqi@6880 | 568 | // fails we will go in to the runtime to revoke the object's bias. |
aoqi@6880 | 569 | // Note that we first construct the presumed unbiased header so we |
aoqi@6880 | 570 | // don't accidentally blow away another thread's valid bias. |
aoqi@6880 | 571 | |
aoqi@6880 | 572 | ld_ptr(swap_reg, saved_mark_addr); |
aoqi@6880 | 573 | |
aoqi@6880 | 574 | move(AT, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); |
aoqi@6880 | 575 | andr(swap_reg, swap_reg, AT); |
aoqi@6880 | 576 | |
aoqi@6880 | 577 | if (need_tmp_reg) { |
aoqi@6880 | 578 | push(tmp_reg); |
aoqi@6880 | 579 | } |
aoqi@6880 | 580 | get_thread(tmp_reg); |
aoqi@6880 | 581 | orr(tmp_reg, tmp_reg, swap_reg); |
aoqi@6880 | 582 | //if (os::is_MP()) { |
aoqi@6880 | 583 | // sync(); |
aoqi@6880 | 584 | //} |
aoqi@6880 | 585 | cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg); |
aoqi@6880 | 586 | if (need_tmp_reg) { |
aoqi@6880 | 587 | pop(tmp_reg); |
aoqi@6880 | 588 | } |
aoqi@6880 | 589 | // If the biasing toward our thread failed, this means that |
aoqi@6880 | 590 | // another thread succeeded in biasing it toward itself and we |
aoqi@6880 | 591 | // need to revoke that bias. The revocation will occur in the |
aoqi@6880 | 592 | // interpreter runtime in the slow case. |
aoqi@6880 | 593 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 594 | Label L; |
aoqi@6880 | 595 | bne(AT, R0, L); |
aoqi@6880 | 596 | delayed()->nop(); |
aoqi@6880 | 597 | push(tmp_reg); |
aoqi@6880 | 598 | push(A0); |
aoqi@6880 | 599 | atomic_inc32((address)BiasedLocking::anonymously_biased_lock_entry_count_addr(), 1, A0, tmp_reg); |
aoqi@6880 | 600 | pop(A0); |
aoqi@6880 | 601 | pop(tmp_reg); |
aoqi@6880 | 602 | bind(L); |
aoqi@6880 | 603 | } |
aoqi@6880 | 604 | if (slow_case != NULL) { |
aoqi@6880 | 605 | beq_far(AT, R0, *slow_case); |
aoqi@6880 | 606 | delayed()->nop(); |
aoqi@6880 | 607 | } |
aoqi@6880 | 608 | b(done); |
aoqi@6880 | 609 | delayed()->nop(); |
aoqi@6880 | 610 | |
aoqi@6880 | 611 | bind(try_rebias); |
aoqi@6880 | 612 | // At this point we know the epoch has expired, meaning that the |
aoqi@6880 | 613 | // current "bias owner", if any, is actually invalid. Under these |
aoqi@6880 | 614 | // circumstances _only_, we are allowed to use the current header's |
aoqi@6880 | 615 | // value as the comparison value when doing the cas to acquire the |
aoqi@6880 | 616 | // bias in the current epoch. In other words, we allow transfer of |
aoqi@6880 | 617 | // the bias from one thread to another directly in this situation. |
aoqi@6880 | 618 | // |
aoqi@6880 | 619 | // FIXME: due to a lack of registers we currently blow away the age |
aoqi@6880 | 620 | // bits in this situation. Should attempt to preserve them. |
aoqi@6880 | 621 | if (need_tmp_reg) { |
aoqi@6880 | 622 | push(tmp_reg); |
aoqi@6880 | 623 | } |
aoqi@6880 | 624 | load_prototype_header(tmp_reg, obj_reg); |
aoqi@6880 | 625 | get_thread(swap_reg); |
aoqi@6880 | 626 | orr(tmp_reg, tmp_reg, swap_reg); |
aoqi@6880 | 627 | ld_ptr(swap_reg, saved_mark_addr); |
aoqi@6880 | 628 | |
aoqi@6880 | 629 | //if (os::is_MP()) { |
aoqi@6880 | 630 | // sync(); |
aoqi@6880 | 631 | //} |
aoqi@6880 | 632 | cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg); |
aoqi@6880 | 633 | if (need_tmp_reg) { |
aoqi@6880 | 634 | pop(tmp_reg); |
aoqi@6880 | 635 | } |
aoqi@6880 | 636 | // If the biasing toward our thread failed, then another thread |
aoqi@6880 | 637 | // succeeded in biasing it toward itself and we need to revoke that |
aoqi@6880 | 638 | // bias. The revocation will occur in the runtime in the slow case. |
aoqi@6880 | 639 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 640 | Label L; |
aoqi@6880 | 641 | bne(AT, R0, L); |
aoqi@6880 | 642 | delayed()->nop(); |
aoqi@6880 | 643 | push(AT); |
aoqi@6880 | 644 | push(tmp_reg); |
aoqi@6880 | 645 | atomic_inc32((address)BiasedLocking::rebiased_lock_entry_count_addr(), 1, AT, tmp_reg); |
aoqi@6880 | 646 | pop(tmp_reg); |
aoqi@6880 | 647 | pop(AT); |
aoqi@6880 | 648 | bind(L); |
aoqi@6880 | 649 | } |
aoqi@6880 | 650 | if (slow_case != NULL) { |
aoqi@6880 | 651 | beq_far(AT, R0, *slow_case); |
aoqi@6880 | 652 | delayed()->nop(); |
aoqi@6880 | 653 | } |
aoqi@6880 | 654 | |
aoqi@6880 | 655 | b(done); |
aoqi@6880 | 656 | delayed()->nop(); |
aoqi@6880 | 657 | bind(try_revoke_bias); |
aoqi@6880 | 658 | // The prototype mark in the klass doesn't have the bias bit set any |
aoqi@6880 | 659 | // more, indicating that objects of this data type are not supposed |
aoqi@6880 | 660 | // to be biased any more. We are going to try to reset the mark of |
aoqi@6880 | 661 | // this object to the prototype value and fall through to the |
aoqi@6880 | 662 | // CAS-based locking scheme. Note that if our CAS fails, it means |
aoqi@6880 | 663 | // that another thread raced us for the privilege of revoking the |
aoqi@6880 | 664 | // bias of this particular object, so it's okay to continue in the |
aoqi@6880 | 665 | // normal locking code. |
aoqi@6880 | 666 | // |
aoqi@6880 | 667 | // FIXME: due to a lack of registers we currently blow away the age |
aoqi@6880 | 668 | // bits in this situation. Should attempt to preserve them. |
aoqi@6880 | 669 | ld_ptr(swap_reg, saved_mark_addr); |
aoqi@6880 | 670 | |
aoqi@6880 | 671 | if (need_tmp_reg) { |
aoqi@6880 | 672 | push(tmp_reg); |
aoqi@6880 | 673 | } |
aoqi@6880 | 674 | load_prototype_header(tmp_reg, obj_reg); |
aoqi@6880 | 675 | //if (os::is_MP()) { |
aoqi@6880 | 676 | // lock(); |
aoqi@6880 | 677 | //} |
aoqi@6880 | 678 | cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg); |
aoqi@6880 | 679 | if (need_tmp_reg) { |
aoqi@6880 | 680 | pop(tmp_reg); |
aoqi@6880 | 681 | } |
aoqi@6880 | 682 | // Fall through to the normal CAS-based lock, because no matter what |
aoqi@6880 | 683 | // the result of the above CAS, some thread must have succeeded in |
aoqi@6880 | 684 | // removing the bias bit from the object's header. |
aoqi@6880 | 685 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 686 | Label L; |
aoqi@6880 | 687 | bne(AT, R0, L); |
aoqi@6880 | 688 | delayed()->nop(); |
aoqi@6880 | 689 | push(AT); |
aoqi@6880 | 690 | push(tmp_reg); |
aoqi@6880 | 691 | atomic_inc32((address)BiasedLocking::revoked_lock_entry_count_addr(), 1, AT, tmp_reg); |
aoqi@6880 | 692 | pop(tmp_reg); |
aoqi@6880 | 693 | pop(AT); |
aoqi@6880 | 694 | bind(L); |
aoqi@6880 | 695 | } |
aoqi@6880 | 696 | |
aoqi@6880 | 697 | bind(cas_label); |
aoqi@6880 | 698 | return null_check_offset; |
aoqi@6880 | 699 | } |
aoqi@6880 | 700 | |
// Biased-locking unlock fast path: if obj_reg's mark word still carries the
// biased pattern, unlocking is a no-op and control transfers to 'done'.
// Clobbers temp_reg and AT.
aoqi@6880 | 701 | void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) { |
aoqi@6880 | 702 | assert(UseBiasedLocking, "why call this otherwise?"); |
aoqi@6880 | 703 | |
aoqi@6880 | 704 | // Check for biased locking unlock case, which is a no-op |
aoqi@6880 | 705 | // Note: we do not have to check the thread ID for two reasons. |
aoqi@6880 | 706 | // First, the interpreter checks for IllegalMonitorStateException at |
aoqi@6880 | 707 | // a higher level. Second, if the bias was revoked while we held the |
aoqi@6880 | 708 | // lock, the object could not be rebiased toward another thread, so |
aoqi@6880 | 709 | // the bias bit would be clear. |
aoqi@6880 | 710 | #ifdef _LP64 |
aoqi@6880 | 711 | ld(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); |
aoqi@6880 | 712 | andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); |
aoqi@6880 | 713 | daddi(AT, R0, markOopDesc::biased_lock_pattern); |
aoqi@6880 | 714 | #else |
aoqi@6880 | 715 | lw(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); |
aoqi@6880 | 716 | andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); |
aoqi@6880 | 717 | addi(AT, R0, markOopDesc::biased_lock_pattern); |
aoqi@6880 | 718 | #endif |
aoqi@6880 | 719 | |
aoqi@6880 | 720 | beq(AT, temp_reg, done); |
aoqi@6880 | 721 | delayed()->nop(); |
aoqi@6880 | 722 | } |
aoqi@6880 | 723 | |
aoqi@6880 | 724 | // NOTE: we dont increment the SP after call like the x86 version, maybe this is a problem, FIXME. |
aoqi@6880 | 725 | // the stack pointer adjustment is needed. see InterpreterMacroAssembler::super_call_VM_leaf |
aoqi@6880 | 726 | // this method will handle the stack problem, you need not to preserve the stack space for the argument now |
aoqi@6880 | 727 | void MacroAssembler::call_VM_leaf_base(address entry_point, |
aoqi@6880 | 728 | int number_of_arguments) { |
aoqi@6880 | 729 | //call(RuntimeAddress(entry_point)); |
aoqi@6880 | 730 | //increment(rsp, number_of_arguments * wordSize); |
aoqi@6880 | 731 | Label L, E; |
aoqi@6880 | 732 | |
aoqi@6880 | 733 | assert(number_of_arguments <= 4, "just check"); |
aoqi@6880 | 734 | |
aoqi@6880 | 735 | andi(AT, SP, 0xf); |
aoqi@6880 | 736 | beq(AT, R0, L); |
aoqi@6880 | 737 | delayed()->nop(); |
aoqi@6880 | 738 | daddi(SP, SP, -8); |
aoqi@6880 | 739 | call(entry_point, relocInfo::runtime_call_type); |
aoqi@6880 | 740 | delayed()->nop(); |
aoqi@6880 | 741 | daddi(SP, SP, 8); |
aoqi@6880 | 742 | b(E); |
aoqi@6880 | 743 | delayed()->nop(); |
aoqi@6880 | 744 | |
aoqi@6880 | 745 | bind(L); |
aoqi@6880 | 746 | call(entry_point, relocInfo::runtime_call_type); |
aoqi@6880 | 747 | delayed()->nop(); |
aoqi@6880 | 748 | bind(E); |
aoqi@6880 | 749 | } |
aoqi@6880 | 750 | |
aoqi@6880 | 751 | |
aoqi@6880 | 752 | void MacroAssembler::jmp(address entry) { |
aoqi@6880 | 753 | patchable_set48(T9, (long)entry); |
aoqi@6880 | 754 | jr(T9); |
aoqi@6880 | 755 | } |
aoqi@6880 | 756 | |
aoqi@6880 | 757 | void MacroAssembler::jmp(address entry, relocInfo::relocType rtype) { |
aoqi@6880 | 758 | switch (rtype) { |
aoqi@6880 | 759 | case relocInfo::runtime_call_type: |
aoqi@6880 | 760 | case relocInfo::none: |
aoqi@6880 | 761 | jmp(entry); |
aoqi@6880 | 762 | break; |
aoqi@6880 | 763 | default: |
aoqi@6880 | 764 | { |
aoqi@6880 | 765 | InstructionMark im(this); |
aoqi@6880 | 766 | relocate(rtype); |
aoqi@6880 | 767 | patchable_set48(T9, (long)entry); |
aoqi@6880 | 768 | jr(T9); |
aoqi@6880 | 769 | } |
aoqi@6880 | 770 | break; |
aoqi@6880 | 771 | } |
aoqi@6880 | 772 | } |
aoqi@6880 | 773 | |
aoqi@6880 | 774 | void MacroAssembler::call(address entry) { |
aoqi@6880 | 775 | // c/c++ code assume T9 is entry point, so we just always move entry to t9 |
aoqi@6880 | 776 | // maybe there is some more graceful method to handle this. FIXME |
aoqi@6880 | 777 | // For more info, see class NativeCall. |
aoqi@6880 | 778 | #ifndef _LP64 |
aoqi@6880 | 779 | move(T9, (int)entry); |
aoqi@6880 | 780 | #else |
aoqi@6880 | 781 | patchable_set48(T9, (long)entry); |
aoqi@6880 | 782 | #endif |
aoqi@6880 | 783 | jalr(T9); |
aoqi@6880 | 784 | } |
aoqi@6880 | 785 | |
aoqi@6880 | 786 | void MacroAssembler::call(address entry, relocInfo::relocType rtype) { |
aoqi@6880 | 787 | switch (rtype) { |
aoqi@6880 | 788 | case relocInfo::runtime_call_type: |
aoqi@6880 | 789 | case relocInfo::none: |
aoqi@6880 | 790 | call(entry); |
aoqi@6880 | 791 | break; |
aoqi@6880 | 792 | default: |
aoqi@6880 | 793 | { |
aoqi@6880 | 794 | InstructionMark im(this); |
aoqi@6880 | 795 | relocate(rtype); |
aoqi@6880 | 796 | call(entry); |
aoqi@6880 | 797 | } |
aoqi@6880 | 798 | break; |
aoqi@6880 | 799 | } |
aoqi@6880 | 800 | } |
aoqi@6880 | 801 | |
aoqi@6880 | 802 | void MacroAssembler::call(address entry, RelocationHolder& rh) |
aoqi@6880 | 803 | { |
aoqi@6880 | 804 | switch (rh.type()) { |
aoqi@6880 | 805 | case relocInfo::runtime_call_type: |
aoqi@6880 | 806 | case relocInfo::none: |
aoqi@6880 | 807 | call(entry); |
aoqi@6880 | 808 | break; |
aoqi@6880 | 809 | default: |
aoqi@6880 | 810 | { |
aoqi@6880 | 811 | InstructionMark im(this); |
aoqi@6880 | 812 | relocate(rh); |
aoqi@6880 | 813 | call(entry); |
aoqi@6880 | 814 | } |
aoqi@6880 | 815 | break; |
aoqi@6880 | 816 | } |
aoqi@6880 | 817 | } |
aoqi@6880 | 818 | |
aoqi@6880 | 819 | void MacroAssembler::ic_call(address entry) { |
aoqi@6880 | 820 | RelocationHolder rh = virtual_call_Relocation::spec(pc()); |
aoqi@6880 | 821 | patchable_set48(IC_Klass, (long)Universe::non_oop_word()); |
aoqi@6880 | 822 | assert(entry != NULL, "call most probably wrong"); |
aoqi@6880 | 823 | InstructionMark im(this); |
aoqi@6880 | 824 | relocate(rh); |
aoqi@6880 | 825 | patchable_call(entry); |
aoqi@6880 | 826 | } |
aoqi@6880 | 827 | |
aoqi@6880 | 828 | void MacroAssembler::c2bool(Register r) { |
aoqi@6880 | 829 | Label L; |
aoqi@6880 | 830 | Assembler::beq(r, R0, L); |
aoqi@6880 | 831 | delayed()->nop(); |
aoqi@6880 | 832 | move(r, 1); |
aoqi@6880 | 833 | bind(L); |
aoqi@6880 | 834 | } |
aoqi@6880 | 835 | |
aoqi@6880 | 836 | #ifndef PRODUCT |
aoqi@6880 | 837 | extern "C" void findpc(intptr_t x); |
aoqi@6880 | 838 | #endif |
aoqi@6880 | 839 | |
aoqi@6880 | 840 | void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) { |
aoqi@6880 | 841 | // In order to get locks to work, we need to fake a in_VM state |
aoqi@6880 | 842 | JavaThread* thread = JavaThread::current(); |
aoqi@6880 | 843 | JavaThreadState saved_state = thread->thread_state(); |
aoqi@6880 | 844 | thread->set_thread_state(_thread_in_vm); |
aoqi@6880 | 845 | if (ShowMessageBoxOnError) { |
aoqi@6880 | 846 | JavaThread* thread = JavaThread::current(); |
aoqi@6880 | 847 | JavaThreadState saved_state = thread->thread_state(); |
aoqi@6880 | 848 | thread->set_thread_state(_thread_in_vm); |
aoqi@6880 | 849 | if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { |
aoqi@6880 | 850 | ttyLocker ttyl; |
aoqi@6880 | 851 | BytecodeCounter::print(); |
aoqi@6880 | 852 | } |
aoqi@6880 | 853 | // To see where a verify_oop failed, get $ebx+40/X for this frame. |
aoqi@6880 | 854 | // This is the value of eip which points to where verify_oop will return. |
aoqi@6880 | 855 | if (os::message_box(msg, "Execution stopped, print registers?")) { |
aoqi@6880 | 856 | ttyLocker ttyl; |
aoqi@6880 | 857 | tty->print_cr("eip = 0x%08x", eip); |
aoqi@6880 | 858 | #ifndef PRODUCT |
aoqi@6880 | 859 | tty->cr(); |
aoqi@6880 | 860 | findpc(eip); |
aoqi@6880 | 861 | tty->cr(); |
aoqi@6880 | 862 | #endif |
aoqi@6880 | 863 | tty->print_cr("rax, = 0x%08x", rax); |
aoqi@6880 | 864 | tty->print_cr("rbx, = 0x%08x", rbx); |
aoqi@6880 | 865 | tty->print_cr("rcx = 0x%08x", rcx); |
aoqi@6880 | 866 | tty->print_cr("rdx = 0x%08x", rdx); |
aoqi@6880 | 867 | tty->print_cr("rdi = 0x%08x", rdi); |
aoqi@6880 | 868 | tty->print_cr("rsi = 0x%08x", rsi); |
aoqi@6880 | 869 | tty->print_cr("rbp, = 0x%08x", rbp); |
aoqi@6880 | 870 | tty->print_cr("rsp = 0x%08x", rsp); |
aoqi@6880 | 871 | BREAKPOINT; |
aoqi@6880 | 872 | } |
aoqi@6880 | 873 | } else { |
aoqi@6880 | 874 | ttyLocker ttyl; |
aoqi@6880 | 875 | ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); |
aoqi@6880 | 876 | assert(false, "DEBUG MESSAGE"); |
aoqi@6880 | 877 | } |
aoqi@6880 | 878 | ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); |
aoqi@6880 | 879 | } |
aoqi@6880 | 880 | |
aoqi@6880 | 881 | void MacroAssembler::debug(char* msg/*, RegistersForDebugging* regs*/) { |
aoqi@6880 | 882 | if ( ShowMessageBoxOnError ) { |
aoqi@6880 | 883 | JavaThreadState saved_state = JavaThread::current()->thread_state(); |
aoqi@6880 | 884 | JavaThread::current()->set_thread_state(_thread_in_vm); |
aoqi@6880 | 885 | { |
aoqi@6880 | 886 | // In order to get locks work, we need to fake a in_VM state |
aoqi@6880 | 887 | ttyLocker ttyl; |
aoqi@6880 | 888 | ::tty->print_cr("EXECUTION STOPPED: %s\n", msg); |
aoqi@6880 | 889 | if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { |
aoqi@6880 | 890 | BytecodeCounter::print(); |
aoqi@6880 | 891 | } |
aoqi@6880 | 892 | |
aoqi@6880 | 893 | // if (os::message_box(msg, "Execution stopped, print registers?")) |
aoqi@6880 | 894 | // regs->print(::tty); |
aoqi@6880 | 895 | } |
aoqi@6880 | 896 | ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state); |
aoqi@6880 | 897 | } |
aoqi@6880 | 898 | else |
aoqi@6880 | 899 | ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); |
aoqi@6880 | 900 | } |
aoqi@6880 | 901 | |
aoqi@6880 | 902 | |
aoqi@6880 | 903 | void MacroAssembler::stop(const char* msg) { |
aoqi@6880 | 904 | li(A0, (long)msg); |
aoqi@6880 | 905 | #ifndef _LP64 |
aoqi@6880 | 906 | //reserver space for argument. added by yjl 7/10/2005 |
aoqi@6880 | 907 | addiu(SP, SP, - 1 * wordSize); |
aoqi@6880 | 908 | #endif |
aoqi@6880 | 909 | call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); |
aoqi@6880 | 910 | delayed()->nop(); |
aoqi@6880 | 911 | #ifndef _LP64 |
aoqi@6880 | 912 | //restore space for argument |
aoqi@6880 | 913 | addiu(SP, SP, 1 * wordSize); |
aoqi@6880 | 914 | #endif |
aoqi@6880 | 915 | brk(17); |
aoqi@6880 | 916 | } |
aoqi@6880 | 917 | |
aoqi@6880 | 918 | void MacroAssembler::warn(const char* msg) { |
aoqi@6880 | 919 | #ifdef _LP64 |
aoqi@6880 | 920 | pushad(); |
aoqi@6880 | 921 | li(A0, (long)msg); |
aoqi@6880 | 922 | push(S2); |
aoqi@6880 | 923 | move(AT, -(StackAlignmentInBytes)); |
aoqi@6880 | 924 | move(S2, SP); // use S2 as a sender SP holder |
aoqi@6880 | 925 | andr(SP, SP, AT); // align stack as required by ABI |
aoqi@6880 | 926 | call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); |
aoqi@6880 | 927 | delayed()->nop(); |
aoqi@6880 | 928 | move(SP, S2); // use S2 as a sender SP holder |
aoqi@6880 | 929 | pop(S2); |
aoqi@6880 | 930 | popad(); |
aoqi@6880 | 931 | #else |
aoqi@6880 | 932 | pushad(); |
aoqi@6880 | 933 | addi(SP, SP, -4); |
aoqi@6880 | 934 | sw(A0, SP, -1 * wordSize); |
aoqi@6880 | 935 | li(A0, (long)msg); |
aoqi@6880 | 936 | addi(SP, SP, -1 * wordSize); |
aoqi@6880 | 937 | call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); |
aoqi@6880 | 938 | delayed()->nop(); |
aoqi@6880 | 939 | addi(SP, SP, 1 * wordSize); |
aoqi@6880 | 940 | lw(A0, SP, -1 * wordSize); |
aoqi@6880 | 941 | addi(SP, SP, 4); |
aoqi@6880 | 942 | popad(); |
aoqi@6880 | 943 | #endif |
aoqi@6880 | 944 | } |
aoqi@6880 | 945 | |
aoqi@6880 | 946 | void MacroAssembler::print_reg(Register reg) { |
aoqi@6880 | 947 | /* |
aoqi@6880 | 948 | char *s = getenv("PRINT_REG"); |
aoqi@6880 | 949 | if (s == NULL) |
aoqi@6880 | 950 | return; |
aoqi@6880 | 951 | if (strcmp(s, "1") != 0) |
aoqi@6880 | 952 | return; |
aoqi@6880 | 953 | */ |
aoqi@6880 | 954 | void * cur_pc = pc(); |
aoqi@6880 | 955 | pushad(); |
aoqi@6880 | 956 | NOT_LP64(push(FP);) |
aoqi@6880 | 957 | |
aoqi@6880 | 958 | li(A0, (long)reg->name()); |
aoqi@6880 | 959 | if (reg == SP) |
aoqi@6880 | 960 | addiu(A1, SP, wordSize * 23); //23 registers saved in pushad() |
aoqi@6880 | 961 | else if (reg == A0) |
aoqi@6880 | 962 | ld(A1, SP, wordSize * 19); //A0 has been modified by li(A0, (long)reg->name()). Ugly Code! |
aoqi@6880 | 963 | else |
aoqi@6880 | 964 | move(A1, reg); |
aoqi@6880 | 965 | li(A2, (long)cur_pc); |
aoqi@6880 | 966 | push(S2); |
aoqi@6880 | 967 | move(AT, -(StackAlignmentInBytes)); |
aoqi@6880 | 968 | move(S2, SP); // use S2 as a sender SP holder |
aoqi@6880 | 969 | andr(SP, SP, AT); // align stack as required by ABI |
aoqi@6880 | 970 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_reg_with_pc),relocInfo::runtime_call_type); |
aoqi@6880 | 971 | delayed()->nop(); |
aoqi@6880 | 972 | move(SP, S2); // use S2 as a sender SP holder |
aoqi@6880 | 973 | pop(S2); |
aoqi@6880 | 974 | NOT_LP64(pop(FP);) |
aoqi@6880 | 975 | popad(); |
aoqi@6880 | 976 | |
aoqi@6880 | 977 | /* |
aoqi@6880 | 978 | pushad(); |
aoqi@6880 | 979 | #ifdef _LP64 |
aoqi@6880 | 980 | if (reg == SP) |
aoqi@6880 | 981 | addiu(A0, SP, wordSize * 23); //23 registers saved in pushad() |
aoqi@6880 | 982 | else |
aoqi@6880 | 983 | move(A0, reg); |
aoqi@6880 | 984 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_long),relocInfo::runtime_call_type); |
aoqi@6880 | 985 | delayed()->nop(); |
aoqi@6880 | 986 | #else |
aoqi@6880 | 987 | push(FP); |
aoqi@6880 | 988 | move(A0, reg); |
aoqi@6880 | 989 | dsrl32(A1, reg, 0); |
aoqi@6880 | 990 | //call(CAST_FROM_FN_PTR(address, SharedRuntime::print_int),relocInfo::runtime_call_type); |
aoqi@6880 | 991 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_long),relocInfo::runtime_call_type); |
aoqi@6880 | 992 | delayed()->nop(); |
aoqi@6880 | 993 | pop(FP); |
aoqi@6880 | 994 | #endif |
aoqi@6880 | 995 | popad(); |
aoqi@6880 | 996 | pushad(); |
aoqi@6880 | 997 | NOT_LP64(push(FP);) |
aoqi@6880 | 998 | char b[50]; |
aoqi@6880 | 999 | sprintf((char *)b, " pc: %p\n",cur_pc); |
aoqi@6880 | 1000 | li(A0, (long)(char *)b); |
aoqi@6880 | 1001 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type); |
aoqi@6880 | 1002 | delayed()->nop(); |
aoqi@6880 | 1003 | NOT_LP64(pop(FP);) |
aoqi@6880 | 1004 | popad(); |
aoqi@6880 | 1005 | */ |
aoqi@6880 | 1006 | } |
aoqi@6880 | 1007 | |
aoqi@6880 | 1008 | void MacroAssembler::print_reg(FloatRegister reg) { |
aoqi@6880 | 1009 | void * cur_pc = pc(); |
aoqi@6880 | 1010 | pushad(); |
aoqi@6880 | 1011 | NOT_LP64(push(FP);) |
aoqi@6880 | 1012 | li(A0, (long)reg->name()); |
aoqi@6880 | 1013 | push(S2); |
aoqi@6880 | 1014 | move(AT, -(StackAlignmentInBytes)); |
aoqi@6880 | 1015 | move(S2, SP); // use S2 as a sender SP holder |
aoqi@6880 | 1016 | andr(SP, SP, AT); // align stack as required by ABI |
aoqi@6880 | 1017 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type); |
aoqi@6880 | 1018 | delayed()->nop(); |
aoqi@6880 | 1019 | move(SP, S2); // use S2 as a sender SP holder |
aoqi@6880 | 1020 | pop(S2); |
aoqi@6880 | 1021 | NOT_LP64(pop(FP);) |
aoqi@6880 | 1022 | popad(); |
aoqi@6880 | 1023 | |
aoqi@6880 | 1024 | pushad(); |
aoqi@6880 | 1025 | NOT_LP64(push(FP);) |
aoqi@6880 | 1026 | #if 1 |
aoqi@6880 | 1027 | move(FP, SP); |
aoqi@6880 | 1028 | move(AT, -(StackAlignmentInBytes)); |
aoqi@6880 | 1029 | andr(SP , SP , AT); |
aoqi@6880 | 1030 | mov_d(F12, reg); |
aoqi@6880 | 1031 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_double),relocInfo::runtime_call_type); |
aoqi@6880 | 1032 | delayed()->nop(); |
aoqi@6880 | 1033 | move(SP, FP); |
aoqi@6880 | 1034 | #else |
aoqi@6880 | 1035 | mov_s(F12, reg); |
aoqi@6880 | 1036 | //call(CAST_FROM_FN_PTR(address, SharedRuntime::print_float),relocInfo::runtime_call_type); |
aoqi@6880 | 1037 | //delayed()->nop(); |
aoqi@6880 | 1038 | #endif |
aoqi@6880 | 1039 | NOT_LP64(pop(FP);) |
aoqi@6880 | 1040 | popad(); |
aoqi@6880 | 1041 | |
aoqi@6880 | 1042 | #if 0 |
aoqi@6880 | 1043 | pushad(); |
aoqi@6880 | 1044 | NOT_LP64(push(FP);) |
aoqi@6880 | 1045 | char* b = new char[50]; |
aoqi@6880 | 1046 | sprintf(b, " pc: %p\n", cur_pc); |
aoqi@6880 | 1047 | li(A0, (long)b); |
aoqi@6880 | 1048 | call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type); |
aoqi@6880 | 1049 | delayed()->nop(); |
aoqi@6880 | 1050 | NOT_LP64(pop(FP);) |
aoqi@6880 | 1051 | popad(); |
aoqi@6880 | 1052 | #endif |
aoqi@6880 | 1053 | } |
aoqi@6880 | 1054 | |
aoqi@6880 | 1055 | void MacroAssembler::increment(Register reg, int imm) { |
aoqi@6880 | 1056 | if (!imm) return; |
aoqi@6880 | 1057 | if (is_simm16(imm)) { |
aoqi@6880 | 1058 | #ifdef _LP64 |
aoqi@6880 | 1059 | daddiu(reg, reg, imm); |
aoqi@6880 | 1060 | #else |
aoqi@6880 | 1061 | addiu(reg, reg, imm); |
aoqi@6880 | 1062 | #endif |
aoqi@6880 | 1063 | } else { |
aoqi@6880 | 1064 | move(AT, imm); |
aoqi@6880 | 1065 | #ifdef _LP64 |
aoqi@6880 | 1066 | daddu(reg, reg, AT); |
aoqi@6880 | 1067 | #else |
aoqi@6880 | 1068 | addu(reg, reg, AT); |
aoqi@6880 | 1069 | #endif |
aoqi@6880 | 1070 | } |
aoqi@6880 | 1071 | } |
aoqi@6880 | 1072 | |
aoqi@6880 | 1073 | void MacroAssembler::decrement(Register reg, int imm) { |
aoqi@6880 | 1074 | increment(reg, -imm); |
aoqi@6880 | 1075 | } |
aoqi@6880 | 1076 | |
aoqi@6880 | 1077 | |
aoqi@6880 | 1078 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1079 | address entry_point, |
aoqi@6880 | 1080 | bool check_exceptions) { |
aoqi@6880 | 1081 | call_VM_helper(oop_result, entry_point, 0, check_exceptions); |
aoqi@6880 | 1082 | } |
aoqi@6880 | 1083 | |
aoqi@6880 | 1084 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1085 | address entry_point, |
aoqi@6880 | 1086 | Register arg_1, |
aoqi@6880 | 1087 | bool check_exceptions) { |
aoqi@6880 | 1088 | if (arg_1!=A1) move(A1, arg_1); |
aoqi@6880 | 1089 | call_VM_helper(oop_result, entry_point, 1, check_exceptions); |
aoqi@6880 | 1090 | } |
aoqi@6880 | 1091 | |
aoqi@6880 | 1092 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1093 | address entry_point, |
aoqi@6880 | 1094 | Register arg_1, |
aoqi@6880 | 1095 | Register arg_2, |
aoqi@6880 | 1096 | bool check_exceptions) { |
aoqi@6880 | 1097 | if (arg_1!=A1) move(A1, arg_1); |
aoqi@6880 | 1098 | if (arg_2!=A2) move(A2, arg_2); |
aoqi@6880 | 1099 | assert(arg_2 != A1, "smashed argument"); |
aoqi@6880 | 1100 | call_VM_helper(oop_result, entry_point, 2, check_exceptions); |
aoqi@6880 | 1101 | } |
aoqi@6880 | 1102 | |
aoqi@6880 | 1103 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1104 | address entry_point, |
aoqi@6880 | 1105 | Register arg_1, |
aoqi@6880 | 1106 | Register arg_2, |
aoqi@6880 | 1107 | Register arg_3, |
aoqi@6880 | 1108 | bool check_exceptions) { |
aoqi@6880 | 1109 | if (arg_1!=A1) move(A1, arg_1); |
aoqi@6880 | 1110 | if (arg_2!=A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument"); |
aoqi@6880 | 1111 | if (arg_3!=A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument"); |
aoqi@6880 | 1112 | call_VM_helper(oop_result, entry_point, 3, check_exceptions); |
aoqi@6880 | 1113 | } |
aoqi@6880 | 1114 | |
aoqi@6880 | 1115 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1116 | Register last_java_sp, |
aoqi@6880 | 1117 | address entry_point, |
aoqi@6880 | 1118 | int number_of_arguments, |
aoqi@6880 | 1119 | bool check_exceptions) { |
aoqi@6880 | 1120 | call_VM_base(oop_result, NOREG, last_java_sp, entry_point, number_of_arguments, check_exceptions); |
aoqi@6880 | 1121 | } |
aoqi@6880 | 1122 | |
aoqi@6880 | 1123 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1124 | Register last_java_sp, |
aoqi@6880 | 1125 | address entry_point, |
aoqi@6880 | 1126 | Register arg_1, |
aoqi@6880 | 1127 | bool check_exceptions) { |
aoqi@6880 | 1128 | if (arg_1 != A1) move(A1, arg_1); |
aoqi@6880 | 1129 | call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); |
aoqi@6880 | 1130 | } |
aoqi@6880 | 1131 | |
aoqi@6880 | 1132 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1133 | Register last_java_sp, |
aoqi@6880 | 1134 | address entry_point, |
aoqi@6880 | 1135 | Register arg_1, |
aoqi@6880 | 1136 | Register arg_2, |
aoqi@6880 | 1137 | bool check_exceptions) { |
aoqi@6880 | 1138 | if (arg_1 != A1) move(A1, arg_1); |
aoqi@6880 | 1139 | if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument"); |
aoqi@6880 | 1140 | call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); |
aoqi@6880 | 1141 | } |
aoqi@6880 | 1142 | |
aoqi@6880 | 1143 | void MacroAssembler::call_VM(Register oop_result, |
aoqi@6880 | 1144 | Register last_java_sp, |
aoqi@6880 | 1145 | address entry_point, |
aoqi@6880 | 1146 | Register arg_1, |
aoqi@6880 | 1147 | Register arg_2, |
aoqi@6880 | 1148 | Register arg_3, |
aoqi@6880 | 1149 | bool check_exceptions) { |
aoqi@6880 | 1150 | if (arg_1 != A1) move(A1, arg_1); |
aoqi@6880 | 1151 | if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument"); |
aoqi@6880 | 1152 | if (arg_3 != A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument"); |
aoqi@6880 | 1153 | call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); |
aoqi@6880 | 1154 | } |
aoqi@6880 | 1155 | |
aoqi@6880 | 1156 | void MacroAssembler::call_VM_base(Register oop_result, |
aoqi@6880 | 1157 | Register java_thread, |
aoqi@6880 | 1158 | Register last_java_sp, |
aoqi@6880 | 1159 | address entry_point, |
aoqi@6880 | 1160 | int number_of_arguments, |
aoqi@6880 | 1161 | bool check_exceptions) { |
aoqi@6880 | 1162 | |
aoqi@6880 | 1163 | address before_call_pc; |
aoqi@6880 | 1164 | // determine java_thread register |
aoqi@6880 | 1165 | if (!java_thread->is_valid()) { |
aoqi@6880 | 1166 | #ifndef OPT_THREAD |
aoqi@6880 | 1167 | java_thread = T2; |
aoqi@6880 | 1168 | get_thread(java_thread); |
aoqi@6880 | 1169 | #else |
aoqi@6880 | 1170 | java_thread = TREG; |
aoqi@6880 | 1171 | #endif |
aoqi@6880 | 1172 | } |
aoqi@6880 | 1173 | // determine last_java_sp register |
aoqi@6880 | 1174 | if (!last_java_sp->is_valid()) { |
aoqi@6880 | 1175 | last_java_sp = SP; |
aoqi@6880 | 1176 | } |
aoqi@6880 | 1177 | // debugging support |
aoqi@6880 | 1178 | assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); |
aoqi@6880 | 1179 | assert(number_of_arguments <= 4 , "cannot have negative number of arguments"); |
aoqi@6880 | 1180 | assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); |
aoqi@6880 | 1181 | assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); |
aoqi@6880 | 1182 | |
aoqi@6880 | 1183 | assert(last_java_sp != FP, "this code doesn't work for last_java_sp == fp, which currently can't portably work anyway since C2 doesn't save ebp"); |
aoqi@6880 | 1184 | |
aoqi@6880 | 1185 | // set last Java frame before call |
aoqi@6880 | 1186 | before_call_pc = (address)pc(); |
aoqi@6880 | 1187 | set_last_Java_frame(java_thread, last_java_sp, FP, before_call_pc); |
aoqi@6880 | 1188 | |
aoqi@6880 | 1189 | // do the call |
aoqi@6880 | 1190 | move(A0, java_thread); |
aoqi@6880 | 1191 | call(entry_point, relocInfo::runtime_call_type); |
aoqi@6880 | 1192 | delayed()->nop(); |
aoqi@6880 | 1193 | |
aoqi@6880 | 1194 | // restore the thread (cannot use the pushed argument since arguments |
aoqi@6880 | 1195 | // may be overwritten by C code generated by an optimizing compiler); |
aoqi@6880 | 1196 | // however can use the register value directly if it is callee saved. |
aoqi@6880 | 1197 | #ifndef OPT_THREAD |
wangxue@7995 | 1198 | get_thread(java_thread); |
wangxue@7995 | 1199 | #else |
aoqi@6880 | 1200 | #ifdef ASSERT |
aoqi@7997 | 1201 | { |
wangxue@7995 | 1202 | Label L; |
wangxue@7995 | 1203 | get_thread(AT); |
wangxue@7995 | 1204 | beq(java_thread, AT, L); |
wangxue@7995 | 1205 | delayed()->nop(); |
wangxue@7995 | 1206 | stop("MacroAssembler::call_VM_base: edi not callee saved?"); |
wangxue@7995 | 1207 | bind(L); |
wangxue@7995 | 1208 | } |
aoqi@6880 | 1209 | #endif |
aoqi@6880 | 1210 | #endif |
aoqi@6880 | 1211 | |
aoqi@6880 | 1212 | // discard thread and arguments |
aoqi@6880 | 1213 | ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset())); |
aoqi@6880 | 1214 | // reset last Java frame |
aoqi@6880 | 1215 | reset_last_Java_frame(java_thread, false, true); |
aoqi@6880 | 1216 | |
aoqi@6880 | 1217 | check_and_handle_popframe(java_thread); |
aoqi@6880 | 1218 | check_and_handle_earlyret(java_thread); |
aoqi@6880 | 1219 | if (check_exceptions) { |
aoqi@6880 | 1220 | // check for pending exceptions (java_thread is set upon return) |
aoqi@6880 | 1221 | Label L; |
aoqi@6880 | 1222 | #ifdef _LP64 |
aoqi@6880 | 1223 | ld(AT, java_thread, in_bytes(Thread::pending_exception_offset())); |
aoqi@6880 | 1224 | #else |
aoqi@6880 | 1225 | lw(AT, java_thread, in_bytes(Thread::pending_exception_offset())); |
aoqi@6880 | 1226 | #endif |
aoqi@6880 | 1227 | beq(AT, R0, L); |
aoqi@6880 | 1228 | delayed()->nop(); |
aoqi@6880 | 1229 | li(AT, before_call_pc); |
aoqi@6880 | 1230 | push(AT); |
aoqi@6880 | 1231 | jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); |
aoqi@6880 | 1232 | delayed()->nop(); |
aoqi@6880 | 1233 | bind(L); |
aoqi@6880 | 1234 | } |
aoqi@6880 | 1235 | |
aoqi@6880 | 1236 | // get oop result if there is one and reset the value in the thread |
aoqi@6880 | 1237 | if (oop_result->is_valid()) { |
aoqi@6880 | 1238 | #ifdef _LP64 |
aoqi@6880 | 1239 | ld(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset())); |
aoqi@6880 | 1240 | sd(R0, java_thread, in_bytes(JavaThread::vm_result_offset())); |
aoqi@6880 | 1241 | #else |
aoqi@6880 | 1242 | lw(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset())); |
aoqi@6880 | 1243 | sw(R0, java_thread, in_bytes(JavaThread::vm_result_offset())); |
aoqi@6880 | 1244 | #endif |
aoqi@6880 | 1245 | verify_oop(oop_result); |
aoqi@6880 | 1246 | } |
aoqi@6880 | 1247 | } |
aoqi@6880 | 1248 | |
aoqi@6880 | 1249 | void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { |
aoqi@6880 | 1250 | |
aoqi@6880 | 1251 | move(V0, SP); |
aoqi@6880 | 1252 | //we also reserve space for java_thread here |
aoqi@6880 | 1253 | #ifndef _LP64 |
aoqi@6880 | 1254 | daddi(SP, SP, (1 + number_of_arguments) * (- wordSize)); |
aoqi@6880 | 1255 | #endif |
aoqi@6880 | 1256 | move(AT, -(StackAlignmentInBytes)); |
aoqi@6880 | 1257 | andr(SP, SP, AT); |
aoqi@6880 | 1258 | call_VM_base(oop_result, NOREG, V0, entry_point, number_of_arguments, check_exceptions); |
aoqi@6880 | 1259 | |
aoqi@6880 | 1260 | } |
aoqi@6880 | 1261 | |
aoqi@6880 | 1262 | void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { |
aoqi@6880 | 1263 | call_VM_leaf_base(entry_point, number_of_arguments); |
aoqi@6880 | 1264 | } |
aoqi@6880 | 1265 | |
aoqi@6880 | 1266 | void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { |
aoqi@6880 | 1267 | if (arg_0 != A0) move(A0, arg_0); |
aoqi@6880 | 1268 | call_VM_leaf(entry_point, 1); |
aoqi@6880 | 1269 | } |
aoqi@6880 | 1270 | |
aoqi@6880 | 1271 | void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { |
aoqi@6880 | 1272 | if (arg_0 != A0) move(A0, arg_0); |
aoqi@6880 | 1273 | if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument"); |
aoqi@6880 | 1274 | call_VM_leaf(entry_point, 2); |
aoqi@6880 | 1275 | } |
aoqi@6880 | 1276 | |
aoqi@6880 | 1277 | void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { |
aoqi@6880 | 1278 | if (arg_0 != A0) move(A0, arg_0); |
aoqi@6880 | 1279 | if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument"); |
aoqi@6880 | 1280 | if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A0 && arg_2 != A1, "smashed argument"); |
aoqi@6880 | 1281 | call_VM_leaf(entry_point, 3); |
aoqi@6880 | 1282 | } |
aoqi@6880 | 1283 | void MacroAssembler::super_call_VM_leaf(address entry_point) { |
aoqi@6880 | 1284 | MacroAssembler::call_VM_leaf_base(entry_point, 0); |
aoqi@6880 | 1285 | } |
aoqi@6880 | 1286 | |
aoqi@6880 | 1287 | |
aoqi@6880 | 1288 | void MacroAssembler::super_call_VM_leaf(address entry_point, |
aoqi@6880 | 1289 | Register arg_1) { |
aoqi@6880 | 1290 | if (arg_1 != A0) move(A0, arg_1); |
aoqi@6880 | 1291 | MacroAssembler::call_VM_leaf_base(entry_point, 1); |
aoqi@6880 | 1292 | } |
aoqi@6880 | 1293 | |
aoqi@6880 | 1294 | |
aoqi@6880 | 1295 | void MacroAssembler::super_call_VM_leaf(address entry_point, |
aoqi@6880 | 1296 | Register arg_1, |
aoqi@6880 | 1297 | Register arg_2) { |
aoqi@6880 | 1298 | if (arg_1 != A0) move(A0, arg_1); |
aoqi@6880 | 1299 | if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument"); |
aoqi@6880 | 1300 | MacroAssembler::call_VM_leaf_base(entry_point, 2); |
aoqi@6880 | 1301 | } |
aoqi@6880 | 1302 | void MacroAssembler::super_call_VM_leaf(address entry_point, |
aoqi@6880 | 1303 | Register arg_1, |
aoqi@6880 | 1304 | Register arg_2, |
aoqi@6880 | 1305 | Register arg_3) { |
aoqi@6880 | 1306 | if (arg_1 != A0) move(A0, arg_1); |
aoqi@6880 | 1307 | if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument"); |
aoqi@6880 | 1308 | if (arg_3 != A2) move(A2, arg_3); assert(arg_3 != A0 && arg_3 != A1, "smashed argument"); |
aoqi@6880 | 1309 | MacroAssembler::call_VM_leaf_base(entry_point, 3); |
aoqi@6880 | 1310 | } |
aoqi@6880 | 1311 | |
aoqi@6880 | 1312 | void MacroAssembler::check_and_handle_earlyret(Register java_thread) { |
aoqi@6880 | 1313 | } |
aoqi@6880 | 1314 | |
aoqi@6880 | 1315 | void MacroAssembler::check_and_handle_popframe(Register java_thread) { |
aoqi@6880 | 1316 | } |
aoqi@6880 | 1317 | |
aoqi@6880 | 1318 | void MacroAssembler::null_check(Register reg, int offset) { |
aoqi@6880 | 1319 | if (needs_explicit_null_check(offset)) { |
aoqi@6880 | 1320 | // provoke OS NULL exception if reg = NULL by |
aoqi@6880 | 1321 | // accessing M[reg] w/o changing any (non-CC) registers |
aoqi@6880 | 1322 | // NOTE: cmpl is plenty here to provoke a segv |
aoqi@6880 | 1323 | lw(AT, reg, 0); |
aoqi@6880 | 1324 | // Note: should probably use testl(rax, Address(reg, 0)); |
aoqi@6880 | 1325 | // may be shorter code (however, this version of |
aoqi@6880 | 1326 | // testl needs to be implemented first) |
aoqi@6880 | 1327 | } else { |
aoqi@6880 | 1328 | // nothing to do, (later) access of M[reg + offset] |
aoqi@6880 | 1329 | // will provoke OS NULL exception if reg = NULL |
aoqi@6880 | 1330 | } |
aoqi@6880 | 1331 | } |
aoqi@6880 | 1332 | |
aoqi@6880 | 1333 | void MacroAssembler::enter() { |
aoqi@6880 | 1334 | push2(RA, FP); |
aoqi@6880 | 1335 | move(FP, SP); |
aoqi@6880 | 1336 | } |
aoqi@6880 | 1337 | |
aoqi@6880 | 1338 | void MacroAssembler::leave() { |
aoqi@6880 | 1339 | #ifndef _LP64 |
aoqi@6880 | 1340 | //move(SP, FP); |
aoqi@6880 | 1341 | //pop2(FP, RA); |
aoqi@6880 | 1342 | addi(SP, FP, 2 * wordSize); |
aoqi@6880 | 1343 | lw(RA, SP, - 1 * wordSize); |
aoqi@6880 | 1344 | lw(FP, SP, - 2 * wordSize); |
aoqi@6880 | 1345 | #else |
aoqi@6880 | 1346 | daddi(SP, FP, 2 * wordSize); |
aoqi@6880 | 1347 | ld(RA, SP, - 1 * wordSize); |
aoqi@6880 | 1348 | ld(FP, SP, - 2 * wordSize); |
aoqi@6880 | 1349 | #endif |
aoqi@6880 | 1350 | } |
aoqi@6880 | 1351 | /* |
aoqi@6880 | 1352 | void MacroAssembler::os_breakpoint() { |
aoqi@6880 | 1353 | // instead of directly emitting a breakpoint, call os:breakpoint for better debugability |
aoqi@6880 | 1354 | // (e.g., MSVC can't call ps() otherwise) |
aoqi@6880 | 1355 | call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); |
aoqi@6880 | 1356 | } |
aoqi@6880 | 1357 | */ |
aoqi@6880 | 1358 | void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) { |
aoqi@6880 | 1359 | // determine java_thread register |
aoqi@6880 | 1360 | if (!java_thread->is_valid()) { |
aoqi@6880 | 1361 | #ifndef OPT_THREAD |
aoqi@6880 | 1362 | java_thread = T1; |
aoqi@6880 | 1363 | get_thread(java_thread); |
aoqi@6880 | 1364 | #else |
aoqi@6880 | 1365 | java_thread = TREG; |
aoqi@6880 | 1366 | #endif |
aoqi@6880 | 1367 | } |
aoqi@6880 | 1368 | // we must set sp to zero to clear frame |
aoqi@6880 | 1369 | st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_sp_offset())); |
aoqi@6880 | 1370 | // must clear fp, so that compiled frames are not confused; it is possible |
aoqi@6880 | 1371 | // that we need it only for debugging |
aoqi@6880 | 1372 | if(clear_fp) |
aoqi@6880 | 1373 | st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_fp_offset())); |
aoqi@6880 | 1374 | |
aoqi@6880 | 1375 | if (clear_pc) |
aoqi@6880 | 1376 | st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_pc_offset())); |
aoqi@6880 | 1377 | } |
aoqi@6880 | 1378 | |
// Variant of reset_last_Java_frame that always operates on the current
// thread (TREG; loaded via get_thread() when OPT_THREAD is off) instead of
// taking a thread register from the caller. Semantics of clear_fp/clear_pc
// match the Register overload. Uses sd directly, so this path is 64-bit.
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  Register thread = TREG;
#ifndef OPT_THREAD
  get_thread(thread);
#endif
  // we must set sp to zero to clear frame
  sd(R0, Address(thread, JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    sd(R0, Address(thread, JavaThread::last_Java_fp_offset()));
  }

  if (clear_pc) {
    sd(R0, Address(thread, JavaThread::last_Java_pc_offset()));
  }
}
aoqi@6880 | 1397 | |
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
//
// tmp := (thread >> serialize_page_shift) & (page_size - sizeof(int)),
// i.e. a per-thread, int-aligned offset into the serialization page, which
// is then stored at page_base + tmp.
// NOTE(review): the store encodes the page base as the displacement of an
// Address with base 'tmp'; this relies on the MIPS sw(Register, Address)
// macro materializing displacements wider than simm16 -- confirm.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  move(tmp, thread);
  srl(tmp, tmp,os::get_serialize_page_shift_count());
  move(AT, (os::vm_page_size() - sizeof(int)));
  andr(tmp, tmp,AT);
  sw(tmp,Address(tmp, (intptr_t)os::get_memory_serialize_page()));
}
aoqi@6880 | 1409 | |
// Calls to C land
//
// When entering C land, the FP & SP of the last Java frame have to be
// recorded in the (thread-local) JavaThread object. When leaving C land,
// the last Java fp has to be reset to 0. This is required to allow proper
// stack traversal.
//
// last_java_fp and last_java_pc are optional (noreg / NULL to skip).
// last_Java_sp is stored LAST, after fp and pc are already in place.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifndef OPT_THREAD
    java_thread = T2;
    get_thread(java_thread);
#else
    // With OPT_THREAD the thread pointer lives in the dedicated TREG.
    java_thread = TREG;
#endif
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    st_ptr(last_java_fp, java_thread, in_bytes(JavaThread::last_Java_fp_offset()));
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    // Emit the pc as a patchable 48-bit constant, tagged internal_pc_type so
    // it is rewritten if the containing code blob moves.
    relocate(relocInfo::internal_pc_type);
    patchable_set48(AT, (long)last_java_pc);
    st_ptr(AT, java_thread, in_bytes(JavaThread::last_Java_pc_offset()));
  }
  st_ptr(last_java_sp, java_thread, in_bytes(JavaThread::last_Java_sp_offset()));
}
aoqi@6880 | 1448 | |
// Variant of set_last_Java_frame that always anchors the current thread
// (TREG; loaded via get_thread() when OPT_THREAD is off).
// last_java_fp and last_java_pc are optional; sp defaults to SP and is
// stored last. Unlike the Register overload, the pc is written with a plain
// li (no internal_pc relocation) -- presumably only used where the code
// does not move; confirm if reused elsewhere.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }

  Register thread = TREG;
#ifndef OPT_THREAD
  get_thread(thread);
#endif
  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    sd(last_java_fp, Address(thread, JavaThread::last_Java_fp_offset()));
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    li(AT, (intptr_t)(last_java_pc));
    sd(AT, java_pc);
  }

  sd(last_java_sp, Address(thread, JavaThread::last_Java_sp_offset()));
}
aoqi@6880 | 1476 | |
fujie@8000 | 1477 | |
aoqi@6880 | 1478 | ////////////////////////////////////////////////////////////////////////////////// |
aoqi@6880 | 1479 | #if INCLUDE_ALL_GCS |
aoqi@6880 | 1480 | |
// G1 SATB (snapshot-at-the-beginning) pre-barrier.
//
// If concurrent marking is active, records the value about to be
// overwritten (pre_val) in the thread's SATB mark queue. When obj != noreg
// the previous value is first loaded from (obj, 0) into pre_val; otherwise
// the caller has already placed it in pre_val. A NULL previous value needs
// no recording. When the queue is full, the runtime stub
// SharedRuntime::g1_wb_pre is invoked, with the live inputs saved and
// restored around the call.
//
// The //-prefixed lines preserve the x86 instructions this port mirrors.
void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register pre_val,
                                          Register thread,
                                          Register tmp,
                                          bool tosca_live,
                                          bool expand_call) {

  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == TREG, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != V0, "check this code");
  }

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_buf()));


  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    //cmpl(in_progress, 0);
    lw(AT, in_progress);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    //cmpb(in_progress, 0);
    lb(AT, in_progress);
  }
  //jcc(Assembler::equal, done);
  beq(AT, R0, done);
  nop();

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  //cmpptr(pre_val, (int32_t) NULL_WORD);
  //jcc(Assembler::equal, done);
  beq(pre_val, R0, done);
  nop();

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  //movptr(tmp, index);   // tmp := *index_adr
  ld(tmp, index);
  //cmpptr(tmp, 0);   // tmp == 0?
  //jcc(Assembler::equal, runtime);   // If yes, goto runtime
  beq(tmp, R0, runtime);
  nop();

  // Bump the index down one word and store pre_val at buffer + index.
  //subptr(tmp, wordSize);   // tmp := tmp - wordSize
  //movptr(index, tmp);   // *index_adr := tmp
  //addptr(tmp, buffer);   // tmp := tmp + *buffer_adr
  daddiu(tmp, tmp, -1 * wordSize);
  sd(tmp, index);
  ld(AT, buffer);
  daddu(tmp, tmp, AT);

  // Record the previous value
  //movptr(Address(tmp, 0), pre_val);
  //jmp(done);
  sd(pre_val, tmp, 0);
  beq(R0, R0, done);   // unconditional branch
  nop();

  bind(runtime);
  // save the live input values
  //if(tosca_live) push(rax);
  if(tosca_live) push(V0);

  //if (obj != noreg && obj != rax)
  if (obj != noreg && obj != V0)
    push(obj);

  //if (pre_val != rax)
  if (pre_val != V0)
    push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssember::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( push(thread); )

  if (expand_call) {
    //LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
    LP64_ONLY( assert(pre_val != A1, "smashed arg"); )
    //pass_arg1(this, thread);
    if (thread != A1) move(A1, thread);
    //pass_arg0(this, pre_val);
    if (pre_val != A0) move(A0, pre_val);
    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
  } else {
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
  }

  NOT_LP64( pop(thread); )

  // restore the live input values
  //if (pre_val != rax)
  if (pre_val != V0)
    pop(pre_val);

  //if (obj != noreg && obj != rax)
  if (obj != noreg && obj != V0)
    pop(obj);

  //if(tosca_live) pop(rax);
  if(tosca_live) pop(V0);

  bind(done);
}
aoqi@6880 | 1619 | |
aoqi@6880 | 1620 | void MacroAssembler::g1_write_barrier_post(Register store_addr, |
aoqi@6880 | 1621 | Register new_val, |
aoqi@6880 | 1622 | Register thread, |
aoqi@6880 | 1623 | Register tmp, |
aoqi@6880 | 1624 | Register tmp2) { |
fujie@8000 | 1625 | assert(tmp == AT, "must be"); |
fujie@8000 | 1626 | assert(tmp2 == AT, "must be"); |
fujie@8000 | 1627 | #ifdef _LP64 |
fujie@8000 | 1628 | assert(thread == TREG, "must be"); |
fujie@8000 | 1629 | #endif // _LP64 |
fujie@8000 | 1630 | |
fujie@8000 | 1631 | Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + |
fujie@8000 | 1632 | PtrQueue::byte_offset_of_index())); |
fujie@8000 | 1633 | Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + |
fujie@8000 | 1634 | PtrQueue::byte_offset_of_buf())); |
fujie@8000 | 1635 | |
fujie@8000 | 1636 | BarrierSet* bs = Universe::heap()->barrier_set(); |
fujie@8000 | 1637 | CardTableModRefBS* ct = (CardTableModRefBS*)bs; |
fujie@8000 | 1638 | assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); |
fujie@8000 | 1639 | |
fujie@8000 | 1640 | Label done; |
fujie@8000 | 1641 | Label runtime; |
fujie@8000 | 1642 | |
fujie@8000 | 1643 | // Does store cross heap regions? |
fujie@8000 | 1644 | |
fujie@8000 | 1645 | //movptr(tmp, store_addr); |
fujie@8000 | 1646 | //xorptr(tmp, new_val); |
fujie@8000 | 1647 | //shrptr(tmp, HeapRegion::LogOfHRGrainBytes); |
fujie@8000 | 1648 | //jcc(Assembler::equal, done); |
fujie@8000 | 1649 | xorr(AT, store_addr, new_val); |
fujie@8000 | 1650 | dsrl(AT, AT, HeapRegion::LogOfHRGrainBytes); |
fujie@8000 | 1651 | beq(AT, R0, done); |
fujie@8000 | 1652 | nop(); |
fujie@8000 | 1653 | |
fujie@8000 | 1654 | |
fujie@8000 | 1655 | // crosses regions, storing NULL? |
fujie@8000 | 1656 | |
fujie@8000 | 1657 | //cmpptr(new_val, (int32_t) NULL_WORD); |
fujie@8000 | 1658 | //jcc(Assembler::equal, done); |
fujie@8000 | 1659 | beq(new_val, R0, done); |
fujie@8000 | 1660 | nop(); |
fujie@8000 | 1661 | |
fujie@8000 | 1662 | // storing region crossing non-NULL, is card already dirty? |
fujie@8000 | 1663 | |
fujie@8000 | 1664 | const Register card_addr = tmp; |
fujie@8000 | 1665 | const Register cardtable = tmp2; |
fujie@8000 | 1666 | |
fujie@8000 | 1667 | //movptr(card_addr, store_addr); |
fujie@8000 | 1668 | //shrptr(card_addr, CardTableModRefBS::card_shift); |
fujie@8000 | 1669 | move(card_addr, store_addr); |
fujie@8000 | 1670 | dsrl(card_addr, card_addr, CardTableModRefBS::card_shift); |
fujie@8000 | 1671 | // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT |
fujie@8000 | 1672 | // a valid address and therefore is not properly handled by the relocation code. |
fujie@8000 | 1673 | //movptr(cardtable, (intptr_t)ct->byte_map_base); |
fujie@8000 | 1674 | //addptr(card_addr, cardtable); |
fujie@8000 | 1675 | set64(cardtable, (intptr_t)ct->byte_map_base); |
fujie@8000 | 1676 | daddu(card_addr, card_addr, cardtable); |
fujie@8000 | 1677 | |
fujie@8000 | 1678 | //cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); |
fujie@8000 | 1679 | //jcc(Assembler::equal, done); |
fujie@8000 | 1680 | lb(AT, card_addr, 0); |
fujie@8000 | 1681 | daddiu(AT, AT, -1 * (int)G1SATBCardTableModRefBS::g1_young_card_val()); |
fujie@8000 | 1682 | beq(AT, R0, done); |
fujie@8000 | 1683 | nop(); |
fujie@8000 | 1684 | |
fujie@8000 | 1685 | //membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); |
fujie@8000 | 1686 | //cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); |
fujie@8000 | 1687 | //jcc(Assembler::equal, done); |
fujie@8000 | 1688 | sync(); |
fujie@8000 | 1689 | lb(AT, card_addr, 0); |
fujie@8000 | 1690 | daddiu(AT, AT, -1 * (int)(int)CardTableModRefBS::dirty_card_val()); |
fujie@8000 | 1691 | beq(AT, R0, done); |
fujie@8000 | 1692 | nop(); |
fujie@8000 | 1693 | |
fujie@8000 | 1694 | |
fujie@8000 | 1695 | // storing a region crossing, non-NULL oop, card is clean. |
fujie@8000 | 1696 | // dirty card and log. |
fujie@8000 | 1697 | |
fujie@8000 | 1698 | //movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); |
fujie@8000 | 1699 | move(AT, (int)CardTableModRefBS::dirty_card_val()); |
fujie@8000 | 1700 | sb(AT, card_addr, 0); |
fujie@8000 | 1701 | |
fujie@8000 | 1702 | //cmpl(queue_index, 0); |
fujie@8000 | 1703 | //jcc(Assembler::equal, runtime); |
fujie@8000 | 1704 | //subl(queue_index, wordSize); |
fujie@8000 | 1705 | //movptr(tmp2, buffer); |
fujie@8000 | 1706 | lw(AT, queue_index); |
fujie@8000 | 1707 | beq(AT, R0, runtime); |
fujie@8000 | 1708 | nop(); |
fujie@8000 | 1709 | daddiu(AT, AT, -1 * wordSize); |
fujie@8000 | 1710 | sw(AT, queue_index); |
fujie@8000 | 1711 | ld(tmp2, buffer); |
fujie@8000 | 1712 | #ifdef _LP64 |
fujie@8000 | 1713 | //movslq(rscratch1, queue_index); |
fujie@8000 | 1714 | //addq(tmp2, rscratch1); |
fujie@8000 | 1715 | //movq(Address(tmp2, 0), card_addr); |
fujie@8000 | 1716 | ld(AT, queue_index); |
fujie@8000 | 1717 | daddu(tmp2, tmp2, AT); |
fujie@8000 | 1718 | sd(card_addr, tmp2, 0); |
fujie@8000 | 1719 | #else |
fujie@8000 | 1720 | //addl(tmp2, queue_index); |
fujie@8000 | 1721 | //movl(Address(tmp2, 0), card_addr); |
fujie@8000 | 1722 | lw(AT, queue_index); |
fujie@8000 | 1723 | addu32(tmp2, tmp2, AT); |
fujie@8000 | 1724 | sw(card_addr, tmp2, 0); |
fujie@8000 | 1725 | #endif |
fujie@8000 | 1726 | //jmp(done); |
fujie@8000 | 1727 | beq(R0, R0, done); |
fujie@8000 | 1728 | nop(); |
fujie@8000 | 1729 | |
fujie@8000 | 1730 | bind(runtime); |
fujie@8000 | 1731 | // save the live input values |
fujie@8000 | 1732 | push(store_addr); |
fujie@8000 | 1733 | push(new_val); |
fujie@8000 | 1734 | #ifdef _LP64 |
fujie@8000 | 1735 | call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, TREG); |
fujie@8000 | 1736 | #else |
fujie@8000 | 1737 | push(thread); |
fujie@8000 | 1738 | call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); |
fujie@8000 | 1739 | pop(thread); |
fujie@8000 | 1740 | #endif |
fujie@8000 | 1741 | pop(new_val); |
fujie@8000 | 1742 | pop(store_addr); |
fujie@8000 | 1743 | |
fujie@8000 | 1744 | bind(done); |
aoqi@6880 | 1745 | } |
aoqi@6880 | 1746 | |
aoqi@6880 | 1747 | #endif // INCLUDE_ALL_GCS |
aoqi@6880 | 1748 | ////////////////////////////////////////////////////////////////////////////////// |
aoqi@6880 | 1749 | |
aoqi@6880 | 1750 | |
// Card-table post-barrier: mark the card covering obj.
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards (part_1 shifts it into a card index).
void MacroAssembler::store_check(Register obj) {
  store_check_part_1(obj);
  store_check_part_2(obj);
}
aoqi@6880 | 1757 | |
// Address-taking overload for API parity with other ports; dst is unused
// here because the card is derived from obj itself. Destroys obj.
void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
aoqi@6880 | 1761 | |
aoqi@6880 | 1762 | |
// split the store check operation so that other instructions can be scheduled in between
// Part 1: turn the object address in obj into its card index (destructive).
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
#ifdef _LP64
  dsrl(obj, obj, CardTableModRefBS::card_shift);
#else
  shr(obj, CardTableModRefBS::card_shift);
#endif
}
aoqi@6880 | 1773 | |
// Part 2: obj holds the card index from part_1; compute the card's byte
// address (byte_map_base + index, in AT) and dirty it by storing zero
// (presumably dirty_card_val() == 0, hence sb(R0, ...) -- confirm).
void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  set64(AT, (long)ct->byte_map_base);
#ifdef _LP64
  dadd(AT, AT, obj);
#else
  add(AT, AT, obj);
#endif
  // For CMS, order the reference store before the card store.
  if (UseConcMarkSweepGC) sync();
  sb(R0, AT, 0);
}
aoqi@6880 | 1789 | |
// Bump-pointer allocation from the current thread's TLAB.
// Allocates con_size_in_bytes (when var_size_in_bytes == NOREG) or
// var_size_in_bytes bytes; on success obj points at the new object and the
// TLAB top is advanced, otherwise control transfers to slow_case.
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
                                   Register t1, Register t2, Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1, t2, AT);

  Register end = t2;
#ifndef OPT_THREAD
  Register thread = t1;
  get_thread(thread);
#else
  Register thread = TREG;
#endif
  verify_tlab(t1, t2);//blows t1&t2

  // obj := current TLAB top = start of the new object.
  ld_ptr(obj, thread, in_bytes(JavaThread::tlab_top_offset()));

  if (var_size_in_bytes == NOREG) {
    // The constant size is folded into the addi immediate; it must fit simm16.
    assert(is_simm16(con_size_in_bytes), "fixme by moving imm to a register first");
    addi(end, obj, con_size_in_bytes);
  } else {
    add(end, obj, var_size_in_bytes);
  }

  // Take the slow path if the new end exceeds the TLAB limit.
  ld_ptr(AT, thread, in_bytes(JavaThread::tlab_end_offset()));
  sltu(AT, AT, end);
  bne_far(AT, R0, slow_case);
  delayed()->nop();


  // update the tlab top pointer
  st_ptr(end, thread, in_bytes(JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  /*if (var_size_in_bytes == end) {
    sub(var_size_in_bytes, end, obj);
  }*/

  verify_tlab(t1, t2);
}
aoqi@6880 | 1831 | |
aoqi@6880 | 1832 | // Defines obj, preserves var_size_in_bytes |
aoqi@6880 | 1833 | void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, |
aoqi@6880 | 1834 | Register t1, Register t2, Label& slow_case) { |
aoqi@6880 | 1835 | assert_different_registers(obj, var_size_in_bytes, t1, AT); |
aoqi@6880 | 1836 | if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { //by yyq |
aoqi@6880 | 1837 | // No allocation in the shared eden. |
aoqi@6880 | 1838 | b_far(slow_case); |
aoqi@6880 | 1839 | delayed()->nop(); |
aoqi@6880 | 1840 | } else { |
aoqi@6880 | 1841 | |
aoqi@6880 | 1842 | #ifndef _LP64 |
aoqi@6880 | 1843 | Address heap_top(t1, Assembler::split_low((intptr_t)Universe::heap()->top_addr())); |
aoqi@6880 | 1844 | lui(t1, split_high((intptr_t)Universe::heap()->top_addr())); |
aoqi@6880 | 1845 | #else |
aoqi@6880 | 1846 | Address heap_top(t1); |
aoqi@6880 | 1847 | li(t1, (long)Universe::heap()->top_addr()); |
aoqi@6880 | 1848 | #endif |
aoqi@6880 | 1849 | ld_ptr(obj, heap_top); |
aoqi@6880 | 1850 | |
aoqi@6880 | 1851 | Register end = t2; |
aoqi@6880 | 1852 | Label retry; |
aoqi@6880 | 1853 | |
aoqi@6880 | 1854 | bind(retry); |
aoqi@6880 | 1855 | if (var_size_in_bytes == NOREG) { |
aoqi@6880 | 1856 | // i dont think we need move con_size_in_bytes to a register first. |
aoqi@6880 | 1857 | assert(is_simm16(con_size_in_bytes), "fixme by moving imm to a register first"); |
aoqi@6880 | 1858 | addi(end, obj, con_size_in_bytes); |
aoqi@6880 | 1859 | } else { |
aoqi@6880 | 1860 | add(end, obj, var_size_in_bytes); |
aoqi@6880 | 1861 | } |
aoqi@6880 | 1862 | // if end < obj then we wrapped around => object too long => slow case |
aoqi@6880 | 1863 | sltu(AT, end, obj); |
aoqi@6880 | 1864 | bne_far(AT, R0, slow_case); |
aoqi@6880 | 1865 | delayed()->nop(); |
aoqi@6880 | 1866 | |
aoqi@6880 | 1867 | li(AT, (long)Universe::heap()->end_addr()); |
aoqi@6880 | 1868 | sltu(AT, AT, end); |
aoqi@6880 | 1869 | bne_far(AT, R0, slow_case); |
aoqi@6880 | 1870 | delayed()->nop(); |
aoqi@6880 | 1871 | // Compare obj with the top addr, and if still equal, store the new top addr in |
aoqi@6880 | 1872 | // end at the address of the top addr pointer. Sets ZF if was equal, and clears |
aoqi@6880 | 1873 | // it otherwise. Use lock prefix for atomicity on MPs. |
aoqi@6880 | 1874 | //if (os::is_MP()) { |
aoqi@6880 | 1875 | // sync(); |
aoqi@6880 | 1876 | //} |
aoqi@6880 | 1877 | |
aoqi@6880 | 1878 | // if someone beat us on the allocation, try again, otherwise continue |
aoqi@6880 | 1879 | cmpxchg(end, heap_top, obj); |
aoqi@6880 | 1880 | beq_far(AT, R0, retry); //by yyq |
aoqi@6880 | 1881 | delayed()->nop(); |
aoqi@6880 | 1882 | |
aoqi@6880 | 1883 | } |
aoqi@6880 | 1884 | } |
aoqi@6880 | 1885 | |
// C2 doesn't invoke this one.
// Refill the current thread's TLAB from eden, or decide to allocate the
// request directly in eden instead:
//  - if the free space left in the TLAB exceeds the refill-waste limit,
//    keep the TLAB, bump the waste limit, and branch to try_eden;
//  - otherwise discard the TLAB (filling the leftover space with a dummy
//    int[] so the heap stays parseable), allocate a fresh TLAB from eden,
//    and branch back to retry.
// Branches to slow_case when eden allocation is not possible.
void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  Register top = T0;
  Register t1  = T1;
  /* Jin: tlab_refill() is called in

     [c1_Runtime1_mips.cpp] Runtime1::generate_code_for(new_type_array_id);

     In generate_code_for(), T2 has been assigned as a register(length), which is used
     after calling tlab_refill();
     Therefore, tlab_refill() should not use T2.

     Source:

     Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException
        at java.lang.System.arraycopy(Native Method)
        at java.util.Arrays.copyOf(Arrays.java:2799)  <-- alloc_array
        at sun.misc.Resource.getBytes(Resource.java:117)
        at java.net.URLClassLoader.defineClass(URLClassLoader.java:273)
        at java.net.URLClassLoader.findClass(URLClassLoader.java:205)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:321)
  */
  Register t2  = T9;
  Register t3  = T3;
  Register thread_reg = T8;
  Label do_refill, discard_tlab;
  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    b(slow_case);
    delayed()->nop();
  }

  get_thread(thread_reg);

  ld_ptr(top, thread_reg, in_bytes(JavaThread::tlab_top_offset()));
  ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_end_offset()));

  // calculate amount of free space (in HeapWords)
  sub(t1, t1, top);
  shr(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  ld_ptr(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  slt(AT, t2, t1);
  beq(AT, R0, discard_tlab);
  delayed()->nop();

  // Retain

#ifndef _LP64
  move(AT, ThreadLocalAllocBuffer::refill_waste_limit_increment());
#else
  li(AT, ThreadLocalAllocBuffer::refill_waste_limit_increment());
#endif
  add(t2, t2, AT);
  st_ptr(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));

  if (TLABStats) {
    // increment number of slow_allocations
    lw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset()));
    addiu(AT, AT, 1);
    sw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  b(try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    lw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    addi(AT, AT, 1);
    sw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage -- t1 is amount free in tlab
    lw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
    add(AT, AT, t1);
    sw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  beq(top, R0, do_refill);
  delayed()->nop();

  // set up the mark word of the filler int[]
  li(AT, (long)markOopDesc::prototype()->copy_set_hash(0x2));
  st_ptr(AT, top, oopDesc::mark_offset_in_bytes());

  // set the length to the remaining space (free words minus the array
  // header plus the alignment reserve, converted to jint elements)
  addi(t1, t1, - typeArrayOopDesc::header_size(T_INT));
  addi(t1, t1, ThreadLocalAllocBuffer::alignment_reserve());
  shl(t1, log2_intptr(HeapWordSize/sizeof(jint)));
  sw(t1, top, arrayOopDesc::length_offset_in_bytes());

  // set klass to intArrayKlass
#ifndef _LP64
  lui(AT, split_high((intptr_t)Universe::intArrayKlassObj_addr()));
  lw(t1, AT, split_low((intptr_t)Universe::intArrayKlassObj_addr()));
#else
  li(AT, (intptr_t)Universe::intArrayKlassObj_addr());
  ld_ptr(t1, AT, 0);
#endif
  //st_ptr(t1, top, oopDesc::klass_offset_in_bytes());
  store_klass(top, t1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(t1, thread_reg, in_bytes(JavaThread::tlab_size_offset()));
  shl(t1, LogHeapWordSize);   // words -> bytes
  // add object_size ??
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    assert_different_registers(thread_reg, t1);
    ld_ptr(AT, thread_reg, in_bytes(JavaThread::tlab_size_offset()));
    shl(AT, LogHeapWordSize);
    beq(AT, t1, ok);
    delayed()->nop();
    stop("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
  }
#endif
  // Install the new TLAB: start/top at the allocation, end at
  // start + size - alignment_reserve.
  st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_top_offset()));
  add(top, top, t1);
  addi(top, top, - ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  st_ptr(top, thread_reg, in_bytes(JavaThread::tlab_end_offset()));
  verify_tlab(t1, t2);
  b(retry);
  delayed()->nop();
}
aoqi@6880 | 2022 | |
aoqi@6880 | 2023 | static const double pi_4 = 0.7853981633974483; |
aoqi@6880 | 2024 | |
aoqi@6880 | 2025 | // the x86 version is to clumsy, i dont think we need that fuss. maybe i'm wrong, FIXME |
aoqi@6880 | 2026 | // must get argument(a double) in F12/F13 |
aoqi@6880 | 2027 | //void MacroAssembler::trigfunc(char trig, bool preserve_cpu_regs, int num_fpu_regs_in_use) { |
aoqi@6880 | 2028 | //We need to preseve the register which maybe modified during the Call @Jerome |
aoqi@6880 | 2029 | void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) { |
aoqi@6880 | 2030 | //save all modified register here |
aoqi@6880 | 2031 | // if (preserve_cpu_regs) { |
aoqi@6880 | 2032 | // } |
aoqi@6880 | 2033 | //FIXME, in the disassembly of tirgfunc, only used V0,V1,T9, SP,RA,so we ony save V0,V1,T9 |
aoqi@6880 | 2034 | pushad(); |
aoqi@6880 | 2035 | //we should preserve the stack space before we call |
aoqi@6880 | 2036 | addi(SP, SP, -wordSize * 2); |
aoqi@6880 | 2037 | switch (trig){ |
aoqi@6880 | 2038 | case 's' : |
aoqi@6880 | 2039 | call( CAST_FROM_FN_PTR(address, SharedRuntime::dsin), relocInfo::runtime_call_type ); |
aoqi@6880 | 2040 | delayed()->nop(); |
aoqi@6880 | 2041 | break; |
aoqi@6880 | 2042 | case 'c': |
aoqi@6880 | 2043 | call( CAST_FROM_FN_PTR(address, SharedRuntime::dcos), relocInfo::runtime_call_type ); |
aoqi@6880 | 2044 | delayed()->nop(); |
aoqi@6880 | 2045 | break; |
aoqi@6880 | 2046 | case 't': |
aoqi@6880 | 2047 | call( CAST_FROM_FN_PTR(address, SharedRuntime::dtan), relocInfo::runtime_call_type ); |
aoqi@6880 | 2048 | delayed()->nop(); |
aoqi@6880 | 2049 | break; |
aoqi@6880 | 2050 | default:assert (false, "bad intrinsic"); |
aoqi@6880 | 2051 | break; |
aoqi@6880 | 2052 | |
aoqi@6880 | 2053 | } |
aoqi@6880 | 2054 | |
aoqi@6880 | 2055 | addi(SP, SP, wordSize * 2); |
aoqi@6880 | 2056 | popad(); |
aoqi@6880 | 2057 | // if (preserve_cpu_regs) { |
aoqi@6880 | 2058 | // } |
aoqi@6880 | 2059 | } |
aoqi@6880 | 2060 | |
aoqi@6880 | 2061 | #ifdef _LP64 |
aoqi@6880 | 2062 | void MacroAssembler::li(Register rd, long imm) { |
aoqi@6880 | 2063 | if (imm <= max_jint && imm >= min_jint) { |
aoqi@6880 | 2064 | li32(rd, (int)imm); |
aoqi@6880 | 2065 | } else if (julong(imm) <= 0xFFFFFFFF) { |
aoqi@6880 | 2066 | assert_not_delayed(); |
aoqi@6880 | 2067 | // lui sign-extends, so we can't use that. |
aoqi@6880 | 2068 | ori(rd, R0, julong(imm) >> 16); |
aoqi@6880 | 2069 | dsll(rd, rd, 16); |
aoqi@6880 | 2070 | ori(rd, rd, split_low(imm)); |
aoqi@6880 | 2071 | //aoqi_test |
aoqi@6880 | 2072 | //} else if ((imm > 0) && ((imm >> 48) == 0)) { |
aoqi@6880 | 2073 | } else if ((imm > 0) && is_simm16(imm >> 32)) { |
aoqi@6880 | 2074 | /* A 48-bit address */ |
aoqi@6880 | 2075 | li48(rd, imm); |
aoqi@6880 | 2076 | } else { |
aoqi@6880 | 2077 | li64(rd, imm); |
aoqi@6880 | 2078 | } |
aoqi@6880 | 2079 | } |
aoqi@6880 | 2080 | #else |
aoqi@6880 | 2081 | void MacroAssembler::li(Register rd, long imm) { |
aoqi@6880 | 2082 | li32(rd, (int)imm); |
aoqi@6880 | 2083 | } |
aoqi@6880 | 2084 | #endif |
aoqi@6880 | 2085 | |
aoqi@6880 | 2086 | void MacroAssembler::li32(Register reg, int imm) { |
aoqi@6880 | 2087 | if (is_simm16(imm)) { |
aoqi@6880 | 2088 | /* Jin: for imm < 0, we should use addi instead of addiu. |
aoqi@6880 | 2089 | * |
aoqi@6880 | 2090 | * java.lang.StringCoding$StringDecoder.decode(jobject, jint, jint) |
aoqi@6880 | 2091 | * |
aoqi@6880 | 2092 | * 78 move [int:-1|I] [a0|I] |
aoqi@6880 | 2093 | * : daddi a0, zero, 0xffffffff (correct) |
aoqi@6880 | 2094 | * : daddiu a0, zero, 0xffffffff (incorrect) |
aoqi@6880 | 2095 | */ |
aoqi@6880 | 2096 | if (imm >= 0) |
aoqi@6880 | 2097 | addiu(reg, R0, imm); |
aoqi@6880 | 2098 | else |
aoqi@6880 | 2099 | addi(reg, R0, imm); |
aoqi@6880 | 2100 | } else { |
aoqi@6880 | 2101 | lui(reg, split_low(imm >> 16)); |
aoqi@6880 | 2102 | if (split_low(imm)) |
aoqi@6880 | 2103 | ori(reg, reg, split_low(imm)); |
aoqi@6880 | 2104 | } |
aoqi@6880 | 2105 | } |
aoqi@6880 | 2106 | |
aoqi@6880 | 2107 | #ifdef _LP64 |
aoqi@6880 | 2108 | void MacroAssembler::set64(Register d, jlong value) { |
aoqi@6880 | 2109 | assert_not_delayed(); |
aoqi@6880 | 2110 | |
aoqi@6880 | 2111 | int hi = (int)(value >> 32); |
aoqi@6880 | 2112 | int lo = (int)(value & ~0); |
aoqi@6880 | 2113 | |
aoqi@6880 | 2114 | if (value == lo) { // 32-bit integer |
aoqi@6880 | 2115 | if (is_simm16(value)) { |
aoqi@6880 | 2116 | daddiu(d, R0, value); |
aoqi@6880 | 2117 | } else { |
aoqi@6880 | 2118 | lui(d, split_low(value >> 16)); |
aoqi@6880 | 2119 | if (split_low(value)) { |
aoqi@6880 | 2120 | ori(d, d, split_low(value)); |
aoqi@6880 | 2121 | } |
aoqi@6880 | 2122 | } |
aoqi@6880 | 2123 | } else if (hi == 0) { // hardware zero-extends to upper 32 |
aoqi@6880 | 2124 | ori(d, R0, julong(value) >> 16); |
aoqi@6880 | 2125 | dsll(d, d, 16); |
aoqi@6880 | 2126 | if (split_low(value)) { |
aoqi@6880 | 2127 | ori(d, d, split_low(value)); |
aoqi@6880 | 2128 | } |
aoqi@6880 | 2129 | } else if ((value> 0) && is_simm16(value >> 32)) { // li48 |
aoqi@6880 | 2130 | // 4 insts |
aoqi@6880 | 2131 | li48(d, value); |
aoqi@6880 | 2132 | } else { // li64 |
aoqi@6880 | 2133 | // 6 insts |
aoqi@6880 | 2134 | li64(d, value); |
aoqi@6880 | 2135 | } |
aoqi@6880 | 2136 | } |
aoqi@6880 | 2137 | |
aoqi@6880 | 2138 | |
aoqi@6880 | 2139 | int MacroAssembler::insts_for_set64(jlong value) { |
aoqi@6880 | 2140 | int hi = (int)(value >> 32); |
aoqi@6880 | 2141 | int lo = (int)(value & ~0); |
aoqi@6880 | 2142 | |
aoqi@6880 | 2143 | int count = 0; |
aoqi@6880 | 2144 | |
aoqi@6880 | 2145 | if (value == lo) { // 32-bit integer |
aoqi@6880 | 2146 | if (is_simm16(value)) { |
aoqi@6880 | 2147 | //daddiu(d, R0, value); |
aoqi@6880 | 2148 | count++; |
aoqi@6880 | 2149 | } else { |
aoqi@6880 | 2150 | //lui(d, split_low(value >> 16)); |
aoqi@6880 | 2151 | count++; |
aoqi@6880 | 2152 | if (split_low(value)) { |
aoqi@6880 | 2153 | //ori(d, d, split_low(value)); |
aoqi@6880 | 2154 | count++; |
aoqi@6880 | 2155 | } |
aoqi@6880 | 2156 | } |
aoqi@6880 | 2157 | } else if (hi == 0) { // hardware zero-extends to upper 32 |
aoqi@6880 | 2158 | //ori(d, R0, julong(value) >> 16); |
aoqi@6880 | 2159 | //dsll(d, d, 16); |
aoqi@6880 | 2160 | count += 2; |
aoqi@6880 | 2161 | if (split_low(value)) { |
aoqi@6880 | 2162 | //ori(d, d, split_low(value)); |
aoqi@6880 | 2163 | count++; |
aoqi@6880 | 2164 | } |
aoqi@6880 | 2165 | } else if ((value> 0) && is_simm16(value >> 32)) { // li48 |
aoqi@6880 | 2166 | // 4 insts |
aoqi@6880 | 2167 | //li48(d, value); |
aoqi@6880 | 2168 | count += 4; |
aoqi@6880 | 2169 | } else { // li64 |
aoqi@6880 | 2170 | // 6 insts |
aoqi@6880 | 2171 | //li64(d, value); |
aoqi@6880 | 2172 | count += 6; |
aoqi@6880 | 2173 | } |
aoqi@6880 | 2174 | |
aoqi@6880 | 2175 | return count; |
aoqi@6880 | 2176 | } |
aoqi@6880 | 2177 | |
aoqi@6880 | 2178 | void MacroAssembler::patchable_set48(Register d, jlong value) { |
aoqi@6880 | 2179 | assert_not_delayed(); |
aoqi@6880 | 2180 | |
aoqi@6880 | 2181 | int hi = (int)(value >> 32); |
aoqi@6880 | 2182 | int lo = (int)(value & ~0); |
aoqi@6880 | 2183 | |
aoqi@6880 | 2184 | int count = 0; |
aoqi@6880 | 2185 | |
aoqi@6880 | 2186 | if (value == lo) { // 32-bit integer |
aoqi@6880 | 2187 | if (is_simm16(value)) { |
aoqi@6880 | 2188 | daddiu(d, R0, value); |
aoqi@6880 | 2189 | count += 1; |
aoqi@6880 | 2190 | } else { |
aoqi@6880 | 2191 | lui(d, split_low(value >> 16)); |
aoqi@6880 | 2192 | count += 1; |
aoqi@6880 | 2193 | if (split_low(value)) { |
aoqi@6880 | 2194 | ori(d, d, split_low(value)); |
aoqi@6880 | 2195 | count += 1; |
aoqi@6880 | 2196 | } |
aoqi@6880 | 2197 | } |
aoqi@6880 | 2198 | } else if (hi == 0) { // hardware zero-extends to upper 32 |
aoqi@6880 | 2199 | ori(d, R0, julong(value) >> 16); |
aoqi@6880 | 2200 | dsll(d, d, 16); |
aoqi@6880 | 2201 | count += 2; |
aoqi@6880 | 2202 | if (split_low(value)) { |
aoqi@6880 | 2203 | ori(d, d, split_low(value)); |
aoqi@6880 | 2204 | count += 1; |
aoqi@6880 | 2205 | } |
aoqi@6880 | 2206 | } else if ((value> 0) && is_simm16(value >> 32)) { // li48 |
aoqi@6880 | 2207 | // 4 insts |
aoqi@6880 | 2208 | li48(d, value); |
aoqi@6880 | 2209 | count += 4; |
aoqi@6880 | 2210 | } else { // li64 |
aoqi@6880 | 2211 | tty->print_cr("value = 0x%x", value); |
aoqi@6880 | 2212 | guarantee(false, "Not supported yet !"); |
aoqi@6880 | 2213 | } |
aoqi@6880 | 2214 | |
aoqi@6880 | 2215 | for (count; count < 4; count++) { |
aoqi@6880 | 2216 | nop(); |
aoqi@6880 | 2217 | } |
aoqi@6880 | 2218 | } |
aoqi@6880 | 2219 | |
aoqi@6880 | 2220 | void MacroAssembler::patchable_set32(Register d, jlong value) { |
aoqi@6880 | 2221 | assert_not_delayed(); |
aoqi@6880 | 2222 | |
aoqi@6880 | 2223 | int hi = (int)(value >> 32); |
aoqi@6880 | 2224 | int lo = (int)(value & ~0); |
aoqi@6880 | 2225 | |
aoqi@6880 | 2226 | int count = 0; |
aoqi@6880 | 2227 | |
aoqi@6880 | 2228 | if (value == lo) { // 32-bit integer |
aoqi@6880 | 2229 | if (is_simm16(value)) { |
aoqi@6880 | 2230 | daddiu(d, R0, value); |
aoqi@6880 | 2231 | count += 1; |
aoqi@6880 | 2232 | } else { |
aoqi@6880 | 2233 | lui(d, split_low(value >> 16)); |
aoqi@6880 | 2234 | count += 1; |
aoqi@6880 | 2235 | if (split_low(value)) { |
aoqi@6880 | 2236 | ori(d, d, split_low(value)); |
aoqi@6880 | 2237 | count += 1; |
aoqi@6880 | 2238 | } |
aoqi@6880 | 2239 | } |
aoqi@6880 | 2240 | } else if (hi == 0) { // hardware zero-extends to upper 32 |
aoqi@6880 | 2241 | ori(d, R0, julong(value) >> 16); |
aoqi@6880 | 2242 | dsll(d, d, 16); |
aoqi@6880 | 2243 | count += 2; |
aoqi@6880 | 2244 | if (split_low(value)) { |
aoqi@6880 | 2245 | ori(d, d, split_low(value)); |
aoqi@6880 | 2246 | count += 1; |
aoqi@6880 | 2247 | } |
aoqi@6880 | 2248 | } else { |
aoqi@6880 | 2249 | tty->print_cr("value = 0x%x", value); |
aoqi@6880 | 2250 | guarantee(false, "Not supported yet !"); |
aoqi@6880 | 2251 | } |
aoqi@6880 | 2252 | |
aoqi@6880 | 2253 | for (count; count < 3; count++) { |
aoqi@6880 | 2254 | nop(); |
aoqi@6880 | 2255 | } |
aoqi@6880 | 2256 | } |
aoqi@6880 | 2257 | |
aoqi@6880 | 2258 | void MacroAssembler::patchable_call32(Register d, jlong value) { |
aoqi@6880 | 2259 | assert_not_delayed(); |
aoqi@6880 | 2260 | |
aoqi@6880 | 2261 | int hi = (int)(value >> 32); |
aoqi@6880 | 2262 | int lo = (int)(value & ~0); |
aoqi@6880 | 2263 | |
aoqi@6880 | 2264 | int count = 0; |
aoqi@6880 | 2265 | |
aoqi@6880 | 2266 | if (value == lo) { // 32-bit integer |
aoqi@6880 | 2267 | if (is_simm16(value)) { |
aoqi@6880 | 2268 | daddiu(d, R0, value); |
aoqi@6880 | 2269 | count += 1; |
aoqi@6880 | 2270 | } else { |
aoqi@6880 | 2271 | lui(d, split_low(value >> 16)); |
aoqi@6880 | 2272 | count += 1; |
aoqi@6880 | 2273 | if (split_low(value)) { |
aoqi@6880 | 2274 | ori(d, d, split_low(value)); |
aoqi@6880 | 2275 | count += 1; |
aoqi@6880 | 2276 | } |
aoqi@6880 | 2277 | } |
aoqi@6880 | 2278 | } else { |
aoqi@6880 | 2279 | tty->print_cr("value = 0x%x", value); |
aoqi@6880 | 2280 | guarantee(false, "Not supported yet !"); |
aoqi@6880 | 2281 | } |
aoqi@6880 | 2282 | |
aoqi@6880 | 2283 | for (count; count < 2; count++) { |
aoqi@6880 | 2284 | nop(); |
aoqi@6880 | 2285 | } |
aoqi@6880 | 2286 | } |
aoqi@6880 | 2287 | |
aoqi@6880 | 2288 | void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { |
aoqi@6880 | 2289 | assert(UseCompressedClassPointers, "should only be used for compressed header"); |
aoqi@6880 | 2290 | assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); |
aoqi@6880 | 2291 | |
aoqi@6880 | 2292 | int klass_index = oop_recorder()->find_index(k); |
aoqi@6880 | 2293 | RelocationHolder rspec = metadata_Relocation::spec(klass_index); |
aoqi@6880 | 2294 | long narrowKlass = (long)Klass::encode_klass(k); |
aoqi@6880 | 2295 | |
aoqi@6880 | 2296 | relocate(rspec, Assembler::narrow_oop_operand); |
aoqi@6880 | 2297 | patchable_set48(dst, narrowKlass); |
aoqi@6880 | 2298 | } |
aoqi@6880 | 2299 | |
aoqi@6880 | 2300 | |
aoqi@6880 | 2301 | void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { |
aoqi@6880 | 2302 | assert(UseCompressedOops, "should only be used for compressed header"); |
aoqi@6880 | 2303 | assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); |
aoqi@6880 | 2304 | |
aoqi@6880 | 2305 | int oop_index = oop_recorder()->find_index(obj); |
aoqi@6880 | 2306 | RelocationHolder rspec = oop_Relocation::spec(oop_index); |
aoqi@6880 | 2307 | |
aoqi@6880 | 2308 | relocate(rspec, Assembler::narrow_oop_operand); |
aoqi@6880 | 2309 | patchable_set48(dst, oop_index); |
aoqi@6880 | 2310 | } |
aoqi@6880 | 2311 | |
aoqi@6880 | 2312 | void MacroAssembler::li64(Register rd, long imm) { |
aoqi@6880 | 2313 | assert_not_delayed(); |
aoqi@6880 | 2314 | lui(rd, imm >> 48); |
aoqi@6880 | 2315 | ori(rd, rd, split_low(imm >> 32)); |
aoqi@6880 | 2316 | dsll(rd, rd, 16); |
aoqi@6880 | 2317 | ori(rd, rd, split_low(imm >> 16)); |
aoqi@6880 | 2318 | dsll(rd, rd, 16); |
aoqi@6880 | 2319 | ori(rd, rd, split_low(imm)); |
aoqi@6880 | 2320 | } |
aoqi@6880 | 2321 | |
aoqi@6880 | 2322 | void MacroAssembler::li48(Register rd, long imm) { |
aoqi@6880 | 2323 | assert_not_delayed(); |
aoqi@6880 | 2324 | assert(is_simm16(imm >> 32), "Not a 48-bit address"); |
aoqi@6880 | 2325 | lui(rd, imm >> 32); |
aoqi@6880 | 2326 | ori(rd, rd, split_low(imm >> 16)); |
aoqi@6880 | 2327 | dsll(rd, rd, 16); |
aoqi@6880 | 2328 | ori(rd, rd, split_low(imm)); |
aoqi@6880 | 2329 | } |
aoqi@6880 | 2330 | #endif |
aoqi@6880 | 2331 | // NOTE: i dont push eax as i486. |
aoqi@6880 | 2332 | // the x86 save eax for it use eax as the jump register |
aoqi@6880 | 2333 | void MacroAssembler::verify_oop(Register reg, const char* s) { |
aoqi@6880 | 2334 | /* |
aoqi@6880 | 2335 | if (!VerifyOops) return; |
aoqi@6880 | 2336 | |
aoqi@6880 | 2337 | // Pass register number to verify_oop_subroutine |
aoqi@6880 | 2338 | char* b = new char[strlen(s) + 50]; |
aoqi@6880 | 2339 | sprintf(b, "verify_oop: %s: %s", reg->name(), s); |
aoqi@6880 | 2340 | push(rax); // save rax, |
aoqi@6880 | 2341 | push(reg); // pass register argument |
aoqi@6880 | 2342 | ExternalAddress buffer((address) b); |
aoqi@6880 | 2343 | // avoid using pushptr, as it modifies scratch registers |
aoqi@6880 | 2344 | // and our contract is not to modify anything |
aoqi@6880 | 2345 | movptr(rax, buffer.addr()); |
aoqi@6880 | 2346 | push(rax); |
aoqi@6880 | 2347 | // call indirectly to solve generation ordering problem |
aoqi@6880 | 2348 | movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); |
aoqi@6880 | 2349 | call(rax); |
aoqi@6880 | 2350 | */ |
aoqi@6880 | 2351 | if (!VerifyOops) return; |
aoqi@6880 | 2352 | const char * b = NULL; |
aoqi@6880 | 2353 | stringStream ss; |
aoqi@6880 | 2354 | ss.print("verify_oop: %s: %s", reg->name(), s); |
aoqi@6880 | 2355 | b = code_string(ss.as_string()); |
aoqi@6880 | 2356 | #ifdef _LP64 |
aoqi@6880 | 2357 | pushad(); |
aoqi@6880 | 2358 | move(A1, reg); |
aoqi@6880 | 2359 | li(A0, (long)b); |
aoqi@6880 | 2360 | li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address()); |
aoqi@6880 | 2361 | ld(T9, AT, 0); |
aoqi@6880 | 2362 | jalr(T9); |
aoqi@6880 | 2363 | delayed()->nop(); |
aoqi@6880 | 2364 | popad(); |
aoqi@6880 | 2365 | #else |
aoqi@6880 | 2366 | // Pass register number to verify_oop_subroutine |
aoqi@6880 | 2367 | sw(T0, SP, - wordSize); |
aoqi@6880 | 2368 | sw(T1, SP, - 2*wordSize); |
aoqi@6880 | 2369 | sw(RA, SP, - 3*wordSize); |
aoqi@6880 | 2370 | sw(A0, SP ,- 4*wordSize); |
aoqi@6880 | 2371 | sw(A1, SP ,- 5*wordSize); |
aoqi@6880 | 2372 | sw(AT, SP ,- 6*wordSize); |
aoqi@6880 | 2373 | sw(T9, SP ,- 7*wordSize); |
aoqi@6880 | 2374 | addiu(SP, SP, - 7 * wordSize); |
aoqi@6880 | 2375 | move(A1, reg); |
aoqi@6880 | 2376 | li(A0, (long)b); |
aoqi@6880 | 2377 | // call indirectly to solve generation ordering problem |
aoqi@6880 | 2378 | li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address()); |
aoqi@6880 | 2379 | lw(T9, AT, 0); |
aoqi@6880 | 2380 | jalr(T9); |
aoqi@6880 | 2381 | delayed()->nop(); |
aoqi@6880 | 2382 | lw(T0, SP, 6* wordSize); |
aoqi@6880 | 2383 | lw(T1, SP, 5* wordSize); |
aoqi@6880 | 2384 | lw(RA, SP, 4* wordSize); |
aoqi@6880 | 2385 | lw(A0, SP, 3* wordSize); |
aoqi@6880 | 2386 | lw(A1, SP, 2* wordSize); |
aoqi@6880 | 2387 | lw(AT, SP, 1* wordSize); |
aoqi@6880 | 2388 | lw(T9, SP, 0* wordSize); |
aoqi@6880 | 2389 | addiu(SP, SP, 7 * wordSize); |
aoqi@6880 | 2390 | #endif |
aoqi@6880 | 2391 | } |
aoqi@6880 | 2392 | |
aoqi@6880 | 2393 | |
aoqi@6880 | 2394 | void MacroAssembler::verify_oop_addr(Address addr, const char* s) { |
aoqi@6880 | 2395 | if (!VerifyOops) { |
aoqi@6880 | 2396 | nop(); |
aoqi@6880 | 2397 | return; |
aoqi@6880 | 2398 | } |
aoqi@6880 | 2399 | // Pass register number to verify_oop_subroutine |
aoqi@6880 | 2400 | const char * b = NULL; |
aoqi@6880 | 2401 | stringStream ss; |
aoqi@6880 | 2402 | ss.print("verify_oop_addr: %s", s); |
aoqi@6880 | 2403 | b = code_string(ss.as_string()); |
aoqi@6880 | 2404 | |
aoqi@6880 | 2405 | st_ptr(T0, SP, - wordSize); |
aoqi@6880 | 2406 | st_ptr(T1, SP, - 2*wordSize); |
aoqi@6880 | 2407 | st_ptr(RA, SP, - 3*wordSize); |
aoqi@6880 | 2408 | st_ptr(A0, SP, - 4*wordSize); |
aoqi@6880 | 2409 | st_ptr(A1, SP, - 5*wordSize); |
aoqi@6880 | 2410 | st_ptr(AT, SP, - 6*wordSize); |
aoqi@6880 | 2411 | st_ptr(T9, SP, - 7*wordSize); |
aoqi@6880 | 2412 | ld_ptr(A1, addr); // addr may use SP, so load from it before change SP |
aoqi@6880 | 2413 | addiu(SP, SP, - 7 * wordSize); |
aoqi@6880 | 2414 | |
aoqi@6880 | 2415 | li(A0, (long)b); |
aoqi@6880 | 2416 | // call indirectly to solve generation ordering problem |
aoqi@6880 | 2417 | li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address()); |
aoqi@6880 | 2418 | ld_ptr(T9, AT, 0); |
aoqi@6880 | 2419 | jalr(T9); |
aoqi@6880 | 2420 | delayed()->nop(); |
aoqi@6880 | 2421 | ld_ptr(T0, SP, 6* wordSize); |
aoqi@6880 | 2422 | ld_ptr(T1, SP, 5* wordSize); |
aoqi@6880 | 2423 | ld_ptr(RA, SP, 4* wordSize); |
aoqi@6880 | 2424 | ld_ptr(A0, SP, 3* wordSize); |
aoqi@6880 | 2425 | ld_ptr(A1, SP, 2* wordSize); |
aoqi@6880 | 2426 | ld_ptr(AT, SP, 1* wordSize); |
aoqi@6880 | 2427 | ld_ptr(T9, SP, 0* wordSize); |
aoqi@6880 | 2428 | addiu(SP, SP, 7 * wordSize); |
aoqi@6880 | 2429 | } |
aoqi@6880 | 2430 | |
aoqi@6880 | 2431 | // used registers : T0, T1 |
aoqi@6880 | 2432 | void MacroAssembler::verify_oop_subroutine() { |
aoqi@6880 | 2433 | // RA: ra |
aoqi@6880 | 2434 | // A0: char* error message |
aoqi@6880 | 2435 | // A1: oop object to verify |
aoqi@6880 | 2436 | |
aoqi@6880 | 2437 | Label exit, error; |
aoqi@6880 | 2438 | // increment counter |
aoqi@6880 | 2439 | li(T0, (long)StubRoutines::verify_oop_count_addr()); |
aoqi@6880 | 2440 | lw(AT, T0, 0); |
aoqi@6880 | 2441 | #ifdef _LP64 |
aoqi@6880 | 2442 | daddi(AT, AT, 1); |
aoqi@6880 | 2443 | #else |
aoqi@6880 | 2444 | addi(AT, AT, 1); |
aoqi@6880 | 2445 | #endif |
aoqi@6880 | 2446 | sw(AT, T0, 0); |
aoqi@6880 | 2447 | |
aoqi@6880 | 2448 | // make sure object is 'reasonable' |
aoqi@6880 | 2449 | beq(A1, R0, exit); // if obj is NULL it is ok |
aoqi@6880 | 2450 | delayed()->nop(); |
aoqi@6880 | 2451 | |
aoqi@6880 | 2452 | // Check if the oop is in the right area of memory |
aoqi@6880 | 2453 | //const int oop_mask = Universe::verify_oop_mask(); |
aoqi@6880 | 2454 | //const int oop_bits = Universe::verify_oop_bits(); |
aoqi@6880 | 2455 | const uintptr_t oop_mask = Universe::verify_oop_mask(); |
aoqi@6880 | 2456 | const uintptr_t oop_bits = Universe::verify_oop_bits(); |
aoqi@6880 | 2457 | li(AT, oop_mask); |
aoqi@6880 | 2458 | andr(T0, A1, AT); |
aoqi@6880 | 2459 | li(AT, oop_bits); |
aoqi@6880 | 2460 | bne(T0, AT, error); |
aoqi@6880 | 2461 | delayed()->nop(); |
aoqi@6880 | 2462 | |
aoqi@6880 | 2463 | // make sure klass is 'reasonable' |
aoqi@6880 | 2464 | //add for compressedoops |
aoqi@6880 | 2465 | reinit_heapbase(); |
aoqi@6880 | 2466 | //add for compressedoops |
aoqi@6880 | 2467 | load_klass(T0, A1); |
aoqi@6880 | 2468 | beq(T0, R0, error); // if klass is NULL it is broken |
aoqi@6880 | 2469 | delayed()->nop(); |
aoqi@6880 | 2470 | #if 0 |
aoqi@6880 | 2471 | //FIXME:wuhui. |
aoqi@6880 | 2472 | // Check if the klass is in the right area of memory |
aoqi@6880 | 2473 | //const int klass_mask = Universe::verify_klass_mask(); |
aoqi@6880 | 2474 | //const int klass_bits = Universe::verify_klass_bits(); |
aoqi@6880 | 2475 | const uintptr_t klass_mask = Universe::verify_klass_mask(); |
aoqi@6880 | 2476 | const uintptr_t klass_bits = Universe::verify_klass_bits(); |
aoqi@6880 | 2477 | |
aoqi@6880 | 2478 | li(AT, klass_mask); |
aoqi@6880 | 2479 | andr(T1, T0, AT); |
aoqi@6880 | 2480 | li(AT, klass_bits); |
aoqi@6880 | 2481 | bne(T1, AT, error); |
aoqi@6880 | 2482 | delayed()->nop(); |
aoqi@6880 | 2483 | // make sure klass' klass is 'reasonable' |
aoqi@6880 | 2484 | //add for compressedoops |
aoqi@6880 | 2485 | load_klass(T0, T0); |
aoqi@6880 | 2486 | beq(T0, R0, error); // if klass' klass is NULL it is broken |
aoqi@6880 | 2487 | delayed()->nop(); |
aoqi@6880 | 2488 | |
aoqi@6880 | 2489 | li(AT, klass_mask); |
aoqi@6880 | 2490 | andr(T1, T0, AT); |
aoqi@6880 | 2491 | li(AT, klass_bits); |
aoqi@6880 | 2492 | bne(T1, AT, error); |
aoqi@6880 | 2493 | delayed()->nop(); // if klass not in right area of memory it is broken too. |
aoqi@6880 | 2494 | #endif |
aoqi@6880 | 2495 | // return if everything seems ok |
aoqi@6880 | 2496 | bind(exit); |
aoqi@6880 | 2497 | |
aoqi@6880 | 2498 | jr(RA); |
aoqi@6880 | 2499 | delayed()->nop(); |
aoqi@6880 | 2500 | |
aoqi@6880 | 2501 | // handle errors |
aoqi@6880 | 2502 | bind(error); |
aoqi@6880 | 2503 | pushad(); |
aoqi@6880 | 2504 | #ifndef _LP64 |
aoqi@6880 | 2505 | addi(SP, SP, (-1) * wordSize); |
aoqi@6880 | 2506 | #endif |
aoqi@6880 | 2507 | call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); |
aoqi@6880 | 2508 | delayed()->nop(); |
aoqi@6880 | 2509 | #ifndef _LP64 |
aoqi@6880 | 2510 | addiu(SP, SP, 1 * wordSize); |
aoqi@6880 | 2511 | #endif |
aoqi@6880 | 2512 | popad(); |
aoqi@6880 | 2513 | jr(RA); |
aoqi@6880 | 2514 | delayed()->nop(); |
aoqi@6880 | 2515 | } |
aoqi@6880 | 2516 | |
aoqi@6880 | 2517 | void MacroAssembler::verify_tlab(Register t1, Register t2) { |
aoqi@6880 | 2518 | #ifdef ASSERT |
aoqi@6880 | 2519 | assert_different_registers(t1, t2, AT); |
aoqi@6880 | 2520 | if (UseTLAB && VerifyOops) { |
aoqi@6880 | 2521 | Label next, ok; |
aoqi@6880 | 2522 | |
aoqi@6880 | 2523 | get_thread(t1); |
aoqi@6880 | 2524 | |
aoqi@6880 | 2525 | ld_ptr(t2, t1, in_bytes(JavaThread::tlab_top_offset())); |
aoqi@6880 | 2526 | ld_ptr(AT, t1, in_bytes(JavaThread::tlab_start_offset())); |
aoqi@6880 | 2527 | sltu(AT, t2, AT); |
aoqi@6880 | 2528 | beq(AT, R0, next); |
aoqi@6880 | 2529 | delayed()->nop(); |
aoqi@6880 | 2530 | |
aoqi@6880 | 2531 | stop("assert(top >= start)"); |
aoqi@6880 | 2532 | |
aoqi@6880 | 2533 | bind(next); |
aoqi@6880 | 2534 | ld_ptr(AT, t1, in_bytes(JavaThread::tlab_end_offset())); |
aoqi@6880 | 2535 | sltu(AT, AT, t2); |
aoqi@6880 | 2536 | beq(AT, R0, ok); |
aoqi@6880 | 2537 | delayed()->nop(); |
aoqi@6880 | 2538 | |
aoqi@6880 | 2539 | stop("assert(top <= end)"); |
aoqi@6880 | 2540 | |
aoqi@6880 | 2541 | bind(ok); |
aoqi@6880 | 2542 | |
aoqi@6880 | 2543 | } |
aoqi@6880 | 2544 | #endif |
aoqi@6880 | 2545 | } |
aoqi@6880 | 2546 | RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, |
aoqi@6880 | 2547 | Register tmp, |
aoqi@6880 | 2548 | int offset) { |
aoqi@6880 | 2549 | intptr_t value = *delayed_value_addr; |
aoqi@6880 | 2550 | if (value != 0) |
aoqi@6880 | 2551 | return RegisterOrConstant(value + offset); |
aoqi@6880 | 2552 | AddressLiteral a(delayed_value_addr); |
aoqi@6880 | 2553 | // load indirectly to solve generation ordering problem |
aoqi@6880 | 2554 | //movptr(tmp, ExternalAddress((address) delayed_value_addr)); |
aoqi@6880 | 2555 | //ld(tmp, a); |
aoqi@6880 | 2556 | if (offset != 0) |
aoqi@6880 | 2557 | daddi(tmp,tmp, offset); |
aoqi@6880 | 2558 | |
aoqi@6880 | 2559 | return RegisterOrConstant(tmp); |
aoqi@6880 | 2560 | } |
aoqi@6880 | 2561 | |
aoqi@6880 | 2562 | void MacroAssembler::hswap(Register reg) { |
aoqi@6880 | 2563 | //short |
aoqi@6880 | 2564 | //andi(reg, reg, 0xffff); |
aoqi@6880 | 2565 | srl(AT, reg, 8); |
aoqi@6880 | 2566 | sll(reg, reg, 24); |
aoqi@6880 | 2567 | sra(reg, reg, 16); |
aoqi@6880 | 2568 | orr(reg, reg, AT); |
aoqi@6880 | 2569 | } |
aoqi@6880 | 2570 | |
aoqi@6880 | 2571 | void MacroAssembler::huswap(Register reg) { |
aoqi@6880 | 2572 | #ifdef _LP64 |
aoqi@6880 | 2573 | dsrl(AT, reg, 8); |
aoqi@6880 | 2574 | dsll(reg, reg, 24); |
aoqi@6880 | 2575 | dsrl(reg, reg, 16); |
aoqi@6880 | 2576 | orr(reg, reg, AT); |
aoqi@6880 | 2577 | andi(reg, reg, 0xffff); |
aoqi@6880 | 2578 | #else |
aoqi@6880 | 2579 | //andi(reg, reg, 0xffff); |
aoqi@6880 | 2580 | srl(AT, reg, 8); |
aoqi@6880 | 2581 | sll(reg, reg, 24); |
aoqi@6880 | 2582 | srl(reg, reg, 16); |
aoqi@6880 | 2583 | orr(reg, reg, AT); |
aoqi@6880 | 2584 | #endif |
aoqi@6880 | 2585 | } |
aoqi@6880 | 2586 | |
aoqi@6880 | 2587 | // something funny to do this will only one more register AT |
aoqi@6880 | 2588 | // 32 bits |
aoqi@6880 | 2589 | void MacroAssembler::swap(Register reg) { |
aoqi@6880 | 2590 | srl(AT, reg, 8); |
aoqi@6880 | 2591 | sll(reg, reg, 24); |
aoqi@6880 | 2592 | orr(reg, reg, AT); |
aoqi@6880 | 2593 | //reg : 4 1 2 3 |
aoqi@6880 | 2594 | srl(AT, AT, 16); |
aoqi@6880 | 2595 | xorr(AT, AT, reg); |
aoqi@6880 | 2596 | andi(AT, AT, 0xff); |
aoqi@6880 | 2597 | //AT : 0 0 0 1^3); |
aoqi@6880 | 2598 | xorr(reg, reg, AT); |
aoqi@6880 | 2599 | //reg : 4 1 2 1 |
aoqi@6880 | 2600 | sll(AT, AT, 16); |
aoqi@6880 | 2601 | xorr(reg, reg, AT); |
aoqi@6880 | 2602 | //reg : 4 3 2 1 |
aoqi@6880 | 2603 | } |
aoqi@6880 | 2604 | |
aoqi@6880 | 2605 | #ifdef _LP64 |
aoqi@6880 | 2606 | |
aoqi@6880 | 2607 | /* do 32-bit CAS using MIPS64 lld/scd |
aoqi@6880 | 2608 | |
aoqi@6880 | 2609 | Jin: cas_int should only compare 32-bits of the memory value. |
aoqi@6880 | 2610 | However, lld/scd will do 64-bit operation, which violates the intention of cas_int. |
aoqi@6880 | 2611 | To simulate a 32-bit atomic operation, the value loaded with LLD should be split into |
aoqi@6880 | 2612 | tow halves, and only the low-32 bits is compared. If equals, the low-32 bits of newval, |
aoqi@6880 | 2613 | plus the high-32 bits or memory value, are stored togethor with SCD. |
aoqi@6880 | 2614 | |
aoqi@6880 | 2615 | Example: |
aoqi@6880 | 2616 | |
aoqi@6880 | 2617 | double d = 3.1415926; |
aoqi@6880 | 2618 | System.err.println("hello" + d); |
aoqi@6880 | 2619 | |
aoqi@6880 | 2620 | sun.misc.FloatingDecimal$1.<init>() |
aoqi@6880 | 2621 | | |
aoqi@6880 | 2622 | `- java.util.concurrent.atomic.AtomicInteger::compareAndSet() |
aoqi@6880 | 2623 | |
aoqi@6880 | 2624 | 38 cas_int [a7a7|J] [a0|I] [a6|I] |
aoqi@6880 | 2625 | // a0: 0xffffffffe8ea9f63 pc: 0x55647f3354 |
aoqi@6880 | 2626 | // a6: 0x4ab325aa |
aoqi@6880 | 2627 | |
aoqi@6880 | 2628 | again: |
aoqi@6880 | 2629 | 0x00000055647f3c5c: lld at, 0x0(a7) ; 64-bit load, "0xe8ea9f63" |
aoqi@6880 | 2630 | |
aoqi@6880 | 2631 | 0x00000055647f3c60: sll t9, at, 0 ; t9: low-32 bits (sign extended) |
aoqi@6880 | 2632 | 0x00000055647f3c64: dsrl32 t8, at, 0 ; t8: high-32 bits |
aoqi@6880 | 2633 | 0x00000055647f3c68: dsll32 t8, t8, 0 |
aoqi@6880 | 2634 | 0x00000055647f3c6c: bne t9, a0, 0x00000055647f3c9c ; goto nequal |
aoqi@6880 | 2635 | 0x00000055647f3c70: sll zero, zero, 0 |
aoqi@6880 | 2636 | |
aoqi@6880 | 2637 | 0x00000055647f3c74: ori v1, zero, 0xffffffff ; v1: low-32 bits of newval (sign unextended) |
aoqi@6880 | 2638 | 0x00000055647f3c78: dsll v1, v1, 16 ; v1 = a6 & 0xFFFFFFFF; |
aoqi@6880 | 2639 | 0x00000055647f3c7c: ori v1, v1, 0xffffffff |
aoqi@6880 | 2640 | 0x00000055647f3c80: and v1, a6, v1 |
aoqi@6880 | 2641 | 0x00000055647f3c84: or at, t8, v1 |
aoqi@6880 | 2642 | 0x00000055647f3c88: scd at, 0x0(a7) |
aoqi@6880 | 2643 | 0x00000055647f3c8c: beq at, zero, 0x00000055647f3c5c ; goto again |
aoqi@6880 | 2644 | 0x00000055647f3c90: sll zero, zero, 0 |
aoqi@6880 | 2645 | 0x00000055647f3c94: beq zero, zero, 0x00000055647f45ac ; goto done |
aoqi@6880 | 2646 | 0x00000055647f3c98: sll zero, zero, 0 |
aoqi@6880 | 2647 | nequal: |
aoqi@6880 | 2648 | 0x00000055647f45a4: dadd a0, t9, zero |
aoqi@6880 | 2649 | 0x00000055647f45a8: dadd at, zero, zero |
aoqi@6880 | 2650 | done: |
aoqi@6880 | 2651 | */ |
aoqi@6880 | 2652 | |
aoqi@6880 | 2653 | void MacroAssembler::cmpxchg32(Register x_reg, Address dest, Register c_reg) { |
aoqi@6880 | 2654 | /* 2012/11/11 Jin: MIPS64 can use ll/sc for 32-bit atomic memory access */ |
aoqi@6880 | 2655 | Label done, again, nequal; |
aoqi@6880 | 2656 | |
aoqi@6880 | 2657 | bind(again); |
aoqi@6880 | 2658 | |
aoqi@6880 | 2659 | if(!Use3A2000) sync(); |
aoqi@6880 | 2660 | ll(AT, dest); |
aoqi@6880 | 2661 | bne(AT, c_reg, nequal); |
aoqi@6880 | 2662 | delayed()->nop(); |
aoqi@6880 | 2663 | |
aoqi@6880 | 2664 | move(AT, x_reg); |
aoqi@6880 | 2665 | sc(AT, dest); |
aoqi@6880 | 2666 | beq(AT, R0, again); |
aoqi@6880 | 2667 | delayed()->nop(); |
aoqi@6880 | 2668 | b(done); |
aoqi@6880 | 2669 | delayed()->nop(); |
aoqi@6880 | 2670 | |
aoqi@6880 | 2671 | // not xchged |
aoqi@6880 | 2672 | bind(nequal); |
aoqi@6880 | 2673 | sync(); |
aoqi@6880 | 2674 | move(c_reg, AT); |
aoqi@6880 | 2675 | move(AT, R0); |
aoqi@6880 | 2676 | |
aoqi@6880 | 2677 | bind(done); |
aoqi@6880 | 2678 | } |
aoqi@6880 | 2679 | #endif // cmpxchg32 |
aoqi@6880 | 2680 | |
aoqi@6880 | 2681 | void MacroAssembler::cmpxchg(Register x_reg, Address dest, Register c_reg) { |
aoqi@6880 | 2682 | Label done, again, nequal; |
aoqi@6880 | 2683 | |
aoqi@6880 | 2684 | bind(again); |
aoqi@6880 | 2685 | #ifdef _LP64 |
aoqi@6880 | 2686 | if(!Use3A2000) sync(); |
aoqi@6880 | 2687 | lld(AT, dest); |
aoqi@6880 | 2688 | #else |
aoqi@6880 | 2689 | if(!Use3A2000) sync(); |
aoqi@6880 | 2690 | ll(AT, dest); |
aoqi@6880 | 2691 | #endif |
aoqi@6880 | 2692 | bne(AT, c_reg, nequal); |
aoqi@6880 | 2693 | delayed()->nop(); |
aoqi@6880 | 2694 | |
aoqi@6880 | 2695 | move(AT, x_reg); |
aoqi@6880 | 2696 | #ifdef _LP64 |
aoqi@6880 | 2697 | scd(AT, dest); |
aoqi@6880 | 2698 | #else |
aoqi@6880 | 2699 | sc(AT, dest); |
aoqi@6880 | 2700 | #endif |
aoqi@6880 | 2701 | beq(AT, R0, again); |
aoqi@6880 | 2702 | delayed()->nop(); |
aoqi@6880 | 2703 | b(done); |
aoqi@6880 | 2704 | delayed()->nop(); |
aoqi@6880 | 2705 | |
aoqi@6880 | 2706 | // not xchged |
aoqi@6880 | 2707 | bind(nequal); |
aoqi@6880 | 2708 | sync(); |
aoqi@6880 | 2709 | move(c_reg, AT); |
aoqi@6880 | 2710 | move(AT, R0); |
aoqi@6880 | 2711 | |
aoqi@6880 | 2712 | bind(done); |
aoqi@6880 | 2713 | } |
aoqi@6880 | 2714 | |
aoqi@6880 | 2715 | void MacroAssembler::cmpxchg8(Register x_regLo, Register x_regHi, Address dest, Register c_regLo, Register c_regHi) { |
aoqi@6880 | 2716 | Label done, again, nequal; |
aoqi@6880 | 2717 | |
aoqi@6880 | 2718 | Register x_reg = x_regLo; |
aoqi@6880 | 2719 | dsll32(x_regHi, x_regHi, 0); |
aoqi@6880 | 2720 | dsll32(x_regLo, x_regLo, 0); |
aoqi@6880 | 2721 | dsrl32(x_regLo, x_regLo, 0); |
aoqi@6880 | 2722 | orr(x_reg, x_regLo, x_regHi); |
aoqi@6880 | 2723 | |
aoqi@6880 | 2724 | Register c_reg = c_regLo; |
aoqi@6880 | 2725 | dsll32(c_regHi, c_regHi, 0); |
aoqi@6880 | 2726 | dsll32(c_regLo, c_regLo, 0); |
aoqi@6880 | 2727 | dsrl32(c_regLo, c_regLo, 0); |
aoqi@6880 | 2728 | orr(c_reg, c_regLo, c_regHi); |
aoqi@6880 | 2729 | |
aoqi@6880 | 2730 | bind(again); |
aoqi@6880 | 2731 | |
aoqi@6880 | 2732 | if(!Use3A2000) sync(); |
aoqi@6880 | 2733 | lld(AT, dest); |
aoqi@6880 | 2734 | bne(AT, c_reg, nequal); |
aoqi@6880 | 2735 | delayed()->nop(); |
aoqi@6880 | 2736 | |
aoqi@6880 | 2737 | //move(AT, x_reg); |
aoqi@6880 | 2738 | dadd(AT, x_reg, R0); |
aoqi@6880 | 2739 | scd(AT, dest); |
aoqi@6880 | 2740 | beq(AT, R0, again); |
aoqi@6880 | 2741 | delayed()->nop(); |
aoqi@6880 | 2742 | b(done); |
aoqi@6880 | 2743 | delayed()->nop(); |
aoqi@6880 | 2744 | |
aoqi@6880 | 2745 | // not xchged |
aoqi@6880 | 2746 | bind(nequal); |
aoqi@6880 | 2747 | sync(); |
aoqi@6880 | 2748 | //move(c_reg, AT); |
aoqi@6880 | 2749 | //move(AT, R0); |
aoqi@6880 | 2750 | dadd(c_reg, AT, R0); |
aoqi@6880 | 2751 | dadd(AT, R0, R0); |
aoqi@6880 | 2752 | bind(done); |
aoqi@6880 | 2753 | } |
aoqi@6880 | 2754 | |
aoqi@6880 | 2755 | // be sure the three register is different |
aoqi@6880 | 2756 | void MacroAssembler::rem_s(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) { |
aoqi@6880 | 2757 | assert_different_registers(tmp, fs, ft); |
aoqi@6880 | 2758 | div_s(tmp, fs, ft); |
aoqi@6880 | 2759 | trunc_l_s(tmp, tmp); |
aoqi@6880 | 2760 | cvt_s_l(tmp, tmp); |
aoqi@6880 | 2761 | mul_s(tmp, tmp, ft); |
aoqi@6880 | 2762 | sub_s(fd, fs, tmp); |
aoqi@6880 | 2763 | } |
aoqi@6880 | 2764 | |
aoqi@6880 | 2765 | // be sure the three register is different |
aoqi@6880 | 2766 | void MacroAssembler::rem_d(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) { |
aoqi@6880 | 2767 | assert_different_registers(tmp, fs, ft); |
aoqi@6880 | 2768 | div_d(tmp, fs, ft); |
aoqi@6880 | 2769 | trunc_l_d(tmp, tmp); |
aoqi@6880 | 2770 | cvt_d_l(tmp, tmp); |
aoqi@6880 | 2771 | mul_d(tmp, tmp, ft); |
aoqi@6880 | 2772 | sub_d(fd, fs, tmp); |
aoqi@6880 | 2773 | } |
aoqi@6880 | 2774 | |
aoqi@6880 | 2775 | // Fast_Lock and Fast_Unlock used by C2 |
aoqi@6880 | 2776 | |
aoqi@6880 | 2777 | // Because the transitions from emitted code to the runtime |
aoqi@6880 | 2778 | // monitorenter/exit helper stubs are so slow it's critical that |
aoqi@6880 | 2779 | // we inline both the stack-locking fast-path and the inflated fast path. |
aoqi@6880 | 2780 | // |
aoqi@6880 | 2781 | // See also: cmpFastLock and cmpFastUnlock. |
aoqi@6880 | 2782 | // |
aoqi@6880 | 2783 | // What follows is a specialized inline transliteration of the code |
aoqi@6880 | 2784 | // in slow_enter() and slow_exit(). If we're concerned about I$ bloat |
aoqi@6880 | 2785 | // another option would be to emit TrySlowEnter and TrySlowExit methods |
aoqi@6880 | 2786 | // at startup-time. These methods would accept arguments as |
aoqi@6880 | 2787 | // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure |
aoqi@6880 | 2788 | // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply |
aoqi@6880 | 2789 | // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit. |
aoqi@6880 | 2790 | // In practice, however, the # of lock sites is bounded and is usually small. |
aoqi@6880 | 2791 | // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer |
aoqi@6880 | 2792 | // if the processor uses simple bimodal branch predictors keyed by EIP |
aoqi@6880 | 2793 | // Since the helper routines would be called from multiple synchronization |
aoqi@6880 | 2794 | // sites. |
aoqi@6880 | 2795 | // |
aoqi@6880 | 2796 | // An even better approach would be write "MonitorEnter()" and "MonitorExit()" |
aoqi@6880 | 2797 | // in java - using j.u.c and unsafe - and just bind the lock and unlock sites |
aoqi@6880 | 2798 | // to those specialized methods. That'd give us a mostly platform-independent |
aoqi@6880 | 2799 | // implementation that the JITs could optimize and inline at their pleasure. |
aoqi@6880 | 2800 | // Done correctly, the only time we'd need to cross to native could would be |
aoqi@6880 | 2801 | // to park() or unpark() threads. We'd also need a few more unsafe operators |
aoqi@6880 | 2802 | // to (a) prevent compiler-JIT reordering of non-volatile accesses, and |
aoqi@6880 | 2803 | // (b) explicit barriers or fence operations. |
aoqi@6880 | 2804 | // |
aoqi@6880 | 2805 | // TODO: |
aoqi@6880 | 2806 | // |
aoqi@6880 | 2807 | // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr). |
aoqi@6880 | 2808 | // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals. |
aoqi@6880 | 2809 | // Given TLAB allocation, Self is usually manifested in a register, so passing it into |
aoqi@6880 | 2810 | // the lock operators would typically be faster than reifying Self. |
aoqi@6880 | 2811 | // |
aoqi@6880 | 2812 | // * Ideally I'd define the primitives as: |
aoqi@6880 | 2813 | // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED. |
aoqi@6880 | 2814 | // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED |
aoqi@6880 | 2815 | // Unfortunately ADLC bugs prevent us from expressing the ideal form. |
aoqi@6880 | 2816 | // Instead, we're stuck with a rather awkward and brittle register assignments below. |
aoqi@6880 | 2817 | // Furthermore the register assignments are overconstrained, possibly resulting in |
aoqi@6880 | 2818 | // sub-optimal code near the synchronization site. |
aoqi@6880 | 2819 | // |
aoqi@6880 | 2820 | // * Eliminate the sp-proximity tests and just use "== Self" tests instead. |
aoqi@6880 | 2821 | // Alternately, use a better sp-proximity test. |
aoqi@6880 | 2822 | // |
aoqi@6880 | 2823 | // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value. |
aoqi@6880 | 2824 | // Either one is sufficient to uniquely identify a thread. |
aoqi@6880 | 2825 | // TODO: eliminate use of sp in _owner and use get_thread(tr) instead. |
aoqi@6880 | 2826 | // |
aoqi@6880 | 2827 | // * Intrinsify notify() and notifyAll() for the common cases where the |
aoqi@6880 | 2828 | // object is locked by the calling thread but the waitlist is empty. |
aoqi@6880 | 2829 | // avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll(). |
aoqi@6880 | 2830 | // |
aoqi@6880 | 2831 | // * use jccb and jmpb instead of jcc and jmp to improve code density. |
aoqi@6880 | 2832 | // But beware of excessive branch density on AMD Opterons. |
aoqi@6880 | 2833 | // |
aoqi@6880 | 2834 | // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success |
aoqi@6880 | 2835 | // or failure of the fast-path. If the fast-path fails then we pass |
aoqi@6880 | 2836 | // control to the slow-path, typically in C. In Fast_Lock and |
aoqi@6880 | 2837 | // Fast_Unlock we often branch to DONE_LABEL, just to find that C2 |
aoqi@6880 | 2838 | // will emit a conditional branch immediately after the node. |
aoqi@6880 | 2839 | // So we have branches to branches and lots of ICC.ZF games. |
aoqi@6880 | 2840 | // Instead, it might be better to have C2 pass a "FailureLabel" |
aoqi@6880 | 2841 | // into Fast_Lock and Fast_Unlock. In the case of success, control |
aoqi@6880 | 2842 | // will drop through the node. ICC.ZF is undefined at exit. |
aoqi@6880 | 2843 | // In the case of failure, the node will branch directly to the |
aoqi@6880 | 2844 | // FailureLabel |
aoqi@6880 | 2845 | |
aoqi@6880 | 2846 | |
aoqi@6880 | 2847 | // obj: object to lock |
aoqi@6880 | 2848 | // box: on-stack box address (displaced header location) - KILLED |
aoqi@6880 | 2849 | // rax,: tmp -- KILLED |
aoqi@6880 | 2850 | // scr: tmp -- KILLED |
aoqi@6880 | 2851 | void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg) { |
aoqi@6880 | 2852 | |
aoqi@6880 | 2853 | // Ensure the register assignents are disjoint |
aoqi@6880 | 2854 | guarantee (objReg != boxReg, "") ; |
aoqi@6880 | 2855 | guarantee (objReg != tmpReg, "") ; |
aoqi@6880 | 2856 | guarantee (objReg != scrReg, "") ; |
aoqi@6880 | 2857 | guarantee (boxReg != tmpReg, "") ; |
aoqi@6880 | 2858 | guarantee (boxReg != scrReg, "") ; |
aoqi@6880 | 2859 | |
aoqi@6880 | 2860 | |
aoqi@6880 | 2861 | block_comment("FastLock"); |
aoqi@6880 | 2862 | /* |
aoqi@6880 | 2863 | move(AT, 0x0); |
aoqi@6880 | 2864 | return; |
aoqi@6880 | 2865 | */ |
aoqi@6880 | 2866 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 2867 | push(tmpReg); |
aoqi@6880 | 2868 | atomic_inc32((address)BiasedLocking::total_entry_count_addr(), 1, AT, tmpReg); |
aoqi@6880 | 2869 | pop(tmpReg); |
aoqi@6880 | 2870 | } |
aoqi@6880 | 2871 | |
aoqi@6880 | 2872 | if (EmitSync & 1) { |
aoqi@6880 | 2873 | move(AT, 0x0); |
aoqi@6880 | 2874 | return; |
aoqi@6880 | 2875 | } else |
aoqi@6880 | 2876 | if (EmitSync & 2) { |
aoqi@6880 | 2877 | Label DONE_LABEL ; |
aoqi@6880 | 2878 | if (UseBiasedLocking) { |
aoqi@6880 | 2879 | // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument. |
aoqi@6880 | 2880 | biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL); |
aoqi@6880 | 2881 | } |
aoqi@6880 | 2882 | |
aoqi@6880 | 2883 | ld(tmpReg, Address(objReg, 0)) ; // fetch markword |
aoqi@6880 | 2884 | ori(tmpReg, tmpReg, 0x1); |
aoqi@6880 | 2885 | sd(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS |
aoqi@6880 | 2886 | |
aoqi@6880 | 2887 | cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg |
aoqi@6880 | 2888 | bne(AT, R0, DONE_LABEL); |
aoqi@6880 | 2889 | delayed()->nop(); |
aoqi@6880 | 2890 | |
aoqi@6880 | 2891 | // Recursive locking |
aoqi@6880 | 2892 | dsubu(tmpReg, tmpReg, SP); |
aoqi@6880 | 2893 | li(AT, (7 - os::vm_page_size() )); |
aoqi@6880 | 2894 | andr(tmpReg, tmpReg, AT); |
aoqi@6880 | 2895 | sd(tmpReg, Address(boxReg, 0)); |
aoqi@6880 | 2896 | bind(DONE_LABEL) ; |
aoqi@6880 | 2897 | } else { |
aoqi@6880 | 2898 | // Possible cases that we'll encounter in fast_lock |
aoqi@6880 | 2899 | // ------------------------------------------------ |
aoqi@6880 | 2900 | // * Inflated |
aoqi@6880 | 2901 | // -- unlocked |
aoqi@6880 | 2902 | // -- Locked |
aoqi@6880 | 2903 | // = by self |
aoqi@6880 | 2904 | // = by other |
aoqi@6880 | 2905 | // * biased |
aoqi@6880 | 2906 | // -- by Self |
aoqi@6880 | 2907 | // -- by other |
aoqi@6880 | 2908 | // * neutral |
aoqi@6880 | 2909 | // * stack-locked |
aoqi@6880 | 2910 | // -- by self |
aoqi@6880 | 2911 | // = sp-proximity test hits |
aoqi@6880 | 2912 | // = sp-proximity test generates false-negative |
aoqi@6880 | 2913 | // -- by other |
aoqi@6880 | 2914 | // |
aoqi@6880 | 2915 | |
aoqi@6880 | 2916 | Label IsInflated, DONE_LABEL, PopDone ; |
aoqi@6880 | 2917 | |
aoqi@6880 | 2918 | // TODO: optimize away redundant LDs of obj->mark and improve the markword triage |
aoqi@6880 | 2919 | // order to reduce the number of conditional branches in the most common cases. |
aoqi@6880 | 2920 | // Beware -- there's a subtle invariant that fetch of the markword |
aoqi@6880 | 2921 | // at [FETCH], below, will never observe a biased encoding (*101b). |
aoqi@6880 | 2922 | // If this invariant is not held we risk exclusion (safety) failure. |
aoqi@6880 | 2923 | if (UseBiasedLocking && !UseOptoBiasInlining) { |
aoqi@6880 | 2924 | biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL); |
aoqi@6880 | 2925 | } |
aoqi@6880 | 2926 | |
aoqi@6880 | 2927 | ld(tmpReg, Address(objReg, 0)) ; //Fetch the markword of the object. |
aoqi@6880 | 2928 | andi(AT, tmpReg, markOopDesc::monitor_value); |
aoqi@6880 | 2929 | bne(AT, R0, IsInflated); // inflated vs stack-locked|neutral|bias |
aoqi@6880 | 2930 | delayed()->nop(); |
aoqi@6880 | 2931 | |
aoqi@6880 | 2932 | // Attempt stack-locking ... |
aoqi@6880 | 2933 | ori (tmpReg, tmpReg, markOopDesc::unlocked_value); |
aoqi@6880 | 2934 | sd(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS |
aoqi@6880 | 2935 | //if (os::is_MP()) { |
aoqi@6880 | 2936 | // sync(); |
aoqi@6880 | 2937 | //} |
aoqi@6880 | 2938 | |
aoqi@6880 | 2939 | cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg |
aoqi@6880 | 2940 | //AT == 1: unlocked |
aoqi@6880 | 2941 | |
aoqi@6880 | 2942 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 2943 | Label L; |
aoqi@6880 | 2944 | beq(AT, R0, L); |
aoqi@6880 | 2945 | delayed()->nop(); |
aoqi@6880 | 2946 | push(T0); |
aoqi@6880 | 2947 | push(T1); |
aoqi@6880 | 2948 | atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1); |
aoqi@6880 | 2949 | pop(T1); |
aoqi@6880 | 2950 | pop(T0); |
aoqi@6880 | 2951 | bind(L); |
aoqi@6880 | 2952 | } |
aoqi@6880 | 2953 | bne(AT, R0, DONE_LABEL); |
aoqi@6880 | 2954 | delayed()->nop(); |
aoqi@6880 | 2955 | |
aoqi@6880 | 2956 | // Recursive locking |
aoqi@6880 | 2957 | // The object is stack-locked: markword contains stack pointer to BasicLock. |
aoqi@6880 | 2958 | // Locked by current thread if difference with current SP is less than one page. |
aoqi@6880 | 2959 | dsubu(tmpReg, tmpReg, SP); |
aoqi@6880 | 2960 | li(AT, 7 - os::vm_page_size() ); |
aoqi@6880 | 2961 | andr(tmpReg, tmpReg, AT); |
aoqi@6880 | 2962 | sd(tmpReg, Address(boxReg, 0)); |
aoqi@6880 | 2963 | if (PrintBiasedLockingStatistics) { |
aoqi@6880 | 2964 | Label L; |
aoqi@6880 | 2965 | // tmpReg == 0 => BiasedLocking::_fast_path_entry_count++ |
aoqi@6880 | 2966 | bne(tmpReg, R0, L); |
aoqi@6880 | 2967 | delayed()->nop(); |
aoqi@6880 | 2968 | push(T0); |
aoqi@6880 | 2969 | push(T1); |
aoqi@6880 | 2970 | atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1); |
aoqi@6880 | 2971 | pop(T1); |
aoqi@6880 | 2972 | pop(T0); |
aoqi@6880 | 2973 | bind(L); |
aoqi@6880 | 2974 | } |
aoqi@6880 | 2975 | sltiu(AT, tmpReg, 1); /* AT = (tmpReg == 0) ? 1 : 0 */ |
aoqi@6880 | 2976 | |
aoqi@6880 | 2977 | b(DONE_LABEL) ; |
aoqi@6880 | 2978 | delayed()->nop(); |
aoqi@6880 | 2979 | |
aoqi@6880 | 2980 | bind(IsInflated) ; |
aoqi@6880 | 2981 | // The object's monitor m is unlocked iff m->owner == NULL, |
aoqi@6880 | 2982 | // otherwise m->owner may contain a thread or a stack address. |
aoqi@6880 | 2983 | |
aoqi@6880 | 2984 | // TODO: someday avoid the ST-before-CAS penalty by |
aoqi@6880 | 2985 | // relocating (deferring) the following ST. |
aoqi@6880 | 2986 | // We should also think about trying a CAS without having |
aoqi@6880 | 2987 | // fetched _owner. If the CAS is successful we may |
aoqi@6880 | 2988 | // avoid an RTO->RTS upgrade on the $line. |
aoqi@6880 | 2989 | // Without cast to int32_t a movptr will destroy r10 which is typically obj |
aoqi@6880 | 2990 | li(AT, (int32_t)intptr_t(markOopDesc::unused_mark())); |
aoqi@6880 | 2991 | sd(AT, Address(boxReg, 0)); |
aoqi@6880 | 2992 | |
aoqi@6880 | 2993 | move(boxReg, tmpReg) ; |
aoqi@6880 | 2994 | ld(tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; |
aoqi@6880 | 2995 | // if (m->owner != 0) => AT = 0, goto slow path. |
aoqi@6880 | 2996 | move(AT, R0); |
aoqi@6880 | 2997 | bne(tmpReg, R0, DONE_LABEL); |
aoqi@6880 | 2998 | delayed()->nop(); |
aoqi@6880 | 2999 | |
aoqi@6880 | 3000 | #ifndef OPT_THREAD |
aoqi@6880 | 3001 | get_thread (TREG) ; |
aoqi@6880 | 3002 | #endif |
aoqi@6880 | 3003 | // It's inflated and appears unlocked |
aoqi@6880 | 3004 | //if (os::is_MP()) { |
aoqi@6880 | 3005 | // sync(); |
aoqi@6880 | 3006 | //} |
aoqi@6880 | 3007 | cmpxchg(TREG, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), tmpReg) ; |
aoqi@6880 | 3008 | // Intentional fall-through into DONE_LABEL ... |
aoqi@6880 | 3009 | |
aoqi@6880 | 3010 | |
aoqi@6880 | 3011 | // DONE_LABEL is a hot target - we'd really like to place it at the |
aoqi@6880 | 3012 | // start of cache line by padding with NOPs. |
aoqi@6880 | 3013 | // See the AMD and Intel software optimization manuals for the |
aoqi@6880 | 3014 | // most efficient "long" NOP encodings. |
aoqi@6880 | 3015 | // Unfortunately none of our alignment mechanisms suffice. |
aoqi@6880 | 3016 | bind(DONE_LABEL); |
aoqi@6880 | 3017 | |
aoqi@6880 | 3018 | // At DONE_LABEL the AT is set as follows ... |
aoqi@6880 | 3019 | // Fast_Unlock uses the same protocol. |
aoqi@6880 | 3020 | // AT == 1 -> Success |
aoqi@6880 | 3021 | // AT == 0 -> Failure - force control through the slow-path |
aoqi@6880 | 3022 | |
aoqi@6880 | 3023 | // Avoid branch-to-branch on AMD processors |
aoqi@6880 | 3024 | // This appears to be superstition. |
aoqi@6880 | 3025 | if (EmitSync & 32) nop() ; |
aoqi@6880 | 3026 | |
aoqi@6880 | 3027 | } |
aoqi@6880 | 3028 | } |
aoqi@6880 | 3029 | |
aoqi@6880 | 3030 | // obj: object to unlock |
aoqi@6880 | 3031 | // box: box address (displaced header location), killed. Must be EAX. |
aoqi@6880 | 3032 | // rbx,: killed tmp; cannot be obj nor box. |
aoqi@6880 | 3033 | // |
aoqi@6880 | 3034 | // Some commentary on balanced locking: |
aoqi@6880 | 3035 | // |
aoqi@6880 | 3036 | // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites. |
aoqi@6880 | 3037 | // Methods that don't have provably balanced locking are forced to run in the |
aoqi@6880 | 3038 | // interpreter - such methods won't be compiled to use fast_lock and fast_unlock. |
aoqi@6880 | 3039 | // The interpreter provides two properties: |
aoqi@6880 | 3040 | // I1: At return-time the interpreter automatically and quietly unlocks any |
aoqi@6880 | 3041 | // objects acquired the current activation (frame). Recall that the |
aoqi@6880 | 3042 | // interpreter maintains an on-stack list of locks currently held by |
aoqi@6880 | 3043 | // a frame. |
aoqi@6880 | 3044 | // I2: If a method attempts to unlock an object that is not held by the |
aoqi@6880 | 3045 | // the frame the interpreter throws IMSX. |
aoqi@6880 | 3046 | // |
aoqi@6880 | 3047 | // Lets say A(), which has provably balanced locking, acquires O and then calls B(). |
aoqi@6880 | 3048 | // B() doesn't have provably balanced locking so it runs in the interpreter. |
aoqi@6880 | 3049 | // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O |
aoqi@6880 | 3050 | // is still locked by A(). |
aoqi@6880 | 3051 | // |
aoqi@6880 | 3052 | // The only other source of unbalanced locking would be JNI. The "Java Native Interface: |
aoqi@6880 | 3053 | // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter |
aoqi@6880 | 3054 | // should not be unlocked by "normal" java-level locking and vice-versa. The specification |
aoqi@6880 | 3055 | // doesn't specify what will occur if a program engages in such mixed-mode locking, however. |
aoqi@6880 | 3056 | |
aoqi@6880 | 3057 | void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) { |
aoqi@6880 | 3058 | |
aoqi@6880 | 3059 | guarantee (objReg != boxReg, "") ; |
aoqi@6880 | 3060 | guarantee (objReg != tmpReg, "") ; |
aoqi@6880 | 3061 | guarantee (boxReg != tmpReg, "") ; |
aoqi@6880 | 3062 | |
aoqi@6880 | 3063 | |
aoqi@6880 | 3064 | |
aoqi@6880 | 3065 | block_comment("FastUnlock"); |
aoqi@6880 | 3066 | |
aoqi@6880 | 3067 | |
aoqi@6880 | 3068 | if (EmitSync & 4) { |
aoqi@6880 | 3069 | // Disable - inhibit all inlining. Force control through the slow-path |
aoqi@6880 | 3070 | move(AT, 0x0); |
aoqi@6880 | 3071 | return; |
aoqi@6880 | 3072 | } else |
aoqi@6880 | 3073 | if (EmitSync & 8) { |
aoqi@6880 | 3074 | Label DONE_LABEL ; |
aoqi@6880 | 3075 | if (UseBiasedLocking) { |
aoqi@6880 | 3076 | biased_locking_exit(objReg, tmpReg, DONE_LABEL); |
aoqi@6880 | 3077 | } |
aoqi@6880 | 3078 | // classic stack-locking code ... |
aoqi@6880 | 3079 | ld(tmpReg, Address(boxReg, 0)) ; |
aoqi@6880 | 3080 | beq(tmpReg, R0, DONE_LABEL) ; |
aoqi@6880 | 3081 | move(AT, 0x1); // delay slot |
aoqi@6880 | 3082 | |
aoqi@6880 | 3083 | cmpxchg(tmpReg, Address(objReg, 0), boxReg); // Uses EAX which is box |
aoqi@6880 | 3084 | bind(DONE_LABEL); |
aoqi@6880 | 3085 | } else { |
aoqi@6880 | 3086 | Label DONE_LABEL, Stacked, CheckSucc, Inflated ; |
aoqi@6880 | 3087 | |
aoqi@6880 | 3088 | // Critically, the biased locking test must have precedence over |
aoqi@6880 | 3089 | // and appear before the (box->dhw == 0) recursive stack-lock test. |
aoqi@6880 | 3090 | if (UseBiasedLocking && !UseOptoBiasInlining) { |
aoqi@6880 | 3091 | biased_locking_exit(objReg, tmpReg, DONE_LABEL); |
aoqi@6880 | 3092 | } |
aoqi@6880 | 3093 | |
aoqi@6880 | 3094 | ld(AT, Address(boxReg, 0)) ; // Examine the displaced header |
aoqi@6880 | 3095 | beq(AT, R0, DONE_LABEL) ; // 0 indicates recursive stack-lock |
aoqi@6880 | 3096 | delayed()->daddiu(AT, R0, 0x1); |
aoqi@6880 | 3097 | |
aoqi@6880 | 3098 | ld(tmpReg, Address(objReg, 0)) ; // Examine the object's markword |
aoqi@6880 | 3099 | andi(AT, tmpReg, markOopDesc::monitor_value) ; // Inflated? |
aoqi@6880 | 3100 | beq(AT, R0, Stacked) ; // Inflated? |
aoqi@6880 | 3101 | delayed()->nop(); |
aoqi@6880 | 3102 | |
aoqi@6880 | 3103 | bind(Inflated) ; |
aoqi@6880 | 3104 | // It's inflated. |
aoqi@6880 | 3105 | // Despite our balanced locking property we still check that m->_owner == Self |
aoqi@6880 | 3106 | // as java routines or native JNI code called by this thread might |
aoqi@6880 | 3107 | // have released the lock. |
aoqi@6880 | 3108 | // Refer to the comments in synchronizer.cpp for how we might encode extra |
aoqi@6880 | 3109 | // state in _succ so we can avoid fetching EntryList|cxq. |
aoqi@6880 | 3110 | // |
aoqi@6880 | 3111 | // I'd like to add more cases in fast_lock() and fast_unlock() -- |
aoqi@6880 | 3112 | // such as recursive enter and exit -- but we have to be wary of |
aoqi@6880 | 3113 | // I$ bloat, T$ effects and BP$ effects. |
aoqi@6880 | 3114 | // |
aoqi@6880 | 3115 | // If there's no contention try a 1-0 exit. That is, exit without |
aoqi@6880 | 3116 | // a costly MEMBAR or CAS. See synchronizer.cpp for details on how |
aoqi@6880 | 3117 | // we detect and recover from the race that the 1-0 exit admits. |
aoqi@6880 | 3118 | // |
aoqi@6880 | 3119 | // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier |
aoqi@6880 | 3120 | // before it STs null into _owner, releasing the lock. Updates |
aoqi@6880 | 3121 | // to data protected by the critical section must be visible before |
aoqi@6880 | 3122 | // we drop the lock (and thus before any other thread could acquire |
aoqi@6880 | 3123 | // the lock and observe the fields protected by the lock). |
aoqi@6880 | 3124 | // IA32's memory-model is SPO, so STs are ordered with respect to |
aoqi@6880 | 3125 | // each other and there's no need for an explicit barrier (fence). |
aoqi@6880 | 3126 | // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html. |
aoqi@6880 | 3127 | #ifndef OPT_THREAD |
aoqi@6880 | 3128 | get_thread (TREG) ; |
aoqi@6880 | 3129 | #endif |
aoqi@6880 | 3130 | |
aoqi@6880 | 3131 | // It's inflated |
aoqi@6880 | 3132 | ld(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; |
aoqi@6880 | 3133 | xorr(boxReg, boxReg, TREG); |
aoqi@6880 | 3134 | |
aoqi@6880 | 3135 | ld(AT, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; |
aoqi@6880 | 3136 | orr(boxReg, boxReg, AT); |
aoqi@6880 | 3137 | |
aoqi@6880 | 3138 | move(AT, R0); |
aoqi@6880 | 3139 | bne(boxReg, R0, DONE_LABEL); |
aoqi@6880 | 3140 | delayed()->nop(); |
aoqi@6880 | 3141 | |
aoqi@6880 | 3142 | ld(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; |
aoqi@6880 | 3143 | ld(AT, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; |
aoqi@6880 | 3144 | orr(boxReg, boxReg, AT); |
aoqi@6880 | 3145 | |
aoqi@6880 | 3146 | move(AT, R0); |
aoqi@6880 | 3147 | bne(boxReg, R0, DONE_LABEL); |
aoqi@6880 | 3148 | delayed()->nop(); |
aoqi@6880 | 3149 | |
aoqi@6880 | 3150 | sync(); |
aoqi@6880 | 3151 | sd(R0, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; |
aoqi@6880 | 3152 | move(AT, 0x1); |
aoqi@6880 | 3153 | b(DONE_LABEL); |
aoqi@6880 | 3154 | delayed()->nop(); |
aoqi@6880 | 3155 | |
aoqi@6880 | 3156 | bind (Stacked); |
aoqi@6880 | 3157 | ld(tmpReg, Address(boxReg, 0)) ; |
aoqi@6880 | 3158 | //if (os::is_MP()) { sync(); } |
aoqi@6880 | 3159 | cmpxchg(tmpReg, Address(objReg, 0), boxReg); |
aoqi@6880 | 3160 | |
aoqi@6880 | 3161 | if (EmitSync & 65536) { |
aoqi@6880 | 3162 | bind (CheckSucc); |
aoqi@6880 | 3163 | } |
aoqi@6880 | 3164 | |
aoqi@6880 | 3165 | bind(DONE_LABEL); |
aoqi@6880 | 3166 | |
aoqi@6880 | 3167 | // Avoid branch to branch on AMD processors |
aoqi@6880 | 3168 | if (EmitSync & 32768) { nop() ; } |
aoqi@6880 | 3169 | } |
aoqi@6880 | 3170 | } |
aoqi@6880 | 3171 | |
aoqi@6880 | 3172 | void MacroAssembler::align(int modulus) { |
aoqi@6880 | 3173 | while (offset() % modulus != 0) nop(); |
aoqi@6880 | 3174 | } |
aoqi@6880 | 3175 | |
aoqi@6880 | 3176 | |
aoqi@6880 | 3177 | void MacroAssembler::verify_FPU(int stack_depth, const char* s) { |
aoqi@6880 | 3178 | //Unimplemented(); |
aoqi@6880 | 3179 | } |
aoqi@6880 | 3180 | |
aoqi@6880 | 3181 | #ifdef _LP64 |
aoqi@6880 | 3182 | Register caller_saved_registers[] = {AT, V0, V1, A0, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T9, GP, RA, FP}; |
aoqi@6880 | 3183 | |
aoqi@6880 | 3184 | /* FIXME: Jin: In MIPS64, F0~23 are all caller-saved registers */ |
aoqi@6880 | 3185 | FloatRegister caller_saved_fpu_registers[] = {F0, F12, F13}; |
aoqi@6880 | 3186 | #else |
aoqi@6880 | 3187 | Register caller_saved_registers[] = {AT, V0, V1, A0, A1, A2, A3, T4, T5, T6, T7, T0, T1, T2, T3, T8, T9, GP, RA, FP}; |
aoqi@6880 | 3188 | |
aoqi@6880 | 3189 | Register caller_saved_fpu_registers[] = {}; |
aoqi@6880 | 3190 | #endif |
aoqi@6880 | 3191 | |
aoqi@6880 | 3192 | //We preserve all caller-saved register |
aoqi@6880 | 3193 | void MacroAssembler::pushad(){ |
aoqi@6880 | 3194 | int i; |
aoqi@6880 | 3195 | |
aoqi@6880 | 3196 | /* Fixed-point registers */ |
aoqi@6880 | 3197 | int len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); |
aoqi@6880 | 3198 | daddi(SP, SP, -1 * len * wordSize); |
aoqi@6880 | 3199 | for (i = 0; i < len; i++) |
aoqi@6880 | 3200 | { |
aoqi@6880 | 3201 | #ifdef _LP64 |
aoqi@6880 | 3202 | sd(caller_saved_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3203 | #else |
aoqi@6880 | 3204 | sw(caller_saved_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3205 | #endif |
aoqi@6880 | 3206 | } |
aoqi@6880 | 3207 | |
aoqi@6880 | 3208 | /* Floating-point registers */ |
aoqi@6880 | 3209 | len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); |
aoqi@6880 | 3210 | daddi(SP, SP, -1 * len * wordSize); |
aoqi@6880 | 3211 | for (i = 0; i < len; i++) |
aoqi@6880 | 3212 | { |
aoqi@6880 | 3213 | #ifdef _LP64 |
aoqi@6880 | 3214 | sdc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3215 | #else |
aoqi@6880 | 3216 | swc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3217 | #endif |
aoqi@6880 | 3218 | } |
aoqi@6880 | 3219 | }; |
aoqi@6880 | 3220 | |
aoqi@6880 | 3221 | void MacroAssembler::popad(){ |
aoqi@6880 | 3222 | int i; |
aoqi@6880 | 3223 | |
aoqi@6880 | 3224 | /* Floating-point registers */ |
aoqi@6880 | 3225 | int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); |
aoqi@6880 | 3226 | for (i = 0; i < len; i++) |
aoqi@6880 | 3227 | { |
aoqi@6880 | 3228 | #ifdef _LP64 |
aoqi@6880 | 3229 | ldc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3230 | #else |
aoqi@6880 | 3231 | lwc1(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3232 | #endif |
aoqi@6880 | 3233 | } |
aoqi@6880 | 3234 | daddi(SP, SP, len * wordSize); |
aoqi@6880 | 3235 | |
aoqi@6880 | 3236 | /* Fixed-point registers */ |
aoqi@6880 | 3237 | len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); |
aoqi@6880 | 3238 | for (i = 0; i < len; i++) |
aoqi@6880 | 3239 | { |
aoqi@6880 | 3240 | #ifdef _LP64 |
aoqi@6880 | 3241 | ld(caller_saved_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3242 | #else |
aoqi@6880 | 3243 | lw(caller_saved_registers[i], SP, (len - i - 1) * wordSize); |
aoqi@6880 | 3244 | #endif |
aoqi@6880 | 3245 | } |
aoqi@6880 | 3246 | daddi(SP, SP, len * wordSize); |
aoqi@6880 | 3247 | }; |
aoqi@6880 | 3248 | |
aoqi@6880 | 3249 | void MacroAssembler::push2(Register reg1, Register reg2) { |
aoqi@6880 | 3250 | #ifdef _LP64 |
aoqi@6880 | 3251 | daddi(SP, SP, -16); |
aoqi@6880 | 3252 | sd(reg2, SP, 0); |
aoqi@6880 | 3253 | sd(reg1, SP, 8); |
aoqi@6880 | 3254 | #else |
aoqi@6880 | 3255 | addi(SP, SP, -8); |
aoqi@6880 | 3256 | sw(reg2, SP, 0); |
aoqi@6880 | 3257 | sw(reg1, SP, 4); |
aoqi@6880 | 3258 | #endif |
aoqi@6880 | 3259 | } |
aoqi@6880 | 3260 | |
aoqi@6880 | 3261 | void MacroAssembler::pop2(Register reg1, Register reg2) { |
aoqi@6880 | 3262 | #ifdef _LP64 |
aoqi@6880 | 3263 | ld(reg1, SP, 0); |
aoqi@6880 | 3264 | ld(reg2, SP, 8); |
aoqi@6880 | 3265 | daddi(SP, SP, 16); |
aoqi@6880 | 3266 | #else |
aoqi@6880 | 3267 | lw(reg1, SP, 0); |
aoqi@6880 | 3268 | lw(reg2, SP, 4); |
aoqi@6880 | 3269 | addi(SP, SP, 8); |
aoqi@6880 | 3270 | #endif |
aoqi@6880 | 3271 | } |
aoqi@6880 | 3272 | |
aoqi@6880 | 3273 | //for UseCompressedOops Option |
aoqi@6880 | 3274 | void MacroAssembler::load_klass(Register dst, Register src) { |
aoqi@6880 | 3275 | #ifdef _LP64 |
aoqi@6880 | 3276 | if(UseCompressedClassPointers){ |
aoqi@6880 | 3277 | lwu(dst, Address(src, oopDesc::klass_offset_in_bytes())); |
aoqi@6880 | 3278 | decode_klass_not_null(dst); |
aoqi@6880 | 3279 | } else |
aoqi@6880 | 3280 | #endif |
aoqi@6880 | 3281 | ld(dst, src, oopDesc::klass_offset_in_bytes()); |
aoqi@6880 | 3282 | } |
aoqi@6880 | 3283 | |
aoqi@6880 | 3284 | void MacroAssembler::store_klass(Register dst, Register src) { |
aoqi@6880 | 3285 | #ifdef _LP64 |
aoqi@6880 | 3286 | if(UseCompressedClassPointers){ |
aoqi@6880 | 3287 | encode_klass_not_null(src); |
aoqi@6880 | 3288 | sw(src, dst, oopDesc::klass_offset_in_bytes()); |
aoqi@6880 | 3289 | } else { |
aoqi@6880 | 3290 | #endif |
aoqi@6880 | 3291 | sd(src, dst, oopDesc::klass_offset_in_bytes()); |
aoqi@6880 | 3292 | } |
aoqi@6880 | 3293 | } |
aoqi@6880 | 3294 | |
aoqi@6880 | 3295 | void MacroAssembler::load_prototype_header(Register dst, Register src) { |
aoqi@6880 | 3296 | load_klass(dst, src); |
aoqi@6880 | 3297 | ld(dst, Address(dst, Klass::prototype_header_offset())); |
aoqi@6880 | 3298 | } |
aoqi@6880 | 3299 | |
aoqi@6880 | 3300 | #ifdef _LP64 |
aoqi@6880 | 3301 | void MacroAssembler::store_klass_gap(Register dst, Register src) { |
aoqi@6880 | 3302 | if (UseCompressedClassPointers) { |
aoqi@6880 | 3303 | sw(src, dst, oopDesc::klass_gap_offset_in_bytes()); |
aoqi@6880 | 3304 | } |
aoqi@6880 | 3305 | } |
aoqi@6880 | 3306 | |
aoqi@6880 | 3307 | void MacroAssembler::load_heap_oop(Register dst, Address src) { |
aoqi@6880 | 3308 | if(UseCompressedOops){ |
aoqi@6880 | 3309 | lwu(dst, src); |
aoqi@6880 | 3310 | decode_heap_oop(dst); |
aoqi@6880 | 3311 | } else{ |
aoqi@6880 | 3312 | ld(dst, src); |
aoqi@6880 | 3313 | } |
aoqi@6880 | 3314 | } |
aoqi@6880 | 3315 | |
aoqi@6880 | 3316 | void MacroAssembler::store_heap_oop(Address dst, Register src){ |
aoqi@6880 | 3317 | if(UseCompressedOops){ |
aoqi@6880 | 3318 | assert(!dst.uses(src), "not enough registers"); |
aoqi@6880 | 3319 | encode_heap_oop(src); |
aoqi@6880 | 3320 | sw(src, dst); |
aoqi@6880 | 3321 | } else{ |
aoqi@6880 | 3322 | sd(src, dst); |
aoqi@6880 | 3323 | } |
aoqi@6880 | 3324 | } |
aoqi@6880 | 3325 | |
fujie@8001 | 3326 | void MacroAssembler::store_heap_oop_null(Address dst){ |
fujie@8001 | 3327 | if(UseCompressedOops){ |
fujie@8001 | 3328 | sw(R0, dst); |
fujie@8001 | 3329 | } else{ |
fujie@8001 | 3330 | sd(R0, dst); |
fujie@8001 | 3331 | } |
fujie@8001 | 3332 | } |
fujie@8001 | 3333 | |
aoqi@6880 | 3334 | #ifdef ASSERT |
aoqi@6880 | 3335 | void MacroAssembler::verify_heapbase(const char* msg) { |
aoqi@6880 | 3336 | assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); |
aoqi@6880 | 3337 | assert (Universe::heap() != NULL, "java heap should be initialized"); |
aoqi@6880 | 3338 | } |
aoqi@6880 | 3339 | #endif |
aoqi@6880 | 3340 | |
aoqi@6880 | 3341 | |
aoqi@6880 | 3342 | // Algorithm must match oop.inline.hpp encode_heap_oop. |
aoqi@6880 | 3343 | void MacroAssembler::encode_heap_oop(Register r) { |
aoqi@6880 | 3344 | #ifdef ASSERT |
aoqi@6880 | 3345 | verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?"); |
aoqi@6880 | 3346 | #endif |
aoqi@6880 | 3347 | verify_oop(r, "broken oop in encode_heap_oop"); |
aoqi@6880 | 3348 | if (Universe::narrow_oop_base() == NULL) { |
aoqi@6880 | 3349 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3350 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3351 | shr(r, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3352 | } |
aoqi@6880 | 3353 | return; |
aoqi@6880 | 3354 | } |
aoqi@6880 | 3355 | |
aoqi@6880 | 3356 | movz(r, S5_heapbase, r); |
aoqi@6880 | 3357 | dsub(r, r, S5_heapbase); |
aoqi@6880 | 3358 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3359 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3360 | shr(r, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3361 | } |
aoqi@6880 | 3362 | } |
aoqi@6880 | 3363 | |
aoqi@6880 | 3364 | void MacroAssembler::encode_heap_oop(Register dst, Register src) { |
aoqi@6880 | 3365 | #ifdef ASSERT |
aoqi@6880 | 3366 | verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?"); |
aoqi@6880 | 3367 | #endif |
aoqi@6880 | 3368 | verify_oop(src, "broken oop in encode_heap_oop"); |
aoqi@6880 | 3369 | if (Universe::narrow_oop_base() == NULL) { |
aoqi@6880 | 3370 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3371 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3372 | dsrl(dst, src, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3373 | } else { |
aoqi@6880 | 3374 | if (dst != src) move(dst, src); |
aoqi@6880 | 3375 | } |
aoqi@6880 | 3376 | } else { |
aoqi@6880 | 3377 | if (dst == src) { |
aoqi@6880 | 3378 | movz(dst, S5_heapbase, dst); |
aoqi@6880 | 3379 | dsub(dst, dst, S5_heapbase); |
aoqi@6880 | 3380 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3381 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3382 | shr(dst, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3383 | } |
aoqi@6880 | 3384 | } else { |
aoqi@6880 | 3385 | dsub(dst, src, S5_heapbase); |
aoqi@6880 | 3386 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3387 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3388 | shr(dst, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3389 | } |
aoqi@6880 | 3390 | movz(dst, R0, src); |
aoqi@6880 | 3391 | } |
aoqi@6880 | 3392 | } |
aoqi@6880 | 3393 | } |
aoqi@6880 | 3394 | |
aoqi@6880 | 3395 | void MacroAssembler::encode_heap_oop_not_null(Register r) { |
aoqi@6880 | 3396 | assert (UseCompressedOops, "should be compressed"); |
aoqi@6880 | 3397 | #ifdef ASSERT |
aoqi@6880 | 3398 | if (CheckCompressedOops) { |
aoqi@6880 | 3399 | Label ok; |
aoqi@6880 | 3400 | bne(r, R0, ok); |
aoqi@6880 | 3401 | delayed()->nop(); |
aoqi@6880 | 3402 | stop("null oop passed to encode_heap_oop_not_null"); |
aoqi@6880 | 3403 | bind(ok); |
aoqi@6880 | 3404 | } |
aoqi@6880 | 3405 | #endif |
aoqi@6880 | 3406 | verify_oop(r, "broken oop in encode_heap_oop_not_null"); |
aoqi@6880 | 3407 | if (Universe::narrow_oop_base() != NULL) { |
aoqi@6880 | 3408 | dsub(r, r, S5_heapbase); |
aoqi@6880 | 3409 | } |
aoqi@6880 | 3410 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3411 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3412 | shr(r, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3413 | } |
aoqi@6880 | 3414 | |
aoqi@6880 | 3415 | } |
aoqi@6880 | 3416 | |
aoqi@6880 | 3417 | void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { |
aoqi@6880 | 3418 | assert (UseCompressedOops, "should be compressed"); |
aoqi@6880 | 3419 | #ifdef ASSERT |
aoqi@6880 | 3420 | if (CheckCompressedOops) { |
aoqi@6880 | 3421 | Label ok; |
aoqi@6880 | 3422 | bne(src, R0, ok); |
aoqi@6880 | 3423 | delayed()->nop(); |
aoqi@6880 | 3424 | stop("null oop passed to encode_heap_oop_not_null2"); |
aoqi@6880 | 3425 | bind(ok); |
aoqi@6880 | 3426 | } |
aoqi@6880 | 3427 | #endif |
aoqi@6880 | 3428 | verify_oop(src, "broken oop in encode_heap_oop_not_null2"); |
aoqi@6880 | 3429 | |
aoqi@6880 | 3430 | if (Universe::narrow_oop_base() != NULL) { |
aoqi@6880 | 3431 | dsub(dst, src, S5_heapbase); |
aoqi@6880 | 3432 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3433 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3434 | shr(dst, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3435 | } |
aoqi@6880 | 3436 | } else { |
aoqi@6880 | 3437 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3438 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3439 | dsrl(dst, src, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3440 | } else { |
aoqi@6880 | 3441 | if (dst != src) move(dst, src); |
aoqi@6880 | 3442 | } |
aoqi@6880 | 3443 | } |
aoqi@6880 | 3444 | } |
aoqi@6880 | 3445 | |
aoqi@6880 | 3446 | void MacroAssembler::decode_heap_oop(Register r) { |
aoqi@6880 | 3447 | #ifdef ASSERT |
aoqi@6880 | 3448 | verify_heapbase("MacroAssembler::decode_heap_oop corrupted?"); |
aoqi@6880 | 3449 | #endif |
aoqi@6880 | 3450 | if (Universe::narrow_oop_base() == NULL) { |
aoqi@6880 | 3451 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3452 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3453 | shl(r, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3454 | } |
aoqi@6880 | 3455 | } else { |
aoqi@6880 | 3456 | move(AT, r); |
aoqi@6880 | 3457 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3458 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3459 | shl(r, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3460 | } |
aoqi@6880 | 3461 | dadd(r, r, S5_heapbase); |
aoqi@6880 | 3462 | movz(r, R0, AT); |
aoqi@6880 | 3463 | } |
aoqi@6880 | 3464 | verify_oop(r, "broken oop in decode_heap_oop"); |
aoqi@6880 | 3465 | } |
aoqi@6880 | 3466 | |
aoqi@6880 | 3467 | void MacroAssembler::decode_heap_oop(Register dst, Register src) { |
aoqi@6880 | 3468 | #ifdef ASSERT |
aoqi@6880 | 3469 | verify_heapbase("MacroAssembler::decode_heap_oop corrupted?"); |
aoqi@6880 | 3470 | #endif |
aoqi@6880 | 3471 | if (Universe::narrow_oop_base() == NULL) { |
aoqi@6880 | 3472 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3473 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3474 | if (dst != src) nop(); // DON'T DELETE THIS GUY. |
aoqi@6880 | 3475 | dsll(dst, src, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3476 | } else { |
aoqi@6880 | 3477 | if (dst != src) move(dst, src); |
aoqi@6880 | 3478 | } |
aoqi@6880 | 3479 | } else { |
aoqi@6880 | 3480 | if (dst == src) { |
aoqi@6880 | 3481 | move(AT, dst); |
aoqi@6880 | 3482 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3483 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3484 | shl(dst, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3485 | } |
aoqi@6880 | 3486 | dadd(dst, dst, S5_heapbase); |
aoqi@6880 | 3487 | movz(dst, R0, AT); |
aoqi@6880 | 3488 | } else { |
aoqi@6880 | 3489 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3490 | assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3491 | dsll(dst, src, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3492 | daddu(dst, dst, S5_heapbase); |
aoqi@6880 | 3493 | } else { |
aoqi@6880 | 3494 | daddu(dst, src, S5_heapbase); |
aoqi@6880 | 3495 | } |
aoqi@6880 | 3496 | movz(dst, R0, src); |
aoqi@6880 | 3497 | } |
aoqi@6880 | 3498 | } |
aoqi@6880 | 3499 | verify_oop(dst, "broken oop in decode_heap_oop"); |
aoqi@6880 | 3500 | } |
aoqi@6880 | 3501 | |
aoqi@6880 | 3502 | void MacroAssembler::decode_heap_oop_not_null(Register r) { |
aoqi@6880 | 3503 | // Note: it will change flags |
aoqi@6880 | 3504 | assert (UseCompressedOops, "should only be used for compressed headers"); |
aoqi@6880 | 3505 | assert (Universe::heap() != NULL, "java heap should be initialized"); |
aoqi@6880 | 3506 | // Cannot assert, unverified entry point counts instructions (see .ad file) |
aoqi@6880 | 3507 | // vtableStubs also counts instructions in pd_code_size_limit. |
aoqi@6880 | 3508 | // Also do not verify_oop as this is called by verify_oop. |
aoqi@6880 | 3509 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3510 | assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3511 | shl(r, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3512 | if (Universe::narrow_oop_base() != NULL) { |
aoqi@6880 | 3513 | daddu(r, r, S5_heapbase); |
aoqi@6880 | 3514 | } |
aoqi@6880 | 3515 | } else { |
aoqi@6880 | 3516 | assert (Universe::narrow_oop_base() == NULL, "sanity"); |
aoqi@6880 | 3517 | } |
aoqi@6880 | 3518 | } |
aoqi@6880 | 3519 | |
aoqi@6880 | 3520 | void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { |
aoqi@6880 | 3521 | assert (UseCompressedOops, "should only be used for compressed headers"); |
aoqi@6880 | 3522 | assert (Universe::heap() != NULL, "java heap should be initialized"); |
aoqi@6880 | 3523 | |
aoqi@6880 | 3524 | // Cannot assert, unverified entry point counts instructions (see .ad file) |
aoqi@6880 | 3525 | // vtableStubs also counts instructions in pd_code_size_limit. |
aoqi@6880 | 3526 | // Also do not verify_oop as this is called by verify_oop. |
aoqi@6880 | 3527 | //lea(dst, Address(S5_heapbase, src, Address::times_8, 0)); |
aoqi@6880 | 3528 | if (Universe::narrow_oop_shift() != 0) { |
aoqi@6880 | 3529 | assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
aoqi@6880 | 3530 | if (LogMinObjAlignmentInBytes == Address::times_8) { |
aoqi@6880 | 3531 | dsll(dst, src, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3532 | daddu(dst, dst, S5_heapbase); |
aoqi@6880 | 3533 | } else { |
aoqi@6880 | 3534 | dsll(dst, src, LogMinObjAlignmentInBytes); |
aoqi@6880 | 3535 | if (Universe::narrow_oop_base() != NULL) { |
aoqi@6880 | 3536 | daddu(dst, dst, S5_heapbase); |
aoqi@6880 | 3537 | } |
aoqi@6880 | 3538 | } |
aoqi@6880 | 3539 | } else { |
aoqi@6880 | 3540 | assert (Universe::narrow_oop_base() == NULL, "sanity"); |
aoqi@6880 | 3541 | if (dst != src) { |
aoqi@6880 | 3542 | move(dst, src); |
aoqi@6880 | 3543 | } |
aoqi@6880 | 3544 | } |
aoqi@6880 | 3545 | } |
aoqi@6880 | 3546 | |
aoqi@6880 | 3547 | void MacroAssembler::encode_klass_not_null(Register r) { |
aoqi@6880 | 3548 | if (Universe::narrow_klass_base() != NULL) { |
aoqi@6880 | 3549 | assert(r != AT, "Encoding a klass in AT"); |
aoqi@6880 | 3550 | set64(AT, (int64_t)Universe::narrow_klass_base()); |
aoqi@6880 | 3551 | dsub(r, r, AT); |
aoqi@6880 | 3552 | } |
aoqi@6880 | 3553 | if (Universe::narrow_klass_shift() != 0) { |
aoqi@6880 | 3554 | assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
aoqi@6880 | 3555 | shr(r, LogKlassAlignmentInBytes); |
aoqi@6880 | 3556 | } |
aoqi@6880 | 3557 | // Not neccessary for MIPS at all. |
aoqi@6880 | 3558 | //if (Universe::narrow_klass_base() != NULL) { |
aoqi@6880 | 3559 | // reinit_heapbase(); |
aoqi@6880 | 3560 | //} |
aoqi@6880 | 3561 | } |
aoqi@6880 | 3562 | |
aoqi@6880 | 3563 | void MacroAssembler::encode_klass_not_null(Register dst, Register src) { |
aoqi@6880 | 3564 | if (dst == src) { |
aoqi@6880 | 3565 | encode_klass_not_null(src); |
aoqi@6880 | 3566 | } else { |
aoqi@6880 | 3567 | if (Universe::narrow_klass_base() != NULL) { |
aoqi@6880 | 3568 | set64(dst, (int64_t)Universe::narrow_klass_base()); |
aoqi@6880 | 3569 | dsub(dst, src, dst); |
aoqi@6880 | 3570 | if (Universe::narrow_klass_shift() != 0) { |
aoqi@6880 | 3571 | assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
aoqi@6880 | 3572 | shr(dst, LogKlassAlignmentInBytes); |
aoqi@6880 | 3573 | } |
aoqi@6880 | 3574 | } else { |
aoqi@6880 | 3575 | if (Universe::narrow_klass_shift() != 0) { |
aoqi@6880 | 3576 | assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
aoqi@6880 | 3577 | dsrl(dst, src, LogKlassAlignmentInBytes); |
aoqi@6880 | 3578 | } else { |
aoqi@6880 | 3579 | move(dst, src); |
aoqi@6880 | 3580 | } |
aoqi@6880 | 3581 | } |
aoqi@6880 | 3582 | } |
aoqi@6880 | 3583 | } |
aoqi@6880 | 3584 | |
aoqi@6880 | 3585 | // Function instr_size_for_decode_klass_not_null() counts the instructions |
aoqi@6880 | 3586 | // generated by decode_klass_not_null(register r) and reinit_heapbase(), |
aoqi@6880 | 3587 | // when (Universe::heap() != NULL). Hence, if the instructions they |
aoqi@6880 | 3588 | // generate change, then this method needs to be updated. |
aoqi@6880 | 3589 | int MacroAssembler::instr_size_for_decode_klass_not_null() { |
aoqi@6880 | 3590 | assert (UseCompressedClassPointers, "only for compressed klass ptrs"); |
aoqi@6880 | 3591 | if (Universe::narrow_klass_base() != NULL) { |
aoqi@6880 | 3592 | // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). |
aoqi@6880 | 3593 | return (Universe::narrow_klass_shift() == 0 ? 4 * 9 : 4 * 10); |
aoqi@6880 | 3594 | } else { |
aoqi@6880 | 3595 | // longest load decode klass function, mov64, leaq |
aoqi@6880 | 3596 | return (Universe::narrow_klass_shift() == 0 ? 4 * 0 : 4 * 1); |
aoqi@6880 | 3597 | } |
aoqi@6880 | 3598 | } |
aoqi@6880 | 3599 | |
aoqi@6880 | 3600 | void MacroAssembler::decode_klass_not_null(Register r) { |
aoqi@6880 | 3601 | assert (UseCompressedClassPointers, "should only be used for compressed headers"); |
aoqi@6880 | 3602 | assert(r != AT, "Decoding a klass in AT"); |
aoqi@6880 | 3603 | // Cannot assert, unverified entry point counts instructions (see .ad file) |
aoqi@6880 | 3604 | // vtableStubs also counts instructions in pd_code_size_limit. |
aoqi@6880 | 3605 | // Also do not verify_oop as this is called by verify_oop. |
aoqi@6880 | 3606 | if (Universe::narrow_klass_shift() != 0) { |
aoqi@6880 | 3607 | assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
aoqi@6880 | 3608 | shl(r, LogKlassAlignmentInBytes); |
aoqi@6880 | 3609 | } |
aoqi@6880 | 3610 | if (Universe::narrow_klass_base() != NULL) { |
aoqi@6880 | 3611 | set64(AT, (int64_t)Universe::narrow_klass_base()); |
aoqi@6880 | 3612 | daddu(r, r, AT); |
aoqi@6880 | 3613 | //Not neccessary for MIPS at all. |
aoqi@6880 | 3614 | //reinit_heapbase(); |
aoqi@6880 | 3615 | } |
aoqi@6880 | 3616 | } |
aoqi@6880 | 3617 | |
aoqi@6880 | 3618 | void MacroAssembler::decode_klass_not_null(Register dst, Register src) { |
aoqi@6880 | 3619 | assert (UseCompressedClassPointers, "should only be used for compressed headers"); |
aoqi@6880 | 3620 | |
aoqi@6880 | 3621 | if (dst == src) { |
aoqi@6880 | 3622 | decode_klass_not_null(dst); |
aoqi@6880 | 3623 | } else { |
aoqi@6880 | 3624 | // Cannot assert, unverified entry point counts instructions (see .ad file) |
aoqi@6880 | 3625 | // vtableStubs also counts instructions in pd_code_size_limit. |
aoqi@6880 | 3626 | // Also do not verify_oop as this is called by verify_oop. |
aoqi@6880 | 3627 | set64(dst, (int64_t)Universe::narrow_klass_base()); |
aoqi@6880 | 3628 | if (Universe::narrow_klass_shift() != 0) { |
aoqi@6880 | 3629 | assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
aoqi@6880 | 3630 | assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); |
aoqi@6880 | 3631 | dsll(AT, src, Address::times_8); |
aoqi@6880 | 3632 | daddu(dst, dst, AT); |
aoqi@6880 | 3633 | } else { |
aoqi@6880 | 3634 | daddu(dst, src, dst); |
aoqi@6880 | 3635 | } |
aoqi@6880 | 3636 | } |
aoqi@6880 | 3637 | } |
aoqi@6880 | 3638 | |
aoqi@6880 | 3639 | void MacroAssembler::incrementl(Register reg, int value) { |
aoqi@6880 | 3640 | if (value == min_jint) { |
aoqi@6880 | 3641 | move(AT, value); |
aoqi@6880 | 3642 | LP64_ONLY(addu32(reg, reg, AT)) NOT_LP64(addu(reg, reg, AT)); |
aoqi@6880 | 3643 | return; |
aoqi@6880 | 3644 | } |
aoqi@6880 | 3645 | if (value < 0) { decrementl(reg, -value); return; } |
aoqi@6880 | 3646 | if (value == 0) { ; return; } |
aoqi@6880 | 3647 | |
aoqi@6880 | 3648 | if(Assembler::is_simm16(value)) { |
aoqi@6880 | 3649 | NOT_LP64(addiu(reg, reg, value)); |
aoqi@6880 | 3650 | LP64_ONLY(move(AT, value); addu32(reg, reg, AT)); |
aoqi@6880 | 3651 | } else { |
aoqi@6880 | 3652 | move(AT, value); |
aoqi@6880 | 3653 | LP64_ONLY(addu32(reg, reg, AT)) NOT_LP64(addu(reg, reg, AT)); |
aoqi@6880 | 3654 | } |
aoqi@6880 | 3655 | } |
aoqi@6880 | 3656 | |
aoqi@6880 | 3657 | void MacroAssembler::decrementl(Register reg, int value) { |
aoqi@6880 | 3658 | if (value == min_jint) { |
aoqi@6880 | 3659 | move(AT, value); |
aoqi@6880 | 3660 | LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT)); |
aoqi@6880 | 3661 | return; |
aoqi@6880 | 3662 | } |
aoqi@6880 | 3663 | if (value < 0) { incrementl(reg, -value); return; } |
aoqi@6880 | 3664 | if (value == 0) { ; return; } |
aoqi@6880 | 3665 | |
aoqi@6880 | 3666 | if(Assembler::is_simm16(value)) { |
aoqi@6880 | 3667 | NOT_LP64(addiu(reg, reg, -value)); |
aoqi@6880 | 3668 | LP64_ONLY(move(AT, value); subu32(reg, reg, AT)); |
aoqi@6880 | 3669 | } else { |
aoqi@6880 | 3670 | move(AT, value); |
aoqi@6880 | 3671 | LP64_ONLY(subu32(reg, reg, AT)) NOT_LP64(subu(reg, reg, AT)); |
aoqi@6880 | 3672 | } |
aoqi@6880 | 3673 | } |
aoqi@6880 | 3674 | |
aoqi@6880 | 3675 | void MacroAssembler::reinit_heapbase() { |
aoqi@6880 | 3676 | if (UseCompressedOops || UseCompressedClassPointers) { |
aoqi@6880 | 3677 | if (Universe::heap() != NULL) { |
aoqi@6880 | 3678 | if (Universe::narrow_oop_base() == NULL) { |
aoqi@6880 | 3679 | move(S5_heapbase, R0); |
aoqi@6880 | 3680 | } else { |
aoqi@6880 | 3681 | set64(S5_heapbase, (int64_t)Universe::narrow_ptrs_base()); |
aoqi@6880 | 3682 | } |
aoqi@6880 | 3683 | } else { |
aoqi@6880 | 3684 | set64(S5_heapbase, (intptr_t)Universe::narrow_ptrs_base_addr()); |
aoqi@6880 | 3685 | ld(S5_heapbase, S5_heapbase, 0); |
aoqi@6880 | 3686 | } |
aoqi@6880 | 3687 | } |
aoqi@6880 | 3688 | } |
aoqi@6880 | 3689 | #endif // _LP64 |
aoqi@6880 | 3690 | |
aoqi@6880 | 3691 | void MacroAssembler::check_klass_subtype(Register sub_klass, |
aoqi@6880 | 3692 | Register super_klass, |
aoqi@6880 | 3693 | Register temp_reg, |
aoqi@6880 | 3694 | Label& L_success) { |
aoqi@6880 | 3695 | //implement ind gen_subtype_check |
aoqi@6880 | 3696 | Label L_failure; |
aoqi@6880 | 3697 | check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL); |
aoqi@6880 | 3698 | check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL); |
aoqi@6880 | 3699 | bind(L_failure); |
aoqi@6880 | 3700 | } |
aoqi@6880 | 3701 | |
aoqi@6880 | 3702 | SkipIfEqual::SkipIfEqual( |
aoqi@6880 | 3703 | MacroAssembler* masm, const bool* flag_addr, bool value) { |
aoqi@6880 | 3704 | _masm = masm; |
aoqi@6880 | 3705 | _masm->li(AT, (address)flag_addr); |
aoqi@6880 | 3706 | _masm->lb(AT,AT,0); |
aoqi@6880 | 3707 | _masm->addi(AT,AT,-value); |
aoqi@6880 | 3708 | _masm->beq(AT,R0,_label); |
aoqi@6880 | 3709 | _masm->delayed()->nop(); |
aoqi@6880 | 3710 | } |
aoqi@6880 | 3711 | void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, |
aoqi@6880 | 3712 | Register super_klass, |
aoqi@6880 | 3713 | Register temp_reg, |
aoqi@6880 | 3714 | Label* L_success, |
aoqi@6880 | 3715 | Label* L_failure, |
aoqi@6880 | 3716 | Label* L_slow_path, |
aoqi@6880 | 3717 | RegisterOrConstant super_check_offset) { |
aoqi@6880 | 3718 | assert_different_registers(sub_klass, super_klass, temp_reg); |
aoqi@6880 | 3719 | bool must_load_sco = (super_check_offset.constant_or_zero() == -1); |
aoqi@6880 | 3720 | if (super_check_offset.is_register()) { |
aoqi@6880 | 3721 | assert_different_registers(sub_klass, super_klass, |
aoqi@6880 | 3722 | super_check_offset.as_register()); |
aoqi@6880 | 3723 | } else if (must_load_sco) { |
aoqi@6880 | 3724 | assert(temp_reg != noreg, "supply either a temp or a register offset"); |
aoqi@6880 | 3725 | } |
aoqi@6880 | 3726 | |
aoqi@6880 | 3727 | Label L_fallthrough; |
aoqi@6880 | 3728 | int label_nulls = 0; |
aoqi@6880 | 3729 | if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } |
aoqi@6880 | 3730 | if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } |
aoqi@6880 | 3731 | if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } |
aoqi@6880 | 3732 | assert(label_nulls <= 1, "at most one NULL in the batch"); |
aoqi@6880 | 3733 | |
aoqi@6880 | 3734 | int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); |
aoqi@6880 | 3735 | int sco_offset = in_bytes(Klass::super_check_offset_offset()); |
aoqi@6880 | 3736 | // If the pointers are equal, we are done (e.g., String[] elements). |
aoqi@6880 | 3737 | // This self-check enables sharing of secondary supertype arrays among |
aoqi@6880 | 3738 | // non-primary types such as array-of-interface. Otherwise, each such |
aoqi@6880 | 3739 | // type would need its own customized SSA. |
aoqi@6880 | 3740 | // We move this check to the front of the fast path because many |
aoqi@6880 | 3741 | // type checks are in fact trivially successful in this manner, |
aoqi@6880 | 3742 | // so we get a nicely predicted branch right at the start of the check. |
aoqi@6880 | 3743 | //cmpptr(sub_klass, super_klass); |
aoqi@6880 | 3744 | //local_jcc(Assembler::equal, *L_success); |
aoqi@6880 | 3745 | beq(sub_klass, super_klass, *L_success); |
aoqi@6880 | 3746 | delayed()->nop(); |
aoqi@6880 | 3747 | // Check the supertype display: |
aoqi@6880 | 3748 | if (must_load_sco) { |
aoqi@6880 | 3749 | // Positive movl does right thing on LP64. |
aoqi@6880 | 3750 | lwu(temp_reg, super_klass, sco_offset); |
aoqi@6880 | 3751 | super_check_offset = RegisterOrConstant(temp_reg); |
aoqi@6880 | 3752 | } |
aoqi@6880 | 3753 | dsll(AT, super_check_offset.register_or_noreg(), Address::times_1); |
aoqi@6880 | 3754 | daddu(AT, sub_klass, AT); |
aoqi@6880 | 3755 | ld(AT, AT, super_check_offset.constant_or_zero()*Address::times_1); |
aoqi@6880 | 3756 | |
aoqi@6880 | 3757 | // This check has worked decisively for primary supers. |
aoqi@6880 | 3758 | // Secondary supers are sought in the super_cache ('super_cache_addr'). |
aoqi@6880 | 3759 | // (Secondary supers are interfaces and very deeply nested subtypes.) |
aoqi@6880 | 3760 | // This works in the same check above because of a tricky aliasing |
aoqi@6880 | 3761 | // between the super_cache and the primary super display elements. |
aoqi@6880 | 3762 | // (The 'super_check_addr' can address either, as the case requires.) |
aoqi@6880 | 3763 | // Note that the cache is updated below if it does not help us find |
aoqi@6880 | 3764 | // what we need immediately. |
aoqi@6880 | 3765 | // So if it was a primary super, we can just fail immediately. |
aoqi@6880 | 3766 | // Otherwise, it's the slow path for us (no success at this point). |
aoqi@6880 | 3767 | |
aoqi@6880 | 3768 | if (super_check_offset.is_register()) { |
aoqi@6880 | 3769 | beq(super_klass, AT, *L_success); |
aoqi@6880 | 3770 | delayed()->nop(); |
aoqi@6880 | 3771 | addi(AT, super_check_offset.as_register(), -sc_offset); |
aoqi@6880 | 3772 | if (L_failure == &L_fallthrough) { |
aoqi@6880 | 3773 | beq(AT, R0, *L_slow_path); |
aoqi@6880 | 3774 | delayed()->nop(); |
aoqi@6880 | 3775 | } else { |
aoqi@6880 | 3776 | bne(AT, R0, *L_failure); |
aoqi@6880 | 3777 | delayed()->nop(); |
aoqi@6880 | 3778 | b(*L_slow_path); |
aoqi@6880 | 3779 | delayed()->nop(); |
aoqi@6880 | 3780 | } |
aoqi@6880 | 3781 | } else if (super_check_offset.as_constant() == sc_offset) { |
aoqi@6880 | 3782 | // Need a slow path; fast failure is impossible. |
aoqi@6880 | 3783 | if (L_slow_path == &L_fallthrough) { |
aoqi@6880 | 3784 | beq(super_klass, AT, *L_success); |
aoqi@6880 | 3785 | delayed()->nop(); |
aoqi@6880 | 3786 | } else { |
aoqi@6880 | 3787 | bne(super_klass, AT, *L_slow_path); |
aoqi@6880 | 3788 | delayed()->nop(); |
aoqi@6880 | 3789 | b(*L_success); |
aoqi@6880 | 3790 | delayed()->nop(); |
aoqi@6880 | 3791 | } |
aoqi@6880 | 3792 | } else { |
aoqi@6880 | 3793 | // No slow path; it's a fast decision. |
aoqi@6880 | 3794 | if (L_failure == &L_fallthrough) { |
aoqi@6880 | 3795 | beq(super_klass, AT, *L_success); |
aoqi@6880 | 3796 | delayed()->nop(); |
aoqi@6880 | 3797 | } else { |
aoqi@6880 | 3798 | bne(super_klass, AT, *L_failure); |
aoqi@6880 | 3799 | delayed()->nop(); |
aoqi@6880 | 3800 | b(*L_success); |
aoqi@6880 | 3801 | delayed()->nop(); |
aoqi@6880 | 3802 | } |
aoqi@6880 | 3803 | } |
aoqi@6880 | 3804 | |
aoqi@6880 | 3805 | bind(L_fallthrough); |
aoqi@6880 | 3806 | |
aoqi@6880 | 3807 | } |
aoqi@6880 | 3808 | |
aoqi@6880 | 3809 | |
aoqi@6880 | 3810 | void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, |
aoqi@6880 | 3811 | Register super_klass, |
aoqi@6880 | 3812 | Register temp_reg, |
aoqi@6880 | 3813 | Register temp2_reg, |
aoqi@6880 | 3814 | Label* L_success, |
aoqi@6880 | 3815 | Label* L_failure, |
aoqi@6880 | 3816 | bool set_cond_codes) { |
aoqi@6880 | 3817 | assert_different_registers(sub_klass, super_klass, temp_reg); |
aoqi@6880 | 3818 | if (temp2_reg != noreg) |
aoqi@6880 | 3819 | assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); |
aoqi@6880 | 3820 | else |
aoqi@6880 | 3821 | temp2_reg = T9; |
aoqi@6880 | 3822 | #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) |
aoqi@6880 | 3823 | |
aoqi@6880 | 3824 | Label L_fallthrough; |
aoqi@6880 | 3825 | int label_nulls = 0; |
aoqi@6880 | 3826 | if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } |
aoqi@6880 | 3827 | if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } |
aoqi@6880 | 3828 | assert(label_nulls <= 1, "at most one NULL in the batch"); |
aoqi@6880 | 3829 | |
aoqi@6880 | 3830 | // a couple of useful fields in sub_klass: |
aoqi@6880 | 3831 | int ss_offset = in_bytes(Klass::secondary_supers_offset()); |
aoqi@6880 | 3832 | int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); |
aoqi@6880 | 3833 | Address secondary_supers_addr(sub_klass, ss_offset); |
aoqi@6880 | 3834 | Address super_cache_addr( sub_klass, sc_offset); |
aoqi@6880 | 3835 | |
aoqi@6880 | 3836 | // Do a linear scan of the secondary super-klass chain. |
aoqi@6880 | 3837 | // This code is rarely used, so simplicity is a virtue here. |
aoqi@6880 | 3838 | // The repne_scan instruction uses fixed registers, which we must spill. |
aoqi@6880 | 3839 | // Don't worry too much about pre-existing connections with the input regs. |
aoqi@6880 | 3840 | |
aoqi@6880 | 3841 | #if 0 |
aoqi@6880 | 3842 | assert(sub_klass != T9, "killed reg"); // killed by mov(rax, super) |
aoqi@6880 | 3843 | assert(sub_klass != T1, "killed reg"); // killed by lea(rcx, &pst_counter) |
aoqi@6880 | 3844 | #endif |
aoqi@6880 | 3845 | |
aoqi@6880 | 3846 | // Get super_klass value into rax (even if it was in rdi or rcx). |
aoqi@6880 | 3847 | #ifndef PRODUCT |
aoqi@6880 | 3848 | int* pst_counter = &SharedRuntime::_partial_subtype_ctr; |
aoqi@6880 | 3849 | ExternalAddress pst_counter_addr((address) pst_counter); |
aoqi@6880 | 3850 | NOT_LP64( incrementl(pst_counter_addr) ); |
aoqi@6880 | 3851 | //LP64_ONLY( lea(rcx, pst_counter_addr) ); |
aoqi@6880 | 3852 | //LP64_ONLY( incrementl(Address(rcx, 0)) ); |
aoqi@6880 | 3853 | #endif //PRODUCT |
aoqi@6880 | 3854 | |
aoqi@6880 | 3855 | // We will consult the secondary-super array. |
aoqi@6880 | 3856 | ld(temp_reg, secondary_supers_addr); |
aoqi@6880 | 3857 | // Load the array length. (Positive movl does right thing on LP64.) |
aoqi@6880 | 3858 | lw(temp2_reg, Address(temp_reg, Array<Klass*>::length_offset_in_bytes())); |
aoqi@6880 | 3859 | // Skip to start of data. |
aoqi@6880 | 3860 | daddiu(temp_reg, temp_reg, Array<Klass*>::base_offset_in_bytes()); |
aoqi@6880 | 3861 | |
aoqi@6880 | 3862 | // Scan RCX words at [RDI] for an occurrence of RAX. |
aoqi@6880 | 3863 | // Set NZ/Z based on last compare. |
aoqi@6880 | 3864 | // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does |
aoqi@6880 | 3865 | // not change flags (only scas instruction which is repeated sets flags). |
aoqi@6880 | 3866 | // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. |
aoqi@6880 | 3867 | |
aoqi@6880 | 3868 | /* 2013/4/3 Jin: OpenJDK8 never compresses klass pointers in secondary-super array. */ |
aoqi@6880 | 3869 | Label Loop, subtype; |
aoqi@6880 | 3870 | bind(Loop); |
aoqi@6880 | 3871 | beq(temp2_reg, R0, *L_failure); |
aoqi@6880 | 3872 | delayed()->nop(); |
aoqi@6880 | 3873 | ld(AT, temp_reg, 0); |
aoqi@6880 | 3874 | beq(AT, super_klass, subtype); |
aoqi@6880 | 3875 | delayed()->daddi(temp_reg, temp_reg, 1 * wordSize); |
aoqi@6880 | 3876 | b(Loop); |
aoqi@6880 | 3877 | delayed()->daddi(temp2_reg, temp2_reg, -1); |
aoqi@6880 | 3878 | |
aoqi@6880 | 3879 | bind(subtype); |
aoqi@6880 | 3880 | sd(super_klass, super_cache_addr); |
aoqi@6880 | 3881 | if (L_success != &L_fallthrough) { |
aoqi@6880 | 3882 | b(*L_success); |
aoqi@6880 | 3883 | delayed()->nop(); |
aoqi@6880 | 3884 | } |
aoqi@6880 | 3885 | |
aoqi@6880 | 3886 | // Success. Cache the super we found and proceed in triumph. |
aoqi@6880 | 3887 | #undef IS_A_TEMP |
aoqi@6880 | 3888 | |
aoqi@6880 | 3889 | bind(L_fallthrough); |
aoqi@6880 | 3890 | } |
aoqi@6880 | 3891 | void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { |
aoqi@6880 | 3892 | ld(oop_result, Address(java_thread, JavaThread::vm_result_offset())); |
aoqi@6880 | 3893 | sd(R0, Address(java_thread, JavaThread::vm_result_offset())); |
aoqi@6880 | 3894 | verify_oop(oop_result, "broken oop in call_VM_base"); |
aoqi@6880 | 3895 | } |
aoqi@6880 | 3896 | |
aoqi@6880 | 3897 | void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { |
aoqi@6880 | 3898 | ld(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); |
aoqi@6880 | 3899 | sd(R0, Address(java_thread, JavaThread::vm_result_2_offset())); |
aoqi@6880 | 3900 | } |
aoqi@6880 | 3901 | |
aoqi@6880 | 3902 | Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, |
aoqi@6880 | 3903 | int extra_slot_offset) { |
aoqi@6880 | 3904 | // cf. TemplateTable::prepare_invoke(), if (load_receiver). |
aoqi@6880 | 3905 | int stackElementSize = Interpreter::stackElementSize; |
aoqi@6880 | 3906 | int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); |
aoqi@6880 | 3907 | #ifdef ASSERT |
aoqi@6880 | 3908 | int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); |
aoqi@6880 | 3909 | assert(offset1 - offset == stackElementSize, "correct arithmetic"); |
aoqi@6880 | 3910 | #endif |
aoqi@6880 | 3911 | Register scale_reg = NOREG; |
aoqi@6880 | 3912 | Address::ScaleFactor scale_factor = Address::no_scale; |
aoqi@6880 | 3913 | if (arg_slot.is_constant()) { |
aoqi@6880 | 3914 | offset += arg_slot.as_constant() * stackElementSize; |
aoqi@6880 | 3915 | } else { |
aoqi@6880 | 3916 | scale_reg = arg_slot.as_register(); |
aoqi@6880 | 3917 | scale_factor = Address::times_8; |
aoqi@6880 | 3918 | } |
aoqi@6880 | 3919 | // 2014/07/31 Fu: We don't push RA on stack in prepare_invoke. |
aoqi@6880 | 3920 | // offset += wordSize; // return PC is on stack |
aoqi@6880 | 3921 | if(scale_reg==NOREG) return Address(SP, offset); |
aoqi@6880 | 3922 | else { |
aoqi@6880 | 3923 | dsll(scale_reg, scale_reg, scale_factor); |
aoqi@6880 | 3924 | daddu(scale_reg, SP, scale_reg); |
aoqi@6880 | 3925 | return Address(scale_reg, offset); |
aoqi@6880 | 3926 | } |
aoqi@6880 | 3927 | } |
aoqi@6880 | 3928 | |
aoqi@6880 | 3929 | SkipIfEqual::~SkipIfEqual() { |
aoqi@6880 | 3930 | _masm->bind(_label); |
aoqi@6880 | 3931 | } |
aoqi@6880 | 3932 | |
aoqi@6880 | 3933 | void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { |
aoqi@6880 | 3934 | switch (size_in_bytes) { |
aoqi@6880 | 3935 | #ifndef _LP64 |
aoqi@6880 | 3936 | case 8: |
aoqi@6880 | 3937 | assert(dst2 != noreg, "second dest register required"); |
aoqi@6880 | 3938 | lw(dst, src); |
aoqi@6880 | 3939 | lw(dst2, src.plus_disp(BytesPerInt)); |
aoqi@6880 | 3940 | break; |
aoqi@6880 | 3941 | #else |
aoqi@6880 | 3942 | case 8: ld(dst, src); break; |
aoqi@6880 | 3943 | #endif |
aoqi@6880 | 3944 | case 4: lw(dst, src); break; |
aoqi@6880 | 3945 | case 2: is_signed ? lh(dst, src) : lhu(dst, src); break; |
aoqi@6880 | 3946 | case 1: is_signed ? lb( dst, src) : lbu( dst, src); break; |
aoqi@6880 | 3947 | default: ShouldNotReachHere(); |
aoqi@6880 | 3948 | } |
aoqi@6880 | 3949 | } |
aoqi@6880 | 3950 | |
aoqi@6880 | 3951 | void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { |
aoqi@6880 | 3952 | switch (size_in_bytes) { |
aoqi@6880 | 3953 | #ifndef _LP64 |
aoqi@6880 | 3954 | case 8: |
aoqi@6880 | 3955 | assert(src2 != noreg, "second source register required"); |
aoqi@6880 | 3956 | sw(src, dst); |
aoqi@6880 | 3957 | sw(src2, dst.plus_disp(BytesPerInt)); |
aoqi@6880 | 3958 | break; |
aoqi@6880 | 3959 | #else |
aoqi@6880 | 3960 | case 8: sd(src, dst); break; |
aoqi@6880 | 3961 | #endif |
aoqi@6880 | 3962 | case 4: sw(src, dst); break; |
aoqi@6880 | 3963 | case 2: sh(src, dst); break; |
aoqi@6880 | 3964 | case 1: sb(src, dst); break; |
aoqi@6880 | 3965 | default: ShouldNotReachHere(); |
aoqi@6880 | 3966 | } |
aoqi@6880 | 3967 | } |
aoqi@6880 | 3968 | |
aoqi@6880 | 3969 | // Look up the method for a megamorphic invokeinterface call. |
aoqi@6880 | 3970 | // The target method is determined by <intf_klass, itable_index>. |
aoqi@6880 | 3971 | // The receiver klass is in recv_klass. |
aoqi@6880 | 3972 | // On success, the result will be in method_result, and execution falls through. |
aoqi@6880 | 3973 | // On failure, execution transfers to the given label. |
aoqi@6880 | 3974 | void MacroAssembler::lookup_interface_method(Register recv_klass, |
aoqi@6880 | 3975 | Register intf_klass, |
aoqi@6880 | 3976 | RegisterOrConstant itable_index, |
aoqi@6880 | 3977 | Register method_result, |
aoqi@6880 | 3978 | Register scan_temp, |
aoqi@6880 | 3979 | Label& L_no_such_interface) { |
aoqi@6880 | 3980 | assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); |
aoqi@6880 | 3981 | assert(itable_index.is_constant() || itable_index.as_register() == method_result, |
aoqi@6880 | 3982 | "caller must use same register for non-constant itable index as for method"); |
aoqi@6880 | 3983 | |
aoqi@6880 | 3984 | // Compute start of first itableOffsetEntry (which is at the end of the vtable) |
aoqi@6880 | 3985 | int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; |
aoqi@6880 | 3986 | int itentry_off = itableMethodEntry::method_offset_in_bytes(); |
aoqi@6880 | 3987 | int scan_step = itableOffsetEntry::size() * wordSize; |
aoqi@6880 | 3988 | int vte_size = vtableEntry::size() * wordSize; |
aoqi@6880 | 3989 | Address::ScaleFactor times_vte_scale = Address::times_ptr; |
aoqi@6880 | 3990 | assert(vte_size == wordSize, "else adjust times_vte_scale"); |
aoqi@6880 | 3991 | |
aoqi@6880 | 3992 | lw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize)); |
aoqi@6880 | 3993 | |
aoqi@6880 | 3994 | // %%% Could store the aligned, prescaled offset in the klassoop. |
aoqi@6880 | 3995 | dsll(scan_temp, scan_temp, times_vte_scale); |
aoqi@6880 | 3996 | daddu(scan_temp, recv_klass, scan_temp); |
aoqi@6880 | 3997 | daddiu(scan_temp, scan_temp, vtable_base); |
aoqi@6880 | 3998 | if (HeapWordsPerLong > 1) { |
aoqi@6880 | 3999 | // Round up to align_object_offset boundary |
aoqi@6880 | 4000 | // see code for InstanceKlass::start_of_itable! |
aoqi@6880 | 4001 | round_to(scan_temp, BytesPerLong); |
aoqi@6880 | 4002 | } |
aoqi@6880 | 4003 | |
aoqi@6880 | 4004 | // Adjust recv_klass by scaled itable_index, so we can free itable_index. |
aoqi@6880 | 4005 | assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); |
aoqi@6880 | 4006 | // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); |
aoqi@6880 | 4007 | if (itable_index.is_constant()) { |
aoqi@6880 | 4008 | set64(AT, (int)itable_index.is_constant()); |
aoqi@6880 | 4009 | dsll(AT, AT, (int)Address::times_ptr); |
aoqi@6880 | 4010 | } else { |
aoqi@6880 | 4011 | dsll(AT, itable_index.as_register(), (int)Address::times_ptr); |
aoqi@6880 | 4012 | } |
aoqi@6880 | 4013 | daddu(AT, AT, recv_klass); |
aoqi@6880 | 4014 | daddiu(recv_klass, AT, itentry_off); |
aoqi@6880 | 4015 | |
aoqi@6880 | 4016 | // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { |
aoqi@6880 | 4017 | // if (scan->interface() == intf) { |
aoqi@6880 | 4018 | // result = (klass + scan->offset() + itable_index); |
aoqi@6880 | 4019 | // } |
aoqi@6880 | 4020 | // } |
aoqi@6880 | 4021 | Label search, found_method; |
aoqi@6880 | 4022 | |
aoqi@6880 | 4023 | for (int peel = 1; peel >= 0; peel--) { |
aoqi@6880 | 4024 | ld(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); |
aoqi@6880 | 4025 | |
aoqi@6880 | 4026 | if (peel) { |
aoqi@6880 | 4027 | beq(intf_klass, method_result, found_method); |
aoqi@6880 | 4028 | nop(); |
aoqi@6880 | 4029 | } else { |
aoqi@6880 | 4030 | bne(intf_klass, method_result, search); |
aoqi@6880 | 4031 | nop(); |
aoqi@6880 | 4032 | // (invert the test to fall through to found_method...) |
aoqi@6880 | 4033 | } |
aoqi@6880 | 4034 | |
aoqi@6880 | 4035 | if (!peel) break; |
aoqi@6880 | 4036 | |
aoqi@6880 | 4037 | bind(search); |
aoqi@6880 | 4038 | |
aoqi@6880 | 4039 | // Check that the previous entry is non-null. A null entry means that |
aoqi@6880 | 4040 | // the receiver class doesn't implement the interface, and wasn't the |
aoqi@6880 | 4041 | // same as when the caller was compiled. |
aoqi@6880 | 4042 | beq(method_result, R0, L_no_such_interface); |
aoqi@6880 | 4043 | nop(); |
aoqi@6880 | 4044 | daddiu(scan_temp, scan_temp, scan_step); |
aoqi@6880 | 4045 | } |
aoqi@6880 | 4046 | |
aoqi@6880 | 4047 | bind(found_method); |
aoqi@6880 | 4048 | |
aoqi@6880 | 4049 | // Got a hit. |
aoqi@6880 | 4050 | lw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); |
aoqi@6880 | 4051 | //ld(method_result, Address(recv_klass, scan_temp, Address::times_1)); |
aoqi@6880 | 4052 | if(UseLoongsonISA) { |
aoqi@6880 | 4053 | gsldx(method_result, recv_klass, scan_temp, 0); |
aoqi@6880 | 4054 | } else { |
aoqi@6880 | 4055 | daddu(AT, recv_klass, scan_temp); |
aoqi@6880 | 4056 | ld(method_result, AT); |
aoqi@6880 | 4057 | } |
aoqi@6880 | 4058 | } |
aoqi@6880 | 4059 | |
aoqi@6880 | 4060 | |
aoqi@6880 | 4061 | // virtual method calling |
aoqi@6880 | 4062 | void MacroAssembler::lookup_virtual_method(Register recv_klass, |
aoqi@6880 | 4063 | RegisterOrConstant vtable_index, |
aoqi@6880 | 4064 | Register method_result) { |
aoqi@6880 | 4065 | Register tmp = GP; |
aoqi@6880 | 4066 | push(tmp); |
aoqi@6880 | 4067 | |
aoqi@6880 | 4068 | if (vtable_index.is_constant()) { |
aoqi@6880 | 4069 | assert_different_registers(recv_klass, method_result, tmp); |
aoqi@6880 | 4070 | } else { |
aoqi@6880 | 4071 | assert_different_registers(recv_klass, method_result, vtable_index.as_register(), tmp); |
aoqi@6880 | 4072 | } |
aoqi@6880 | 4073 | const int base = InstanceKlass::vtable_start_offset() * wordSize; |
aoqi@6880 | 4074 | assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); |
aoqi@6880 | 4075 | /* |
aoqi@6880 | 4076 | Address vtable_entry_addr(recv_klass, |
aoqi@6880 | 4077 | vtable_index, Address::times_ptr, |
aoqi@6880 | 4078 | base + vtableEntry::method_offset_in_bytes()); |
aoqi@6880 | 4079 | */ |
aoqi@6880 | 4080 | if (vtable_index.is_constant()) { |
aoqi@6880 | 4081 | set64(AT, vtable_index.as_constant()); |
aoqi@6880 | 4082 | dsll(AT, AT, (int)Address::times_ptr); |
aoqi@6880 | 4083 | } else { |
aoqi@6880 | 4084 | dsll(AT, vtable_index.as_register(), (int)Address::times_ptr); |
aoqi@6880 | 4085 | } |
aoqi@6880 | 4086 | set64(tmp, base + vtableEntry::method_offset_in_bytes()); |
aoqi@6880 | 4087 | daddu(tmp, tmp, AT); |
aoqi@6880 | 4088 | daddu(tmp, tmp, recv_klass); |
aoqi@6880 | 4089 | ld(method_result, tmp, 0); |
aoqi@6880 | 4090 | |
aoqi@6880 | 4091 | pop(tmp); |
aoqi@6880 | 4092 | } |