src/cpu/mips/vm/nativeInst_mips.cpp

author:      fujie
date:        Tue, 26 Jul 2016 17:06:17 +0800
changeset:   41:d885f8d65c58
parent:      1:2d8a650513c2
child:       129:ef84e7428333
permissions: -rw-r--r--

Add multiply word to GPR instruction (mul) in MIPS assembler.

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_mips.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <sys/mman.h>

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeInstruction::set_long_at(int offset, long i) {
  address addr = addr_at(offset);
  *(long*)addr = i;
  //ICache::invalidate_word(addr);
}

static int illegal_instruction_bits = 0;

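// Lazily assemble a single brk(11) into a scratch buffer and cache its
// encoding, so callers can compare raw instruction words against it.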
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->brk(11);
    int bits = *(int*)ia;
    illegal_instruction_bits = bits;
  }
  return illegal_instruction_bits;
}

bool NativeInstruction::is_int_branch() {
  switch(Assembler::opcode(insn_word())) {
    case Assembler::beq_op:
    case Assembler::beql_op:
    case Assembler::bgtz_op:
    case Assembler::bgtzl_op:
    case Assembler::blez_op:
    case Assembler::blezl_op:
    case Assembler::bne_op:
    case Assembler::bnel_op:
      return true;
    case Assembler::regimm_op:
      switch(Assembler::rt(insn_word())) {
        case Assembler::bgez_op:
        case Assembler::bgezal_op:
        case Assembler::bgezall_op:
        case Assembler::bgezl_op:
        case Assembler::bltz_op:
        case Assembler::bltzal_op:
        case Assembler::bltzall_op:
        case Assembler::bltzl_op:
          return true;
      }
  }

  return false;
}

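// A float branch is a COP1 branch-on-condition-flag instruction
// (bc1f / bc1fl / bc1t / bc1tl): opcode cop1 with the bc sub-opcode in the
// rs field.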
bool NativeInstruction::is_float_branch() {
  if (!is_op(Assembler::cop1_op) ||
      !is_rs((Register)Assembler::bc_op)) return false;

  switch(Assembler::rt(insn_word())) {
    case Assembler::bcf_op:
    case Assembler::bcfl_op:
    case Assembler::bct_op:
    case Assembler::bctl_op:
      return true;
  }

  return false;
}


//-------------------------------------------------------------------

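// On MIPS64 a call is a li48 or li64 sequence that materializes the target
// address in a register, followed by jalr.  The li48 form emitted by
// NativeCall::insert is:
//
//   lui   t9, imm(47..32)
//   ori   t9, t9, imm(31..16)
//   dsll  t9, t9, 16
//   ori   t9, t9, imm(15..0)
//   jalr  t9
//
// The li64 form carries one more ori/dsll pair so that bits 63..48 can be
// encoded in the leading lui.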
void NativeCall::verify() {
  // make sure code pattern is actually a call instruction
#ifndef _LP64
  if ( !is_op(Assembler::lui_op) ||
       !is_op(int_at(4), Assembler::addiu_op) ||
       !is_special_op(int_at(8), Assembler::jalr_op) ) {
    fatal("not a call");
  }
#else
  /* li64 or li48 */
  int li_64 = 0;
  int li_48 = 0;

  if ( is_op        (Assembler::lui_op) &&
       is_op        (int_at(4), Assembler::ori_op) &&
       is_special_op(int_at(8), Assembler::dsll_op) &&
       is_op        (int_at(12), Assembler::ori_op) &&
       is_special_op(int_at(16), Assembler::dsll_op) &&
       is_op        (int_at(20), Assembler::ori_op) &&
       is_special_op(int_at(24), Assembler::jalr_op) ) {
    li_64 = 1;
  }

  if ( is_op        (Assembler::lui_op) &&
       is_op        (int_at(4), Assembler::ori_op) &&
       is_special_op(int_at(8), Assembler::dsll_op) &&
       is_op        (int_at(12), Assembler::ori_op) &&
       is_special_op(int_at(16), Assembler::jalr_op) ) {
    li_48 = 1;
  }

  if (!li_64 && !li_48) {
    tty->print_cr("NativeCall::verify addr=%lx", addr_at(0));
    fatal("not a call");
  }
#endif
}

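// Recover the call target from the immediate fields of the li64/li48 sequence.
// The instruction at offset 16 distinguishes the two forms: dsll for li64,
// jalr for li48.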
address NativeCall::destination() const {
#ifndef _LP64
  return (address)Assembler::merge(int_at(4)&0xffff, long_at(0)&0xffff);
#else
  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op)) {
    return (address)Assembler::merge( (intptr_t)(int_at(20) & 0xffff),
                                      (intptr_t)(int_at(12) & 0xffff),
                                      (intptr_t)(int_at(4) & 0xffff),
                                      (intptr_t)(int_at(0) & 0xffff));
  } else if (is_special_op(int_at(16), Assembler::jalr_op)) {
    return (address)Assembler::merge( (intptr_t)(int_at(12) & 0xffff),
                                      (intptr_t)(int_at(4) & 0xffff),
                                      (intptr_t)(int_at(0) & 0xffff),
                                      (intptr_t)0);
  }
  fatal("not a call");
  return NULL; // unreachable; keeps the compiler quiet about the return value
#endif
}

/* 2013/6/14 Jin: manual implementation of GSSQ
 *
 *  00000001200009c0 <atomic_store128>:
 *     1200009c0:   0085202d   daddu  a0, a0, a1
 *     1200009c4:   e8860027   gssq   a2, a3, 0(a0)
 *     1200009c8:   03e00008   jr     ra
 *     1200009cc:   00000000   nop
 */
typedef void (* atomic_store128_ptr)(long *addr, int offset, long low64, long hi64);

static int *buf;

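// Build, once, a small executable stub that uses the Loongson gssq instruction
// to store two 64-bit values with a single 128-bit memory access.  The patching
// code below uses it to rewrite a 16-byte (four-instruction) sequence
// atomically.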
static atomic_store128_ptr get_atomic_store128_func() {
  static atomic_store128_ptr p = NULL;
  if (p != NULL)
    return p;

  buf = (int *)mmap(NULL, 1024, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS,
                    -1, 0);
  buf[0] = 0x0085202d;
  buf[1] = (0x3a << 26) | (4 << 21) | (6 << 16) | 0x27;   /* gssq $a2, $a3, 0($a0) */
  buf[2] = 0x03e00008;
  buf[3] = 0;

  p = (atomic_store128_ptr)buf;
  return p;
}

void NativeCall::set_destination(address dest) {
#ifndef _LP64
  OrderAccess::fence();
  set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high((intptr_t)dest) & 0xffff));
  set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
  ICache::invalidate_range(addr_at(0), 8);
#else
  OrderAccess::fence();
  /* 2013/6/13 Jin: ensure 100% atomicity */
  guarantee(!os::is_MP() || (((long)addr_at(0) % 16) == 0), "destination must be aligned for GSSD");

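  /* The 6-instruction li64 form cannot be rewritten with a single store, so the
   * leading lui is first turned into a branch-to-self ("b .1" below); the other
   * immediates are patched while any concurrent caller spins there, and the
   * first word is rewritten last with the new upper 16 bits.  The 4-instruction
   * li48 form fits in 16 bytes and is instead replaced with one atomic 128-bit
   * gssq store. */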
  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op)) {
    int first_word = int_at(0);
    set_int_at(0, 0x1000ffff); /* .1: b .1 */
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
    set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
    set_int_at(20, (int_at(20) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
    set_int_at(0, (first_word & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 48) & 0xffff));
    ICache::invalidate_range(addr_at(0), 24);
  } else if (is_special_op(int_at(16), Assembler::jalr_op)) {
    int insts[4];
    insts[0] = (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff);
    insts[1] = (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff);
    insts[2] = int_at(8);
    insts[3] = (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff);

    atomic_store128_ptr func = get_atomic_store128_func();
    (*func)((long *)addr_at(0), 0, *(long *)&insts[0], *(long *)&insts[2]);
  } else {
    fatal("not a call");
  }
#endif
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  NativeCall *call = nativeCall_at(code_pos);
  CodeBuffer cb(call->addr_at(0), instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
#ifndef _LP64
  __ lui(T9, Assembler::split_high((int)entry));
  __ addiu(T9, T9, Assembler::split_low((int)entry));
#else
  __ li48(T9, (long)entry);
#endif
  __ jalr();
  __ delayed()->nop();
#undef __

  ICache::invalidate_range(call->addr_at(0), instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
#ifndef _LP64
  if ( !is_op(Assembler::lui_op) ||
       !is_op(int_at(4), Assembler::addiu_op) )
    fatal("not a mov reg, imm32");
#else
  /* li64 or li48 */
  int li_64 = 0;
  int li_48 = 0;

  if ( is_op(Assembler::lui_op) &&
       is_op(int_at(4), Assembler::ori_op) &&
       is_special_op(int_at(8), Assembler::dsll_op) &&
       is_op(int_at(12), Assembler::ori_op) &&
       is_special_op(int_at(16), Assembler::dsll_op) &&
       is_op(int_at(20), Assembler::ori_op) ) {
    li_64 = 1;
  }

  if ( is_op(Assembler::lui_op) &&
       is_op(int_at(4), Assembler::ori_op) &&
       is_special_op(int_at(8), Assembler::dsll_op) &&
       is_op(int_at(12), Assembler::ori_op) ) {
    li_48 = 1;
  }

  if (!li_64 && !li_48) {
    fatal("not a mov reg, imm64/imm48");
  }
#endif
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}

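// Reassemble the 64-bit immediate from the 16-bit pieces of the li64 sequence
// (words at offsets 0, 4, 12 and 20) or the li48 sequence (words 0, 4 and 12).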
intptr_t NativeMovConstReg::data() const {
#ifndef _LP64
  return Assembler::merge(int_at(4)&0xffff, long_at(0)&0xffff);
#else
  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op) && is_op(long_at(20), Assembler::ori_op)) {
    return Assembler::merge( (intptr_t)(int_at(20) & 0xffff),
                             (intptr_t)(int_at(12) & 0xffff),
                             (intptr_t)(int_at(4) & 0xffff),
                             (intptr_t)(int_at(0) & 0xffff));
  } else {
    return Assembler::merge( (intptr_t)(int_at(12) & 0xffff),
                             (intptr_t)(int_at(4) & 0xffff),
                             (intptr_t)(int_at(0) & 0xffff),
                             (intptr_t)0);
  }
#endif
}

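// Patch the 16-bit immediate fields of the li64 sequence (words at offsets 0,
// 4, 12 and 20) or the li48 sequence (words 0, 4 and 12) with the new value,
// then flush the patched range from the icache.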
void NativeMovConstReg::set_data(intptr_t x) {
/*
#ifndef CORE
  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), instruction_address() + 1);
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL && r->oop_index()!=0) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
#endif
*/

#ifndef _LP64
  set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high(x) & 0xffff));
  set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low(x) & 0xffff));
  ICache::invalidate_range(addr_at(0), 8);
#else
  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op) && is_op(long_at(20), Assembler::ori_op)) {
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 48) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 32) & 0xffff));
    set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 16) & 0xffff));
    set_int_at(20, (int_at(20) & 0xffff0000) | (Assembler::split_low((intptr_t)x) & 0xffff));
  } else {
    //assert(is_simm16(dest >> 32), "Not a 48-bit address");
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 32) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 16) & 0xffff));
    set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)x) & 0xffff));
  }
  ICache::invalidate_range(addr_at(0), 24);
#endif
}

//-------------------------------------------------------------------

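// A reg/mem move either encodes its displacement directly in the 16-bit
// immediate of the load/store (the "immediate" form), or first builds a 32-bit
// offset with lui/addiu and adds it to the base register.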
int NativeMovRegMem::offset() const {
  if (is_immediate())
    return (short)(int_at(instruction_offset)&0xffff);
  else
    return Assembler::merge(int_at(hiword_offset)&0xffff, long_at(instruction_offset)&0xffff);
}

void NativeMovRegMem::set_offset(int x) {
  if (is_immediate()) {
    assert(Assembler::is_simm16(x), "just check");
    set_int_at(0, (int_at(0)&0xffff0000) | (x&0xffff) );
    if (is_64ldst()) {
      assert(Assembler::is_simm16(x+4), "just check");
      set_int_at(4, (int_at(4)&0xffff0000) | ((x+4)&0xffff) );
    }
  } else {
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high(x) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low(x) & 0xffff));
  }
  ICache::invalidate_range(addr_at(0), 8);
}

void NativeMovRegMem::verify() {
  int offset = 0;

  if ( Assembler::opcode(int_at(0)) == Assembler::lui_op ) {
#ifndef _LP64
    if ( (Assembler::opcode(int_at(4)) != Assembler::addiu_op) ||
         (Assembler::opcode(int_at(8)) != Assembler::special_op) ||
         (Assembler::special(int_at(8)) != Assembler::add_op))
#else
    /* Jin: fit MIPS64 */
    if ( (Assembler::opcode(int_at(4)) != Assembler::addiu_op &&
          Assembler::opcode(int_at(4)) != Assembler::daddiu_op ) ||
         (Assembler::opcode(int_at(8)) != Assembler::special_op) ||
         (Assembler::special(int_at(8)) != Assembler::add_op &&
          Assembler::special(int_at(8)) != Assembler::dadd_op))
#endif
      fatal("not a mov [reg+offs], reg instruction");
    offset += 12;
  }

  switch(Assembler::opcode(int_at(offset))) {
    case Assembler::lb_op:
    case Assembler::lbu_op:
    case Assembler::lh_op:
    case Assembler::lhu_op:
    case Assembler::lw_op:
    LP64_ONLY(case Assembler::ld_op:)
    case Assembler::lwc1_op:
    LP64_ONLY(case Assembler::ldc1_op:)
    case Assembler::sb_op:
    case Assembler::sh_op:
    case Assembler::sw_op:
    LP64_ONLY(case Assembler::sd_op:)
    case Assembler::swc1_op:
    LP64_ONLY(case Assembler::sdc1_op:)
      break;
    default:
      fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
}


void NativeIllegalInstruction::insert(address code_pos) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
  __ brk(11);
#undef __

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}

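// A general jump takes one of three forms: a short pc-relative branch, the
// b_far sequence (a 32-bit offset built with lui/ori and added to RA, see
// is_b_far()), or a full li64/li48 load of the destination followed by jr.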
void NativeGeneralJump::set_jump_destination(address dest) {
  //tty->print_cr("NativeGeneralJump::set_jump_destination dest=%lx", dest);
  OrderAccess::fence();

  if (is_short()) {
    assert(Assembler::is_simm16(dest-addr_at(4)), "change this code");
    set_int_at(0, (int_at(0) & 0xffff0000) | (dest - addr_at(4)) & 0xffff );
    ICache::invalidate_range(addr_at(0), 4);
#ifdef _LP64
  } else if (is_b_far()) {
    int offset = dest - addr_at(12);
    set_int_at(12, (int_at(12) & 0xffff0000) | (offset >> 16));
    set_int_at(16, (int_at(16) & 0xffff0000) | (offset & 0xffff));
#endif
  } else {
#ifndef _LP64
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high((intptr_t)dest) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
    ICache::invalidate_range(addr_at(0), 8);
#else
    /* li64 or li48 */
    if (is_special_op(int_at(16), Assembler::dsll_op)) {
      set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 48) & 0xffff));
      set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
      set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
      set_int_at(20, (int_at(20) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
    } else {
      int jr_word = int_at(16);
      set_int_at(16, 0x1000fffb); /* .1: --; --; --; --; b .1; nop */

      set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
      set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
      set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
      set_int_at(16, jr_word); /* .1: --; --; --; --; jr  ; nop */
    }

    ICache::invalidate_range(addr_at(0), 24);
#endif
  }
}

// We now use b to do this. Be careful when using this method.
// by yjl 9/16/2005
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
#ifdef _LP64
  if (Assembler::is_simm16((entry - code_pos - 4) / 4)) {
    __ b(entry);
    __ delayed()->nop();
  } else {
    /* a simplified b_far */
    int offset = entry - code_pos;

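    /* The raw word 0x04110001 encodes "bgezal zero, .+8": it always branches
     * (to the ori below, with the lui in its delay slot) and sets RA to
     * code_pos + 8.  T9 is loaded with (offset - 8), so T9 + RA == code_pos +
     * offset == entry, and "jr T9" completes the far jump. */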
    // FIXME: need to preserve RA?
    __ emit_long(0x4110001); //__ emit_long(Assembler::insn_ORRI(Assembler::regimm_op, 0, Assembler::bgezal_op, 1));
    __ lui(T9, (offset - 8) >> 16);  // delay slot
    __ ori(T9, T9, (offset - 8) & 0xffff);
    __ daddu(T9, T9, RA);
    __ jr(T9);
    __ nop();
  }
#else
  __ b(entry);
  __ delayed()->nop();
#endif
#undef __

  ICache::invalidate_range(code_pos, instruction_size);
}

#ifdef _LP64
bool NativeGeneralJump::is_b_far() {
/*
   0x000000556809f198: dadd   at, ra, zero
   0x000000556809f19c: [4110001] bgezal zero, 0x000000556809f1a4

   0x000000556809f1a0: nop
   0x000000556809f1a4: lui    t9, 0xfffffffd
   0x000000556809f1a8: ori    t9, t9, 0x14dc
   0x000000556809f1ac: daddu  t9, t9, ra
   0x000000556809f1b0: dadd   ra, at, zero
   0x000000556809f1b4: jr     t9
   0x000000556809f1b8: nop
                       ;; ImplicitNullCheckStub slow case
   0x000000556809f1bc: lui    t9, 0x55
 */
  return is_op(int_at(12), Assembler::lui_op);
}
#endif

address NativeGeneralJump::jump_destination() {
  if ( is_short() ) {
    return addr_at(4) + Assembler::imm_off(int_at(instruction_offset)) * 4;
  }
#ifndef _LP64
  return (address)Assembler::merge(int_at(4)&0xffff, long_at(instruction_offset)&0xffff);
#else
  /* 2012/4/19 Jin: Assembler::merge() is not correct in MIPS_64!

     Example:
       hi16 = 0xfffd,
       lo16 = f7a4,

       offset = 0xfffdf7a4 (Right)
       Assembler::merge = 0xfffcf7a4 (Wrong)
   */
  if ( is_b_far() ) {
    int hi16 = int_at(12)&0xffff;
    int low16 = int_at(16)&0xffff;
    address target = addr_at(12) + (hi16 << 16) + low16;
    return target;
  }

  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op)) {
    return (address)Assembler::merge( (intptr_t)(int_at(20) & 0xffff),
                                      (intptr_t)(int_at(12) & 0xffff),
                                      (intptr_t)(int_at(4) & 0xffff),
                                      (intptr_t)(int_at(0) & 0xffff));
  } else {
    return (address)Assembler::merge( (intptr_t)(int_at(12) & 0xffff),
                                      (intptr_t)(int_at(4) & 0xffff),
                                      (intptr_t)(int_at(0) & 0xffff),
                                      ((int_at(0) & 0xffff) >= 0x8000) ? (intptr_t)0xffff : (intptr_t)0); /* sign-extended to 64-bit */
  }
#endif
}

// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  NativeGeneralJump* h_jump = nativeGeneralJump_at(instr_addr);
  assert(NativeGeneralJump::instruction_size == NativeCall::instruction_size,
         "note::Runtime1::patch_code uses NativeCall::instruction_size");

  /* 2013/6/13 Jin: ensure 100% atomicity */
  guarantee(!os::is_MP() || (((long)instr_addr % BytesPerWord) == 0), "destination must be aligned for SD");

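  /* The jump word at offset 16 is first replaced with a branch back to the
   * start of the sequence (the same trick as the li48 path of
   * set_jump_destination()), the first 16 bytes are then copied from the code
   * buffer, and the last 8 bytes are written back with a single aligned
   * 64-bit store. */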
  int *p = (int *)instr_addr;
  int jr_word = p[4];

  p[4] = 0x1000fffb;   /* .1: --; --; --; --; b .1; nop */
  memcpy(instr_addr, code_buffer, NativeCall::instruction_size - 8);
  *(long *)(instr_addr + 16) = *(long *)(code_buffer + 16);
}

/* Must ensure atomicity */
void NativeGeneralJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  /* 2013/11/5 Jin: ensure 100% atomicity.
   * The destination is fixed and can be cached in JavaThread.
   */
  guarantee(!os::is_MP() || (((long)verified_entry % BytesPerWord) == 0), "destination must be aligned for SD");

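  /* The replacement code is exactly four instructions (16 bytes): load the
   * handle_wrong_method stub address from the current JavaThread, jump to it,
   * plus the delay-slot nop and one padding nop.  It is written with a single
   * gssq store so the patch appears atomically to threads entering the
   * verified entry point. */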
  int code_buffer[4];

  CodeBuffer cb((address)code_buffer, instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
  __ ld(T9, TREG, in_bytes(JavaThread::handle_wrong_method_stub_offset()));
  __ jr(T9);
  __ delayed()->nop();
  __ nop();
#undef __

  atomic_store128_ptr func = get_atomic_store128_func();
  (*func)((long *)verified_entry, 0, *(long *)&code_buffer[0], *(long *)&code_buffer[2]);

  ICache::invalidate_range(verified_entry, instruction_size);
}

bool NativeInstruction::is_jump() {
#ifndef _LP64
  return ((int_at(0) & NativeGeneralJump::b_mask) == NativeGeneralJump::beq_opcode) ||
         (is_op(int_at(0), Assembler::lui_op) &&
          is_op(int_at(4), Assembler::addiu_op) &&
          is_special_op(int_at(8), Assembler::jr_op));
#else
  // lui   rd, imm(63...48);
  // ori   rd, rd, imm(47...32);
  // dsll  rd, rd, 16;
  // ori   rd, rd, imm(31...16);
  // dsll  rd, rd, 16;
  // ori   rd, rd, imm(15...0);
  // jalr  rd
  // nop
  //
  if ((int_at(0) & NativeGeneralJump::b_mask) == NativeGeneralJump::beq_opcode)
    return true;
  if (is_op(int_at(4), Assembler::lui_op))   /* simplified b_far */
    return true;
  if (is_op(int_at(12), Assembler::lui_op))  /* original b_far */
    return true;
  if (is_op(int_at(0), Assembler::lui_op) &&
      is_op(int_at(4), Assembler::ori_op) &&
      is_special_op(int_at(8), Assembler::dsll_op) &&
      is_op(int_at(12), Assembler::ori_op) &&
      is_special_op(int_at(16), Assembler::dsll_op) &&
      is_op(int_at(20), Assembler::ori_op))
    return true;
  if (is_op(int_at(0), Assembler::lui_op) &&
      is_op(int_at(4), Assembler::ori_op) &&
      is_special_op(int_at(8), Assembler::dsll_op) &&
      is_op(int_at(12), Assembler::ori_op))
    return true;
  return false;
#endif
}

bool NativeInstruction::is_dtrace_trap() {
  //return (*(int32_t*)this & 0xff) == 0xcc;
  Unimplemented();
  return false;
}

// On MIPS we have to use two instructions to poll; however, we don't want to
// bother checking both of them. Instead, we use a lw into AT as the second
// instruction and only check that one.
// change ZERO -> AT, only in godson-2e  @jerome, 11/25/2006
bool NativeInstruction::is_safepoint_poll() {
#ifdef _LP64
  /*
     0x0000005565d28868: lui   t2, 0x0         ; -24
     0x0000005565d2886c: ori   t2, t2, 0x55    ; -20
     0x0000005565d28870: dsll  t2, t2, 16      ; -16
     0x0000005565d28874: ori   t2, t2, 0x6428  ; -12
     0x0000005565d28878: dsll  t2, t2, 16      ; -8
     0x0000005565d2887c: ori   t2, t2, 0x100   ; -4
     0x0000005565d28880: lw    at, 0x0(t2)     <-- PC
   */
#ifndef OPT_SAFEPOINT
  /* li64 or li48 */
  if (is_op(Assembler::lw_op) && is_rt(AT)) {
    return true;
  } else if (is_special_op(long_at(-16), Assembler::dsll_op)) {
    /* li64 */
    return (is_op(int_at(-24), Assembler::lui_op) &&
            is_op(int_at(-20), Assembler::ori_op) &&
            is_special_op(int_at(-16), Assembler::dsll_op) &&
            is_op(int_at(-12), Assembler::ori_op) &&
            is_special_op(int_at(-8), Assembler::dsll_op) &&
            is_op(int_at(-4), Assembler::ori_op) &&
            is_op(Assembler::lw_op) &&
            is_rt(AT));
  } else if (is_op(int_at(-16), Assembler::lui_op)) {
    /* li48 */
    return is_op(int_at(-16), Assembler::lui_op) &&
           is_op(int_at(-12), Assembler::ori_op) &&
           is_special_op(int_at(-8), Assembler::dsll_op) &&
           is_op(int_at(-4), Assembler::ori_op) &&
           is_op(Assembler::lw_op) &&
           is_rt(AT);
  } else {
    return false;
  }
#else // OPT_SAFEPOINT
  return is_op(int_at(-4), Assembler::lui_op) &&
         is_op(Assembler::lw_op) &&
         is_rt(AT);
#endif
#else
  return is_op(int_at(-4), Assembler::lui_op) &&
         is_op(Assembler::lw_op) &&
         is_rt(AT);
#endif
}
