Tue, 11 Oct 2016 08:59:56 +0800
Added a UseLoongsonISA check to NativeCall::set_destination, since NativeCall::set_destination uses GS (Loongson extension) instructions that are unavailable on plain MIPS cores.
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "nativeInst_mips.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/handles.hpp"
32 #include "runtime/sharedRuntime.hpp"
33 #include "runtime/stubRoutines.hpp"
34 #include "utilities/ostream.hpp"
35 #ifdef COMPILER1
36 #include "c1/c1_Runtime1.hpp"
37 #endif
39 #include <sys/mman.h>
41 void NativeInstruction::wrote(int offset) {
42 ICache::invalidate_word(addr_at(offset));
43 }
45 void NativeInstruction::set_long_at(int offset, long i) {
46 address addr = addr_at(offset);
47 *(long*)addr = i;
48 //ICache::invalidate_word(addr);
49 }
// Cached encoding of the canonical illegal instruction ("break 11");
// 0 means "not yet computed" and triggers lazy initialization below.
static int illegal_instruction_bits = 0;

// Returns the instruction bits for "break 11", assembling it once into a
// small stack buffer and caching the result for all subsequent calls.
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->brk(11);
    // Read back the word the assembler just emitted.
    int bits = *(int*)ia;
    illegal_instruction_bits = bits;
  }
  return illegal_instruction_bits;
}
// Returns true if this instruction is one of the MIPS integer
// conditional branches (including the branch-likely and
// branch-and-link variants).
bool NativeInstruction::is_int_branch() {
  switch(Assembler::opcode(insn_word())) {
    case Assembler::beq_op:
    case Assembler::beql_op:
    case Assembler::bgtz_op:
    case Assembler::bgtzl_op:
    case Assembler::blez_op:
    case Assembler::blezl_op:
    case Assembler::bne_op:
    case Assembler::bnel_op:
      return true;
    case Assembler::regimm_op:
      // REGIMM-class branches encode the branch kind in the rt field.
      switch(Assembler::rt(insn_word())) {
        case Assembler::bgez_op:
        case Assembler::bgezal_op:
        case Assembler::bgezall_op:
        case Assembler::bgezl_op:
        case Assembler::bltz_op:
        case Assembler::bltzal_op:
        case Assembler::bltzall_op:
        case Assembler::bltzl_op:
          return true;
      }
  }

  return false;
}
// Returns true if this instruction is a floating-point condition-code
// branch (COP1 bc1f/bc1t and their branch-likely variants).
bool NativeInstruction::is_float_branch() {
  // Must be a COP1 instruction with the BC sub-opcode in the rs field.
  if (!is_op(Assembler::cop1_op) ||
      !is_rs((Register)Assembler::bc_op)) return false;

  // The true/false and likely variants are distinguished by the rt field.
  switch(Assembler::rt(insn_word())) {
    case Assembler::bcf_op:
    case Assembler::bcfl_op:
    case Assembler::bct_op:
    case Assembler::bctl_op:
      return true;
  }

  return false;
}
111 //-------------------------------------------------------------------
113 void NativeCall::verify() {
114 // make sure code pattern is actually a call instruction
115 #ifndef _LP64
116 if ( !is_op(Assembler::lui_op) ||
117 !is_op(int_at(4), Assembler::addiu_op) ||
118 !is_special_op(int_at(8), Assembler::jalr_op) ) {
119 fatal("not a call");
120 }
121 #else
122 /* li64 or li48 */
123 int li_64 = 0;
124 int li_48 = 0;
126 if ( is_op (Assembler::lui_op) &&
127 is_op (int_at(4), Assembler::ori_op) &&
128 is_special_op(int_at(8), Assembler::dsll_op) &&
129 is_op (int_at(12), Assembler::ori_op) &&
130 is_special_op(int_at(16), Assembler::dsll_op) &&
131 is_op (int_at(20), Assembler::ori_op) &&
132 is_special_op(int_at(24), Assembler::jalr_op) ) {
133 li_64 = 1;
134 }
136 if ( is_op (Assembler::lui_op) &&
137 is_op (int_at(4), Assembler::ori_op) &&
138 is_special_op(int_at(8), Assembler::dsll_op) &&
139 is_op (int_at(12), Assembler::ori_op) &&
140 is_special_op(int_at(16), Assembler::jalr_op) ) {
141 li_48 = 1;
142 }
144 if (!li_64 && !li_48) {
145 tty->print_cr("NativeCall::verify addr=%lx", addr_at(0));
146 fatal("not a call");
147 }
148 #endif
149 }
151 address NativeCall::destination() const {
152 #ifndef _LP64
153 return (address)Assembler::merge(int_at(4)&0xffff, long_at(0)&0xffff);
154 #else
155 /* li64 or li48 */
156 if (is_special_op(int_at(16), Assembler::dsll_op)) {
157 return (address)Assembler::merge( (intptr_t)(int_at(20) & 0xffff),
158 (intptr_t)(int_at(12) & 0xffff),
159 (intptr_t)(int_at(4) & 0xffff),
160 (intptr_t)(int_at(0) & 0xffff));
161 } else if (is_special_op(int_at(16), Assembler::jalr_op)) {
162 return (address)Assembler::merge( (intptr_t)(int_at(12) & 0xffff),
163 (intptr_t)(int_at(4) & 0xffff),
164 (intptr_t)(int_at(0) & 0xffff),
165 (intptr_t)0);
166 }
167 #endif
168 }
170 /* 2013/6/14 Jin: manual implementation of GSSQ
171 *
172 * 00000001200009c0 <atomic_store128>:
173 * 1200009c0: 0085202d daddu a0, a0, a1
174 * 1200009c4: e8860027 gssq a2, a3, 0(a0)
175 * 1200009c8: 03e00008 jr ra
176 * 1200009cc: 00000000 nop
177 */
178 typedef void (* atomic_store128_ptr)(long *addr, int offset, long low64, long hi64);
180 static int *buf;
182 static atomic_store128_ptr get_atomic_store128_func() {
183 assert(UseLoongsonISA, "UseLoongsonISA must be true");
184 static atomic_store128_ptr p = NULL;
185 if (p != NULL)
186 return p;
188 buf = (int *)mmap(NULL, 1024, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS,
189 -1, 0);
190 buf[0] = 0x0085202d;
191 buf[1] = (0x3a << 26) | (4 << 21) | (6 << 16) | 0x27; /* gssq $a2, $a3, 0($a0) */
192 buf[2] = 0x03e00008;
193 buf[3] = 0;
195 p = (atomic_store128_ptr)buf;
196 return p;
197 }
// Patches the target of this call site in place, MT-safely.
//
// 32-bit: rewrites the 16-bit immediate halves of the lui/addiu pair.
//
// 64-bit, two shapes:
//  - li64 (dsll at offset 16): the first word is temporarily replaced by
//    a branch-to-self ("b .1") so a concurrently executing thread spins
//    while the other immediates are patched; the first word is then
//    rewritten with the new bits 63..48.
//  - li48 (jalr at offset 16): with UseLoongsonISA the four words are
//    replaced in one shot via the GSSQ 128-bit atomic store; otherwise
//    the same branch-to-self spin trick is used.
void NativeCall::set_destination(address dest) {
#ifndef _LP64
  OrderAccess::fence();
  set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high((intptr_t)dest) & 0xffff));
  set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
  ICache::invalidate_range(addr_at(0), 8);
#else
  OrderAccess::fence();
  /* 2013/6/13 Jin: ensure 100% atomicity */
  guarantee(!os::is_MP() || (((long)addr_at(0) % 16) == 0), "destination must be aligned for GSSD");

  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op)) {
    // li64: spin concurrent executors on a self-branch while patching.
    int first_word = int_at(0);
    set_int_at(0, 0x1000ffff); /* .1: b .1 */
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
    set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
    set_int_at(20, (int_at(20) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
    set_int_at(0, (first_word & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 48) & 0xffff));
    ICache::invalidate_range(addr_at(0), 24);
  } else if (is_special_op(int_at(16), Assembler::jalr_op)) {
    if (UseLoongsonISA) {
      // Build the four patched words, then store them as one 128-bit unit.
      int insts[4];
      insts[0] = (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff);
      insts[1] = (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff);
      insts[2] = int_at(8); // unchanged (dsll)
      insts[3] = (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff);

      atomic_store128_ptr func = get_atomic_store128_func();
      (*func)((long *)addr_at(0), 0, *(long *)&insts[0], *(long *)&insts[2]);
    } else {
      //assert(is_simm16(dest >> 32), "Not a 48-bit address");
      // Same spin-on-self-branch protocol as the li64 case above.
      int first_word = int_at(0);
      set_int_at(0, 0x1000ffff); /* .1: b .1 */
      set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
      set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
      set_int_at(0, (first_word & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
      ICache::invalidate_range(addr_at(0), 16);
    }
  } else {
    fatal("not a call");
  }
#endif
}
// Prints "<pc>: call <target>" to the tty for debugging.
void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}
// Inserts a native call instruction at a given pc:
// loads 'entry' into T9 (lui/addiu on 32-bit, li48 on 64-bit), then
// jalr through T9 with a nop in the delay slot.
void NativeCall::insert(address code_pos, address entry) {
  NativeCall *call = nativeCall_at(code_pos);
  CodeBuffer cb(call->addr_at(0), instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
#ifndef _LP64
  __ lui(T9, Assembler::split_high((int)entry));
  __ addiu(T9, T9, Assembler::split_low((int)entry));
#else
  __ li48(T9, (long)entry);
#endif
  __ jalr ();
  __ delayed()->nop();
#undef __

  ICache::invalidate_range(call->addr_at(0), instruction_size);
}
// MT-safe patching of a call instruction.
// First patches first word of instruction to two jmp's that jmps to them
// selfs (spinlock). Then patches the last byte, and then atomicly replaces
// the jmp's with the first 4 byte of the new instruction.
// Not implemented on this port; set_destination() covers the MT-safe
// patching needs here.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}
276 //-------------------------------------------------------------------
278 void NativeMovConstReg::verify() {
279 #ifndef _LP64
280 if ( !is_op(Assembler::lui_op) ||
281 !is_op(int_at(4), Assembler::addiu_op) )
282 fatal("not a mov reg, imm32")
283 #else
284 /* li64 or li48 */
285 int li_64 = 0;
286 int li_48 = 0;
288 if ( is_op(Assembler::lui_op) &&
289 is_op(int_at(4), Assembler::ori_op) &&
290 is_special_op(int_at(8), Assembler::dsll_op) &&
291 is_op(int_at(12), Assembler::ori_op) &&
292 is_special_op(int_at(16), Assembler::dsll_op) &&
293 is_op(int_at(20), Assembler::ori_op) )
294 {
295 li_64 = 1;
296 }
298 if ( is_op(Assembler::lui_op) &&
299 is_op (int_at(4), Assembler::ori_op) &&
300 is_special_op(int_at(8), Assembler::dsll_op) &&
301 is_op (int_at(12), Assembler::ori_op) ) {
302 li_48 = 1;
303 }
305 if (!li_64 && !li_48) {
306 fatal("not a mov reg, imm64/imm48");
307 }
308 #endif
309 }
// Prints "<pc>: mov reg, <imm>" to the tty for debugging.
void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}
// Reconstructs the immediate loaded by this mov-constant sequence by
// merging the 16-bit pieces out of the individual instructions
// (li64: four pieces; li48: three pieces, top 16 bits zero).
intptr_t NativeMovConstReg::data() const {
#ifndef _LP64
  return Assembler::merge(int_at(4)&0xffff, long_at(0)&0xffff);
#else
  /* li64 or li48 */
  // NOTE(review): long_at(20) reads 8 bytes where sibling code (e.g.
  // NativeCall::destination) uses int_at -- presumably is_op() truncates
  // to the low instruction word; confirm this is intentional.
  if (is_special_op(int_at(16), Assembler::dsll_op) && is_op(long_at(20), Assembler::ori_op)) {
    return Assembler::merge( (intptr_t)(int_at(20) & 0xffff),
                             (intptr_t)(int_at(12) & 0xffff),
                             (intptr_t)(int_at(4) & 0xffff),
                             (intptr_t)(int_at(0) & 0xffff));
  } else {
    return Assembler::merge( (intptr_t)(int_at(12) & 0xffff),
                             (intptr_t)(int_at(4) & 0xffff),
                             (intptr_t)(int_at(0) & 0xffff),
                             (intptr_t)0);
  }
#endif
}
// Rewrites the immediate in this mov-constant sequence to 'x' by
// patching the 16-bit fields of the lui/ori instructions in place
// (li64: four fields, li48: three), then flushing the icache.
void NativeMovConstReg::set_data(intptr_t x) {
  /*
#ifndef CORE
  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), instruction_address() + 1);
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL && r->oop_index()!=0) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
#endif
  */

#ifndef _LP64
  set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high(x) & 0xffff));
  set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low(x) & 0xffff));
  ICache::invalidate_range(addr_at(0), 8);
#else
  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op) && is_op(long_at(20), Assembler::ori_op)) {
    // li64: patch all four 16-bit pieces (bits 63..48 down to 15..0).
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 48) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 32) & 0xffff));
    set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 16) & 0xffff));
    set_int_at(20, (int_at(20) & 0xffff0000) | (Assembler::split_low((intptr_t)x) & 0xffff));
  } else {
    //assert(is_simm16(dest >> 32), "Not a 48-bit address");
    // li48: three pieces, value must fit in 48 bits.
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 32) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)x >> 16) & 0xffff));
    set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)x) & 0xffff));
  }
  ICache::invalidate_range(addr_at(0), 24);
#endif
}
380 //-------------------------------------------------------------------
// Returns the memory-operand displacement of this load/store: the
// sign-extended 16-bit immediate in the short form, or the merged
// lui/ori value in the long (lui-prefixed) form.
int NativeMovRegMem::offset() const{
  if (is_immediate())
    return (short)(int_at(instruction_offset)&0xffff);
  else
    return Assembler::merge(int_at(hiword_offset)&0xffff, long_at(instruction_offset)&0xffff);
}
// Rewrites the displacement of this load/store to 'x'. In the short
// form the 16-bit immediate is patched directly (and, for a 64-bit
// two-instruction ld/st pair, the second immediate gets x+4); in the
// long form the lui/ori halves are patched. Flushes the icache.
void NativeMovRegMem::set_offset(int x) {
  if (is_immediate()) {
    assert(Assembler::is_simm16(x), "just check");
    set_int_at(0, (int_at(0)&0xffff0000) | (x&0xffff) );
    if (is_64ldst()) {
      assert(Assembler::is_simm16(x+4), "just check");
      // Second half of the 64-bit load/store pair addresses offset+4.
      set_int_at(4, (int_at(4)&0xffff0000) | ((x+4)&0xffff) );
    }
  } else {
    set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high(x) & 0xffff));
    set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low(x) & 0xffff));
  }
  ICache::invalidate_range(addr_at(0), 8);
}
// Checks that the code at this address is a load/store with either an
// immediate displacement or a lui/addiu(/daddiu)/add(/dadd) prefix
// computing the effective address. Calls fatal() on mismatch.
void NativeMovRegMem::verify() {
  int offset = 0;

  // Long form: a lui prefix means the displacement is built in a register.
  if ( Assembler::opcode(int_at(0)) == Assembler::lui_op ) {
#ifndef _LP64
    if ( (Assembler::opcode(int_at(4)) != Assembler::addiu_op) ||
         (Assembler::opcode(int_at(8)) != Assembler::special_op) ||
         (Assembler::special(int_at(8)) != Assembler::add_op))
#else
    /* Jin: fit MIPS64 */
    if ( (Assembler::opcode(int_at(4)) != Assembler::addiu_op &&
          Assembler::opcode(int_at(4)) != Assembler::daddiu_op ) ||
         (Assembler::opcode(int_at(8)) != Assembler::special_op) ||
         (Assembler::special(int_at(8)) != Assembler::add_op
          && Assembler::special(int_at(8)) != Assembler::dadd_op))
#endif
      fatal ("not a mov [reg+offs], reg instruction");
    offset += 12; // the actual load/store follows the 3-insn prefix
  }

  // The (possibly offset) instruction must be a recognized load/store.
  switch(Assembler::opcode(int_at(offset))) {
    case Assembler::lb_op:
    case Assembler::lbu_op:
    case Assembler::lh_op:
    case Assembler::lhu_op:
    case Assembler::lw_op:
    LP64_ONLY(case Assembler::ld_op:)
    case Assembler::lwc1_op:
    LP64_ONLY(case Assembler::ldc1_op:)
    case Assembler::sb_op:
    case Assembler::sh_op:
    case Assembler::sw_op:
    LP64_ONLY(case Assembler::sd_op:)
    case Assembler::swc1_op:
    LP64_ONLY(case Assembler::sdc1_op:)
      break;
    default:
      fatal ("not a mov [reg+offs], reg instruction");
  }
}
446 void NativeMovRegMem::print() {
447 tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
448 }
// Overwrites the instruction at code_pos with an illegal instruction
// ("break 11") and flushes the icache for the patched range.
void NativeIllegalInstruction::insert(address code_pos) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
  __ brk(11);
#undef __

  ICache::invalidate_range(code_pos, instruction_size);
}
// Asserts that this instruction is some form of jump (unconditional or
// conditional); the pattern checks live in is_jump()/is_cond_jump().
void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}
468 void NativeGeneralJump::set_jump_destination(address dest) {
469 //tty->print_cr("NativeGeneralJump::set_jump_destination dest=%lx", dest);
470 OrderAccess::fence();
472 if (is_short()) {
473 assert(Assembler::is_simm16(dest-addr_at(4)), "change this code");
474 set_int_at(0, (int_at(0) & 0xffff0000) | (dest - addr_at(4)) & 0xffff );
475 ICache::invalidate_range(addr_at(0), 4);
476 #ifdef _LP64
477 } else if (is_b_far()) {
478 int offset = dest - addr_at(12);
479 set_int_at(12, (int_at(12) & 0xffff0000) | (offset >> 16));
480 set_int_at(16, (int_at(16) & 0xffff0000) | (offset & 0xffff));
481 #endif
482 } else {
483 #ifndef _LP64
484 set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_high((intptr_t)dest) & 0xffff));
485 set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
486 ICache::invalidate_range(addr_at(0), 8);
487 #else
488 /* li64 or li48 */
489 if (is_special_op(int_at(16), Assembler::dsll_op)) {
490 set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 48) & 0xffff));
491 set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
492 set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
493 set_int_at(20, (int_at(20) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
494 } else {
495 int jr_word = int_at(16);
496 set_int_at(16, 0x1000fffb); /* .1: --; --; --; --; b .1; nop */
498 set_int_at(0, (int_at(0) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 32) & 0xffff));
499 set_int_at(4, (int_at(4) & 0xffff0000) | (Assembler::split_low((intptr_t)dest >> 16) & 0xffff));
500 set_int_at(12, (int_at(12) & 0xffff0000) | (Assembler::split_low((intptr_t)dest) & 0xffff));
501 set_int_at(16, jr_word); /* .1: --; --; --; --; jr ; nop */
502 }
504 ICache::invalidate_range(addr_at(0), 24);
505 #endif
506 }
507 }
// we now use b to do this. be careful when using this method
// by yjl 9/16/2005
//
// Emits an unconditional jump from code_pos to entry: a plain "b" when
// the target is within simm16 branch range, otherwise (LP64 only) a
// simplified b_far sequence that computes the target from RA.
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
#ifdef _LP64
  if (Assembler::is_simm16((entry - code_pos - 4) / 4))
  {
    __ b(entry);
    __ delayed()->nop();
  }
  else
  {
    /* a simplified b_far */
    int offset = entry - code_pos;

    // FIXME: need to preserve RA?
    // bgezal zero, +1: captures the next pc in RA without branching away.
    __ emit_long(0x4110001); //__ emit_long(Assembler::insn_ORRI(Assembler::regimm_op, 0, Assembler::bgezal_op, 1));
    // offset-8 compensates for RA pointing two instructions past code_pos.
    __ lui(T9, (offset - 8) >> 16); // delay slot
    __ ori(T9, T9, (offset - 8) & 0xffff);
    __ daddu(T9, T9, RA);
    __ jr(T9);
    __ nop();
  }
#else
  __ b(entry);
  __ delayed()->nop();
#endif
#undef __

  ICache::invalidate_range(code_pos, instruction_size);
}
#ifdef _LP64
// Returns true if this site is the "b_far" long-branch sequence (see
// the disassembly below); identified by the lui at offset 12.
/*
  0x000000556809f198: dadd at, ra, zero
  0x000000556809f19c: [4110001]bgezal zero, 0x000000556809f1a4

  0x000000556809f1a0: nop
  0x000000556809f1a4: lui t9, 0xfffffffd
  0x000000556809f1a8: ori t9, t9, 0x14dc
  0x000000556809f1ac: daddu t9, t9, ra
  0x000000556809f1b0: dadd ra, at, zero
  0x000000556809f1b4: jr t9
  0x000000556809f1b8: nop
                      ;; ImplicitNullCheckStub slow case
  0x000000556809f1bc: lui t9, 0x55
*/
bool NativeGeneralJump::is_b_far() {
  return is_op(int_at(12), Assembler::lui_op);
}
#endif
// Computes the target of this jump, decoding whichever of the four
// shapes is present: short pc-relative branch, b_far lui/ori offset
// pair, or the li64/li48 absolute-address sequence.
address NativeGeneralJump::jump_destination() {
  if ( is_short() ) {
    // Branch offsets are in words, relative to the delay-slot pc.
    return addr_at(4) + Assembler::imm_off(int_at(instruction_offset)) * 4;
  }
#ifndef _LP64
  return (address)Assembler::merge(int_at(4)&0xffff, long_at(instruction_offset)&0xffff);
#else
  /* 2012/4/19 Jin: Assembler::merge() is not correct in MIPS_64!

     Example:
       hi16 = 0xfffd,
       lo16 = f7a4,

       offset=0xfffdf7a4 (Right)
       Assembler::merge = 0xfffcf7a4 (Wrong)
   */
  if ( is_b_far() ) {
    int hi16 = int_at(12)&0xffff;
    int low16 = int_at(16)&0xffff;
    // Plain shift-and-add instead of merge() -- see the note above.
    address target = addr_at(12) + (hi16 << 16) + low16;
    return target;
  }

  /* li64 or li48 */
  if (is_special_op(int_at(16), Assembler::dsll_op)) {
    return (address)Assembler::merge( (intptr_t)(int_at(20) & 0xffff),
                                      (intptr_t)(int_at(12) & 0xffff),
                                      (intptr_t)(int_at(4) & 0xffff),
                                      (intptr_t)(int_at(0) & 0xffff));
  } else {
    return (address)Assembler::merge( (intptr_t)(int_at(12) & 0xffff),
                                      (intptr_t)(int_at(4) & 0xffff),
                                      (intptr_t)(int_at(0) & 0xffff),
                                      ((int_at(0) & 0xffff) >= 0x8000) ? (intptr_t)0xffff : (intptr_t)0); /* sign-extended to 64-bit*/
  }
#endif
}
// MT-safe patching of a long jump instruction.
// First patches first word of instruction to two jmp's that jmps to them
// selfs (spinlock). Then patches the last byte, and then atomicly replaces
// the jmp's with the first 4 byte of the new instruction.
//
// Implementation: the jr slot (word 4) is first replaced by a
// branch-to-self so concurrent executors spin; the first 16 bytes are
// then copied in, and the final 8 bytes (including the restored jr) are
// written with a single 64-bit store.
// NOTE(review): no ICache::invalidate_range here, unlike the other
// patchers in this file -- confirm callers flush the range themselves.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr);
  assert(NativeGeneralJump::instruction_size == NativeCall::instruction_size,
         "note::Runtime1::patch_code uses NativeCall::instruction_size");

  /* 2013/6/13 Jin: ensure 100% atomicity */
  guarantee(!os::is_MP() || (((long)instr_addr % BytesPerWord) == 0), "destination must be aligned for SD");

  int *p = (int *)instr_addr;
  int jr_word = p[4];

  p[4] = 0x1000fffb; /* .1: --; --; --; --; b .1; nop */
  memcpy(instr_addr, code_buffer, NativeCall::instruction_size - 8);
  *(long *)(instr_addr + 16) = *(long *)(code_buffer + 16);
}
/* Must ensure atomicity */
// Replaces the verified entry point with a jump to the
// handle-wrong-method stub (loaded from a fixed JavaThread slot so the
// sequence is position-independent), storing all 16 bytes with one
// GSSQ so a racing thread never executes a half-patched entry.
// NOTE(review): get_atomic_store128_func() asserts UseLoongsonISA, so
// this path presumably requires a Loongson CPU -- confirm non-Loongson
// builds never reach here.
void NativeGeneralJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  /* 2013/11/5 Jin: ensure 100% atomicity.
   * The destination is fixed and can be cached in JavaThread.
   */
  guarantee(!os::is_MP() || (((long)verified_entry % BytesPerWord) == 0), "destination must be aligned for SD");

  int code_buffer[4];

  CodeBuffer cb((address)code_buffer, instruction_size);
  MacroAssembler masm(&cb);
#define __ masm.
  __ ld(T9, TREG, in_bytes(JavaThread::handle_wrong_method_stub_offset()));
  __ jr(T9);
  __ delayed()->nop();
  __ nop();

  atomic_store128_ptr func = get_atomic_store128_func();
  (*func)((long *)verified_entry, 0, *(long *)&code_buffer[0], *(long *)&code_buffer[2]);

  ICache::invalidate_range(verified_entry, instruction_size);
}
// Returns true if this site matches any of the jump shapes emitted by
// this port: a plain branch, a b_far variant (LP64), or a li64/li48
// load-address-then-jump sequence.
bool NativeInstruction::is_jump()
{
#ifndef _LP64
  return ((int_at(0) & NativeGeneralJump::b_mask) == NativeGeneralJump::beq_opcode) ||
         (is_op(int_at(0), Assembler::lui_op) &&
          is_op(int_at(4), Assembler::addiu_op) &&
          is_special_op(int_at(8), Assembler::jr_op));
#else
  // lui rd, imm(63...48);
  // ori rd, rd, imm(47...32);
  // dsll rd, rd, 16;
  // ori rd, rd, imm(31...16);
  // dsll rd, rd, 16;
  // ori rd, rd, imm(15...0);
  // jalr rd
  // nop
  //
  if ((int_at(0) & NativeGeneralJump::b_mask) == NativeGeneralJump::beq_opcode)
    return true;
  if (is_op(int_at(4), Assembler::lui_op)) /* simplified b_far */
    return true;
  if (is_op(int_at(12), Assembler::lui_op)) /* original b_far */
    return true;
  // li64 followed by a jump through the loaded register.
  if (is_op(int_at(0), Assembler::lui_op) &&
      is_op(int_at(4), Assembler::ori_op) &&
      is_special_op(int_at(8), Assembler::dsll_op) &&
      is_op(int_at(12), Assembler::ori_op) &&
      is_special_op(int_at(16), Assembler::dsll_op) &&
      is_op(int_at(20), Assembler::ori_op))
    return true;
  // li48 variant of the same.
  if (is_op(int_at(0), Assembler::lui_op) &&
      is_op(int_at(4), Assembler::ori_op) &&
      is_special_op(int_at(8), Assembler::dsll_op) &&
      is_op(int_at(12), Assembler::ori_op))
    return true;
  return false;
#endif
}
// DTrace trap detection is not implemented on this port; always reports
// false after flagging the call via Unimplemented().
bool NativeInstruction::is_dtrace_trap() {
  //return (*(int32_t*)this & 0xff) == 0xcc;
  Unimplemented();
  return false;
}
// is mips we have to use two instruction to poll, however, we don't want to bother checking two instructions
// instead, we use a lw $0, at() as the second instruction, and only check this.
// change ZERO -> AT, only in godson-2e @jerome,11/25/2006
//
// Recognizes the safepoint-poll pattern at the current pc: a "lw AT"
// preceded (in the non-OPT_SAFEPOINT LP64 case) by the li64 or li48
// sequence that materialized the polling-page address.
bool NativeInstruction::is_safepoint_poll() {
#ifdef _LP64
  /*
    0x0000005565d28868: lui t2, 0x0         ; -24
    0x0000005565d2886c: ori t2, t2, 0x55    ; -20
    0x0000005565d28870: dsll t2, t2, 16     ; -16
    0x0000005565d28874: ori t2, t2, 0x6428  ; -12
    0x0000005565d28878: dsll t2, t2, 16     ; -8
    0x0000005565d2887c: ori t2, t2, 0x100   ; -4
    0x0000005565d28880: lw at, 0x0(t2)      <-- PC
   */
#ifndef OPT_SAFEPOINT
  /* li64 or li48 */
  if (is_op(Assembler::lw_op) && is_rt(AT)) {
    // Current instruction alone already identifies the poll.
    return true;
  } else if (is_special_op(long_at(-16), Assembler::dsll_op)) {
    /* li64 */
    return (is_op(int_at(-24), Assembler::lui_op) &&
            is_op(int_at(-20), Assembler::ori_op) &&
            is_special_op(int_at(-16), Assembler::dsll_op) &&
            is_op(int_at(-12), Assembler::ori_op) &&
            is_special_op(int_at(-8), Assembler::dsll_op) &&
            is_op(int_at(-4), Assembler::ori_op) &&
            is_op(Assembler::lw_op) &&
            is_rt(AT));
  } else if (is_op(int_at(-16), Assembler::lui_op)) {
    /* li48 */
    return is_op(int_at(-16), Assembler::lui_op) &&
           is_op(int_at(-12), Assembler::ori_op) &&
           is_special_op(int_at(-8), Assembler::dsll_op) &&
           is_op(int_at(-4), Assembler::ori_op) &&
           is_op(Assembler::lw_op) &&
           is_rt(AT);
  } else {
    return false;
  }
#else // OPT_SAFEPOINT
  return is_op(int_at(-4), Assembler::lui_op) &&
         is_op(Assembler::lw_op) &&
         is_rt(AT);
#endif
#else
  return is_op(int_at(-4), Assembler::lui_op) &&
         is_op(Assembler::lw_op) &&
         is_rt(AT);
#endif
}