src/cpu/mips/vm/templateTable_mips_64.cpp

author:      jiangshaofeng
date:        Thu, 25 Aug 2016 22:31:58 +0800
changeset:   89:179d203c9b2b
parent:      88:02ae7081a1a7
child:       104:36f7453a6977
permissions: -rw-r--r--

#4428: Use gsdmult to optimize the lmul template of the interpreter.
I have run the test program on a 3A2000 computer.

Effects:
[loongson@localhost project]$ ./LmulTest.sh
before
time:3225ms
after
time:3115ms
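(about a 3.4% reduction in run time: (3225 - 3115) / 3225 ≈ 3.4%)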

The test Java program:
public class LmulTest {
    public static void main(String args[]) {
        int count = 10000000;
        long startTime = System.currentTimeMillis();
        //long startTime = System.nanoTime();
        for (int i = 0; i < count; i++) {
            long a, b, c;
            a = 12345678;
            a++;
            b = 87654321;
            b++;
            c = a * b;
            //System.out.println(c);
        }
        long endTime = System.currentTimeMillis();
        //long endTime = System.nanoTime();
        System.out.println("time:" + (endTime - startTime) + "ms");
    }
}
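
For reference (not part of the changeset): Java's lmul keeps only the low 64 bits of the
product, which is exactly what the generic dmult/mflo pair and the single Loongson gsdmult
produce. A minimal standalone C++ sketch of the same arithmetic, using the values the loop
above actually multiplies (12345679 and 87654322 after the increments):

#include <cstdint>
#include <cstdio>

int main() {
    // Values from the benchmark loop after the two increments.
    int64_t a = 12345679;
    int64_t b = 87654322;
    // Java long multiplication wraps modulo 2^64, i.e. it keeps the low 64 bits of the
    // product; on MIPS this is what dmult + mflo (or a single gsdmult) deliver.
    int64_t c = (int64_t)((uint64_t)a * (uint64_t)b);
    std::printf("c = %lld\n", (long long)c);
    return 0;
}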

aoqi@1 1 /*
aoqi@1 2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@1 3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
aoqi@1 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@1 5 *
aoqi@1 6 * This code is free software; you can redistribute it and/or modify it
aoqi@1 7 * under the terms of the GNU General Public License version 2 only, as
aoqi@1 8 * published by the Free Software Foundation.
aoqi@1 9 *
aoqi@1 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@1 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@1 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@1 13 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@1 14 * accompanied this code).
aoqi@1 15 *
aoqi@1 16 * You should have received a copy of the GNU General Public License version
aoqi@1 17 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@1 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@1 19 *
aoqi@1 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@1 21 * or visit www.oracle.com if you need additional information or have any
aoqi@1 22 * questions.
aoqi@1 23 *
aoqi@1 24 */
aoqi@1 25
aoqi@1 26 #include "precompiled.hpp"
aoqi@1 27 #include "asm/macroAssembler.hpp"
aoqi@1 28 #include "interpreter/interpreter.hpp"
aoqi@1 29 #include "interpreter/interpreterRuntime.hpp"
aoqi@1 30 #include "interpreter/templateTable.hpp"
aoqi@1 31 #include "memory/universe.inline.hpp"
aoqi@1 32 #include "oops/methodData.hpp"
aoqi@1 33 #include "oops/objArrayKlass.hpp"
aoqi@1 34 #include "oops/oop.inline.hpp"
aoqi@1 35 #include "prims/methodHandles.hpp"
aoqi@1 36 #include "runtime/sharedRuntime.hpp"
aoqi@1 37 #include "runtime/stubRoutines.hpp"
aoqi@1 38 #include "runtime/synchronizer.hpp"
aoqi@1 39
aoqi@1 40
aoqi@1 41 #ifndef CC_INTERP
aoqi@1 42
aoqi@1 43 #define __ _masm->
aoqi@1 44
aoqi@1 45 // Platform-dependent initialization
aoqi@1 46
aoqi@1 47 void TemplateTable::pd_initialize() {
aoqi@1 48 // No mips specific initialization
aoqi@1 49 }
aoqi@1 50
aoqi@1 51 // Address computation: local variables
aoqi@1 52 // we use t8 as the local variables pointer register, by yjl 6/27/2005
aoqi@1 53 static inline Address iaddress(int n) {
aoqi@1 54 return Address(LVP, Interpreter::local_offset_in_bytes(n));
aoqi@1 55 }
aoqi@1 56
aoqi@1 57 static inline Address laddress(int n) {
aoqi@1 58 return iaddress(n + 1);
aoqi@1 59 }
aoqi@1 60
aoqi@1 61 static inline Address faddress(int n) {
aoqi@1 62 return iaddress(n);
aoqi@1 63 }
aoqi@1 64
aoqi@1 65 static inline Address daddress(int n) {
aoqi@1 66 return laddress(n);
aoqi@1 67 }
aoqi@1 68
aoqi@1 69 static inline Address aaddress(int n) {
aoqi@1 70 return iaddress(n);
aoqi@1 71 }
aoqi@1 72 static inline Address haddress(int n) { return iaddress(n + 0); }
aoqi@1 73
aoqi@1 74 //FIXME: cannot use dadd and dsll
aoqi@1 75 /*
aoqi@1 76 static inline Address iaddress(Register r) {
aoqi@1 77 return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
aoqi@1 78 }
aoqi@1 79
aoqi@1 80 static inline Address laddress(Register r) {
aoqi@1 81 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
aoqi@1 82 }
aoqi@1 83
aoqi@1 84 static inline Address faddress(Register r) {
aoqi@1 85 return iaddress(r);
aoqi@1 86 }
aoqi@1 87
aoqi@1 88 static inline Address daddress(Register r) {
aoqi@1 89 return laddress(r);
aoqi@1 90 }
aoqi@1 91
aoqi@1 92 static inline Address aaddress(Register r) {
aoqi@1 93 return iaddress(r);
aoqi@1 94 }
aoqi@1 95 */
aoqi@1 96
aoqi@1 97 static inline Address at_sp() { return Address(SP, 0); }
aoqi@1 98 static inline Address at_sp_p1() { return Address(SP, 1 * wordSize); }
aoqi@1 99 static inline Address at_sp_p2() { return Address(SP, 2 * wordSize); }
aoqi@1 100
aoqi@1 101 // At top of Java expression stack which may be different than esp(). It
aoqi@1 102 // isn't for category 1 objects.
aoqi@1 103 static inline Address at_tos () {
aoqi@1 104 Address tos = Address(SP, Interpreter::expr_offset_in_bytes(0));
aoqi@1 105 return tos;
aoqi@1 106 }
aoqi@1 107
aoqi@1 108 static inline Address at_tos_p1() {
aoqi@1 109 return Address(SP, Interpreter::expr_offset_in_bytes(1));
aoqi@1 110 }
aoqi@1 111
aoqi@1 112 static inline Address at_tos_p2() {
aoqi@1 113 return Address(SP, Interpreter::expr_offset_in_bytes(2));
aoqi@1 114 }
aoqi@1 115
aoqi@1 116 static inline Address at_tos_p3() {
aoqi@1 117 return Address(SP, Interpreter::expr_offset_in_bytes(3));
aoqi@1 118 }
aoqi@1 119
aoqi@1 120 // we use S0 as BCP; be sure BCP is in S0 before you call any of the template generators
aoqi@1 121 Address TemplateTable::at_bcp(int offset) {
aoqi@1 122 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
aoqi@1 123 return Address(BCP, offset);
aoqi@1 124 }
aoqi@1 125
aoqi@1 126 #define callee_saved_register(R) assert((R>=S0 && R<=S7), "should use callee saved registers!")
aoqi@1 127
aoqi@1 128 // bytecode folding
aoqi@1 129 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
aoqi@1 130 Register tmp_reg,
aoqi@1 131 bool load_bc_into_bc_reg,/*=true*/
aoqi@1 132 int byte_no) {
aoqi@1 133 if (!RewriteBytecodes) {
aoqi@1 134 return;
aoqi@1 135 }
aoqi@1 136
aoqi@1 137 Label L_patch_done;
aoqi@1 138 switch (bc) {
aoqi@1 139 case Bytecodes::_fast_aputfield:
aoqi@1 140 case Bytecodes::_fast_bputfield:
aoqi@1 141 case Bytecodes::_fast_cputfield:
aoqi@1 142 case Bytecodes::_fast_dputfield:
aoqi@1 143 case Bytecodes::_fast_fputfield:
aoqi@1 144 case Bytecodes::_fast_iputfield:
aoqi@1 145 case Bytecodes::_fast_lputfield:
aoqi@1 146 case Bytecodes::_fast_sputfield:
aoqi@1 147 {
aoqi@1 148 // We skip bytecode quickening for putfield instructions when the put_code written to the constant pool cache
aoqi@1 149 // is zero. This is required so that every execution of this instruction calls out to
aoqi@1 150 // InterpreterRuntime::resolve_get_put to do additional, required work.
aoqi@1 151 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
aoqi@1 152 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
aoqi@1 153 __ get_cache_and_index_and_bytecode_at_bcp(tmp_reg, bc_reg, tmp_reg, byte_no, 1);
aoqi@1 154 __ daddi(bc_reg, R0, bc);
aoqi@1 155 __ beq(tmp_reg, R0, L_patch_done);
aoqi@1 156 __ delayed()->nop();
aoqi@1 157 }
aoqi@1 158 break;
aoqi@1 159 default:
aoqi@1 160 assert(byte_no == -1, "sanity");
aoqi@1 161 // the pair bytecodes have already done the load.
aoqi@1 162 if (load_bc_into_bc_reg) {
aoqi@1 163 __ move(bc_reg, bc);
aoqi@1 164 }
aoqi@1 165
aoqi@1 166 }
aoqi@1 167 if (JvmtiExport::can_post_breakpoint()) {
aoqi@1 168 Label L_fast_patch;
aoqi@1 169 // if a breakpoint is present we can't rewrite the stream directly
aoqi@1 170 __ lbu(tmp_reg, at_bcp(0));
aoqi@1 171 __ move(AT, Bytecodes::_breakpoint);
aoqi@1 172 __ bne(tmp_reg, AT, L_fast_patch);
aoqi@1 173 __ delayed()->nop();
aoqi@1 174
aoqi@1 175 __ get_method(tmp_reg);
aoqi@1 176 // Let breakpoint table handling rewrite to quicker bytecode
aoqi@1 177 __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
aoqi@1 178 InterpreterRuntime::set_original_bytecode_at), tmp_reg, BCP, bc_reg);
aoqi@1 179
aoqi@1 180 __ b(L_patch_done);
aoqi@1 181 __ delayed()->nop();
aoqi@1 182 __ bind(L_fast_patch);
aoqi@1 183 }
aoqi@1 184
aoqi@1 185 #ifdef ASSERT
aoqi@1 186 Label L_okay;
aoqi@1 187 __ lbu(tmp_reg, at_bcp(0));
aoqi@1 188 __ move(AT, (int)Bytecodes::java_code(bc));
aoqi@1 189 __ beq(tmp_reg, AT, L_okay);
aoqi@1 190 __ delayed()->nop();
aoqi@1 191 __ beq(tmp_reg, bc_reg, L_patch_done);
aoqi@1 192 __ delayed()->nop();
aoqi@1 193 __ stop("patching the wrong bytecode");
aoqi@1 194 __ bind(L_okay);
aoqi@1 195 #endif
aoqi@1 196
aoqi@1 197 // patch bytecode
aoqi@1 198 __ sb(bc_reg, at_bcp(0));
aoqi@1 199 __ bind(L_patch_done);
aoqi@1 200 }
aoqi@1 201
aoqi@1 202
aoqi@1 203 // Individual instructions
aoqi@1 204
aoqi@1 205 void TemplateTable::nop() {
aoqi@1 206 transition(vtos, vtos);
aoqi@1 207 // nothing to do
aoqi@1 208 }
aoqi@1 209
aoqi@1 210 void TemplateTable::shouldnotreachhere() {
aoqi@1 211 transition(vtos, vtos);
aoqi@1 212 __ stop("shouldnotreachhere bytecode");
aoqi@1 213 }
aoqi@1 214
aoqi@1 215 void TemplateTable::aconst_null() {
aoqi@1 216 transition(vtos, atos);
aoqi@1 217 __ move(FSR, R0);
aoqi@1 218 }
aoqi@1 219
aoqi@1 220 void TemplateTable::iconst(int value) {
aoqi@1 221 transition(vtos, itos);
aoqi@1 222 if (value == 0) {
aoqi@1 223 __ move(FSR, R0);
aoqi@1 224 } else {
aoqi@1 225 __ move(FSR, value);
aoqi@1 226 }
aoqi@1 227 }
aoqi@1 228
aoqi@1 229 void TemplateTable::lconst(int value) {
aoqi@1 230 transition(vtos, ltos);
aoqi@1 231 if (value == 0) {
aoqi@1 232 __ move(FSR, R0);
aoqi@1 233 } else {
aoqi@1 234 __ move(FSR, value);
aoqi@1 235 }
aoqi@1 236 assert(value >= 0, "check this code");
aoqi@1 237 //__ move(SSR, R0);
aoqi@1 238 }
aoqi@1 239
aoqi@1 240 void TemplateTable::fconst(int value) {
aoqi@1 241 static float _f1 = 1.0, _f2 = 2.0;
aoqi@1 242 transition(vtos, ftos);
aoqi@1 243 float* p;
aoqi@1 244 switch( value ) {
aoqi@1 245 default: ShouldNotReachHere();
aoqi@1 246 case 0: __ dmtc1(R0, FSF); return;
aoqi@1 247 case 1: p = &_f1; break;
aoqi@1 248 case 2: p = &_f2; break;
aoqi@1 249 }
aoqi@1 250 __ li(AT, (address)p);
aoqi@1 251 __ lwc1(FSF, AT, 0);
aoqi@1 252 }
aoqi@1 253
aoqi@1 254 void TemplateTable::dconst(int value) {
aoqi@1 255 static double _d1 = 1.0;
aoqi@1 256 transition(vtos, dtos);
aoqi@1 257 double* p;
aoqi@1 258 switch( value ) {
aoqi@1 259 default: ShouldNotReachHere();
aoqi@1 260 case 0: __ dmtc1(R0, FSF); return;
aoqi@1 261 case 1: p = &_d1; break;
aoqi@1 262 }
aoqi@1 263 __ li(AT, (address)p);
aoqi@1 264 __ ldc1(FSF, AT, 0);
aoqi@1 265 }
aoqi@1 266
aoqi@1 267 void TemplateTable::bipush() {
aoqi@1 268 transition(vtos, itos);
aoqi@1 269 __ lb(FSR, at_bcp(1));
aoqi@1 270 }
aoqi@1 271
aoqi@1 272 void TemplateTable::sipush() {
aoqi@1 273 transition(vtos, itos);
aoqi@16 274 __ get_2_byte_integer_at_bcp(FSR, AT, 1);
aoqi@1 275 __ hswap(FSR);
aoqi@1 276 }
aoqi@1 277
aoqi@1 278 // T1 : tags
aoqi@1 279 // T2 : index
aoqi@1 280 // T3 : cpool
aoqi@1 281 // T8 : tag
aoqi@1 282 void TemplateTable::ldc(bool wide) {
aoqi@1 283 transition(vtos, vtos);
aoqi@1 284 Label call_ldc, notFloat, notClass, Done;
aoqi@1 285 // get index in cpool
aoqi@1 286 if (wide) {
aoqi@16 287 __ get_2_byte_integer_at_bcp(T2, AT, 1);
aoqi@1 288 __ huswap(T2);
aoqi@1 289 } else {
aoqi@1 290 __ lbu(T2, at_bcp(1));
aoqi@1 291 }
aoqi@1 292
aoqi@1 293 __ get_cpool_and_tags(T3, T1);
aoqi@1 294
aoqi@1 295 const int base_offset = ConstantPool::header_size() * wordSize;
aoqi@1 296 const int tags_offset = Array<u1>::base_offset_in_bytes();
aoqi@1 297
aoqi@1 298 // get type
aoqi@1 299 __ dadd(AT, T1, T2);
aoqi@1 300 __ lb(T1, AT, tags_offset);
aoqi@1 301 //now T1 is the tag
aoqi@1 302
aoqi@1 303 // unresolved string - get the resolved string
aoqi@1 304 /*__ daddiu(AT, T1, - JVM_CONSTANT_UnresolvedString);
aoqi@1 305 __ beq(AT, R0, call_ldc);
aoqi@1 306 __ delayed()->nop();*/
aoqi@1 307
aoqi@1 308 // unresolved class - get the resolved class
aoqi@1 309 __ daddiu(AT, T1, - JVM_CONSTANT_UnresolvedClass);
aoqi@1 310 __ beq(AT, R0, call_ldc);
aoqi@1 311 __ delayed()->nop();
aoqi@1 312
aoqi@1 313 // unresolved class in error (resolution failed) - call into runtime
aoqi@1 314 // so that the same error from first resolution attempt is thrown.
aoqi@1 315 __ daddiu(AT, T1, -JVM_CONSTANT_UnresolvedClassInError);
aoqi@1 316 __ beq(AT, R0, call_ldc);
aoqi@1 317 __ delayed()->nop();
aoqi@1 318
aoqi@1 319 // resolved class - need to call vm to get java mirror of the class
aoqi@1 320 __ daddiu(AT, T1, - JVM_CONSTANT_Class);
aoqi@1 321 __ bne(AT, R0, notClass);
aoqi@1 322 __ delayed()->dsll(T2, T2, Address::times_8);
aoqi@1 323
aoqi@1 324 __ bind(call_ldc);
aoqi@1 325
aoqi@1 326 __ move(A1, wide);
aoqi@1 327 call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), A1);
aoqi@1 328 // __ sw(FSR, SP, - 1 * wordSize);
aoqi@1 329 __ push(atos);
aoqi@1 330 __ b(Done);
aoqi@1 331 // __ delayed()->daddi(SP, SP, - 1 * wordSize);
aoqi@1 332 __ delayed()->nop();
aoqi@1 333 __ bind(notClass);
aoqi@1 334
aoqi@1 335 __ daddiu(AT, T1, -JVM_CONSTANT_Float);
aoqi@1 336 __ bne(AT, R0, notFloat);
aoqi@1 337 __ delayed()->nop();
aoqi@1 338 // ftos
aoqi@1 339 __ dadd(AT, T3, T2);
aoqi@1 340 __ lwc1(FSF, AT, base_offset);
aoqi@1 341 __ push_f();
aoqi@1 342 __ b(Done);
aoqi@1 343 __ delayed()->nop();
aoqi@1 344
aoqi@1 345 __ bind(notFloat);
aoqi@1 346 #ifdef ASSERT
aoqi@1 347 {
aoqi@1 348 Label L;
aoqi@1 349 __ daddiu(AT, T1, -JVM_CONSTANT_Integer);
aoqi@1 350 __ beq(AT, R0, L);
aoqi@1 351 __ delayed()->nop();
aoqi@1 352 __ stop("unexpected tag type in ldc");
aoqi@1 353 __ bind(L);
aoqi@1 354 }
aoqi@1 355 #endif
aoqi@1 356 // atos and itos
aoqi@1 357 __ dadd(T0, T3, T2);
aoqi@1 358 __ lw(FSR, T0, base_offset);
aoqi@1 359 __ push(itos);
aoqi@1 360 __ b(Done);
aoqi@1 361 __ delayed()->nop();
aoqi@1 362
aoqi@1 363
aoqi@1 364 if (VerifyOops) {
aoqi@1 365 __ verify_oop(FSR);
aoqi@1 366 }
aoqi@1 367
aoqi@1 368 __ bind(Done);
aoqi@1 369 }
aoqi@1 370
aoqi@1 371 // Fast path for caching oop constants.
aoqi@1 372 void TemplateTable::fast_aldc(bool wide) {
aoqi@1 373 transition(vtos, atos);
aoqi@1 374
aoqi@1 375 Register result = FSR;
aoqi@1 376 Register tmp = SSR;
aoqi@1 377 int index_size = wide ? sizeof(u2) : sizeof(u1);
aoqi@1 378
aoqi@1 379 Label resolved;
aoqi@1 380 // We are resolved if the resolved reference cache entry contains a
aoqi@1 381 // non-null object (String, MethodType, etc.)
aoqi@1 382 assert_different_registers(result, tmp);
aoqi@1 383 __ get_cache_index_at_bcp(tmp, 1, index_size);
aoqi@1 384 __ load_resolved_reference_at_index(result, tmp);
aoqi@1 385 __ bne(result, R0, resolved);
aoqi@1 386 __ delayed()->nop();
aoqi@1 387
aoqi@1 388 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
aoqi@1 389 // first time invocation - must resolve first
aoqi@1 390 int i = (int)bytecode();
aoqi@1 391 __ move(tmp, i);
aoqi@1 392 __ call_VM(result, entry, tmp);
aoqi@1 393
aoqi@1 394 __ bind(resolved);
aoqi@1 395
aoqi@1 396 if (VerifyOops) {
aoqi@1 397 __ verify_oop(result);
aoqi@1 398 }
aoqi@1 399 }
aoqi@1 400
aoqi@1 401
aoqi@1 402 // used register: T2, T3, T1
aoqi@1 403 // T2 : index
aoqi@1 404 // T3 : cpool
aoqi@1 405 // T1 : tag
aoqi@1 406 void TemplateTable::ldc2_w() {
aoqi@1 407 transition(vtos, vtos);
aoqi@1 408 Label Long, Done;
aoqi@1 409
aoqi@1 410 // get index in cpool
aoqi@16 411 __ get_2_byte_integer_at_bcp(T2, AT, 1);
aoqi@1 412 __ huswap(T2);
aoqi@1 413
aoqi@1 414 __ get_cpool_and_tags(T3, T1);
aoqi@1 415
aoqi@1 416 const int base_offset = ConstantPool::header_size() * wordSize;
aoqi@1 417 const int tags_offset = Array<u1>::base_offset_in_bytes();
aoqi@1 418
aoqi@1 419 // get type in T1
aoqi@1 420 __ dadd(AT, T1, T2);
aoqi@1 421 __ lb(T1, AT, tags_offset);
aoqi@1 422
aoqi@1 423 __ daddiu(AT, T1, - JVM_CONSTANT_Double);
aoqi@1 424 __ bne(AT, R0, Long);
aoqi@1 425 __ delayed()->dsll(T2, T2, Address::times_8);
aoqi@1 426 // dtos
aoqi@1 427 __ daddu(AT, T3, T2);
aoqi@1 428 __ ldc1(FSF, AT, base_offset + 0 * wordSize);
aoqi@1 429 __ sdc1(FSF, SP, - 2 * wordSize);
aoqi@1 430 __ b(Done);
aoqi@1 431 __ delayed()->daddi(SP, SP, - 2 * wordSize);
aoqi@1 432
aoqi@1 433 // ltos
aoqi@1 434 __ bind(Long);
aoqi@1 435 __ dadd(AT, T3, T2);
aoqi@1 436 __ ld(FSR, AT, base_offset + 0 * wordSize);
aoqi@1 437 __ push(ltos);
aoqi@1 438
aoqi@1 439 __ bind(Done);
aoqi@1 440 }
aoqi@1 441
aoqi@1 442 // we compute the actual local variable address here
aoqi@1 443 // x86 doesn't need to, since it has a scaled-index memory addressing mode; we don't, so we do it here
aoqi@1 444 void TemplateTable::locals_index(Register reg, int offset) {
aoqi@1 445 __ lbu(reg, at_bcp(offset));
aoqi@1 446 __ dsll(reg, reg, Address::times_8);
aoqi@1 447 __ dsub(reg, LVP, reg);
aoqi@1 448 }
aoqi@1 449
aoqi@1 450 // this method will do bytecode folding of the two forms:
aoqi@1 451 // iload iload    and    iload caload
aoqi@1 452 // used register : T2, T3
aoqi@1 453 // T2 : bytecode
aoqi@1 454 // T3 : folded code
aoqi@1 455 void TemplateTable::iload() {
aoqi@1 456 transition(vtos, itos);
aoqi@1 457 if (RewriteFrequentPairs) {
aoqi@1 458 Label rewrite, done;
aoqi@1 459 // get the next bytecode in T2
aoqi@1 460 __ lbu(T2, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
aoqi@1 461 // if _iload, wait to rewrite to iload2. We only want to rewrite the
aoqi@1 462 // last two iloads in a pair. Comparing against fast_iload means that
aoqi@1 463 // the next bytecode is neither an iload nor a caload, and therefore
aoqi@1 464 // an iload pair.
aoqi@1 465 __ move(AT, Bytecodes::_iload);
aoqi@1 466 __ beq(AT, T2, done);
aoqi@1 467 __ delayed()->nop();
aoqi@1 468
aoqi@1 469 __ move(T3, Bytecodes::_fast_iload2);
aoqi@1 470 __ move(AT, Bytecodes::_fast_iload);
aoqi@1 471 __ beq(AT, T2, rewrite);
aoqi@1 472 __ delayed()->nop();
aoqi@1 473
aoqi@1 474 // if _caload, rewrite to fast_icaload
aoqi@1 475 __ move(T3, Bytecodes::_fast_icaload);
aoqi@1 476 __ move(AT, Bytecodes::_caload);
aoqi@1 477 __ beq(AT, T2, rewrite);
aoqi@1 478 __ delayed()->nop();
aoqi@1 479
aoqi@1 480 // rewrite so iload doesn't check again.
aoqi@1 481 __ move(T3, Bytecodes::_fast_iload);
aoqi@1 482
aoqi@1 483 // rewrite
aoqi@1 484 // T3 : fast bytecode
aoqi@1 485 __ bind(rewrite);
aoqi@1 486 patch_bytecode(Bytecodes::_iload, T3, T2, false);
aoqi@1 487 __ bind(done);
aoqi@1 488 }
aoqi@1 489
aoqi@1 490 // Get the local value into tos
aoqi@1 491 locals_index(T2);
aoqi@1 492 __ lw(FSR, T2, 0);
aoqi@1 493 }
aoqi@1 494
aoqi@1 495 // used register T2
aoqi@1 496 // T2 : index
aoqi@1 497 void TemplateTable::fast_iload2() {
aoqi@1 498 transition(vtos, itos);
aoqi@1 499 locals_index(T2);
aoqi@1 500 __ lw(FSR, T2, 0);
aoqi@1 501 __ push(itos);
aoqi@1 502 locals_index(T2, 3);
aoqi@1 503 __ lw(FSR, T2, 0);
aoqi@1 504 }
aoqi@1 505
aoqi@1 506 // used register T2
aoqi@1 507 // T2 : index
aoqi@1 508 void TemplateTable::fast_iload() {
aoqi@1 509 transition(vtos, itos);
aoqi@1 510 locals_index(T2);
aoqi@1 511 __ lw(FSR, T2, 0);
aoqi@1 512 }
aoqi@1 513
aoqi@1 514 // used register T2
aoqi@1 515 // T2 : index
aoqi@1 516 void TemplateTable::lload() {
aoqi@1 517
aoqi@1 518 transition(vtos, ltos);
aoqi@1 519 locals_index(T2);
aoqi@1 520 __ ld(FSR, T2, -wordSize);
aoqi@1 521 __ ld(SSR, T2, 0);
aoqi@1 522 }
aoqi@1 523
aoqi@1 524 // used register T2
aoqi@1 525 // T2 : index
aoqi@1 526 void TemplateTable::fload() {
aoqi@1 527 transition(vtos, ftos);
aoqi@1 528 locals_index(T2);
aoqi@1 529 //FIXME, aoqi. What should the high 32 bits be when storing a single float into a 64-bit register?
aoqi@1 530 //__ mtc1(R0, FSF);
aoqi@1 531 __ lwc1(FSF, T2, 0);
aoqi@1 532 }
aoqi@1 533
aoqi@1 534 // used register T2
aoqi@1 535 // T2 : index
aoqi@1 536 void TemplateTable::dload() {
aoqi@1 537
aoqi@1 538 transition(vtos, dtos);
aoqi@1 539 locals_index(T2);
aoqi@1 540 /* if (TaggedStackInterpreter) {
aoqi@1 541 // Get double out of locals array, onto temp stack and load with
aoqi@1 542 // float instruction into ST0
aoqi@1 543 __ dsll(AT,T2,Interpreter::stackElementScale());
aoqi@1 544 __ dadd(AT, LVP, AT);
aoqi@1 545 __ ldc1(FSF, AT, Interpreter::local_offset_in_bytes(1));
aoqi@1 546 } else {*/
aoqi@1 547 __ ldc1(FSF, T2, -wordSize);
aoqi@1 548 __ ldc1(SSF, T2, 0);
aoqi@1 549 // }
aoqi@1 550 }
aoqi@1 551
aoqi@1 552 // used register T2
aoqi@1 553 // T2 : index
aoqi@1 554 void TemplateTable::aload()
aoqi@1 555 {
aoqi@1 556 transition(vtos, atos);
aoqi@1 557 locals_index(T2);
aoqi@1 558 __ ld(FSR, T2, 0);
aoqi@1 559 }
aoqi@1 560
aoqi@1 561 void TemplateTable::locals_index_wide(Register reg) {
aoqi@16 562 __ get_2_byte_integer_at_bcp(reg, AT, 2);
aoqi@1 563 __ huswap(reg);
aoqi@1 564 __ dsll(reg, reg, Address::times_8);
aoqi@1 565 __ dsub(reg, LVP, reg);
aoqi@1 566 }
aoqi@1 567
aoqi@1 568 // used register T2
aoqi@1 569 // T2 : index
aoqi@1 570 void TemplateTable::wide_iload() {
aoqi@1 571 transition(vtos, itos);
aoqi@1 572 locals_index_wide(T2);
aoqi@1 573 __ ld(FSR, T2, 0);
aoqi@1 574 }
aoqi@1 575
aoqi@1 576 // used register T2
aoqi@1 577 // T2 : index
aoqi@1 578 void TemplateTable::wide_lload() {
aoqi@1 579 transition(vtos, ltos);
aoqi@1 580 locals_index_wide(T2);
aoqi@1 581 __ ld(FSR, T2, -4);
aoqi@1 582 }
aoqi@1 583
aoqi@1 584 // used register T2
aoqi@1 585 // T2 : index
aoqi@1 586 void TemplateTable::wide_fload() {
aoqi@1 587 transition(vtos, ftos);
aoqi@1 588 locals_index_wide(T2);
aoqi@1 589 __ lwc1(FSF, T2, 0);
aoqi@1 590 }
aoqi@1 591
aoqi@1 592 // used register T2
aoqi@1 593 // T2 : index
aoqi@1 594 void TemplateTable::wide_dload() {
aoqi@1 595 transition(vtos, dtos);
aoqi@1 596 locals_index_wide(T2);
aoqi@1 597 /* if (TaggedStackInterpreter) {
aoqi@1 598 // Get double out of locals array, onto temp stack and load with
aoqi@1 599 // float instruction into ST0
aoqi@1 600 // __ movl(eax, laddress(ebx));
aoqi@1 601 // __ movl(edx, haddress(ebx));
aoqi@1 602 __ dsll(AT,T2,Interpreter::stackElementScale());
aoqi@1 603 __ dadd(AT, LVP, AT);
aoqi@1 604 __ ldc1(FSF, AT, Interpreter::local_offset_in_bytes(1));
aoqi@1 605
aoqi@1 606 // __ pushl(edx); // push hi first
aoqi@1 607 // __ pushl(eax);
aoqi@1 608 // __ fld_d(Address(esp));
aoqi@1 609 // __ addl(esp, 2*wordSize);
aoqi@1 610 } else {*/
aoqi@1 611 __ ldc1(FSF, T2, -4);
aoqi@1 612 //}
aoqi@1 613 }
aoqi@1 614
aoqi@1 615 // used register T2
aoqi@1 616 // T2 : index
aoqi@1 617 void TemplateTable::wide_aload() {
aoqi@1 618 transition(vtos, atos);
aoqi@1 619 locals_index_wide(T2);
aoqi@1 620 __ ld(FSR, T2, 0);
aoqi@1 621 }
aoqi@1 622
aoqi@1 623 // we use A2 as the register for the index, BE CAREFUL!
aoqi@1 624 // we don't use our tge trap (code 29) now; left for later optimization
aoqi@1 625 void TemplateTable::index_check(Register array, Register index) {
aoqi@1 626 // Pop ptr into array
aoqi@1 627 __ pop_ptr(array);
aoqi@1 628 index_check_without_pop(array, index);
aoqi@1 629 }
aoqi@1 630
aoqi@1 631 void TemplateTable::index_check_without_pop(Register array, Register index) {
aoqi@1 632 // destroys ebx
aoqi@1 633 // check array
aoqi@1 634 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
aoqi@1 635
aoqi@1 636 // check index
aoqi@1 637 Label ok;
aoqi@1 638 __ lw(AT, array, arrayOopDesc::length_offset_in_bytes());
aoqi@1 639 #ifndef OPT_RANGECHECK
aoqi@1 640 __ sltu(AT, index, AT);
aoqi@1 641 __ bne(AT, R0, ok);
aoqi@1 642 __ delayed()->nop();
aoqi@1 643
aoqi@1 644 //throw_ArrayIndexOutOfBoundsException assumes the aberrant index is in A2
aoqi@1 645 if (A2 != index) __ move(A2, index);
aoqi@1 646 __ jmp(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
aoqi@1 647 __ delayed()->nop();
aoqi@1 648 __ bind(ok);
aoqi@1 649 #else
aoqi@1 650 __ lw(AT, array, arrayOopDesc::length_offset_in_bytes());
aoqi@1 651 __ move(A2, index);
aoqi@1 652 __ tgeu(A2, AT, 29);
aoqi@1 653 #endif
aoqi@1 654 }
aoqi@1 655
aoqi@1 656 void TemplateTable::iaload() {
aoqi@1 657 transition(itos, itos);
aoqi@1 658 // __ pop(SSR);
aoqi@1 659 index_check(SSR, FSR);
aoqi@1 660 __ dsll(FSR, FSR, 2);
aoqi@1 661 __ dadd(FSR, SSR, FSR);
aoqi@1 662 //FSR: index
aoqi@1 663 __ lw(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_INT));
aoqi@1 664 }
aoqi@1 665
aoqi@1 666
aoqi@1 667 void TemplateTable::laload() {
aoqi@1 668 transition(itos, ltos);
aoqi@1 669 // __ pop(SSR);
aoqi@1 670 index_check(SSR, FSR);
aoqi@1 671 __ dsll(AT, FSR, Address::times_8);
aoqi@1 672 __ dadd(AT, SSR, AT);
aoqi@1 673 __ ld(FSR, AT, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize);
aoqi@1 674 }
aoqi@1 675
aoqi@1 676 void TemplateTable::faload() {
aoqi@1 677 transition(itos, ftos);
aoqi@1 678 // __ pop(SSR);
aoqi@1 679 index_check(SSR, FSR);
aoqi@1 680 __ shl(FSR, 2);
aoqi@1 681 __ dadd(FSR, SSR, FSR);
aoqi@1 682 __ lwc1(FSF, FSR, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
aoqi@1 683 }
aoqi@1 684
aoqi@1 685 void TemplateTable::daload() {
aoqi@1 686 transition(itos, dtos);
aoqi@1 687 //__ pop(SSR);
aoqi@1 688 index_check(SSR, FSR);
aoqi@1 689 __ dsll(AT, FSR, 3);
aoqi@1 690 __ dadd(AT, SSR, AT);
aoqi@1 691 __ ldc1(FSF, AT, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) + 0 * wordSize);
aoqi@1 692 }
aoqi@1 693
aoqi@1 694 void TemplateTable::aaload() {
aoqi@1 695 transition(itos, atos);
aoqi@1 696 //__ pop(SSR);
aoqi@1 697 index_check(SSR, FSR);
aoqi@1 698 __ dsll(FSR, FSR, UseCompressedOops ? Address::times_4 : Address::times_8);
aoqi@1 699 __ dadd(FSR, SSR, FSR);
aoqi@1 700 //add for compressedoops
aoqi@1 701 __ load_heap_oop(FSR, Address(FSR, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
aoqi@1 702 }
aoqi@1 703
aoqi@1 704 void TemplateTable::baload() {
aoqi@1 705 transition(itos, itos);
aoqi@1 706 //__ pop(SSR);
aoqi@1 707 index_check(SSR, FSR);
aoqi@1 708 __ dadd(FSR, SSR, FSR);
aoqi@1 709 __ lb(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_BYTE));
aoqi@1 710 }
aoqi@1 711
aoqi@1 712 void TemplateTable::caload() {
aoqi@1 713 transition(itos, itos);
aoqi@1 714 // __ pop(SSR);
aoqi@1 715 index_check(SSR, FSR);
aoqi@1 716 __ dsll(FSR, FSR, Address::times_2);
aoqi@1 717 __ dadd(FSR, SSR, FSR);
aoqi@1 718 __ lhu(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_CHAR));
aoqi@1 719 }
aoqi@1 720
aoqi@1 721 // iload followed by caload frequent pair
aoqi@1 722 // used register : T2
aoqi@1 723 // T2 : index
aoqi@1 724 void TemplateTable::fast_icaload() {
aoqi@1 725 transition(vtos, itos);
aoqi@1 726 // load index out of locals
aoqi@1 727 locals_index(T2);
aoqi@1 728 __ lw(FSR, T2, 0);
aoqi@1 729 // __ pop(SSR);
aoqi@1 730 index_check(SSR, FSR);
aoqi@1 731 __ dsll(FSR, FSR, 1);
aoqi@1 732 __ dadd(FSR, SSR, FSR);
aoqi@1 733 __ lhu(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_CHAR));
aoqi@1 734 }
aoqi@1 735
aoqi@1 736 void TemplateTable::saload() {
aoqi@1 737 transition(itos, itos);
aoqi@1 738 // __ pop(SSR);
aoqi@1 739 index_check(SSR, FSR);
aoqi@1 740 __ dsll(FSR, FSR, Address::times_2);
aoqi@1 741 __ dadd(FSR, SSR, FSR);
aoqi@1 742 __ lh(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_SHORT));
aoqi@1 743 }
aoqi@1 744
aoqi@1 745 void TemplateTable::iload(int n) {
aoqi@1 746 transition(vtos, itos);
aoqi@1 747 __ lw(FSR, iaddress(n));
aoqi@1 748 }
aoqi@1 749
aoqi@1 750 void TemplateTable::lload(int n) {
aoqi@1 751 transition(vtos, ltos);
aoqi@1 752 __ ld(FSR, laddress(n));
aoqi@1 753 }
aoqi@1 754
aoqi@1 755 void TemplateTable::fload(int n) {
aoqi@1 756 transition(vtos, ftos);
aoqi@1 757 //__ mtc1(R0, FSF);
aoqi@1 758 __ lwc1(FSF, faddress(n));
aoqi@1 759 }
aoqi@1 760 //FIXME here
aoqi@1 761 void TemplateTable::dload(int n) {
aoqi@1 762 transition(vtos, dtos);
aoqi@1 763 __ ldc1(FSF, laddress(n));
aoqi@1 764 }
aoqi@1 765
aoqi@1 766 void TemplateTable::aload(int n) {
aoqi@1 767 transition(vtos, atos);
aoqi@1 768 __ ld(FSR, aaddress(n));
aoqi@1 769 }
aoqi@1 770
aoqi@1 771 // used register : T2, T3
aoqi@1 772 // T2 : bytecode
aoqi@1 773 // T3 : folded code
aoqi@1 774 void TemplateTable::aload_0() {
aoqi@1 775 transition(vtos, atos);
aoqi@1 776 // According to bytecode histograms, the pairs:
aoqi@1 777 //
aoqi@1 778 // _aload_0, _fast_igetfield
aoqi@1 779 // _aload_0, _fast_agetfield
aoqi@1 780 // _aload_0, _fast_fgetfield
aoqi@1 781 //
aoqi@1 782 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
aoqi@1 783 // bytecode checks if the next bytecode is either _fast_igetfield,
aoqi@1 784 // _fast_agetfield or _fast_fgetfield and then rewrites the
aoqi@1 785 // current bytecode into a pair bytecode; otherwise it rewrites the current
aoqi@1 786 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
aoqi@1 787 //
aoqi@1 788 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
aoqi@1 789 // otherwise we may miss an opportunity for a pair.
aoqi@1 790 //
aoqi@1 791 // Also rewrite frequent pairs
aoqi@1 792 // aload_0, aload_1
aoqi@1 793 // aload_0, iload_1
aoqi@1 794 // These bytecodes with a small amount of code are most profitable to rewrite
aoqi@1 795 if (RewriteFrequentPairs) {
aoqi@1 796 Label rewrite, done;
aoqi@1 797 // get the next bytecode in T2
aoqi@1 798 __ lbu(T2, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
aoqi@1 799
aoqi@1 800 // do actual aload_0
aoqi@1 801 aload(0);
aoqi@1 802
aoqi@1 803 // if _getfield then wait with rewrite
aoqi@1 804 __ move(AT, Bytecodes::_getfield);
aoqi@1 805 __ beq(AT, T2, done);
aoqi@1 806 __ delayed()->nop();
aoqi@1 807
aoqi@1 808 // if _igetfield then rewrite to _fast_iaccess_0
aoqi@1 809 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
aoqi@1 810 Bytecodes::_aload_0, "fix bytecode definition");
aoqi@1 811 __ move(T3, Bytecodes::_fast_iaccess_0);
aoqi@1 812 __ move(AT, Bytecodes::_fast_igetfield);
aoqi@1 813 __ beq(AT, T2, rewrite);
aoqi@1 814 __ delayed()->nop();
aoqi@1 815
aoqi@1 816 // if _agetfield then rewrite to _fast_aaccess_0
aoqi@1 817 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
aoqi@1 818 Bytecodes::_aload_0, "fix bytecode definition");
aoqi@1 819 __ move(T3, Bytecodes::_fast_aaccess_0);
aoqi@1 820 __ move(AT, Bytecodes::_fast_agetfield);
aoqi@1 821 __ beq(AT, T2, rewrite);
aoqi@1 822 __ delayed()->nop();
aoqi@1 823
aoqi@1 824 // if _fgetfield then rewrite to _fast_faccess_0
aoqi@1 825 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
aoqi@1 826 Bytecodes::_aload_0, "fix bytecode definition");
aoqi@1 827 __ move(T3, Bytecodes::_fast_faccess_0);
aoqi@1 828 __ move(AT, Bytecodes::_fast_fgetfield);
aoqi@1 829 __ beq(AT, T2, rewrite);
aoqi@1 830 __ delayed()->nop();
aoqi@1 831
aoqi@1 832 // else rewrite to _fast_aload0
aoqi@1 833 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
aoqi@1 834 Bytecodes::_aload_0, "fix bytecode definition");
aoqi@1 835 __ move(T3, Bytecodes::_fast_aload_0);
aoqi@1 836
aoqi@1 837 // rewrite
aoqi@1 838 __ bind(rewrite);
aoqi@1 839 patch_bytecode(Bytecodes::_aload_0, T3, T2, false);
aoqi@1 840
aoqi@1 841 __ bind(done);
aoqi@1 842 } else {
aoqi@1 843 aload(0);
aoqi@1 844 }
aoqi@1 845 }
aoqi@1 846
aoqi@1 847 void TemplateTable::istore() {
aoqi@1 848 transition(itos, vtos);
aoqi@1 849 locals_index(T2);
aoqi@1 850 __ sw(FSR, T2, 0);
aoqi@1 851 }
aoqi@1 852
aoqi@1 853 void TemplateTable::lstore() {
aoqi@1 854 transition(ltos, vtos);
aoqi@1 855 locals_index(T2);
aoqi@1 856 __ sd(FSR, T2, -wordSize);
aoqi@1 857 }
aoqi@1 858
aoqi@1 859 void TemplateTable::fstore() {
aoqi@1 860 transition(ftos, vtos);
aoqi@1 861 locals_index(T2);
aoqi@1 862 __ swc1(FSF, T2, 0);
aoqi@1 863 }
aoqi@1 864
aoqi@1 865 void TemplateTable::dstore() {
aoqi@1 866 transition(dtos, vtos);
aoqi@1 867 locals_index(T2);
aoqi@1 868 __ sdc1(FSF, T2, -wordSize);
aoqi@1 869 }
aoqi@1 870
aoqi@1 871 void TemplateTable::astore() {
aoqi@1 872 transition(vtos, vtos);
aoqi@1 873 // __ pop(FSR);
aoqi@1 874 __ pop_ptr(FSR);
aoqi@1 875 locals_index(T2);
aoqi@1 876 __ sd(FSR, T2, 0);
aoqi@1 877 }
aoqi@1 878
aoqi@1 879 void TemplateTable::wide_istore() {
aoqi@1 880 transition(vtos, vtos);
aoqi@1 881 // __ pop(FSR);
aoqi@1 882 __ pop_i(FSR);
aoqi@1 883 locals_index_wide(T2);
aoqi@1 884 __ sd(FSR, T2, 0);
aoqi@1 885 }
aoqi@1 886
aoqi@1 887 void TemplateTable::wide_lstore() {
aoqi@1 888 transition(vtos, vtos);
aoqi@1 889 //__ pop2(FSR, SSR);
aoqi@1 890 //__ pop_l(FSR, SSR);
aoqi@1 891 __ pop_l(FSR); //aoqi:FIXME Is this right?
aoqi@1 892 locals_index_wide(T2);
aoqi@1 893 __ sd(FSR, T2, -4);
aoqi@1 894 }
aoqi@1 895
aoqi@1 896 void TemplateTable::wide_fstore() {
aoqi@1 897 wide_istore();
aoqi@1 898 }
aoqi@1 899
aoqi@1 900 void TemplateTable::wide_dstore() {
aoqi@1 901 wide_lstore();
aoqi@1 902 }
aoqi@1 903
aoqi@1 904 void TemplateTable::wide_astore() {
aoqi@1 905 transition(vtos, vtos);
aoqi@1 906 __ pop_ptr(FSR);
aoqi@1 907 locals_index_wide(T2);
aoqi@1 908 __ sd(FSR, T2, 0);
aoqi@1 909 }
aoqi@1 910
aoqi@1 911 // used register : T2
aoqi@1 912 void TemplateTable::iastore() {
aoqi@1 913 transition(itos, vtos);
aoqi@1 914 __ pop_i(SSR);
aoqi@1 915 index_check(T2, SSR); // prefer index in ebx
aoqi@1 916 __ dsll(SSR, SSR, Address::times_4);
aoqi@1 917 __ dadd(T2, T2, SSR);
aoqi@1 918 __ sw(FSR, T2, arrayOopDesc::base_offset_in_bytes(T_INT));
aoqi@1 919 }
aoqi@1 920
aoqi@1 921
aoqi@1 922
aoqi@1 923 // used register T2, T3
aoqi@1 924 void TemplateTable::lastore() {
aoqi@1 925 transition(ltos, vtos);
aoqi@1 926 __ pop_i (T2);
aoqi@1 927 index_check(T3, T2);
aoqi@1 928 __ dsll(T2, T2, Address::times_8);
aoqi@1 929 __ dadd(T3, T3, T2);
aoqi@1 930 __ sd(FSR, T3, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize);
aoqi@1 931 }
aoqi@1 932
aoqi@1 933 // used register T2
aoqi@1 934 void TemplateTable::fastore() {
aoqi@1 935 transition(ftos, vtos);
aoqi@1 936 __ pop_i(SSR);
aoqi@1 937 index_check(T2, SSR);
aoqi@1 938 __ dsll(SSR, SSR, Address::times_4);
aoqi@1 939 __ dadd(T2, T2, SSR);
aoqi@1 940 __ swc1(FSF, T2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
aoqi@1 941 }
aoqi@1 942
aoqi@1 943 // used register T2, T3
aoqi@1 944 void TemplateTable::dastore() {
aoqi@1 945 transition(dtos, vtos);
aoqi@1 946 __ pop_i (T2);
aoqi@1 947 index_check(T3, T2);
aoqi@1 948 __ dsll(T2, T2, Address::times_8);
aoqi@1 949 __ daddu(T3, T3, T2);
aoqi@1 950 __ sdc1(FSF, T3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) + 0 * wordSize);
aoqi@1 951
aoqi@1 952 }
aoqi@1 953
aoqi@1 954 // used register : T2, T3, T8
aoqi@1 955 // T2 : array
aoqi@1 956 // T3 : subklass
aoqi@1 957 // T8 : supklass
aoqi@1 958 void TemplateTable::aastore() {
aoqi@1 959 Label is_null, ok_is_subtype, done;
aoqi@1 960 transition(vtos, vtos);
aoqi@1 961 // stack: ..., array, index, value
aoqi@1 962 __ ld(FSR, at_tos()); // Value
aoqi@1 963 __ lw(SSR, at_tos_p1()); // Index
aoqi@1 964 __ ld(T2, at_tos_p2()); // Array
aoqi@1 965
aoqi@1 966 // index_check(T2, SSR);
aoqi@1 967 index_check_without_pop(T2, SSR);
aoqi@1 968 // do array store check - check for NULL value first
aoqi@1 969 __ beq(FSR, R0, is_null);
aoqi@1 970 __ delayed()->nop();
aoqi@1 971
aoqi@1 972 // Move subklass into T3
aoqi@1 973 //__ ld(T3, Address(FSR, oopDesc::klass_offset_in_bytes()));
aoqi@1 974 //add for compressedoops
aoqi@1 975 __ load_klass(T3, FSR);
aoqi@1 976 // Move superklass into T8
aoqi@1 977 //__ ld(T8, Address(T2, oopDesc::klass_offset_in_bytes()));
aoqi@1 978 //add for compressedoops
aoqi@1 979 __ load_klass(T8, T2);
aoqi@1 980 __ ld(T8, Address(T8, ObjArrayKlass::element_klass_offset()));
aoqi@1 981 // Compress array + index*scale + base offset into a single register: T2
aoqi@1 982 __ dsll(AT, SSR, UseCompressedOops? Address::times_4 : Address::times_8);
aoqi@1 983 __ dadd(T2, T2, AT);
aoqi@1 984 __ daddi(T2, T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
aoqi@1 985
aoqi@1 986 // Generate subtype check.
aoqi@1 987 // Superklass in T8. Subklass in T3.
aoqi@1 988 __ gen_subtype_check(T8, T3, ok_is_subtype); // <-- Jin
aoqi@1 989 // Come here on failure
aoqi@1 990 // object is at FSR
aoqi@1 991 __ jmp(Interpreter::_throw_ArrayStoreException_entry); // <-- Jin
aoqi@1 992 __ delayed()->nop();
aoqi@1 993 // Come here on success
aoqi@1 994 __ bind(ok_is_subtype);
aoqi@1 995 //replace with do_oop_store->store_heap_oop
aoqi@1 996 //__ sd(FSR, T2, 0);
aoqi@1 997 __ store_heap_oop(Address(T2, 0), FSR); // <-- Jin
fujie@32 998 __ sync();
aoqi@1 999 __ store_check(T2);
aoqi@1 1000 __ b(done);
aoqi@1 1001 __ delayed()->nop();
aoqi@1 1002
aoqi@1 1003 // Have a NULL in FSR, EDX=T2, SSR=index. Store NULL at ary[idx]
aoqi@1 1004 __ bind(is_null);
aoqi@1 1005 __ profile_null_seen(T9);
aoqi@1 1006 __ dsll(AT, SSR, UseCompressedOops? Address::times_4 : Address::times_8);
aoqi@1 1007 __ dadd(T2, T2, AT);
aoqi@1 1008 //__ sd(FSR, T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
aoqi@1 1009 __ store_heap_oop(Address(T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), FSR); /* FSR is null here */
fujie@32 1010 __ sync();
aoqi@1 1011
aoqi@1 1012 __ bind(done);
aoqi@1 1013 __ daddi(SP, SP, 3 * Interpreter::stackElementSize);
aoqi@1 1014 }
aoqi@1 1015
aoqi@1 1016 void TemplateTable::bastore() {
aoqi@1 1017 transition(itos, vtos);
aoqi@1 1018 __ pop_i (SSR);
aoqi@1 1019 index_check(T2, SSR);
aoqi@1 1020 __ dadd(SSR, T2, SSR);
aoqi@1 1021 __ sb(FSR, SSR, arrayOopDesc::base_offset_in_bytes(T_BYTE));
aoqi@1 1022 }
aoqi@1 1023
aoqi@1 1024 void TemplateTable::castore() {
aoqi@1 1025 transition(itos, vtos);
aoqi@1 1026 __ pop_i(SSR);
aoqi@1 1027 index_check(T2, SSR);
aoqi@1 1028 __ dsll(SSR, SSR, Address::times_2);
aoqi@1 1029 __ dadd(SSR, T2, SSR);
aoqi@1 1030 __ sh(FSR, SSR, arrayOopDesc::base_offset_in_bytes(T_CHAR));
aoqi@1 1031 }
aoqi@1 1032
aoqi@1 1033 void TemplateTable::sastore() {
aoqi@1 1034 castore();
aoqi@1 1035 }
aoqi@1 1036
aoqi@1 1037 void TemplateTable::istore(int n) {
aoqi@1 1038 transition(itos, vtos);
aoqi@1 1039 __ sw(FSR, iaddress(n));
aoqi@1 1040 }
aoqi@1 1041
aoqi@1 1042 void TemplateTable::lstore(int n) {
aoqi@1 1043 transition(ltos, vtos);
aoqi@1 1044 __ sd(FSR, laddress(n));
aoqi@1 1045 }
aoqi@1 1046
aoqi@1 1047 void TemplateTable::fstore(int n) {
aoqi@1 1048 transition(ftos, vtos);
aoqi@1 1049 __ swc1(FSF, faddress(n));
aoqi@1 1050 }
aoqi@1 1051
aoqi@1 1052 void TemplateTable::dstore(int n) {
aoqi@1 1053 transition(dtos, vtos);
aoqi@1 1054 __ sdc1(FSF, laddress(n));
aoqi@1 1055 }
aoqi@1 1056
aoqi@1 1057 void TemplateTable::astore(int n) {
aoqi@1 1058 transition(vtos, vtos);
aoqi@1 1059 __ pop_ptr(FSR);
aoqi@1 1060 __ sd(FSR, aaddress(n));
aoqi@1 1061 }
aoqi@1 1062
aoqi@1 1063 void TemplateTable::pop() {
aoqi@1 1064 transition(vtos, vtos);
aoqi@1 1065 __ daddi(SP, SP, Interpreter::stackElementSize);
aoqi@1 1066 }
aoqi@1 1067
aoqi@1 1068 void TemplateTable::pop2() {
aoqi@1 1069 transition(vtos, vtos);
aoqi@1 1070 __ daddi(SP, SP, 2 * Interpreter::stackElementSize);
aoqi@1 1071 }
aoqi@1 1072
aoqi@1 1073 void TemplateTable::dup() {
aoqi@1 1074 transition(vtos, vtos);
aoqi@1 1075 // stack: ..., a
aoqi@1 1076 __ load_ptr(0, FSR);
aoqi@1 1077 __ push_ptr(FSR);
aoqi@1 1078 // stack: ..., a, a
aoqi@1 1079 }
aoqi@1 1080
aoqi@1 1081 // blows FSR
aoqi@1 1082 void TemplateTable::dup_x1() {
aoqi@1 1083 transition(vtos, vtos);
aoqi@1 1084 // stack: ..., a, b
aoqi@1 1085 __ load_ptr(0, FSR); // load b
aoqi@1 1086 __ load_ptr(1, A5); // load a
aoqi@1 1087 __ store_ptr(1, FSR); // store b
aoqi@1 1088 __ store_ptr(0, A5); // store a
aoqi@1 1089 __ push_ptr(FSR); // push b
aoqi@1 1090 // stack: ..., b, a, b
aoqi@1 1091 }
aoqi@1 1092
aoqi@1 1093 // blows FSR
aoqi@1 1094 void TemplateTable::dup_x2() {
aoqi@1 1095 transition(vtos, vtos);
aoqi@1 1096 // stack: ..., a, b, c
aoqi@1 1097 __ load_ptr(0, FSR); // load c
aoqi@1 1098 __ load_ptr(2, A5); // load a
aoqi@1 1099 __ store_ptr(2, FSR); // store c in a
aoqi@1 1100 __ push_ptr(FSR); // push c
aoqi@1 1101 // stack: ..., c, b, c, c
aoqi@1 1102 __ load_ptr(2, FSR); // load b
aoqi@1 1103 __ store_ptr(2, A5); // store a in b
aoqi@1 1104 // stack: ..., c, a, c, c
aoqi@1 1105 __ store_ptr(1, FSR); // store b in c
aoqi@1 1106 // stack: ..., c, a, b, c
aoqi@1 1107 }
aoqi@1 1108
aoqi@1 1109 // blows FSR
aoqi@1 1110 void TemplateTable::dup2() {
aoqi@1 1111 transition(vtos, vtos);
aoqi@1 1112 // stack: ..., a, b
aoqi@1 1113 __ load_ptr(1, FSR); // load a
aoqi@1 1114 __ push_ptr(FSR); // push a
aoqi@1 1115 __ load_ptr(1, FSR); // load b
aoqi@1 1116 __ push_ptr(FSR); // push b
aoqi@1 1117 // stack: ..., a, b, a, b
aoqi@1 1118 }
aoqi@1 1119
aoqi@1 1120 // blows FSR
aoqi@1 1121 void TemplateTable::dup2_x1() {
aoqi@1 1122 transition(vtos, vtos);
aoqi@1 1123 // stack: ..., a, b, c
aoqi@1 1124 __ load_ptr(0, T2); // load c
aoqi@1 1125 __ load_ptr(1, FSR); // load b
aoqi@1 1126 __ push_ptr(FSR); // push b
aoqi@1 1127 __ push_ptr(T2); // push c
aoqi@1 1128 // stack: ..., a, b, c, b, c
aoqi@1 1129 __ store_ptr(3, T2); // store c in b
aoqi@1 1130 // stack: ..., a, c, c, b, c
aoqi@1 1131 __ load_ptr(4, T2); // load a
aoqi@1 1132 __ store_ptr(2, T2); // store a in 2nd c
aoqi@1 1133 // stack: ..., a, c, a, b, c
aoqi@1 1134 __ store_ptr(4, FSR); // store b in a
aoqi@1 1135 // stack: ..., b, c, a, b, c
aoqi@1 1136
aoqi@1 1137 // stack: ..., b, c, a, b, c
aoqi@1 1138 }
aoqi@1 1139
aoqi@1 1140 // blows FSR, SSR
aoqi@1 1141 void TemplateTable::dup2_x2() {
aoqi@1 1142 transition(vtos, vtos);
aoqi@1 1143 // stack: ..., a, b, c, d
aoqi@1 1144 // stack: ..., a, b, c, d
aoqi@1 1145 __ load_ptr(0, T2); // load d
aoqi@1 1146 __ load_ptr(1, FSR); // load c
aoqi@1 1147 __ push_ptr(FSR); // push c
aoqi@1 1148 __ push_ptr(T2); // push d
aoqi@1 1149 // stack: ..., a, b, c, d, c, d
aoqi@1 1150 __ load_ptr(4, FSR); // load b
aoqi@1 1151 __ store_ptr(2, FSR); // store b in d
aoqi@1 1152 __ store_ptr(4, T2); // store d in b
aoqi@1 1153 // stack: ..., a, d, c, b, c, d
aoqi@1 1154 __ load_ptr(5, T2); // load a
aoqi@1 1155 __ load_ptr(3, FSR); // load c
aoqi@1 1156 __ store_ptr(3, T2); // store a in c
aoqi@1 1157 __ store_ptr(5, FSR); // store c in a
aoqi@1 1158 // stack: ..., c, d, a, b, c, d
aoqi@1 1159
aoqi@1 1160 // stack: ..., c, d, a, b, c, d
aoqi@1 1161 }
aoqi@1 1162
aoqi@1 1163 // blows FSR
aoqi@1 1164 void TemplateTable::swap() {
aoqi@1 1165 transition(vtos, vtos);
aoqi@1 1166 // stack: ..., a, b
aoqi@1 1167
aoqi@1 1168 __ load_ptr(1, A5); // load a
aoqi@1 1169 __ load_ptr(0, FSR); // load b
aoqi@1 1170 __ store_ptr(0, A5); // store a in b
aoqi@1 1171 __ store_ptr(1, FSR); // store b in a
aoqi@1 1172
aoqi@1 1173 // stack: ..., b, a
aoqi@1 1174 }
aoqi@1 1175
aoqi@1 1176 void TemplateTable::iop2(Operation op) {
aoqi@1 1177 transition(itos, itos);
aoqi@1 1178 switch (op) {
aoqi@1 1179 case add :
aoqi@1 1180 __ pop_i(SSR);
aoqi@1 1181 __ addu32(FSR, SSR, FSR);
aoqi@1 1182 break;
aoqi@1 1183 case sub :
aoqi@1 1184 __ pop_i(SSR);
aoqi@1 1185 __ subu32(FSR, SSR, FSR);
aoqi@1 1186 break;
aoqi@1 1187 case mul :
aoqi@1 1188 __ lw(SSR, SP, 0);
aoqi@1 1189 __ daddi(SP, SP, wordSize);
fujie@42 1190 __ mul(FSR, SSR, FSR);
aoqi@1 1191 break;
aoqi@1 1192 case _and :
aoqi@1 1193 __ pop_i(SSR);
aoqi@1 1194 __ andr(FSR, SSR, FSR);
aoqi@1 1195 break;
aoqi@1 1196 case _or :
aoqi@1 1197 __ pop_i(SSR);
aoqi@1 1198 __ orr(FSR, SSR, FSR);
aoqi@1 1199 break;
aoqi@1 1200 case _xor :
aoqi@1 1201 __ pop_i(SSR);
aoqi@1 1202 __ xorr(FSR, SSR, FSR);
aoqi@1 1203 break;
aoqi@1 1204 case shl :
aoqi@1 1205 __ pop_i(SSR);
aoqi@1 1206 __ sllv(FSR, SSR, FSR);
aoqi@1 1207 break; // lower 5 bits are implicitly masked by the Intel shift instruction; MIPS does the same
aoqi@1 1208 case shr :
aoqi@1 1209 __ pop_i(SSR);
aoqi@1 1210 __ srav(FSR, SSR, FSR);
aoqi@1 1211 break; // lower 5 bits are implicitly masked by the Intel shift instruction; MIPS does the same
aoqi@1 1212 case ushr :
aoqi@1 1213 __ pop_i(SSR);
aoqi@1 1214 __ srlv(FSR, SSR, FSR);
aoqi@1 1215 break; // lower 5 bits are implicitly masked by the Intel shift instruction; MIPS does the same
aoqi@1 1216 default : ShouldNotReachHere();
aoqi@1 1217 }
aoqi@1 1218 }
aoqi@1 1219
aoqi@1 1220 // the result stored in FSR, SSR,
aoqi@1 1221 // used registers : T2, T3
aoqi@1 1222 //FIXME, aoqi
aoqi@1 1223 void TemplateTable::lop2(Operation op) {
aoqi@1 1224 transition(ltos, ltos);
aoqi@1 1225 //__ pop2(T2, T3);
aoqi@1 1226 __ pop_l(T2, T3);
aoqi@1 1227 #ifdef ASSERT
aoqi@1 1228 {
aoqi@1 1229 Label L;
aoqi@1 1230 __ beq(T3, R0, L);
aoqi@1 1231 __ delayed()->nop();
aoqi@1 1232 // FIXME: stack verification required
aoqi@1 1233 // __ stop("lop2, wrong stack"); // <--- Fu 20130930
aoqi@1 1234 __ bind(L);
aoqi@1 1235 }
aoqi@1 1236 #endif
aoqi@1 1237 switch (op) {
aoqi@1 1238 case add :
aoqi@1 1239 __ daddu(FSR, T2, FSR);
aoqi@1 1240 //__ sltu(AT, FSR, T2);
aoqi@1 1241 //__ daddu(SSR, T3, SSR);
aoqi@1 1242 //__ daddu(SSR, SSR, AT);
aoqi@1 1243 break;
aoqi@1 1244 case sub :
aoqi@1 1245 __ dsubu(FSR, T2, FSR);
aoqi@1 1246 //__ sltu(AT, T2, FSR);
aoqi@1 1247 //__ dsubu(SSR, T3, SSR);
aoqi@1 1248 //__ dsubu(SSR, SSR, AT);
aoqi@1 1249 break;
aoqi@1 1250 case _and:
aoqi@1 1251 __ andr(FSR, T2, FSR);
aoqi@1 1252 //__ andr(SSR, T3, SSR);
aoqi@1 1253 break;
aoqi@1 1254 case _or :
aoqi@1 1255 __ orr(FSR, T2, FSR);
aoqi@1 1256 //__ orr(SSR, T3, SSR);
aoqi@1 1257 break;
aoqi@1 1258 case _xor:
aoqi@1 1259 __ xorr(FSR, T2, FSR);
aoqi@1 1260 //__ xorr(SSR, T3, SSR);
aoqi@1 1261 break;
aoqi@1 1262 default : ShouldNotReachHere();
aoqi@1 1263 }
aoqi@1 1264 }
aoqi@1 1265
aoqi@1 1266 // Java requires this bytecode to handle 0x80000000/-1 without raising an overflow exception;
aoqi@1 1267 // the result is 0x80000000.
aoqi@1 1268 // the Godson-2 CPU behaves the same way, so we need not handle this specially as x86 does
aoqi@1 1269 void TemplateTable::idiv() {
aoqi@1 1270 transition(itos, itos);
aoqi@1 1271 Label not_zero;
aoqi@1 1272
aoqi@1 1273 __ bne(FSR, R0, not_zero);
aoqi@1 1274 __ delayed()->nop();
aoqi@1 1275 __ jmp(Interpreter::_throw_ArithmeticException_entry);
aoqi@1 1276 __ delayed()->nop();
aoqi@1 1277 __ bind(not_zero);
fujie@64 1278
fujie@64 1279 __ pop_i(SSR);
fujie@64 1280 if (UseLoongsonISA) {
fujie@64 1281 __ gsdiv(FSR, SSR, FSR);
fujie@64 1282 } else {
fujie@64 1283 __ div(SSR, FSR);
fujie@64 1284 __ mflo(FSR);
fujie@64 1285 }
aoqi@1 1286 }
aoqi@1 1287
aoqi@1 1288 void TemplateTable::irem() {
aoqi@1 1289 transition(itos, itos);
aoqi@1 1290 Label not_zero;
aoqi@1 1291 //__ pop(SSR);
aoqi@1 1292 __ pop_i(SSR);
aoqi@1 1293 __ div(SSR, FSR);
aoqi@1 1294
aoqi@1 1295 __ bne(FSR, R0, not_zero);
aoqi@1 1296 __ delayed()->nop();
aoqi@1 1297 //__ brk(7);
aoqi@1 1298 __ jmp(Interpreter::_throw_ArithmeticException_entry);
aoqi@1 1299 __ delayed()->nop();
aoqi@1 1300
aoqi@1 1301 __ bind(not_zero);
aoqi@1 1302 __ mfhi(FSR);
aoqi@1 1303 }
aoqi@1 1304
aoqi@1 1305 void TemplateTable::lmul() {
aoqi@1 1306 transition(ltos, ltos);
jiangshaofeng@89 1307 __ pop_l(T2);
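// On Loongson (UseLoongsonISA) a single gsdmult writes the low 64 bits of T2 * FSR
// directly into FSR; on generic MIPS the dmult / mflo pair computes the same low 64 bits.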
jiangshaofeng@89 1308 if (UseLoongsonISA) {
jiangshaofeng@89 1309 __ gsdmult(FSR, T2, FSR);
jiangshaofeng@89 1310 } else {
jiangshaofeng@89 1311 __ dmult(T2, FSR);
jiangshaofeng@89 1312 __ mflo(FSR);
jiangshaofeng@89 1313 }
jiangshaofeng@89 1314 }
aoqi@1 1315
aoqi@1 1316 // NOTE: a zero divisor jumps to Interpreter::_throw_ArithmeticException_entry
aoqi@1 1317 void TemplateTable::ldiv() {
aoqi@1 1318 transition(ltos, ltos);
aoqi@1 1319 Label normal;
aoqi@1 1320
aoqi@1 1321 __ bne(FSR, R0, normal);
aoqi@1 1322 __ delayed()->nop();
aoqi@1 1323
aoqi@1 1324 //__ brk(7); //generate FPE
aoqi@1 1325 __ jmp(Interpreter::_throw_ArithmeticException_entry);
aoqi@1 1326 __ delayed()->nop();
aoqi@1 1327
aoqi@1 1328 __ bind(normal);
chenhaoxuan@87 1329 __ pop_l(A2, A3);
chenhaoxuan@87 1330 if (UseLoongsonISA) {
chenhaoxuan@87 1331 __ gsddiv(FSR, A2, FSR);
chenhaoxuan@87 1332 } else {
chenhaoxuan@87 1333 __ ddiv(A2, FSR);
chenhaoxuan@87 1334 __ mflo(FSR);
chenhaoxuan@87 1335 }
aoqi@1 1336 }
aoqi@1 1337
aoqi@1 1338 // NOTE: a zero divisor jumps to Interpreter::_throw_ArithmeticException_entry
aoqi@1 1339 void TemplateTable::lrem() {
aoqi@1 1340 transition(ltos, ltos);
aoqi@1 1341 Label normal;
aoqi@1 1342
aoqi@1 1343 __ bne(FSR, R0, normal);
aoqi@1 1344 __ delayed()->nop();
aoqi@1 1345
aoqi@1 1346 __ jmp(Interpreter::_throw_ArithmeticException_entry);
aoqi@1 1347 __ delayed()->nop();
aoqi@1 1348
aoqi@1 1349 __ bind(normal);
aoqi@1 1350 __ pop_l (A2, A3);
lifangyuan@88 1351
lifangyuan@88 1352 if (UseLoongsonISA) {
lifangyuan@88 1353 __ gsdmod(FSR, A2, FSR);
lifangyuan@88 1354 } else {
lifangyuan@88 1355 __ ddiv(A2, FSR);
lifangyuan@88 1356 __ mfhi(FSR);
lifangyuan@88 1357 }
aoqi@1 1358 }
aoqi@1 1359
aoqi@1 1360 // result in FSR
aoqi@1 1361 // used registers : T0
aoqi@1 1362 void TemplateTable::lshl() {
aoqi@1 1363 transition(itos, ltos);
aoqi@1 1364 __ pop_l(T0, T1);
aoqi@1 1365 #ifdef ASSERT
aoqi@1 1366 {
aoqi@1 1367 Label L;
aoqi@1 1368 __ beq(T1, R0, L);
aoqi@1 1369 __ delayed()->nop();
aoqi@1 1370 //__ stop("lshl, wrong stack"); // <-- Fu 20130930
aoqi@1 1371 __ bind(L);
aoqi@1 1372 }
aoqi@1 1373 #endif
aoqi@1 1374 __ andi(FSR, FSR, 0x3f); // mask the shift count to the low 6 bits
aoqi@1 1375 __ dsllv(FSR, T0, FSR);
aoqi@1 1376 }
aoqi@1 1377
aoqi@1 1378 // used registers : T0
aoqi@1 1379 void TemplateTable::lshr() {
aoqi@1 1380 transition(itos, ltos);
aoqi@1 1381 __ pop_l(T0, T1);
aoqi@1 1382 #ifdef ASSERT
aoqi@1 1383 {
aoqi@1 1384 Label L;
aoqi@1 1385 __ beq(T1, R0, L);
aoqi@1 1386 __ delayed()->nop();
aoqi@1 1387 __ stop("lshr, wrong stack");
aoqi@1 1388 __ bind(L);
aoqi@1 1389 }
aoqi@1 1390 #endif
aoqi@1 1391 __ andi(FSR, FSR, 0x3f); // mask the shift count to the low 6 bits
aoqi@1 1392 __ dsrav(FSR, T0, FSR);
aoqi@1 1393 }
aoqi@1 1394
aoqi@1 1395 // used registers : T0
aoqi@1 1396 void TemplateTable::lushr() {
aoqi@1 1397 transition(itos, ltos);
aoqi@1 1398 __ pop_l(T0, T1);
aoqi@1 1399 #ifdef ASSERT
aoqi@1 1400 {
aoqi@1 1401 Label L;
aoqi@1 1402 __ beq(T1, R0, L);
aoqi@1 1403 __ delayed()->nop();
aoqi@1 1404 __ stop("lushr, wrong stack");
aoqi@1 1405 __ bind(L);
aoqi@1 1406 }
aoqi@1 1407 #endif
aoqi@1 1408 __ andi(FSR, FSR, 0x3f); // mask the shift count to the low 6 bits
aoqi@1 1409 __ dsrlv(FSR, T0, FSR);
aoqi@1 1410 }
aoqi@1 1411
aoqi@1 1412 // result in FSF
aoqi@1 1413 void TemplateTable::fop2(Operation op) {
aoqi@1 1414 transition(ftos, ftos);
aoqi@1 1415 __ pop_ftos_to_esp(); // pop ftos into esp
aoqi@1 1416 switch (op) {
aoqi@1 1417 case add:
aoqi@1 1418 __ lwc1(FTF, at_sp());
aoqi@1 1419 __ add_s(FSF, FTF, FSF);
aoqi@1 1420 break;
aoqi@1 1421 case sub:
aoqi@1 1422 __ lwc1(FTF, at_sp());
aoqi@1 1423 __ sub_s(FSF, FTF, FSF);
aoqi@1 1424 break;
aoqi@1 1425 case mul:
aoqi@1 1426 __ lwc1(FTF, at_sp());
aoqi@1 1427 __ mul_s(FSF, FTF, FSF);
aoqi@1 1428 break;
aoqi@1 1429 case div:
aoqi@1 1430 __ lwc1(FTF, at_sp());
aoqi@1 1431 __ div_s(FSF, FTF, FSF);
aoqi@1 1432 break;
aoqi@1 1433 case rem:
aoqi@1 1434 __ mfc1(FSR, FSF);
aoqi@1 1435 __ mtc1(FSR, F12);
aoqi@1 1436 __ lwc1(FTF, at_sp());
aoqi@1 1437 __ rem_s(FSF, FTF, F12, FSF);
aoqi@1 1438 break;
aoqi@1 1439 default : ShouldNotReachHere();
aoqi@1 1440 }
aoqi@1 1441
aoqi@1 1442 __ daddi(SP, SP, 1 * wordSize);
aoqi@1 1443 }
aoqi@1 1444
aoqi@1 1445 // result in SSF||FSF
aoqi@1 1446 // I don't handle the strict flags
aoqi@1 1447 void TemplateTable::dop2(Operation op) {
aoqi@1 1448 transition(dtos, dtos);
aoqi@1 1449 __ pop_dtos_to_esp(); // pop dtos into esp
aoqi@1 1450 switch (op) {
aoqi@1 1451 case add:
aoqi@1 1452 __ ldc1(FTF, at_sp());
aoqi@1 1453 __ add_d(FSF, FTF, FSF);
aoqi@1 1454 break;
aoqi@1 1455 case sub:
aoqi@1 1456 __ ldc1(FTF, at_sp());
aoqi@1 1457 __ sub_d(FSF, FTF, FSF);
aoqi@1 1458 break;
aoqi@1 1459 case mul:
aoqi@1 1460 __ ldc1(FTF, at_sp());
aoqi@1 1461 __ mul_d(FSF, FTF, FSF);
aoqi@1 1462 break;
aoqi@1 1463 case div:
aoqi@1 1464 __ ldc1(FTF, at_sp());
aoqi@1 1465 __ div_d(FSF, FTF, FSF);
aoqi@1 1466 break;
aoqi@1 1467 case rem:
aoqi@1 1468 __ dmfc1(FSR, FSF);
aoqi@1 1469 __ dmtc1(FSR, F12);
aoqi@1 1470 __ ldc1(FTF, at_sp());
aoqi@1 1471 __ rem_d(FSF, FTF, F12, FSF);
aoqi@1 1472 break;
aoqi@1 1473 default : ShouldNotReachHere();
aoqi@1 1474 }
aoqi@1 1475
aoqi@1 1476 __ daddi(SP, SP, 2 * wordSize);
aoqi@1 1477 }
aoqi@1 1478
aoqi@1 1479 void TemplateTable::ineg() {
aoqi@1 1480 transition(itos, itos);
aoqi@1 1481 __ neg(FSR);
aoqi@1 1482 }
aoqi@1 1483
aoqi@1 1484 void TemplateTable::lneg() {
aoqi@1 1485 transition(ltos, ltos);
aoqi@1 1486 __ dsubu(FSR, R0, FSR);
aoqi@1 1487 }
aoqi@1 1488 /*
aoqi@1 1489 // Note: 'double' and 'long long' have 32-bits alignment on x86.
aoqi@1 1490 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
aoqi@1 1491 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
aoqi@1 1492 // of 128-bits operands for SSE instructions.
aoqi@1 1493 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
aoqi@1 1494 // Store the value to a 128-bits operand.
aoqi@1 1495 operand[0] = lo;
aoqi@1 1496 operand[1] = hi;
aoqi@1 1497 return operand;
aoqi@1 1498 }
aoqi@1 1499
aoqi@1 1500 // Buffer for 128-bits masks used by SSE instructions.
aoqi@1 1501 static jlong float_signflip_pool[2*2];
aoqi@1 1502 static jlong double_signflip_pool[2*2];
aoqi@1 1503 */
aoqi@1 1504 void TemplateTable::fneg() {
aoqi@1 1505 transition(ftos, ftos);
aoqi@1 1506 __ neg_s(FSF, FSF);
aoqi@1 1507 }
aoqi@1 1508
aoqi@1 1509 void TemplateTable::dneg() {
aoqi@1 1510 transition(dtos, dtos);
aoqi@1 1511 __ neg_d(FSF, FSF);
aoqi@1 1512 }
aoqi@1 1513
aoqi@1 1514 // used registers : T2
aoqi@1 1515 void TemplateTable::iinc() {
aoqi@1 1516 transition(vtos, vtos);
aoqi@1 1517 locals_index(T2);
aoqi@1 1518 __ lw(FSR, T2, 0);
aoqi@1 1519 __ lb(AT, at_bcp(2)); // get constant
aoqi@1 1520 __ daddu(FSR, FSR, AT);
aoqi@1 1521 __ sw(FSR, T2, 0);
aoqi@1 1522 }
aoqi@1 1523
aoqi@1 1524 // used register : T2
aoqi@1 1525 void TemplateTable::wide_iinc() {
aoqi@1 1526 transition(vtos, vtos);
aoqi@1 1527 locals_index_wide(T2);
aoqi@16 1528 __ get_2_byte_integer_at_bcp(FSR, AT, 4);
aoqi@1 1529 __ hswap(FSR);
aoqi@1 1530 __ lw(AT, T2, 0);
aoqi@1 1531 __ daddu(FSR, AT, FSR);
aoqi@1 1532 __ sw(FSR, T2, 0);
aoqi@1 1533 }
aoqi@1 1534
aoqi@1 1535 void TemplateTable::convert() {
aoqi@1 1536 // Checking
aoqi@1 1537 #ifdef ASSERT
aoqi@1 1538 { TosState tos_in = ilgl;
aoqi@1 1539 TosState tos_out = ilgl;
aoqi@1 1540 switch (bytecode()) {
aoqi@1 1541 case Bytecodes::_i2l: // fall through
aoqi@1 1542 case Bytecodes::_i2f: // fall through
aoqi@1 1543 case Bytecodes::_i2d: // fall through
aoqi@1 1544 case Bytecodes::_i2b: // fall through
aoqi@1 1545 case Bytecodes::_i2c: // fall through
aoqi@1 1546 case Bytecodes::_i2s: tos_in = itos; break;
aoqi@1 1547 case Bytecodes::_l2i: // fall through
aoqi@1 1548 case Bytecodes::_l2f: // fall through
aoqi@1 1549 case Bytecodes::_l2d: tos_in = ltos; break;
aoqi@1 1550 case Bytecodes::_f2i: // fall through
aoqi@1 1551 case Bytecodes::_f2l: // fall through
aoqi@1 1552 case Bytecodes::_f2d: tos_in = ftos; break;
aoqi@1 1553 case Bytecodes::_d2i: // fall through
aoqi@1 1554 case Bytecodes::_d2l: // fall through
aoqi@1 1555 case Bytecodes::_d2f: tos_in = dtos; break;
aoqi@1 1556 default : ShouldNotReachHere();
aoqi@1 1557 }
aoqi@1 1558 switch (bytecode()) {
aoqi@1 1559 case Bytecodes::_l2i: // fall through
aoqi@1 1560 case Bytecodes::_f2i: // fall through
aoqi@1 1561 case Bytecodes::_d2i: // fall through
aoqi@1 1562 case Bytecodes::_i2b: // fall through
aoqi@1 1563 case Bytecodes::_i2c: // fall through
aoqi@1 1564 case Bytecodes::_i2s: tos_out = itos; break;
aoqi@1 1565 case Bytecodes::_i2l: // fall through
aoqi@1 1566 case Bytecodes::_f2l: // fall through
aoqi@1 1567 case Bytecodes::_d2l: tos_out = ltos; break;
aoqi@1 1568 case Bytecodes::_i2f: // fall through
aoqi@1 1569 case Bytecodes::_l2f: // fall through
aoqi@1 1570 case Bytecodes::_d2f: tos_out = ftos; break;
aoqi@1 1571 case Bytecodes::_i2d: // fall through
aoqi@1 1572 case Bytecodes::_l2d: // fall through
aoqi@1 1573 case Bytecodes::_f2d: tos_out = dtos; break;
aoqi@1 1574 default : ShouldNotReachHere();
aoqi@1 1575 }
aoqi@1 1576 transition(tos_in, tos_out);
aoqi@1 1577 }
aoqi@1 1578 #endif // ASSERT
aoqi@1 1579
aoqi@1 1580 // Conversion
aoqi@1 1581 // (Note: use pushl(ecx)/popl(ecx) for 1/2-word stack-ptr manipulation)
aoqi@1 1582 switch (bytecode()) {
aoqi@1 1583 case Bytecodes::_i2l:
aoqi@1 1584 //__ extend_sign(SSR, FSR);
aoqi@1 1585 __ sll(FSR, FSR, 0);
aoqi@1 1586 break;
aoqi@1 1587 case Bytecodes::_i2f:
aoqi@1 1588 __ mtc1(FSR, FSF);
aoqi@1 1589 __ cvt_s_w(FSF, FSF);
aoqi@1 1590 break;
aoqi@1 1591 case Bytecodes::_i2d:
aoqi@1 1592 __ mtc1(FSR, FSF);
aoqi@1 1593 __ cvt_d_w(FSF, FSF);
aoqi@1 1594 break;
aoqi@1 1595 case Bytecodes::_i2b:
aoqi@1 1596 __ dsll32(FSR, FSR, 24);
aoqi@1 1597 __ dsra32(FSR, FSR, 24);
aoqi@1 1598 break;
aoqi@1 1599 case Bytecodes::_i2c:
aoqi@1 1600 __ andi(FSR, FSR, 0xFFFF); // truncate upper 48 bits
aoqi@1 1601 break;
aoqi@1 1602 case Bytecodes::_i2s:
aoqi@1 1603 __ dsll32(FSR, FSR, 16);
aoqi@1 1604 __ dsra32(FSR, FSR, 16);
aoqi@1 1605 break;
aoqi@1 1606 case Bytecodes::_l2i:
aoqi@1 1607 __ dsll32(FSR, FSR, 0);
aoqi@1 1608 __ dsra32(FSR, FSR, 0);
aoqi@1 1609 break;
aoqi@1 1610 case Bytecodes::_l2f:
aoqi@1 1611 __ dmtc1(FSR, FSF);
aoqi@1 1612 //__ mtc1(SSR, SSF);
aoqi@1 1613 __ cvt_s_l(FSF, FSF);
aoqi@1 1614 break;
aoqi@1 1615 case Bytecodes::_l2d:
aoqi@1 1616 __ dmtc1(FSR, FSF);
aoqi@1 1617 //__ mtc1(SSR, SSF);
aoqi@1 1618 __ cvt_d_l(FSF, FSF);
aoqi@1 1619 break;
aoqi@1 1620 case Bytecodes::_f2i:
aoqi@1 1621 {
aoqi@1 1622 Label L;
aoqi@1 1623 /*
aoqi@1 1624 __ c_un_s(FSF, FSF); //NaN?
aoqi@1 1625 __ bc1t(L);
aoqi@1 1626 __ delayed(); __ move(FSR, R0);
aoqi@1 1627 */
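// trunc.w.s sets the Invalid Operation cause bit (0x10000) in the FCSR (read via
// cfc1) for NaN or out-of-range input; if the bit is clear the truncated result
// in F12 is used, otherwise fall through and let SharedRuntime::f2i compute the result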
aoqi@1 1628 __ trunc_w_s(F12, FSF);
aoqi@1 1629 __ cfc1(AT, 31);
aoqi@1 1630 __ li(T0, 0x10000);
aoqi@1 1631 __ andr(AT, AT, T0);
aoqi@1 1632 __ beq(AT, R0, L);
aoqi@1 1633 __ delayed()->mfc1(FSR, F12);
aoqi@1 1634
aoqi@1 1635 __ mov_s(F12, FSF);
aoqi@1 1636 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
aoqi@1 1637 __ bind(L);
aoqi@1 1638 }
aoqi@1 1639 break;
aoqi@1 1640 case Bytecodes::_f2l:
aoqi@1 1641 {
aoqi@1 1642 Label L;
aoqi@1 1643 /*
aoqi@1 1644 __ move(SSR, R0);
aoqi@1 1645 __ c_un_s(FSF, FSF); //NaN?
aoqi@1 1646 __ bc1t(L);
aoqi@1 1647 __ delayed();
aoqi@1 1648 __ move(FSR, R0);
aoqi@1 1649 */
aoqi@1 1650 __ trunc_l_s(F12, FSF);
aoqi@1 1651 __ cfc1(AT, 31);
aoqi@1 1652 __ li(T0, 0x10000);
aoqi@1 1653 __ andr(AT, AT, T0);
aoqi@1 1654 __ beq(AT, R0, L);
aoqi@1 1655 __ delayed()->dmfc1(FSR, F12);
aoqi@1 1656
aoqi@1 1657 __ mov_s(F12, FSF);
aoqi@1 1658 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
aoqi@1 1659 __ bind(L);
aoqi@1 1660 }
aoqi@1 1661 break;
aoqi@1 1662 case Bytecodes::_f2d:
aoqi@1 1663 __ cvt_d_s(FSF, FSF);
aoqi@1 1664 break;
aoqi@1 1665 case Bytecodes::_d2i:
aoqi@1 1666 {
aoqi@1 1667 Label L;
aoqi@1 1668 /*
aoqi@1 1669 __ c_un_d(FSF, FSF); //NaN?
aoqi@1 1670 __ bc1t(L);
aoqi@1 1671 __ delayed(); __ move(FSR, R0);
aoqi@1 1672 */
aoqi@1 1673 __ trunc_w_d(F12, FSF);
aoqi@1 1674 __ cfc1(AT, 31);
aoqi@1 1675 __ li(T0, 0x10000);
aoqi@1 1676 __ andr(AT, AT, T0);
aoqi@1 1677 __ beq(AT, R0, L);
aoqi@1 1678 __ delayed()->mfc1(FSR, F12);
aoqi@1 1679
aoqi@1 1680 __ mov_d(F12, FSF);
aoqi@1 1681 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
aoqi@1 1682 __ bind(L);
aoqi@1 1683 }
aoqi@1 1684 break;
aoqi@1 1685 case Bytecodes::_d2l:
aoqi@1 1686 {
aoqi@1 1687 Label L;
aoqi@1 1688 /*
aoqi@1 1689 __ move(SSR, R0);
aoqi@1 1690 __ c_un_d(FSF, FSF); //NaN?
aoqi@1 1691 __ bc1t(L);
aoqi@1 1692 __ delayed(); __ move(FSR, R0);
aoqi@1 1693 */
aoqi@1 1694 __ trunc_l_d(F12, FSF);
aoqi@1 1695 __ cfc1(AT, 31);
aoqi@1 1696 __ li(T0, 0x10000);
aoqi@1 1697 __ andr(AT, AT, T0);
aoqi@1 1698 __ beq(AT, R0, L);
aoqi@1 1699 __ delayed()->dmfc1(FSR, F12);
aoqi@1 1700
aoqi@1 1701 __ mov_d(F12, FSF);
aoqi@1 1702 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
aoqi@1 1703 __ bind(L);
aoqi@1 1704 }
aoqi@1 1705 break;
aoqi@1 1706 case Bytecodes::_d2f:
aoqi@1 1707 __ cvt_s_d(FSF, FSF);
aoqi@1 1708 break;
aoqi@1 1709 default :
aoqi@1 1710 ShouldNotReachHere();
aoqi@1 1711 }
aoqi@1 1712 }
aoqi@1 1713
aoqi@1 1714 void TemplateTable::lcmp() {
aoqi@1 1715 transition(ltos, itos);
aoqi@1 1716
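// value2 is already in FSR (ltos); pop value1 into T0 and discard its unused
// second stack slot into R0.  Result in FSR: -1, 0 or 1 for value1 <, ==, > value2.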
aoqi@1 1717 Label low, high, done;
aoqi@1 1718 __ pop(T0);
aoqi@1 1719 __ pop(R0);
aoqi@1 1720 __ slt(AT, T0, FSR);
aoqi@1 1721 __ bne(AT, R0, low);
aoqi@1 1722 __ delayed()->nop();
aoqi@1 1723
aoqi@1 1724 __ bne(T0, FSR, high);
aoqi@1 1725 __ delayed()->nop();
aoqi@1 1726
aoqi@1 1727 __ li(FSR, (long)0);
aoqi@1 1728 __ b(done);
aoqi@1 1729 __ delayed()->nop();
aoqi@1 1730
aoqi@1 1731 __ bind(low);
aoqi@1 1732 __ li(FSR, (long)-1);
aoqi@1 1733 __ b(done);
aoqi@1 1734 __ delayed()->nop();
aoqi@1 1735
aoqi@1 1736 __ bind(high);
aoqi@1 1737 __ li(FSR, (long)1);
aoqi@1 1738 __ b(done);
aoqi@1 1739 __ delayed()->nop();
aoqi@1 1740
aoqi@1 1741 __ bind(done);
aoqi@1 1742 }
aoqi@1 1743
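// Compares value1 (on the stack) with value2 (in FSF) and leaves -1, 0 or 1 in FSR.
// unordered_result (-1 or +1) selects the value produced when either operand is NaN,
// matching the fcmpl / fcmpg bytecodes.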
aoqi@1 1744 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
aoqi@1 1745 Label less, done;
aoqi@1 1746
aoqi@1 1747 __ move(FSR, R0);
aoqi@1 1748
aoqi@1 1749 if (is_float) {
aoqi@1 1750 __ pop_ftos_to_esp();
aoqi@1 1751 __ lwc1(FTF, at_sp());
aoqi@1 1752 __ c_eq_s(FTF, FSF);
aoqi@1 1753 __ bc1t(done);
aoqi@1 1754 __ delayed()->daddi(SP, SP, 1 * wordSize);
aoqi@1 1755
aoqi@1 1756 if (unordered_result < 0)
aoqi@1 1757 __ c_ult_s(FTF, FSF);
aoqi@1 1758 else
aoqi@1 1759 __ c_olt_s(FTF, FSF);
aoqi@1 1760 } else {
aoqi@1 1761 __ pop_dtos_to_esp();
aoqi@1 1762 __ ldc1(FTF, at_sp());
aoqi@1 1763 __ c_eq_d(FTF, FSF);
aoqi@1 1764 __ bc1t(done);
aoqi@1 1765 __ delayed()->daddi(SP, SP, 2 * wordSize);
aoqi@1 1766
aoqi@1 1767 if (unordered_result < 0)
aoqi@1 1768 __ c_ult_d(FTF, FSF);
aoqi@1 1769 else
aoqi@1 1770 __ c_olt_d(FTF, FSF);
aoqi@1 1771 }
aoqi@1 1772 __ bc1t(less);
aoqi@1 1773 __ delayed()->nop();
aoqi@1 1774 __ move(FSR, 1);
aoqi@1 1775 __ b(done);
aoqi@1 1776 __ delayed()->nop();
aoqi@1 1777 __ bind(less);
aoqi@1 1778 __ move(FSR, -1);
aoqi@1 1779 __ bind(done);
aoqi@1 1780 }
aoqi@1 1781
aoqi@1 1782
aoqi@1 1783 // used registers : T3, A7, Rnext
aoqi@1 1784 // FSR : return bci, this is defined by the vm specification
aoqi@1 1785 // T2 : MDO taken count
aoqi@1 1786 // T3 : method
aoqi@1 1787 // A7 : offset
aoqi@1 1788 // Rnext : next bytecode, this is required by dispatch_base
aoqi@1 1789 void TemplateTable::branch(bool is_jsr, bool is_wide) {
aoqi@1 1790 __ get_method(T3);
aoqi@1 1791 __ profile_taken_branch(A7, T2); // only C2 meaningful
aoqi@1 1792
aoqi@1 1793 #ifndef CORE
aoqi@1 1794 const ByteSize be_offset = MethodCounters::backedge_counter_offset()
aoqi@1 1795 + InvocationCounter::counter_offset();
aoqi@1 1796 const ByteSize inv_offset = MethodCounters::invocation_counter_offset()
aoqi@1 1797 + InvocationCounter::counter_offset();
aoqi@1 1798 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
aoqi@1 1799 #endif // CORE
aoqi@1 1800
aoqi@1 1801 // Load up A7 with the branch displacement
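// (bytecode operands are big-endian; hswap/swap below convert them to native byte order)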
aoqi@1 1802 if (!is_wide) {
aoqi@16 1803 __ get_2_byte_integer_at_bcp(A7, AT, 1);
aoqi@1 1804 __ hswap(A7);
aoqi@1 1805 } else {
aoqi@15 1806 __ get_4_byte_integer_at_bcp(A7, AT, 1);
aoqi@1 1807 __ swap(A7);
aoqi@1 1808 }
aoqi@1 1809
aoqi@1 1810 // Handle all the JSR stuff here, then exit.
aoqi@1 1811 // It's much shorter and cleaner than intermingling with the
aoqi@1 1812 // non-JSR normal-branch stuff occurring below.
aoqi@1 1813 if (is_jsr) {
aoqi@1 1814 // Pre-load the next target bytecode into Rnext
aoqi@1 1815 __ dadd(AT, BCP, A7);
aoqi@1 1816 __ lbu(Rnext, AT, 0);
aoqi@1 1817
aoqi@1 1818 // compute return address as bci in FSR
aoqi@1 1819 __ daddi(FSR, BCP, (is_wide?5:3) - in_bytes(ConstMethod::codes_offset()));
aoqi@1 1820 __ ld(AT, T3, in_bytes(Method::const_offset()));
aoqi@1 1821 __ dsub(FSR, FSR, AT);
aoqi@1 1822 // Adjust the bcp in BCP by the displacement in A7
aoqi@1 1823 __ dadd(BCP, BCP, A7);
aoqi@1 1824 // jsr returns atos that is not an oop
aoqi@1 1825 // __ dispatch_only_noverify(atos);
aoqi@1 1826 // Push return address
aoqi@1 1827 __ push_i(FSR);
aoqi@1 1828 // jsr returns vtos
aoqi@1 1829 __ dispatch_only_noverify(vtos);
aoqi@1 1830
aoqi@1 1831 return;
aoqi@1 1832 }
aoqi@1 1833
aoqi@1 1834 // Normal (non-jsr) branch handling
aoqi@1 1835
aoqi@1 1836 // Adjust the bcp in BCP by the displacement in A7
aoqi@1 1837 __ dadd(BCP, BCP, A7);
aoqi@1 1838
aoqi@1 1839 #ifdef CORE
aoqi@1 1840 // Pre-load the next target bytecode into Rnext
aoqi@1 1841 __ lbu(Rnext, BCP, 0);
aoqi@1 1842 // continue with the bytecode @ target
aoqi@1 1843 __ dispatch_only(vtos);
aoqi@1 1844 #else
aoqi@1 1845 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
aoqi@1 1846 Label backedge_counter_overflow;
aoqi@1 1847 Label profile_method;
aoqi@1 1848 Label dispatch;
aoqi@1 1849 if (UseLoopCounter) {
aoqi@1 1850 // increment backedge counter for backward branches
aoqi@1 1851 // eax: MDO
aoqi@1 1852 // ebx: MDO bumped taken-count
aoqi@1 1853 // T3: method
aoqi@1 1854 // A7: target offset
aoqi@1 1855 // BCP: target bcp
aoqi@1 1856 // LVP: locals pointer
aoqi@1 1857 __ bgtz(A7, dispatch); // check if forward or backward branch
aoqi@1 1858 __ delayed()->nop();
aoqi@1 1859
aoqi@1 1860 // check if MethodCounters exists
aoqi@1 1861 Label has_counters;
aoqi@1 1862 __ ld(AT, T3, in_bytes(Method::method_counters_offset())); // use AT as MDO, TEMP
aoqi@1 1863 __ bne(AT, R0, has_counters);
aoqi@1 1864 __ nop();
aoqi@1 1865 //__ push(T3);
aoqi@1 1866 //__ push(A7);
aoqi@1 1867 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
aoqi@1 1868 T3);
aoqi@1 1869 //__ pop(A7);
aoqi@1 1870 //__ pop(T3);
aoqi@1 1871 __ ld(AT, T3, in_bytes(Method::method_counters_offset())); // use AT as MDO, TEMP
aoqi@1 1872 __ beq(AT, R0, dispatch);
aoqi@1 1873 __ nop();
aoqi@1 1874 __ bind(has_counters);
aoqi@1 1875
aoqi@1 1876 // increment back edge counter
aoqi@1 1877 __ ld(T1, T3, in_bytes(Method::method_counters_offset()));
aoqi@1 1878 __ lw(T0, T1, in_bytes(be_offset));
aoqi@1 1879 __ increment(T0, InvocationCounter::count_increment);
aoqi@1 1880 __ sw(T0, T1, in_bytes(be_offset));
aoqi@1 1881
aoqi@1 1882 // load invocation counter
aoqi@1 1883 __ lw(T1, T1, in_bytes(inv_offset));
aoqi@1 1884 // buffer bit added, so the mask is not needed
aoqi@1 1885 // by yjl 10/24/2005
aoqi@1 1886 //__ move(AT, InvocationCounter::count_mask_value);
aoqi@1 1887 //__ andr(T1, T1, AT);
aoqi@1 1888
aoqi@1 1889 // dadd backedge counter & invocation counter
aoqi@1 1890 __ dadd(T1, T1, T0);
aoqi@1 1891
aoqi@1 1892 if (ProfileInterpreter) {
aoqi@1 1893 // Test to see if we should create a method data oop
aoqi@1 1894 //__ lui(AT, Assembler::split_high(int(&InvocationCounter::InterpreterProfileLimit)));
aoqi@1 1895 //__ lw(AT, AT, Assembler::split_low(int(&InvocationCounter::InterpreterProfileLimit)));
aoqi@1 1896 // T1 : backedge counter & invocation counter
aoqi@1 1897 __ li(AT, (long)&InvocationCounter::InterpreterProfileLimit);
aoqi@1 1898 __ lw(AT, AT, 0);
aoqi@1 1899 __ slt(AT, T1, AT);
aoqi@1 1900 __ bne(AT, R0, dispatch);
aoqi@1 1901 __ delayed()->nop();
aoqi@1 1902
aoqi@1 1903 // if no method data exists, go to profile method
aoqi@1 1904 __ test_method_data_pointer(T1, profile_method);
aoqi@1 1905
aoqi@1 1906 if (UseOnStackReplacement) {
aoqi@1 1907 // check for overflow against T2, which is the MDO taken count
aoqi@1 1908 //__ lui(AT, Assembler::split_high(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
aoqi@1 1909 //__ lw(AT, AT, Assembler::split_low(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
aoqi@1 1910 __ li(AT, (long)&InvocationCounter::InterpreterBackwardBranchLimit);
aoqi@1 1911 __ lw(AT, AT, 0);
aoqi@1 1912 // T2 was loaded by profile_taken_branch at the beginning of this method
aoqi@1 1913 __ slt(AT, T2, AT);
aoqi@1 1914 __ bne(AT, R0, dispatch);
aoqi@1 1915 __ delayed()->nop();
aoqi@1 1916
aoqi@1 1917 // When ProfileInterpreter is on, the backedge_count comes
aoqi@1 1918 // from the methodDataOop, which value does not get reset on
aoqi@1 1919 // the call to frequency_counter_overflow().
aoqi@1 1920 // To avoid excessive calls to the overflow routine while
aoqi@1 1921 // the method is being compiled, dadd a second test to make
aoqi@1 1922 // sure the overflow function is called only once every
aoqi@1 1923 // overflow_frequency.
aoqi@1 1924 const int overflow_frequency = 1024;
aoqi@1 1925 __ andi(AT, T2, overflow_frequency-1);
aoqi@1 1926 __ beq(AT, R0, backedge_counter_overflow);
aoqi@1 1927 __ delayed()->nop();
aoqi@1 1928 }
aoqi@1 1929 } else {
aoqi@1 1930 if (UseOnStackReplacement) {
aoqi@1 1931 // check for overflow against T1, which is the sum of the counters
aoqi@1 1932 //__ lui(AT, Assembler::split_high(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
aoqi@1 1933 //__ lw(AT, AT, Assembler::split_low(int(&InvocationCounter::InterpreterBackwardBranchLimit)));
aoqi@1 1934 __ li(AT, (long)&InvocationCounter::InterpreterBackwardBranchLimit);
aoqi@1 1935 __ lw(AT, AT, 0);
aoqi@1 1936 __ slt(AT, T1, AT);
aoqi@1 1937 __ beq(AT, R0, backedge_counter_overflow);
aoqi@1 1938 __ delayed()->nop();
aoqi@1 1939 }
aoqi@1 1940 }
aoqi@1 1941 __ bind(dispatch);
aoqi@1 1942 }
aoqi@1 1943
aoqi@1 1944 // Pre-load the next target bytecode into Rnext
aoqi@1 1945 __ lbu(Rnext, BCP, 0);
aoqi@1 1946
aoqi@1 1947 // continue with the bytecode @ target
aoqi@1 1948 // FSR: return bci for jsr's, unused otherwise
aoqi@1 1949 // Rnext: target bytecode
aoqi@1 1950 // BCP: target bcp
aoqi@1 1951 __ dispatch_only(vtos);
aoqi@1 1952
aoqi@1 1953 if (UseLoopCounter) {
aoqi@1 1954 if (ProfileInterpreter) {
aoqi@1 1955 // Out-of-line code to allocate method data oop.
aoqi@1 1956 __ bind(profile_method);
aoqi@1 1957 __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
aoqi@1 1958 __ lbu(Rnext, BCP, 0);
aoqi@1 1959
aoqi@1 1960 __ set_method_data_pointer_for_bcp();
aoqi@1 1961 /*
aoqi@1 1962 __ ld(T3, FP, method_offset);
aoqi@1 1963 __ lw(T3, T3, in_bytes(Method::method_data_offset()));
aoqi@1 1964 __ sw(T3, FP, frame::interpreter_frame_mdx_offset * wordSize);
aoqi@1 1965 __ test_method_data_pointer(T3, dispatch);
aoqi@1 1966 // offset non-null mdp by MDO::data_offset() + IR::profile_method()
aoqi@1 1967 __ daddi(T3, T3, in_bytes(MethodData::data_offset()));
aoqi@1 1968 __ dadd(T3, T3, T1);
aoqi@1 1969 __ sw(T3, FP, frame::interpreter_frame_mdx_offset * wordSize);
aoqi@1 1970 */
aoqi@1 1971 __ b(dispatch);
aoqi@1 1972 __ delayed()->nop();
aoqi@1 1973 }
aoqi@1 1974
aoqi@1 1975 if (UseOnStackReplacement) {
aoqi@1 1976 // invocation counter overflow
aoqi@1 1977 __ bind(backedge_counter_overflow);
aoqi@1 1978 __ sub(A7, BCP, A7); // branch bcp
aoqi@1 1979 call_VM(NOREG, CAST_FROM_FN_PTR(address,
aoqi@1 1980 InterpreterRuntime::frequency_counter_overflow), A7);
aoqi@1 1981 __ lbu(Rnext, BCP, 0);
aoqi@1 1982
aoqi@1 1983 // V0: osr nmethod (osr ok) or NULL (osr not possible)
aoqi@1 1984 // V1: osr adapter frame return address
aoqi@1 1985 // Rnext: target bytecode
aoqi@1 1986 // LVP: locals pointer
aoqi@1 1987 // BCP: bcp
aoqi@1 1988 __ beq(V0, R0, dispatch);
aoqi@1 1989 __ delayed()->nop();
aoqi@1 1990 // nmethod may have been invalidated (VM may block upon call_VM return)
aoqi@1 1991 __ lw(T3, V0, nmethod::entry_bci_offset());
aoqi@1 1992 __ move(AT, InvalidOSREntryBci);
aoqi@1 1993 __ beq(AT, T3, dispatch);
aoqi@1 1994 __ delayed()->nop();
aoqi@1 1995 // We need to prepare to execute the OSR method. First we must
aoqi@1 1996 // migrate the locals and monitors off of the stack.
aoqi@1 1997 //eax V0: osr nmethod (osr ok) or NULL (osr not possible)
aoqi@1 1998 //ebx V1: osr adapter frame return address
aoqi@1 1999 //edx Rnext: target bytecode
aoqi@1 2000 //edi LVP: locals pointer
aoqi@1 2001 //esi BCP: bcp
aoqi@1 2002 __ move(BCP, V0);
aoqi@1 2003 // const Register thread = ecx;
aoqi@1 2004 const Register thread = TREG;
aoqi@1 2005 #ifndef OPT_THREAD
aoqi@1 2006 __ get_thread(thread);
aoqi@1 2007 #endif
aoqi@1 2008 call_VM(noreg, CAST_FROM_FN_PTR(address,
aoqi@1 2009 SharedRuntime::OSR_migration_begin));
aoqi@1 2010 // V0 is the OSR buffer; move it to the expected parameter location
aoqi@1 2011 //refer to osrBufferPointer in c1_LIRAssembler_mips.cpp
aoqi@1 2012 __ move(T0, V0);
aoqi@1 2013
aoqi@1 2014 // pop the interpreter frame
aoqi@1 2015 // __ movl(edx, Address(ebp, frame::interpreter_frame_sender_sp_offset
aoqi@1 2016 // * wordSize)); // get sender sp
aoqi@1 2017 __ ld(A7, Address(FP,
aoqi@1 2018 frame::interpreter_frame_sender_sp_offset * wordSize));
aoqi@1 2019 //FIXME, shall we keep the return address on the stack?
aoqi@1 2020 __ leave(); // remove frame anchor
aoqi@1 2021 // __ popl(edi); // get return address
aoqi@1 2022 //__ daddi(SP, SP, wordSize); // get return address
aoqi@1 2023 // __ pop(LVP);
aoqi@1 2024 __ move(LVP, RA);
aoqi@1 2025 // __ movl(esp, edx); // set sp to sender sp
aoqi@1 2026 __ move(SP, A7);
aoqi@1 2027
aoqi@1 2028 Label skip;
aoqi@1 2029 Label chkint;
aoqi@1 2030
aoqi@1 2031 // The interpreter frame we have removed may be returning to
aoqi@1 2032 // either the callstub or the interpreter. Since we will
aoqi@1 2033 // now be returning from a compiled (OSR) nmethod we must
aoqi@1 2034 // adjust the return address so it can handle compiled
aoqi@1 2035 // results and clean the fpu stack. This is very similar to
aoqi@1 2036 // what an i2c adapter must do.
aoqi@1 2037
aoqi@1 2038 // Are we returning to the call stub?
aoqi@1 2039 #if 0
aoqi@1 2040 // __ cmpl(edi, (int)StubRoutines::_call_stub_return_address);
aoqi@1 2041 __ daddi(AT, LVP, -(int)StubRoutines::_call_stub_return_address);
aoqi@1 2042 // __ jcc(Assembler::notEqual, chkint);
aoqi@1 2043 __ bne(AT, R0, chkint);
aoqi@1 2044 __ delayed()->nop();
aoqi@1 2045 // yes adjust to the specialized call stub return.
aoqi@1 2046 // assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL,
aoqi@1 2047 // "must be set");
aoqi@1 2048 assert(StubRoutines::gs2::get_call_stub_compiled_return() != NULL,
aoqi@1 2049 "must be set");
aoqi@1 2050 // __ movl(edi, (intptr_t) StubRoutines::i486::get_call_stub_compiled_return());
aoqi@1 2051 __ move(LVP, (intptr_t) StubRoutines::gs2::get_call_stub_compiled_return());
aoqi@1 2052 // __ jmp(skip);
aoqi@1 2053 __ b(skip);
aoqi@1 2054 __ delayed()->nop();
aoqi@1 2055 __ bind(chkint);
aoqi@1 2056
aoqi@1 2057 // Are we returning to the interpreter? Look for sentinel
aoqi@1 2058
aoqi@1 2059 //__ cmpl(Address(edi, -8), Interpreter::return_sentinel);
aoqi@1 2060 __ lw(AT, LVP , -8);
aoqi@1 2061 __ daddi(AT, AT, -Interpreter::return_sentinel);
aoqi@1 2062 //__ jcc(Assembler::notEqual, skip);
aoqi@1 2063 __ bne(AT, R0, skip);
aoqi@1 2064 __ delayed()->nop();
aoqi@1 2065 // Adjust to compiled return back to interpreter
aoqi@1 2066
aoqi@1 2067 // __ movl(edi, Address(edi, -4));
aoqi@1 2068 __ lw(LVP, LVP, -4);
aoqi@1 2069
aoqi@1 2070 __ bind(skip);
aoqi@1 2071 #endif
aoqi@1 2072 // Align stack pointer for compiled code (note that caller is
aoqi@1 2073 // responsible for undoing this fixup by remembering the old SP
aoqi@1 2074 // in an ebp-relative location)
aoqi@1 2075 // __ andl(esp, -(StackAlignmentInBytes));
aoqi@1 2076 __ move(AT, -(StackAlignmentInBytes));
aoqi@1 2077 __ andr(SP , SP , AT);
aoqi@1 2078 // push the (possibly adjusted) return address
aoqi@1 2079 // __ pushl(edi);
aoqi@1 2080 //__ push(LVP);
aoqi@1 2081 // __ move(RA, LVP);
aoqi@1 2082 // and begin the OSR nmethod
aoqi@1 2083 // __ jmp(Address(esi, nmethod::osr_entry_point_offset()));
aoqi@1 2084 //refer to osr_entry in c1_LIRAssembler_mips.cpp
aoqi@1 2085 __ ld(AT, BCP, nmethod::osr_entry_point_offset());
aoqi@1 2086 __ jr(AT);
aoqi@1 2087 __ delayed()->nop();
aoqi@1 2088 }
aoqi@1 2089 }
aoqi@1 2090 #endif // not CORE
aoqi@1 2091 }
aoqi@1 2092
aoqi@1 2093 void TemplateTable::if_0cmp(Condition cc) {
aoqi@1 2094 transition(itos, vtos);
aoqi@1 2095 // assume branch is more often taken than not (loops use backward branches)
aoqi@1 2096 Label not_taken;
aoqi@1 2097 switch(cc) {
aoqi@1 2098 case not_equal:
aoqi@1 2099 __ beq(FSR, R0, not_taken);
aoqi@1 2100 break;
aoqi@1 2101 case equal:
aoqi@1 2102 __ bne(FSR, R0, not_taken);
aoqi@1 2103 break;
aoqi@1 2104 case less:
aoqi@1 2105 __ bgez(FSR, not_taken);
aoqi@1 2106 break;
aoqi@1 2107 case less_equal:
aoqi@1 2108 __ bgtz(FSR, not_taken);
aoqi@1 2109 break;
aoqi@1 2110 case greater:
aoqi@1 2111 __ blez(FSR, not_taken);
aoqi@1 2112 break;
aoqi@1 2113 case greater_equal:
aoqi@1 2114 __ bltz(FSR, not_taken);
aoqi@1 2115 break;
aoqi@1 2116 }
aoqi@1 2117 __ delayed()->nop();
aoqi@1 2118
aoqi@1 2119 branch(false, false);
aoqi@1 2120
aoqi@1 2121 __ bind(not_taken);
aoqi@1 2122 __ profile_not_taken_branch(FSR);
aoqi@1 2123 }
aoqi@1 2124
aoqi@1 2125
aoqi@1 2126 void TemplateTable::if_icmp(Condition cc) {
aoqi@1 2127 transition(itos, vtos);
aoqi@1 2128 // assume branch is more often taken than not (loops use backward branches)
aoqi@1 2129 Label not_taken;
aoqi@1 2130
aoqi@1 2131 __ pop_i(SSR);
aoqi@1 2132 switch(cc) {
aoqi@1 2133 case not_equal:
aoqi@1 2134 __ beq(SSR, FSR, not_taken);
aoqi@1 2135 break;
aoqi@1 2136 case equal:
aoqi@1 2137 __ bne(SSR, FSR, not_taken);
aoqi@1 2138 break;
aoqi@1 2139 case less:
aoqi@1 2140 __ slt(AT, SSR, FSR);
aoqi@1 2141 __ beq(AT, R0, not_taken);
aoqi@1 2142 break;
aoqi@1 2143 case less_equal:
aoqi@1 2144 __ slt(AT, FSR, SSR);
aoqi@1 2145 __ bne(AT, R0, not_taken);
aoqi@1 2146 break;
aoqi@1 2147 case greater:
aoqi@1 2148 __ slt(AT, FSR, SSR);
aoqi@1 2149 __ beq(AT, R0, not_taken);
aoqi@1 2150 break;
aoqi@1 2151 case greater_equal:
aoqi@1 2152 __ slt(AT, SSR, FSR);
aoqi@1 2153 __ bne(AT, R0, not_taken);
aoqi@1 2154 break;
aoqi@1 2155 }
aoqi@1 2156 __ delayed()->nop();
aoqi@1 2157
aoqi@1 2158 branch(false, false);
aoqi@1 2159
aoqi@1 2160 __ bind(not_taken);
aoqi@1 2161 __ profile_not_taken_branch(FSR);
aoqi@1 2162 }
aoqi@1 2163
aoqi@1 2164
aoqi@1 2165 void TemplateTable::if_nullcmp(Condition cc) {
aoqi@1 2166 transition(atos, vtos);
aoqi@1 2167 // assume branch is more often taken than not (loops use backward branches)
aoqi@1 2168 Label not_taken;
aoqi@1 2169 switch(cc) {
aoqi@1 2170 case not_equal:
aoqi@1 2171 __ beq(FSR, R0, not_taken);
aoqi@1 2172 break;
aoqi@1 2173 case equal:
aoqi@1 2174 __ bne(FSR, R0, not_taken);
aoqi@1 2175 break;
aoqi@1 2176 default:
aoqi@1 2177 ShouldNotReachHere();
aoqi@1 2178 }
aoqi@1 2179 __ delayed()->nop();
aoqi@1 2180
aoqi@1 2181 branch(false, false);
aoqi@1 2182
aoqi@1 2183 __ bind(not_taken);
aoqi@1 2184 __ profile_not_taken_branch(FSR);
aoqi@1 2185 }
aoqi@1 2186
aoqi@1 2187
aoqi@1 2188 void TemplateTable::if_acmp(Condition cc) {
aoqi@1 2189 transition(atos, vtos);
aoqi@1 2190 // assume branch is more often taken than not (loops use backward branches)
aoqi@1 2191 Label not_taken;
aoqi@1 2192 // __ lw(SSR, SP, 0);
aoqi@1 2193 __ pop_ptr(SSR);
aoqi@1 2194 switch(cc) {
aoqi@1 2195 case not_equal:
aoqi@1 2196 __ beq(SSR, FSR, not_taken);
aoqi@1 2197 break;
aoqi@1 2198 case equal:
aoqi@1 2199 __ bne(SSR, FSR, not_taken);
aoqi@1 2200 break;
aoqi@1 2201 default:
aoqi@1 2202 ShouldNotReachHere();
aoqi@1 2203 }
aoqi@1 2204 // __ delayed()->daddi(SP, SP, 4);
aoqi@1 2205 __ delayed()->nop();
aoqi@1 2206
aoqi@1 2207 branch(false, false);
aoqi@1 2208
aoqi@1 2209 __ bind(not_taken);
aoqi@1 2210 __ profile_not_taken_branch(FSR);
aoqi@1 2211 }
aoqi@1 2212
aoqi@1 2213 // used registers : T1, T2, T3
aoqi@1 2214 // T1 : method
aoqi@1 2215 // T2 : return bci
aoqi@1 2216 void TemplateTable::ret() {
aoqi@1 2217 transition(vtos, vtos);
aoqi@1 2218
aoqi@1 2219 locals_index(T2);
aoqi@1 2220 __ ld(T2, T2, 0);
aoqi@1 2221 __ profile_ret(T2, T3);
aoqi@1 2222
aoqi@1 2223 __ get_method(T1);
aoqi@1 2224 __ ld(BCP, T1, in_bytes(Method::const_offset()));
aoqi@1 2225 __ dadd(BCP, BCP, T2);
aoqi@1 2226 __ daddi(BCP, BCP, in_bytes(ConstMethod::codes_offset()));
aoqi@1 2227
aoqi@1 2228 __ dispatch_next(vtos);
aoqi@1 2229 }
aoqi@1 2230
aoqi@1 2231 // used registers : T1, T2, T3
aoqi@1 2232 // T1 : method
aoqi@1 2233 // T2 : return bci
aoqi@1 2234 void TemplateTable::wide_ret() {
aoqi@1 2235 transition(vtos, vtos);
aoqi@1 2236
aoqi@1 2237 locals_index_wide(T2);
aoqi@1 2238 __ ld(T2, T2, 0); // get return bci, compute return bcp
aoqi@1 2239 __ profile_ret(T2, T3);
aoqi@1 2240
aoqi@1 2241 __ get_method(T1);
aoqi@1 2242 __ ld(BCP, T1, in_bytes(Method::const_offset()));
aoqi@1 2243 __ dadd(BCP, BCP, T2);
aoqi@1 2244 __ daddi(BCP, BCP, in_bytes(ConstMethod::codes_offset()));
aoqi@1 2245
aoqi@1 2246 __ dispatch_next(vtos);
aoqi@1 2247 }
aoqi@1 2248
aoqi@1 2249 // used register T2, T3, A7, Rnext
aoqi@1 2250 // T2 : bytecode pointer
aoqi@1 2251 // T3 : low
aoqi@1 2252 // A7 : high
aoqi@1 2253 // Rnext : dest bytecode, required by dispatch_base
aoqi@1 2254 void TemplateTable::tableswitch() {
aoqi@1 2255 Label default_case, continue_execution;
aoqi@1 2256 transition(itos, vtos);
aoqi@1 2257
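// tableswitch operands (4-byte aligned, big-endian): default offset, low, high,
// followed by (high - low + 1) jump offsets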
aoqi@1 2258 // align BCP
aoqi@1 2259 __ daddi(T2, BCP, BytesPerInt);
aoqi@1 2260 __ li(AT, -BytesPerInt);
aoqi@1 2261 __ andr(T2, T2, AT);
aoqi@1 2262
aoqi@1 2263 // load lo & hi
aoqi@1 2264 __ lw(T3, T2, 1 * BytesPerInt);
aoqi@1 2265 __ swap(T3);
aoqi@1 2266 __ lw(A7, T2, 2 * BytesPerInt);
aoqi@1 2267 __ swap(A7);
aoqi@1 2268
aoqi@1 2269 // check against lo & hi
aoqi@1 2270 __ slt(AT, FSR, T3);
aoqi@1 2271 __ bne(AT, R0, default_case);
aoqi@1 2272 __ delayed()->nop();
aoqi@1 2273
aoqi@1 2274 __ slt(AT, A7, FSR);
aoqi@1 2275 __ bne(AT, R0, default_case);
aoqi@1 2276 __ delayed()->nop();
aoqi@1 2277
aoqi@1 2278 // lookup dispatch offset, in A7 big endian
aoqi@1 2279 __ dsub(FSR, FSR, T3);
aoqi@1 2280 __ dsll(AT, FSR, Address::times_4);
aoqi@1 2281 __ dadd(AT, T2, AT);
aoqi@1 2282 __ lw(A7, AT, 3 * BytesPerInt);
aoqi@1 2283 __ profile_switch_case(FSR, T9, T3);
aoqi@1 2284
aoqi@1 2285 __ bind(continue_execution);
aoqi@1 2286 __ swap(A7);
aoqi@1 2287 __ dadd(BCP, BCP, A7);
aoqi@1 2288 __ lbu(Rnext, BCP, 0);
aoqi@1 2289 __ dispatch_only(vtos);
aoqi@1 2290
aoqi@1 2291 // handle default
aoqi@1 2292 __ bind(default_case);
aoqi@1 2293 __ profile_switch_default(FSR);
aoqi@1 2294 __ lw(A7, T2, 0);
aoqi@1 2295 __ b(continue_execution);
aoqi@1 2296 __ delayed()->nop();
aoqi@1 2297 }
aoqi@1 2298
aoqi@1 2299 void TemplateTable::lookupswitch() {
aoqi@1 2300 transition(itos, itos);
aoqi@1 2301 __ stop("lookupswitch bytecode should have been rewritten");
aoqi@1 2302 }
aoqi@1 2303
aoqi@1 2304 // used registers : T2, T3, A7, Rnext
aoqi@1 2305 // T2 : bytecode pointer
aoqi@1 2306 // T3 : pair index
aoqi@1 2307 // A7 : offset
aoqi@1 2308 // Rnext : dest bytecode
aoqi@1 2309 // the data after the opcode is the same as lookupswitch
aoqi@1 2310 // see Rewriter::rewrite_method for more information
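// lookupswitch operands (4-byte aligned, big-endian): default offset, npairs,
// then npairs (match, offset) pairs of 2 * BytesPerInt each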
aoqi@1 2311 void TemplateTable::fast_linearswitch() {
aoqi@1 2312 transition(itos, vtos);
aoqi@1 2313 Label loop_entry, loop, found, continue_execution;
aoqi@1 2314
aoqi@1 2315 // swap FSR so we can avoid swapping the table entries
aoqi@1 2316 __ swap(FSR);
aoqi@1 2317
aoqi@1 2318 // align BCP
aoqi@1 2319 __ daddi(T2, BCP, BytesPerInt);
aoqi@1 2320 __ li(AT, -BytesPerInt);
aoqi@1 2321 __ andr(T2, T2, AT);
aoqi@1 2322
aoqi@1 2323 // set counter
aoqi@1 2324 __ lw(T3, T2, BytesPerInt);
aoqi@1 2325 __ swap(T3);
aoqi@1 2326 __ b(loop_entry);
aoqi@1 2327 __ delayed()->nop();
aoqi@1 2328
aoqi@1 2329 // table search
aoqi@1 2330 __ bind(loop);
aoqi@1 2331 // get the entry value
aoqi@1 2332 __ dsll(AT, T3, Address::times_8);
aoqi@1 2333 __ dadd(AT, T2, AT);
aoqi@1 2334 __ lw(AT, AT, 2 * BytesPerInt);
aoqi@1 2335
aoqi@1 2336 // found?
aoqi@1 2337 __ beq(FSR, AT, found);
aoqi@1 2338 __ delayed()->nop();
aoqi@1 2339
aoqi@1 2340 __ bind(loop_entry);
aoqi@1 2341 __ bgtz(T3, loop);
aoqi@1 2342 __ delayed()->daddiu(T3, T3, -1);
aoqi@1 2343
aoqi@1 2344 // default case
aoqi@1 2345 __ profile_switch_default(FSR);
aoqi@1 2346 __ lw(A7, T2, 0);
aoqi@1 2347 __ b(continue_execution);
aoqi@1 2348 __ delayed()->nop();
aoqi@1 2349
aoqi@1 2350 // entry found -> get offset
aoqi@1 2351 __ bind(found);
aoqi@1 2352 __ dsll(AT, T3, Address::times_8);
aoqi@1 2353 __ dadd(AT, T2, AT);
aoqi@1 2354 __ lw(A7, AT, 3 * BytesPerInt);
aoqi@1 2355 __ profile_switch_case(T3, FSR, T2);
aoqi@1 2356
aoqi@1 2357 // continue execution
aoqi@1 2358 __ bind(continue_execution);
aoqi@1 2359 __ swap(A7);
aoqi@1 2360 __ dadd(BCP, BCP, A7);
aoqi@1 2361 __ lbu(Rnext, BCP, 0);
aoqi@1 2362 __ dispatch_only(vtos);
aoqi@1 2363 }
aoqi@1 2364
aoqi@1 2365 // used registers : T0, T1, T2, T3, A7, Rnext
aoqi@1 2366 // T2 : pairs address(array)
aoqi@1 2367 // Rnext : dest bytecode
aoqi@1 2368 // the data after the opcode is the same as lookupswitch
aoqi@1 2369 // see Rewriter::rewrite_method for more information
aoqi@1 2370 void TemplateTable::fast_binaryswitch() {
aoqi@1 2371 transition(itos, vtos);
aoqi@1 2372 // Implementation using the following core algorithm:
aoqi@1 2373 //
aoqi@1 2374 // int binary_search(int key, LookupswitchPair* array, int n) {
aoqi@1 2375 // // Binary search according to "Methodik des Programmierens" by
aoqi@1 2376 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
aoqi@1 2377 // int i = 0;
aoqi@1 2378 // int j = n;
aoqi@1 2379 // while (i+1 < j) {
aoqi@1 2380 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
aoqi@1 2381 // // with Q: for all i: 0 <= i < n: key < a[i]
aoqi@1 2382 // // where a stands for the array and assuming that the (nonexistent)
aoqi@1 2383 // // element a[n] is infinitely big.
aoqi@1 2384 // int h = (i + j) >> 1;
aoqi@1 2385 // // i < h < j
aoqi@1 2386 // if (key < array[h].fast_match()) {
aoqi@1 2387 // j = h;
aoqi@1 2388 // } else {
aoqi@1 2389 // i = h;
aoqi@1 2390 // }
aoqi@1 2391 // }
aoqi@1 2392 // // R: a[i] <= key < a[i+1] or Q
aoqi@1 2393 // // (i.e., if key is within array, i is the correct index)
aoqi@1 2394 // return i;
aoqi@1 2395 // }
aoqi@1 2396
aoqi@1 2397 // register allocation
aoqi@1 2398 const Register array = T2;
aoqi@1 2399 const Register i = T3, j = A7;
aoqi@1 2400 const Register h = T1;
aoqi@1 2401 const Register temp = T0;
aoqi@1 2402 const Register key = FSR;
aoqi@1 2403
aoqi@1 2404 // setup array
aoqi@1 2405 __ daddi(array, BCP, 3*BytesPerInt);
aoqi@1 2406 __ li(AT, -BytesPerInt);
aoqi@1 2407 __ andr(array, array, AT);
aoqi@1 2408
aoqi@1 2409 // initialize i & j
aoqi@1 2410 __ move(i, R0);
aoqi@1 2411 __ lw(j, array, - 1 * BytesPerInt);
aoqi@1 2412 // Convert j into native byte ordering
aoqi@1 2413 __ swap(j);
aoqi@1 2414
aoqi@1 2415 // and start
aoqi@1 2416 Label entry;
aoqi@1 2417 __ b(entry);
aoqi@1 2418 __ delayed()->nop();
aoqi@1 2419
aoqi@1 2420 // binary search loop
aoqi@1 2421 {
aoqi@1 2422 Label loop;
aoqi@1 2423 __ bind(loop);
aoqi@1 2424 // int h = (i + j) >> 1;
aoqi@1 2425 __ dadd(h, i, j);
aoqi@1 2426 __ dsrl(h, h, 1);
aoqi@1 2427 // if (key < array[h].fast_match()) {
aoqi@1 2428 // j = h;
aoqi@1 2429 // } else {
aoqi@1 2430 // i = h;
aoqi@1 2431 // }
aoqi@1 2432 // Convert array[h].match to native byte-ordering before compare
aoqi@1 2433 __ dsll(AT, h, Address::times_8);
aoqi@1 2434 __ dadd(AT, array, AT);
aoqi@1 2435 __ lw(temp, AT, 0 * BytesPerInt);
aoqi@1 2436 __ swap(temp);
aoqi@1 2437
aoqi@1 2438 {
aoqi@1 2439 Label set_i, end_of_if;
aoqi@1 2440 __ slt(AT, key, temp);
aoqi@1 2441 __ beq(AT, R0, set_i);
aoqi@1 2442 __ delayed()->nop();
aoqi@1 2443
aoqi@1 2444 __ b(end_of_if);
aoqi@1 2445 __ delayed(); __ move(j, h);
aoqi@1 2446
aoqi@1 2447 __ bind(set_i);
aoqi@1 2448 __ move(i, h);
aoqi@1 2449
aoqi@1 2450 __ bind(end_of_if);
aoqi@1 2451 }
aoqi@1 2452 // while (i+1 < j)
aoqi@1 2453 __ bind(entry);
aoqi@1 2454 __ daddi(h, i, 1);
aoqi@1 2455 __ slt(AT, h, j);
aoqi@1 2456 __ bne(AT, R0, loop);
aoqi@1 2457 __ delayed()->nop();
aoqi@1 2458 }
aoqi@1 2459
aoqi@1 2460 // end of binary search, result index is i (must check again!)
aoqi@1 2461 Label default_case;
aoqi@1 2462 // Convert array[i].match to native byte-ordering before compare
aoqi@1 2463 __ dsll(AT, i, Address::times_8);
aoqi@1 2464 __ dadd(AT, array, AT);
aoqi@1 2465 __ lw(temp, AT, 0 * BytesPerInt);
aoqi@1 2466 __ swap(temp);
aoqi@1 2467 __ bne(key, temp, default_case);
aoqi@1 2468 __ delayed()->nop();
aoqi@1 2469
aoqi@1 2470 // entry found -> j = offset
aoqi@1 2471 __ dsll(AT, i, Address::times_8);
aoqi@1 2472 __ dadd(AT, array, AT);
aoqi@1 2473 __ lw(j, AT, 1 * BytesPerInt);
aoqi@1 2474 __ profile_switch_case(i, key, array);
aoqi@1 2475 __ swap(j);
aoqi@1 2476
aoqi@1 2477 __ dadd(BCP, BCP, j);
aoqi@1 2478 __ lbu(Rnext, BCP, 0);
aoqi@1 2479 __ dispatch_only(vtos);
aoqi@1 2480
aoqi@1 2481 // default case -> j = default offset
aoqi@1 2482 __ bind(default_case);
aoqi@1 2483 __ profile_switch_default(i);
aoqi@1 2484 __ lw(j, array, - 2 * BytesPerInt);
aoqi@1 2485 __ swap(j);
aoqi@1 2486 __ dadd(BCP, BCP, j);
aoqi@1 2487 __ lbu(Rnext, BCP, 0);
aoqi@1 2488 __ dispatch_only(vtos);
aoqi@1 2489 }
aoqi@1 2490
aoqi@1 2491 void TemplateTable::_return(TosState state) {
aoqi@1 2492 transition(state, state);
aoqi@1 2493 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
aoqi@1 2494 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
aoqi@1 2495 assert(state == vtos, "only valid state");
aoqi@1 2496 __ ld(T1, aaddress(0));
aoqi@1 2497 //__ ld(LVP, T1, oopDesc::klass_offset_in_bytes());
aoqi@1 2498 __ load_klass(LVP, T1);
aoqi@1 2499 __ lw(LVP, LVP, in_bytes(Klass::access_flags_offset()));
aoqi@1 2500 __ move(AT, JVM_ACC_HAS_FINALIZER);
aoqi@1 2501 __ andr(AT, AT, LVP);//by_css
aoqi@1 2502 Label skip_register_finalizer;
aoqi@1 2503 __ beq(AT, R0, skip_register_finalizer);
aoqi@1 2504 __ delayed()->nop();
aoqi@1 2505 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
aoqi@1 2506 InterpreterRuntime::register_finalizer), T1);
aoqi@1 2507 __ bind(skip_register_finalizer);
aoqi@1 2508 }
aoqi@1 2509 __ remove_activation(state, T9);
fujie@32 2510 __ sync();
aoqi@1 2511
aoqi@1 2512 __ jr(T9);
aoqi@1 2513 __ delayed()->nop();
aoqi@1 2514 }
aoqi@1 2515
aoqi@1 2516 // ----------------------------------------------------------------------------
aoqi@1 2517 // Volatile variables demand their effects be made known to all CPU's
aoqi@1 2518 // in order. Store buffers on most chips allow reads & writes to
aoqi@1 2519 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
aoqi@1 2520 // without some kind of memory barrier (i.e., it's not sufficient that
aoqi@1 2521 // the interpreter does not reorder volatile references, the hardware
aoqi@1 2522 // also must not reorder them).
aoqi@1 2523 //
aoqi@1 2524 // According to the new Java Memory Model (JMM):
aoqi@1 2525 // (1) All volatiles are serialized wrt each other. ALSO reads &
aoqi@1 2526 // writes act as acquire & release, so:
aoqi@1 2527 // (2) A read cannot let unrelated NON-volatile memory refs that
aoqi@1 2528 // happen after the read float up to before the read. It's OK for
aoqi@1 2529 // non-volatile memory refs that happen before the volatile read to
aoqi@1 2530 // float down below it.
aoqi@1 2531 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
aoqi@1 2532 // memory refs that happen BEFORE the write float down to after the
aoqi@1 2533 // write. It's OK for non-volatile memory refs that happen after the
aoqi@1 2534 // volatile write to float up before it.
aoqi@1 2535 //
aoqi@1 2536 // We only put in barriers around volatile refs (they are expensive),
aoqi@1 2537 // not _between_ memory refs (that would require us to track the
aoqi@1 2538 // flavor of the previous memory refs). Requirements (2) and (3)
aoqi@1 2539 // require some barriers before volatile stores and after volatile
aoqi@1 2540 // loads. These nearly cover requirement (1) but miss the
aoqi@1 2541 // volatile-store-volatile-load case. This final case is placed after
aoqi@1 2542 // volatile-stores although it could just as well go before
aoqi@1 2543 // volatile-loads.
aoqi@1 2544 //void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
aoqi@1 2545 // order_constraint) {
aoqi@1 2546 void TemplateTable::volatile_barrier( ) {
aoqi@1 2547 // Helper function to insert a is-volatile test and memory barrier
aoqi@1 2548 //if (os::is_MP()) { // Not needed on single CPU
aoqi@1 2549 // __ membar(order_constraint);
aoqi@1 2550 //}
aoqi@1 2551 if( !os::is_MP() ) return; // Not needed on single CPU
aoqi@1 2552 __ sync();
aoqi@1 2553 }
aoqi@1 2554
aoqi@1 2555 // we don't shift the index left by 2 bits in get_cache_and_index_at_bcp,
aoqi@1 2556 // because we always need to shift the index before using it. A ConstantPoolCacheEntry
aoqi@1 2557 // is 16 bytes long and index is the index into the
aoqi@1 2558 // ConstantPoolCache, so cache + base_offset() + index * 16 is
aoqi@1 2559 // the corresponding ConstantPoolCacheEntry
aoqi@1 2560 // used registers : T2
aoqi@1 2561 // NOTE : the returned index must also be shifted left by 4 to get the address!
aoqi@1 2562 void TemplateTable::resolve_cache_and_index(int byte_no,
aoqi@1 2563 Register Rcache,
aoqi@1 2564 Register index,
aoqi@1 2565 size_t index_size) {
aoqi@1 2566 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
aoqi@1 2567 const Register temp = A1;
aoqi@1 2568 assert_different_registers(Rcache, index);
aoqi@1 2569 const int shift_count = (1 + byte_no)*BitsPerByte;
aoqi@1 2570 Label resolved;
aoqi@1 2571 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
aoqi@1 2572 // is resolved?
aoqi@1 2573 int i = (int)bytecode();
aoqi@1 2574 __ addi(temp, temp, -i);
aoqi@1 2575 __ beq(temp, R0, resolved);
aoqi@1 2576 __ delayed()->nop();
aoqi@1 2577 // resolve first time through
aoqi@1 2578 address entry;
aoqi@1 2579 switch (bytecode()) {
aoqi@1 2580 case Bytecodes::_getstatic : // fall through
aoqi@1 2581 case Bytecodes::_putstatic : // fall through
aoqi@1 2582 case Bytecodes::_getfield : // fall through
aoqi@1 2583 case Bytecodes::_putfield :
aoqi@1 2584 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
aoqi@1 2585 break;
aoqi@1 2586 case Bytecodes::_invokevirtual : // fall through
aoqi@1 2587 case Bytecodes::_invokespecial : // fall through
aoqi@1 2588 case Bytecodes::_invokestatic : // fall through
aoqi@1 2589 case Bytecodes::_invokeinterface:
aoqi@1 2590 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
aoqi@1 2591 break;
aoqi@1 2592 case Bytecodes::_invokehandle:
aoqi@1 2593 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
aoqi@1 2594 break;
aoqi@1 2595 case Bytecodes::_invokedynamic:
aoqi@1 2596 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
aoqi@1 2597 break;
aoqi@1 2598 default :
aoqi@1 2599 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
aoqi@1 2600 }
aoqi@1 2601
aoqi@1 2602 __ move(temp, i);
aoqi@1 2603 __ call_VM(NOREG, entry, temp);
aoqi@1 2604
aoqi@1 2605 // Update registers with resolved info
aoqi@1 2606 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
aoqi@1 2607 __ bind(resolved);
aoqi@1 2608 }
aoqi@1 2609
aoqi@1 2610 // The Rcache and index registers must be set before call
aoqi@1 2611 void TemplateTable::load_field_cp_cache_entry(Register obj,
aoqi@1 2612 Register cache,
aoqi@1 2613 Register index,
aoqi@1 2614 Register off,
aoqi@1 2615 Register flags,
aoqi@1 2616 bool is_static = false) {
aoqi@1 2617 assert_different_registers(cache, index, flags, off);
aoqi@1 2618 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
aoqi@1 2619 // Field offset
aoqi@1 2620 __ dsll(AT, index, Address::times_ptr);
aoqi@1 2621 __ dadd(AT, cache, AT);
aoqi@1 2622 __ ld(off, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
aoqi@1 2623 // Flags
aoqi@1 2624 __ ld(flags, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
aoqi@1 2625
aoqi@1 2626 // klass overwrite register
aoqi@1 2627 if (is_static) {
aoqi@1 2628 __ ld(obj, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
aoqi@1 2629 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
aoqi@1 2630 __ ld(obj, Address(obj, mirror_offset));
aoqi@1 2631
aoqi@1 2632 __ verify_oop(obj);
aoqi@1 2633 }
aoqi@1 2634 }
aoqi@1 2635
aoqi@1 2636 // get the method, itable_index and flags of the current invoke
aoqi@1 2637 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
aoqi@1 2638 Register method,
aoqi@1 2639 Register itable_index,
aoqi@1 2640 Register flags,
aoqi@1 2641 bool is_invokevirtual,
aoqi@1 2642 bool is_invokevfinal, /*unused*/
aoqi@1 2643 bool is_invokedynamic) {
aoqi@1 2644 // setup registers
aoqi@1 2645 const Register cache = T3;
aoqi@1 2646 const Register index = T1;
aoqi@1 2647 assert_different_registers(method, flags);
aoqi@1 2648 assert_different_registers(method, cache, index);
aoqi@1 2649 assert_different_registers(itable_index, flags);
aoqi@1 2650 assert_different_registers(itable_index, cache, index);
aoqi@1 2651 assert(is_invokevirtual == (byte_no == f2_byte), "is invokevirtual flag redundant");
aoqi@1 2652 // determine constant pool cache field offsets
aoqi@1 2653 const int method_offset = in_bytes(
aoqi@1 2654 ConstantPoolCache::base_offset() +
aoqi@1 2655 ((byte_no == f2_byte)
aoqi@1 2656 ? ConstantPoolCacheEntry::f2_offset()
aoqi@1 2657 : ConstantPoolCacheEntry::f1_offset()
aoqi@1 2658 )
aoqi@1 2659 );
aoqi@1 2660 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
aoqi@1 2661 ConstantPoolCacheEntry::flags_offset());
aoqi@1 2662 // access constant pool cache fields
aoqi@1 2663 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
aoqi@1 2664 ConstantPoolCacheEntry::f2_offset());
aoqi@1 2665 size_t index_size = (is_invokedynamic ? sizeof(u4): sizeof(u2));
aoqi@1 2666 resolve_cache_and_index(byte_no, cache, index, index_size);
aoqi@1 2667
aoqi@1 2668 //assert(wordSize == 8, "adjust code below");
aoqi@1 2669 // note we shift by 4, not 2, because what we get is the true index
aoqi@1 2670 // of the ConstantPoolCacheEntry, not the 2-bit-shifted index used by the x86 version
aoqi@1 2671 __ dsll(AT, index, Address::times_ptr);
aoqi@1 2672 __ dadd(AT, cache, AT);
aoqi@1 2673 __ ld(method, AT, method_offset);
aoqi@1 2674
aoqi@1 2675
aoqi@1 2676 if (itable_index != NOREG) {
aoqi@1 2677 __ ld(itable_index, AT, index_offset);
aoqi@1 2678 }
aoqi@1 2679 __ ld(flags, AT, flags_offset);
aoqi@1 2680 }
aoqi@1 2681
aoqi@1 2682
aoqi@1 2683 // The registers cache and index expected to be set before call.
aoqi@1 2684 // Correct values of the cache and index registers are preserved.
aoqi@1 2685 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
aoqi@1 2686 bool is_static, bool has_tos) {
aoqi@1 2687 // do the JVMTI work here to avoid disturbing the register state below
aoqi@1 2688 // We use c_rarg registers here because we want to use the register used in
aoqi@1 2689 // the call to the VM
aoqi@1 2690 if (JvmtiExport::can_post_field_access()) {
aoqi@1 2691 // Check to see if a field access watch has been set before we take
aoqi@1 2692 // the time to call into the VM.
aoqi@1 2693 Label L1;
aoqi@1 2694 assert_different_registers(cache, index, FSR);
aoqi@1 2695 __ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr());
aoqi@1 2696 __ lw(FSR, AT, 0);
aoqi@1 2697 __ beq(FSR, R0, L1);
aoqi@1 2698 __ delayed()->nop();
aoqi@1 2699
aoqi@1 2700 // We rely on the bytecode being resolved and the cpCache entry filled in.
aoqi@1 2701 // cache entry pointer
aoqi@1 2702 //__ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
aoqi@1 2703 __ daddi(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
aoqi@1 2704 __ shl(index, 4);
aoqi@1 2705 __ dadd(cache, cache, index);
aoqi@1 2706 if (is_static) {
aoqi@1 2707 __ move(FSR, R0);
aoqi@1 2708 } else {
aoqi@1 2709 __ lw(FSR, SP, 0);
aoqi@1 2710 __ verify_oop(FSR);
aoqi@1 2711 }
aoqi@1 2712 // FSR: object pointer or NULL
aoqi@1 2713 // cache: cache entry pointer
aoqi@1 2714 __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
aoqi@1 2715 InterpreterRuntime::post_field_access), FSR, cache);
aoqi@1 2716 __ get_cache_and_index_at_bcp(cache, index, 1);
aoqi@1 2717 __ bind(L1);
aoqi@1 2718 }
aoqi@1 2719 }
aoqi@1 2720
aoqi@1 2721 void TemplateTable::pop_and_check_object(Register r) {
aoqi@1 2722 __ pop_ptr(r);
aoqi@1 2723 __ null_check(r); // for field access must check obj.
aoqi@1 2724 __ verify_oop(r);
aoqi@1 2725 }
aoqi@1 2726
aoqi@1 2727 // used registers : T0, T1, T2, T3
aoqi@1 2728 // T1 : flags
aoqi@1 2729 // T2 : off
aoqi@1 2730 // T3 : obj
aoqi@1 2731 // T0 : field address
aoqi@1 2732 // The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the
aoqi@1 2733 // following mapping to the TosState states:
aoqi@1 2734 // btos: 0
aoqi@1 2735 // ctos: 1
aoqi@1 2736 // stos: 2
aoqi@1 2737 // itos: 3
aoqi@1 2738 // ltos: 4
aoqi@1 2739 // ftos: 5
aoqi@1 2740 // dtos: 6
aoqi@1 2741 // atos: 7
aoqi@1 2742 // vtos: 8
aoqi@1 2743 // see ConstantPoolCacheEntry::set_field for more info
aoqi@1 2744 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
aoqi@1 2745 transition(vtos, vtos);
aoqi@1 2746
aoqi@1 2747 const Register cache = T3;
aoqi@1 2748 const Register index = T0;
aoqi@1 2749
aoqi@1 2750 const Register obj = T3;
aoqi@1 2751 const Register off = T2;
aoqi@1 2752 const Register flags = T1;
aoqi@1 2753 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
aoqi@1 2754 //jvmti_post_field_access(cache, index, is_static, false);
aoqi@1 2755
aoqi@1 2756 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
aoqi@1 2757
aoqi@1 2758 if (!is_static) pop_and_check_object(obj);
aoqi@1 2759 __ dadd(index, obj, off);
aoqi@1 2760
aoqi@1 2761
aoqi@1 2762 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
aoqi@1 2763
aoqi@1 2764 assert(btos == 0, "change code, btos != 0");
aoqi@1 2765 __ dsrl(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
aoqi@1 2766 __ andi(flags, flags, 0xf);
aoqi@1 2767 __ bne(flags, R0, notByte);
aoqi@1 2768 __ delayed()->nop();
aoqi@1 2769
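// a sync() is issued before each field load below; this appears to act as a
// conservative acquire barrier, since the volatile bit in flags is not tested
// on this (load) path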
aoqi@1 2770 // btos
fujie@33 2771 __ sync();
aoqi@1 2772 __ lb(FSR, index, 0);
aoqi@1 2773 __ sd(FSR, SP, - wordSize);
aoqi@1 2774
aoqi@1 2775 // Rewrite bytecode to be faster
aoqi@1 2776 if (!is_static) {
aoqi@1 2777 patch_bytecode(Bytecodes::_fast_bgetfield, T3, T2);
aoqi@1 2778 }
aoqi@1 2779 __ b(Done);
aoqi@1 2780 __ delayed()->daddi(SP, SP, - wordSize);
aoqi@1 2781
aoqi@1 2782 __ bind(notByte);
aoqi@1 2783 __ move(AT, itos);
aoqi@1 2784 __ bne(flags, AT, notInt);
aoqi@1 2785 __ delayed()->nop();
aoqi@1 2786
aoqi@1 2787 // itos
fujie@33 2788 __ sync();
aoqi@1 2789 __ lw(FSR, index, 0);
aoqi@1 2790 __ sd(FSR, SP, - wordSize);
aoqi@1 2791
aoqi@1 2792 // Rewrite bytecode to be faster
aoqi@1 2793 if (!is_static) {
aoqi@1 2795 patch_bytecode(Bytecodes::_fast_igetfield, T3, T2);
aoqi@1 2796 }
aoqi@1 2797 __ b(Done);
aoqi@1 2798 __ delayed()->daddi(SP, SP, - wordSize);
aoqi@1 2799
aoqi@1 2800 __ bind(notInt);
aoqi@1 2801 __ move(AT, atos);
aoqi@1 2802 __ bne(flags, AT, notObj);
aoqi@1 2803 __ delayed()->nop();
aoqi@1 2804
aoqi@1 2805 // atos
aoqi@1 2806 //add for compressedoops
fujie@33 2807 __ sync();
aoqi@1 2808 __ load_heap_oop(FSR, Address(index, 0));
aoqi@1 2809 __ sd(FSR, SP, - wordSize);
aoqi@1 2810
aoqi@1 2811 if (!is_static) {
aoqi@1 2813 patch_bytecode(Bytecodes::_fast_agetfield, T3, T2);
aoqi@1 2814 }
aoqi@1 2815 __ b(Done);
aoqi@1 2816 __ delayed()->daddi(SP, SP, - wordSize);
aoqi@1 2817
aoqi@1 2818 __ bind(notObj);
aoqi@1 2819 __ move(AT, ctos);
aoqi@1 2820 __ bne(flags, AT, notChar);
aoqi@1 2821 __ delayed()->nop();
aoqi@1 2822
aoqi@1 2823 // ctos
fujie@33 2824 __ sync();
aoqi@1 2825 __ lhu(FSR, index, 0);
aoqi@1 2826 __ sd(FSR, SP, - wordSize);
aoqi@1 2827
aoqi@1 2828 if (!is_static) {
aoqi@1 2829 patch_bytecode(Bytecodes::_fast_cgetfield, T3, T2);
aoqi@1 2830 }
aoqi@1 2831 __ b(Done);
aoqi@1 2832 __ delayed()->daddi(SP, SP, - wordSize);
aoqi@1 2833
aoqi@1 2834 __ bind(notChar);
aoqi@1 2835 __ move(AT, stos);
aoqi@1 2836 __ bne(flags, AT, notShort);
aoqi@1 2837 __ delayed()->nop();
aoqi@1 2838
aoqi@1 2839 // stos
fujie@33 2840 __ sync();
aoqi@1 2841 __ lh(FSR, index, 0);
aoqi@1 2842 __ sd(FSR, SP, - wordSize);
aoqi@1 2843
aoqi@1 2844 if (!is_static) {
aoqi@1 2846 patch_bytecode(Bytecodes::_fast_sgetfield, T3, T2);
aoqi@1 2847 }
aoqi@1 2848 __ b(Done);
aoqi@1 2849 __ delayed()->daddi(SP, SP, - wordSize);
aoqi@1 2850
aoqi@1 2851 __ bind(notShort);
aoqi@1 2852 __ move(AT, ltos);
aoqi@1 2853 __ bne(flags, AT, notLong);
aoqi@1 2854 __ delayed()->nop();
aoqi@1 2855
aoqi@1 2856 // FIXME : the load/store should be atomic; we have no simple way to do this on mips32
aoqi@1 2857 // ltos
fujie@33 2858 __ sync();
aoqi@1 2859 __ ld(FSR, index, 0 * wordSize);
aoqi@1 2860 __ sd(FSR, SP, -2 * wordSize);
aoqi@1 2861 __ sd(R0, SP, -1 * wordSize);
aoqi@1 2862
aoqi@1 2863 // Don't rewrite to _fast_lgetfield for potential volatile case.
aoqi@1 2864 __ b(Done);
aoqi@1 2865 __ delayed()->daddi(SP, SP, - 2 * wordSize);
aoqi@1 2866
aoqi@1 2867 __ bind(notLong);
aoqi@1 2868 __ move(AT, ftos);
aoqi@1 2869 __ bne(flags, AT, notFloat);
aoqi@1 2870 __ delayed()->nop();
aoqi@1 2871
aoqi@1 2872 // ftos
fujie@33 2873 __ sync();
aoqi@1 2874 __ lwc1(FSF, index, 0);
aoqi@1 2875 __ sdc1(FSF, SP, - wordSize);
aoqi@1 2876
aoqi@1 2877 if (!is_static) {
aoqi@1 2878 patch_bytecode(Bytecodes::_fast_fgetfield, T3, T2);
aoqi@1 2879 }
aoqi@1 2880 __ b(Done);
aoqi@1 2881 __ delayed()->daddi(SP, SP, - wordSize);
aoqi@1 2882
aoqi@1 2883 __ bind(notFloat);
aoqi@1 2884 __ move(AT, dtos);
aoqi@1 2885 __ bne(flags, AT, notDouble);
aoqi@1 2886 __ delayed()->nop();
aoqi@1 2887
aoqi@1 2888 // dtos
fujie@33 2889 __ sync();
aoqi@1 2890 __ ldc1(FSF, index, 0 * wordSize);
aoqi@1 2891 __ sdc1(FSF, SP, - 2 * wordSize);
aoqi@1 2892 __ sd(R0, SP, - 1 * wordSize);
aoqi@1 2893
aoqi@1 2894 if (!is_static) {
aoqi@1 2895 patch_bytecode(Bytecodes::_fast_dgetfield, T3, T2);
aoqi@1 2896 }
aoqi@1 2897 __ b(Done);
aoqi@1 2898 __ delayed()->daddi(SP, SP, - 2 * wordSize);
aoqi@1 2899
aoqi@1 2900 __ bind(notDouble);
aoqi@1 2901
aoqi@1 2902 __ stop("Bad state");
aoqi@1 2903
aoqi@1 2904 __ bind(Done);
aoqi@1 2905 }
aoqi@1 2906
aoqi@1 2907 void TemplateTable::getfield(int byte_no) {
aoqi@1 2908 getfield_or_static(byte_no, false);
aoqi@1 2909 }
aoqi@1 2910
aoqi@1 2911 void TemplateTable::getstatic(int byte_no) {
aoqi@1 2912 getfield_or_static(byte_no, true);
aoqi@1 2913 }
aoqi@1 2914 /*
aoqi@1 2915 // used registers : T1, T2, T3, T1
aoqi@1 2916 // T1 : cache & cp entry
aoqi@1 2917 // T2 : obj
aoqi@1 2918 // T3 : flags & value pointer
aoqi@1 2919 // T1 : index
aoqi@1 2920 // see ConstantPoolCacheEntry::set_field for more info
aoqi@1 2921 void TemplateTable::jvmti_post_field_mod(int byte_no, bool is_static) {
aoqi@1 2922 */
aoqi@1 2923
aoqi@1 2924 // The registers cache and index expected to be set before call.
aoqi@1 2925 // The function may destroy various registers, just not the cache and index registers.
aoqi@1 2926 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
aoqi@1 2927 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
aoqi@1 2928
aoqi@1 2929 if (JvmtiExport::can_post_field_modification()) {
aoqi@1 2930 // Check to see if a field modification watch has been set before we take
aoqi@1 2931 // the time to call into the VM.
aoqi@1 2932 Label L1;
aoqi@1 2933 assert_different_registers(cache, index, AT);
aoqi@1 2934
aoqi@1 2935 //__ lui(AT, Assembler::split_high((int)JvmtiExport::get_field_modification_count_addr()));
aoqi@1 2936 //__ lw(FSR, AT, Assembler::split_low((int)JvmtiExport::get_field_modification_count_addr()));
aoqi@1 2937 __ li(AT, JvmtiExport::get_field_modification_count_addr());
aoqi@1 2938 __ lw(FSR, AT, 0);
aoqi@1 2939 __ beq(FSR, R0, L1);
aoqi@1 2940 __ delayed()->nop();
aoqi@1 2941
aoqi@1 2942 /* // We rely on the bytecode being resolved and the cpCache entry filled in.
aoqi@1 2943 resolve_cache_and_index(byte_no, T1, T1);
aoqi@1 2944 */
aoqi@1 2945 // The cache and index registers have been already set.
aoqi@1 2946 // This allows us to eliminate this call, but the cache and index
aoqi@1 2947 // registers then have to be used accordingly after this line.
aoqi@1 2948 // __ get_cache_and_index_at_bcp(eax, edx, 1);
aoqi@1 2949 __ get_cache_and_index_at_bcp(T1, T9, 1);
aoqi@1 2950
aoqi@1 2951 if (is_static) {
aoqi@1 2952 __ move(T2, R0);
aoqi@1 2953 } else {
aoqi@1 2954 // Life is harder. The stack holds the value on top,
aoqi@1 2955 // followed by the object.
aoqi@1 2956 // We don't know the size of the value, though;
aoqi@1 2957 // it could be one or two words
aoqi@1 2958 // depending on its type. As a result, we must find
aoqi@1 2959 // the type to determine where the object is.
aoqi@1 2960 Label two_word, valsize_known;
aoqi@1 2961 __ dsll(AT, T1, 4);
aoqi@1 2962 __ dadd(AT, T1, AT);
aoqi@1 2963 __ lw(T3, AT, in_bytes(cp_base_offset
aoqi@1 2964 + ConstantPoolCacheEntry::flags_offset()));
aoqi@1 2965 __ move(T2, SP);
aoqi@1 2966 __ shr(T3, ConstantPoolCacheEntry::tos_state_shift);
aoqi@1 2967
aoqi@1 2968 // Make sure we don't need to mask ecx for tos_state_shift
aoqi@1 2969 // after the above shift
aoqi@1 2970 ConstantPoolCacheEntry::verify_tos_state_shift();
aoqi@1 2971 __ move(AT, ltos);
aoqi@1 2972 __ beq(T3, AT, two_word);
aoqi@1 2973 __ delayed()->nop();
aoqi@1 2974 __ move(AT, dtos);
aoqi@1 2975 __ beq(T3, AT, two_word);
aoqi@1 2976 __ delayed()->nop();
aoqi@1 2977 __ b(valsize_known);
aoqi@1 2978 //__ delayed()->daddi(T2, T2, wordSize*1);
aoqi@1 2979 __ delayed()->daddi(T2, T2,Interpreter::expr_offset_in_bytes(1) );
aoqi@1 2980
aoqi@1 2981 __ bind(two_word);
aoqi@1 2982 // __ daddi(T2, T2, wordSize*2);
aoqi@1 2983 __ daddi(T2, T2,Interpreter::expr_offset_in_bytes(2));
aoqi@1 2984
aoqi@1 2985 __ bind(valsize_known);
aoqi@1 2986 // setup object pointer
aoqi@1 2987 __ lw(T2, T2, 0*wordSize);
aoqi@1 2988 }
aoqi@1 2989 // cache entry pointer
aoqi@1 2990 __ daddi(T1, T1, in_bytes(cp_base_offset));
aoqi@1 2991 __ shl(T1, 4);
aoqi@1 2992 __ daddu(T1, T1, T1);
aoqi@1 2993 // object (tos)
aoqi@1 2994 __ move(T3, SP);
aoqi@1 2995 // T2: object pointer set up above (NULL if static)
aoqi@1 2996 // T1: cache entry pointer
aoqi@1 2997 // T3: jvalue object on the stack
aoqi@1 2998 __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
aoqi@1 2999 InterpreterRuntime::post_field_modification), T2, T1, T3);
aoqi@1 3000 __ get_cache_and_index_at_bcp(cache, index, 1);
aoqi@1 3001 __ bind(L1);
aoqi@1 3002 }
aoqi@1 3003 }
aoqi@1 3004
aoqi@1 3005 // used registers : T0, T1, T2, T3, T8
aoqi@1 3006 // T1 : flags
aoqi@1 3007 // T2 : off
aoqi@1 3008 // T3 : obj
aoqi@1 3009 // T8 : volatile bit
aoqi@1 3010 // see ConstantPoolCacheEntry::set_field for more info
aoqi@1 3011 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
aoqi@1 3012 transition(vtos, vtos);
aoqi@1 3013
aoqi@1 3014 const Register cache = T3;
aoqi@1 3015 const Register index = T0;
aoqi@1 3016 const Register obj = T3;
aoqi@1 3017 const Register off = T2;
aoqi@1 3018 const Register flags = T1;
aoqi@1 3019 const Register bc = T3;
aoqi@1 3020
aoqi@1 3021 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
aoqi@1 3022 //TODO: LEE
aoqi@1 3023 //jvmti_post_field_mod(cache, index, is_static);
aoqi@1 3024 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
aoqi@1 3025 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
aoqi@1 3026 // volatile_barrier( );
aoqi@1 3027
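// T8 is non-zero iff the field is volatile; it is tested after the store (see the
// end of this method) to decide whether a memory barrier is needed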
aoqi@1 3028 Label notVolatile, Done;
aoqi@1 3029 __ move(AT, 1<<ConstantPoolCacheEntry::is_volatile_shift);
aoqi@1 3030 __ andr(T8, flags, AT);
aoqi@1 3031
aoqi@1 3032 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
aoqi@1 3033
aoqi@1 3034 assert(btos == 0, "change code, btos != 0");
aoqi@1 3035 // btos
aoqi@1 3036 __ dsrl(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
aoqi@1 3037 __ andi(flags, flags, ConstantPoolCacheEntry::tos_state_mask);
aoqi@1 3038 __ bne(flags, R0, notByte);
aoqi@1 3039 __ delayed()->nop();
aoqi@1 3040
aoqi@1 3041 __ pop(btos);
aoqi@1 3042 if (!is_static) {
aoqi@1 3043 pop_and_check_object(obj);
aoqi@1 3044 }
aoqi@1 3045 __ dadd(AT, obj, off);
aoqi@1 3046 __ sb(FSR, AT, 0);
aoqi@1 3047
aoqi@1 3048 if (!is_static) {
aoqi@1 3049 patch_bytecode(Bytecodes::_fast_bputfield, bc, off, true, byte_no);
aoqi@1 3050 }
aoqi@1 3051 __ b(Done);
aoqi@1 3052 __ delayed()->nop();
aoqi@1 3053
aoqi@1 3054 __ bind(notByte);
aoqi@1 3055 // itos
aoqi@1 3056 __ move(AT, itos);
aoqi@1 3057 __ bne(flags, AT, notInt);
aoqi@1 3058 __ delayed()->nop();
aoqi@1 3059
aoqi@1 3060 __ pop(itos);
aoqi@1 3061 if (!is_static) {
aoqi@1 3062 pop_and_check_object(obj);
aoqi@1 3063 }
aoqi@1 3064 __ dadd(AT, obj, off);
aoqi@1 3065 __ sw(FSR, AT, 0);
aoqi@1 3066
aoqi@1 3067 if (!is_static) {
aoqi@1 3068 patch_bytecode(Bytecodes::_fast_iputfield, bc, off, true, byte_no);
aoqi@1 3069 }
aoqi@1 3070 __ b(Done);
aoqi@1 3071 __ delayed()->nop();
aoqi@1 3072 __ bind(notInt);
aoqi@1 3073 // atos
aoqi@1 3074 __ move(AT, atos);
aoqi@1 3075 __ bne(flags, AT, notObj);
aoqi@1 3076 __ delayed()->nop();
aoqi@1 3077
aoqi@1 3078 __ pop(atos);
aoqi@1 3079 if (!is_static) {
aoqi@1 3080 pop_and_check_object(obj);
aoqi@1 3081 }
aoqi@1 3082
aoqi@1 3083 __ dadd(AT, obj, off);
aoqi@1 3084 //__ sd(FSR, AT, 0);
aoqi@1 3085 __ store_heap_oop(Address(AT, 0), FSR);
fujie@32 3086 __ sync();
aoqi@1 3087 __ store_check(obj);
aoqi@1 3088
aoqi@1 3089 if (!is_static) {
aoqi@1 3090 patch_bytecode(Bytecodes::_fast_aputfield, bc, off, true, byte_no);
aoqi@1 3091 }
aoqi@1 3092 __ b(Done);
aoqi@1 3093 __ delayed()->nop();
aoqi@1 3094 __ bind(notObj);
aoqi@1 3095 // ctos
aoqi@1 3096 __ move(AT, ctos);
aoqi@1 3097 __ bne(flags, AT, notChar);
aoqi@1 3098 __ delayed()->nop();
aoqi@1 3099
aoqi@1 3100 __ pop(ctos);
aoqi@1 3101 if (!is_static) {
aoqi@1 3102 pop_and_check_object(obj);
aoqi@1 3103 }
aoqi@1 3104 __ dadd(AT, obj, off);
aoqi@1 3105 __ sh(FSR, AT, 0);
aoqi@1 3106 if (!is_static) {
aoqi@1 3107 patch_bytecode(Bytecodes::_fast_cputfield, bc, off, true, byte_no);
aoqi@1 3108 }
aoqi@1 3109 __ b(Done);
aoqi@1 3110 __ delayed()->nop();
aoqi@1 3111 __ bind(notChar);
aoqi@1 3112 // stos
aoqi@1 3113 __ move(AT, stos);
aoqi@1 3114 __ bne(flags, AT, notShort);
aoqi@1 3115 __ delayed()->nop();
aoqi@1 3116
aoqi@1 3117 __ pop(stos);
aoqi@1 3118 if (!is_static) {
aoqi@1 3119 pop_and_check_object(obj);
aoqi@1 3120 }
aoqi@1 3121 __ dadd(AT, obj, off);
aoqi@1 3122 __ sh(FSR, AT, 0);
aoqi@1 3123 if (!is_static) {
aoqi@1 3124 patch_bytecode(Bytecodes::_fast_sputfield, bc, off, true, byte_no);
aoqi@1 3125 }
aoqi@1 3126 __ b(Done);
aoqi@1 3127 __ delayed()->nop();
aoqi@1 3128 __ bind(notShort);
aoqi@1 3129 // ltos
aoqi@1 3130 __ move(AT, ltos);
aoqi@1 3131 __ bne(flags, AT, notLong);
aoqi@1 3132 __ delayed()->nop();
aoqi@1 3133
aoqi@1 3134   // FIXME: there is no simple way to load/store 64-bit data in a single atomic operation here,
aoqi@1 3135   // so we simply ignore the volatile flag.
aoqi@1 3136 //Label notVolatileLong;
aoqi@1 3137 //__ beq(T1, R0, notVolatileLong);
aoqi@1 3138 //__ delayed()->nop();
aoqi@1 3139
aoqi@1 3140 //addent = 2 * wordSize;
aoqi@1 3141 // no need
aoqi@1 3142 //__ lw(FSR, SP, 0);
aoqi@1 3143 //__ lw(SSR, SP, 1 * wordSize);
aoqi@1 3144 //if (!is_static) {
aoqi@1 3145 // __ lw(T3, SP, addent);
aoqi@1 3146 // addent += 1 * wordSize;
aoqi@1 3147 // __ verify_oop(T3);
aoqi@1 3148 //}
aoqi@1 3149
aoqi@1 3150 //__ daddu(AT, T3, T2);
aoqi@1 3151
aoqi@1 3152 // Replace with real volatile test
aoqi@1 3153 // NOTE : we assume that sdc1&ldc1 operate in 32-bit, this is true for Godson2 even in 64-bit kernel
aoqi@1 3154 // last modified by yjl 7/12/2005
aoqi@1 3155 //__ ldc1(FSF, SP, 0);
aoqi@1 3156 //__ sdc1(FSF, AT, 0);
aoqi@1 3157 //volatile_barrier();
aoqi@1 3158
aoqi@1 3159 // Don't rewrite volatile version
aoqi@1 3160 //__ b(notVolatile);
aoqi@1 3161 //__ delayed()->addiu(SP, SP, addent);
aoqi@1 3162
aoqi@1 3163 //__ bind(notVolatileLong);
aoqi@1 3164
aoqi@1 3165 //__ pop(ltos); // overwrites edx
aoqi@1 3166 // __ lw(FSR, SP, 0 * wordSize);
aoqi@1 3167 // __ lw(SSR, SP, 1 * wordSize);
aoqi@1 3168 // __ daddi(SP, SP, 2*wordSize);
aoqi@1 3169 __ pop(ltos);
aoqi@1 3170 if (!is_static) {
aoqi@1 3171 pop_and_check_object(obj);
aoqi@1 3172 }
aoqi@1 3173 __ dadd(AT, obj, off);
aoqi@1 3174 __ sd(FSR, AT, 0);
aoqi@1 3175 if (!is_static) {
aoqi@1 3176 patch_bytecode(Bytecodes::_fast_lputfield, bc, off, true, byte_no);
aoqi@1 3177 }
aoqi@1 3178 __ b(notVolatile);
aoqi@1 3179 __ delayed()->nop();
aoqi@1 3180
aoqi@1 3181 __ bind(notLong);
aoqi@1 3182 // ftos
aoqi@1 3183 __ move(AT, ftos);
aoqi@1 3184 __ bne(flags, AT, notFloat);
aoqi@1 3185 __ delayed()->nop();
aoqi@1 3186
aoqi@1 3187 __ pop(ftos);
aoqi@1 3188 if (!is_static) {
aoqi@1 3189 pop_and_check_object(obj);
aoqi@1 3190 }
aoqi@1 3191 __ dadd(AT, obj, off);
aoqi@1 3192 __ swc1(FSF, AT, 0);
aoqi@1 3193 if (!is_static) {
aoqi@1 3194 patch_bytecode(Bytecodes::_fast_fputfield, bc, off, true, byte_no);
aoqi@1 3195 }
aoqi@1 3196 __ b(Done);
aoqi@1 3197 __ delayed()->nop();
aoqi@1 3198 __ bind(notFloat);
aoqi@1 3199 // dtos
aoqi@1 3200 __ move(AT, dtos);
aoqi@1 3201 __ bne(flags, AT, notDouble);
aoqi@1 3202 __ delayed()->nop();
aoqi@1 3203
aoqi@1 3204 __ pop(dtos);
aoqi@1 3205 if (!is_static) {
aoqi@1 3206 pop_and_check_object(obj);
aoqi@1 3207 }
aoqi@1 3208 __ dadd(AT, obj, off);
aoqi@1 3209 __ sdc1(FSF, AT, 0);
aoqi@1 3210 if (!is_static) {
aoqi@1 3211 patch_bytecode(Bytecodes::_fast_dputfield, bc, off, true, byte_no);
aoqi@1 3212 }
aoqi@1 3213 __ b(Done);
aoqi@1 3214 __ delayed()->nop();
aoqi@1 3215 __ bind(notDouble);
aoqi@1 3216
aoqi@1 3217 __ stop("Bad state");
aoqi@1 3218
aoqi@1 3219 __ bind(Done);
aoqi@1 3220
aoqi@1 3221 // Check for volatile store
aoqi@1 3222 __ beq(T8, R0, notVolatile);
aoqi@1 3223 __ delayed()->nop();
aoqi@1 3224 volatile_barrier( );
aoqi@1 3225 __ bind(notVolatile);
aoqi@1 3226 }
aoqi@1 3227
aoqi@1 3228 void TemplateTable::putfield(int byte_no) {
aoqi@1 3229 putfield_or_static(byte_no, false);
aoqi@1 3230 }
aoqi@1 3231
aoqi@1 3232 void TemplateTable::putstatic(int byte_no) {
aoqi@1 3233 putfield_or_static(byte_no, true);
aoqi@1 3234 }
aoqi@1 3235
aoqi@1 3236 // used registers : T1, T2, T3
aoqi@1 3237 // T1 : cp_entry
aoqi@1 3238 // T2 : obj
aoqi@1 3239 // T3 : value pointer
aoqi@1 3240 void TemplateTable::jvmti_post_fast_field_mod() {
aoqi@1 3241 if (JvmtiExport::can_post_field_modification()) {
aoqi@1 3242 // Check to see if a field modification watch has been set before we take
aoqi@1 3243 // the time to call into the VM.
aoqi@1 3244 Label L2;
aoqi@1 3245 //__ lui(AT, Assembler::split_high((intptr_t)JvmtiExport::get_field_modification_count_addr()));
aoqi@1 3246 //__ lw(T3, AT, Assembler::split_low((intptr_t)JvmtiExport::get_field_modification_count_addr()));
aoqi@1 3247 __ li(AT, JvmtiExport::get_field_modification_count_addr());
aoqi@1 3248 __ lw(T3, AT, 0);
aoqi@1 3249 __ beq(T3, R0, L2);
aoqi@1 3250 __ delayed()->nop();
aoqi@1 3251 //__ pop(T2);
aoqi@1 3252 __ pop_ptr(T2);
aoqi@1 3253 //__ lw(T2, SP, 0);
aoqi@1 3254 __ verify_oop(T2);
aoqi@1 3255 __ push_ptr(T2);
aoqi@1 3256 __ li(AT, -sizeof(jvalue));
aoqi@1 3257 __ daddu(SP, SP, AT);
aoqi@1 3258 __ move(T3, SP);
aoqi@1 3259 //__ push(T2);
aoqi@1 3260 //__ move(T2, R0);
aoqi@1 3261
aoqi@1 3262 switch (bytecode()) { // load values into the jvalue object
aoqi@1 3263 case Bytecodes::_fast_bputfield:
aoqi@1 3264 __ sb(FSR, SP, 0);
aoqi@1 3265 break;
aoqi@1 3266 case Bytecodes::_fast_sputfield:
aoqi@1 3267 __ sh(FSR, SP, 0);
aoqi@1 3268 break;
aoqi@1 3269 case Bytecodes::_fast_cputfield:
aoqi@1 3270 __ sh(FSR, SP, 0);
aoqi@1 3271 break;
aoqi@1 3272 case Bytecodes::_fast_iputfield:
aoqi@1 3273 __ sw(FSR, SP, 0);
aoqi@1 3274 break;
aoqi@1 3275 case Bytecodes::_fast_lputfield:
aoqi@1 3276 __ sd(FSR, SP, 0);
aoqi@1 3277 break;
aoqi@1 3278 case Bytecodes::_fast_fputfield:
aoqi@1 3279 __ swc1(FSF, SP, 0);
aoqi@1 3280 break;
aoqi@1 3281 case Bytecodes::_fast_dputfield:
aoqi@1 3282 __ sdc1(FSF, SP, 0);
aoqi@1 3283 break;
aoqi@1 3284 case Bytecodes::_fast_aputfield:
aoqi@1 3285 __ sd(FSR, SP, 0);
aoqi@1 3286 break;
aoqi@1 3287 default: ShouldNotReachHere();
aoqi@1 3288 }
aoqi@1 3289
aoqi@1 3290 //__ pop(T2); // restore copy of object pointer
aoqi@1 3291
aoqi@1 3292 // Save eax and sometimes edx because call_VM() will clobber them,
aoqi@1 3293 // then use them for JVM/DI purposes
aoqi@1 3294 __ push(FSR);
aoqi@1 3295 if (bytecode() == Bytecodes::_fast_lputfield) __ push(SSR);
aoqi@1 3296 // access constant pool cache entry
aoqi@1 3297 __ get_cache_entry_pointer_at_bcp(T1, T2, 1);
aoqi@1 3298     // not strictly needed, already verified above
aoqi@1 3299 __ verify_oop(T2);
aoqi@1 3300
aoqi@1 3301     // T2: object pointer copied above
aoqi@1 3302     // T1: cache entry pointer
aoqi@1 3303     // T3: jvalue object on the stack
aoqi@1 3304 __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
aoqi@1 3305 InterpreterRuntime::post_field_modification), T2, T1, T3);
aoqi@1 3306 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(SSR); // restore high value
aoqi@1 3307 //__ pop(FSR); // restore lower value
aoqi@1 3308 //__ daddi(SP, SP, sizeof(jvalue)); // release jvalue object space
aoqi@1 3309 __ lw(FSR, SP, 0);
aoqi@1 3310 __ daddiu(SP, SP, sizeof(jvalue) + 1 * wordSize);
aoqi@1 3311 __ bind(L2);
aoqi@1 3312 }
aoqi@1 3313 }
aoqi@1 3314
aoqi@1 3315 // used registers : T2, T3, T1
aoqi@1 3316 // T2 : index & off & field address
aoqi@1 3317 // T3 : cache & obj
aoqi@1 3318 // T1 : flags
aoqi@1 3319 void TemplateTable::fast_storefield(TosState state) {
aoqi@1 3320 transition(state, vtos);
aoqi@1 3321
aoqi@1 3322 ByteSize base = ConstantPoolCache::base_offset();
aoqi@1 3323
aoqi@1 3324 jvmti_post_fast_field_mod();
aoqi@1 3325
aoqi@1 3326 // access constant pool cache
aoqi@1 3327 __ get_cache_and_index_at_bcp(T3, T2, 1);
aoqi@1 3328
aoqi@1 3329   // load the flags into T1 and test the volatile bit (on x86, edx is the tos register for lputfield)
aoqi@1 3330 __ dsll(AT, T2, Address::times_8);
aoqi@1 3331 __ dadd(AT, T3, AT);
aoqi@1 3332 __ ld(T1, AT, in_bytes(base + ConstantPoolCacheEntry::flags_offset()));
aoqi@1 3333
aoqi@1 3334 // replace index with field offset from cache entry
aoqi@1 3335 __ ld(T2, AT, in_bytes(base + ConstantPoolCacheEntry::f2_offset()));
aoqi@1 3336
aoqi@1 3337 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
aoqi@1 3338 // volatile_barrier( );
aoqi@1 3339
aoqi@1 3340 Label notVolatile, Done;
aoqi@1 3341 // Check for volatile store
aoqi@1 3342 __ move(AT, 1<<ConstantPoolCacheEntry::is_volatile_shift);
aoqi@1 3343 __ andr(AT, T1, AT);
aoqi@1 3344 __ beq(AT, R0, notVolatile);
aoqi@1 3345 __ delayed()->nop();
aoqi@1 3346
aoqi@1 3347
aoqi@1 3348 // Get object from stack
aoqi@1 3349   // NOTE: the value to store is already in FSR/FSF
aoqi@1 3350 // __ pop(T3);
aoqi@1 3351 // __ verify_oop(T3);
aoqi@1 3352 pop_and_check_object(T3);
aoqi@1 3353 // field addresses
aoqi@1 3354 __ dadd(T2, T3, T2);
aoqi@1 3355
aoqi@1 3356 // access field
aoqi@1 3357 switch (bytecode()) {
aoqi@1 3358 case Bytecodes::_fast_bputfield:
aoqi@1 3359 __ sb(FSR, T2, 0);
aoqi@1 3360 break;
aoqi@1 3361 case Bytecodes::_fast_sputfield: // fall through
aoqi@1 3362 case Bytecodes::_fast_cputfield:
aoqi@1 3363 __ sh(FSR, T2, 0);
aoqi@1 3364 break;
aoqi@1 3365 case Bytecodes::_fast_iputfield:
aoqi@1 3366 __ sw(FSR, T2, 0);
aoqi@1 3367 break;
aoqi@1 3368 case Bytecodes::_fast_lputfield:
aoqi@1 3369 __ sd(FSR, T2, 0 * wordSize);
aoqi@1 3370 break;
aoqi@1 3371 case Bytecodes::_fast_fputfield:
aoqi@1 3372 __ swc1(FSF, T2, 0);
aoqi@1 3373 break;
aoqi@1 3374 case Bytecodes::_fast_dputfield:
aoqi@1 3375 __ sdc1(FSF, T2, 0 * wordSize);
aoqi@1 3376 break;
aoqi@1 3377 case Bytecodes::_fast_aputfield:
aoqi@1 3378 __ store_heap_oop(Address(T2, 0), FSR);
fujie@32 3379 __ sync();
aoqi@1 3380 __ store_check(T3);
aoqi@1 3381 break;
aoqi@1 3382 default:
aoqi@1 3383 ShouldNotReachHere();
aoqi@1 3384 }
aoqi@1 3385
aoqi@1 3386 Label done;
aoqi@1 3387 volatile_barrier( );
aoqi@1 3388 __ b(done);
aoqi@1 3389 __ delayed()->nop();
aoqi@1 3390
aoqi@1 3391   // Same code as above, but no volatile test is needed here.
aoqi@1 3392 __ bind(notVolatile);
aoqi@1 3393
aoqi@1 3394 // Get object from stack
aoqi@1 3395 // __ pop(T3);
aoqi@1 3396 // __ verify_oop(T3);
aoqi@1 3397 pop_and_check_object(T3);
aoqi@1 3398 //get the field address
aoqi@1 3399 __ dadd(T2, T3, T2);
aoqi@1 3400
aoqi@1 3401 // access field
aoqi@1 3402 switch (bytecode()) {
aoqi@1 3403 case Bytecodes::_fast_bputfield:
aoqi@1 3404 __ sb(FSR, T2, 0);
aoqi@1 3405 break;
aoqi@1 3406 case Bytecodes::_fast_sputfield: // fall through
aoqi@1 3407 case Bytecodes::_fast_cputfield:
aoqi@1 3408 __ sh(FSR, T2, 0);
aoqi@1 3409 break;
aoqi@1 3410 case Bytecodes::_fast_iputfield:
aoqi@1 3411 __ sw(FSR, T2, 0);
aoqi@1 3412 break;
aoqi@1 3413 case Bytecodes::_fast_lputfield:
aoqi@1 3414 __ sd(FSR, T2, 0 * wordSize);
aoqi@1 3415 break;
aoqi@1 3416 case Bytecodes::_fast_fputfield:
aoqi@1 3417 __ swc1(FSF, T2, 0);
aoqi@1 3418 break;
aoqi@1 3419 case Bytecodes::_fast_dputfield:
aoqi@1 3420 __ sdc1(FSF, T2, 0 * wordSize);
aoqi@1 3421 break;
aoqi@1 3422 case Bytecodes::_fast_aputfield:
aoqi@1 3423 //add for compressedoops
aoqi@1 3424 __ store_heap_oop(Address(T2, 0), FSR);
fujie@32 3425 __ sync();
aoqi@1 3426 __ store_check(T3);
aoqi@1 3427 break;
aoqi@1 3428 default:
aoqi@1 3429 ShouldNotReachHere();
aoqi@1 3430 }
aoqi@1 3431 __ bind(done);
aoqi@1 3432 }
aoqi@1 3433
aoqi@1 3434 // used registers : T2, T3, T1
aoqi@1 3435 // T3 : cp_entry & cache
aoqi@1 3436 // T2 : index & offset
aoqi@1 3437 void TemplateTable::fast_accessfield(TosState state) {
aoqi@1 3438 transition(atos, state);
aoqi@1 3439
aoqi@1 3440 // do the JVMTI work here to avoid disturbing the register state below
aoqi@1 3441 if (JvmtiExport::can_post_field_access()) {
aoqi@1 3442 // Check to see if a field access watch has been set before we take
aoqi@1 3443 // the time to call into the VM.
aoqi@1 3444 Label L1;
aoqi@1 3445 __ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr());
aoqi@1 3446 __ lw(T3, AT, 0);
aoqi@1 3447 __ beq(T3, R0, L1);
aoqi@1 3448 __ delayed()->nop();
aoqi@1 3449 // access constant pool cache entry
aoqi@1 3450 __ get_cache_entry_pointer_at_bcp(T3, T1, 1);
aoqi@1 3451 __ move(TSR, FSR);
aoqi@1 3452 __ verify_oop(FSR);
aoqi@1 3453 // FSR: object pointer copied above
aoqi@1 3454 // T3: cache entry pointer
aoqi@1 3455 __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
aoqi@1 3456 FSR, T3);
aoqi@1 3457 __ move(FSR, TSR);
aoqi@1 3458 __ bind(L1);
aoqi@1 3459 }
aoqi@1 3460
aoqi@1 3461 // access constant pool cache
aoqi@1 3462 __ get_cache_and_index_at_bcp(T3, T2, 1);
aoqi@1 3463 // replace index with field offset from cache entry
aoqi@1 3464 __ dsll(AT, T2, Address::times_8);
aoqi@1 3465 //__ dsll(AT, T2, 4);
aoqi@1 3466 __ dadd(AT, T3, AT);
aoqi@1 3467 __ ld(T2, AT, in_bytes(ConstantPoolCache::base_offset()
aoqi@1 3468 + ConstantPoolCacheEntry::f2_offset()));
aoqi@1 3469
aoqi@1 3470   // FSR: object
aoqi@1 3471 __ verify_oop(FSR);
aoqi@1 3472 // __ null_check(FSR, 0);
aoqi@1 3473 __ null_check(FSR);
aoqi@1 3474 // field addresses
aoqi@1 3475 __ dadd(FSR, FSR, T2);
aoqi@1 3476
aoqi@1 3477 // access field
aoqi@1 3478 switch (bytecode()) {
aoqi@1 3479 case Bytecodes::_fast_bgetfield:
aoqi@1 3480 __ lb(FSR, FSR, 0);
aoqi@1 3481 break;
aoqi@1 3482 case Bytecodes::_fast_sgetfield:
aoqi@1 3483 __ lh(FSR, FSR, 0);
aoqi@1 3484 break;
aoqi@1 3485 case Bytecodes::_fast_cgetfield:
aoqi@1 3486 __ lhu(FSR, FSR, 0);
aoqi@1 3487 break;
aoqi@1 3488 case Bytecodes::_fast_igetfield:
aoqi@1 3489 __ lw(FSR, FSR, 0);
aoqi@1 3490 break;
aoqi@1 3491 case Bytecodes::_fast_lgetfield:
aoqi@1 3492 __ stop("should not be rewritten");
aoqi@1 3493 break;
aoqi@1 3494 case Bytecodes::_fast_fgetfield:
aoqi@1 3495 __ lwc1(FSF, FSR, 0);
aoqi@1 3496 break;
aoqi@1 3497 case Bytecodes::_fast_dgetfield:
aoqi@1 3498 __ ldc1(FSF, FSR, 0);
aoqi@1 3499 break;
aoqi@1 3500 case Bytecodes::_fast_agetfield:
aoqi@1 3501 //add for compressedoops
aoqi@1 3502 __ load_heap_oop(FSR, Address(FSR, 0));
aoqi@1 3503 __ verify_oop(FSR);
aoqi@1 3504 break;
aoqi@1 3505 default:
aoqi@1 3506 ShouldNotReachHere();
aoqi@1 3507 }
aoqi@1 3508
aoqi@1 3509 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
aoqi@1 3510 // volatile_barrier( );
aoqi@1 3511 }
aoqi@1 3512
aoqi@1 3513 // generator for _fast_iaccess_0, _fast_aaccess_0, _fast_faccess_0
aoqi@1 3514 // used registers : T1, T2, T3
aoqi@1 3515 // T1 : obj & field address
aoqi@1 3516 // T2 : index & off
aoqi@1 3517 // T3 : cache
aoqi@1 3519 void TemplateTable::fast_xaccess(TosState state) {
aoqi@1 3520 transition(vtos, state);
aoqi@1 3521 // get receiver
aoqi@1 3522 __ ld(T1, aaddress(0));
aoqi@1 3523 // access constant pool cache
aoqi@1 3524 __ get_cache_and_index_at_bcp(T3, T2, 2);
aoqi@1 3525 __ dsll(AT, T2, Address::times_8);
aoqi@1 3526 __ dadd(AT, T3, AT);
aoqi@1 3527 __ ld(T2, AT, in_bytes(ConstantPoolCache::base_offset()
aoqi@1 3528 + ConstantPoolCacheEntry::f2_offset()));
aoqi@1 3529
aoqi@1 3530 // make sure exception is reported in correct bcp range (getfield is next instruction)
aoqi@1 3531 __ daddi(BCP, BCP, 1);
aoqi@1 3532 // __ null_check(T1, 0);
aoqi@1 3533 __ null_check(T1);
aoqi@1 3534 __ dadd(T1, T1, T2);
aoqi@1 3535
aoqi@1 3536 if (state == itos) {
aoqi@1 3537 __ lw(FSR, T1, 0);
aoqi@1 3538 } else if (state == atos) {
aoqi@1 3539 //__ ld(FSR, T1, 0);
aoqi@1 3540 __ load_heap_oop(FSR, Address(T1, 0));
aoqi@1 3541 __ verify_oop(FSR);
aoqi@1 3542 } else if (state == ftos) {
aoqi@1 3543 __ lwc1(FSF, T1, 0);
aoqi@1 3544 } else {
aoqi@1 3545 ShouldNotReachHere();
aoqi@1 3546 }
aoqi@1 3547 __ daddi(BCP, BCP, -1);
aoqi@1 3548 }
aoqi@1 3549
aoqi@1 3550 //---------------------------------------------------
aoqi@1 3551 //-------------------------------------------------
aoqi@1 3552 // Calls
aoqi@1 3553
aoqi@1 3554 void TemplateTable::count_calls(Register method, Register temp) {
aoqi@1 3555 // implemented elsewhere
aoqi@1 3556 ShouldNotReachHere();
aoqi@1 3557 }
aoqi@1 3558
aoqi@1 3559 // method, index, recv, flags: T1, T2, T3, T1
aoqi@1 3560 // byte_no = 2 for _invokevirtual, 1 otherwise
aoqi@1 3561 // T0 : return address
aoqi@1 3562 // Get the method & index of the invoke, and load the return address of
aoqi@1 3563 // the invoke (the first word in the frame); this is the address the
aoqi@1 3564 // return code jumps to.
aoqi@1 3565 // NOTE : this method sets up T3 and T1 as recv and flags
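// For example, invokevirtual below calls
//   prepare_invoke(byte_no, Rmethod, NOREG, T3, T1);
// after which the resolved method is in Rmethod, the receiver in T3 and the
// flags word in T1.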
aoqi@1 3566 void TemplateTable::prepare_invoke(int byte_no,
aoqi@1 3567 Register method, //linked method (or i-klass)
aoqi@1 3568 Register index, //itable index, MethodType ,etc.
aoqi@1 3569 Register recv, // if caller wants to see it
aoqi@1 3570 Register flags // if caller wants to test it
aoqi@1 3571 ) {
aoqi@1 3572 // determine flags
aoqi@1 3573 const Bytecodes::Code code = bytecode();
aoqi@1 3574 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
aoqi@1 3575 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
aoqi@1 3576 const bool is_invokehandle = code == Bytecodes::_invokehandle;
aoqi@1 3577 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
aoqi@1 3578 const bool is_invokespecial = code == Bytecodes::_invokespecial;
aoqi@1 3579 const bool load_receiver = (recv != noreg);
aoqi@1 3580 const bool save_flags = (flags != noreg);
aoqi@1 3581 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic),"");
aoqi@1 3582 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
aoqi@1 3583 assert(flags == noreg || flags == T1, "error flags reg.");
aoqi@1 3584 assert(recv == noreg || recv == T3, "error recv reg.");
aoqi@1 3585 // setup registers & access constant pool cache
aoqi@1 3586 if(recv == noreg) recv = T3;
aoqi@1 3587 if(flags == noreg) flags = T1;
aoqi@1 3588
aoqi@1 3589 assert_different_registers(method, index, recv, flags);
aoqi@1 3590
aoqi@1 3591 // save 'interpreter return address'
aoqi@1 3592 __ save_bcp();
aoqi@1 3593
aoqi@1 3594 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
aoqi@1 3595 if (is_invokedynamic || is_invokehandle) {
aoqi@1 3596 Label L_no_push;
aoqi@1 3597 __ move(AT, (1 << ConstantPoolCacheEntry::has_appendix_shift));
aoqi@1 3598 __ andr(AT, AT, flags);
aoqi@1 3599 __ beq(AT, R0, L_no_push);
aoqi@1 3600 __ delayed()->nop();
aoqi@1 3601 // Push the appendix as a trailing parameter.
aoqi@1 3602 // This must be done before we get the receiver,
aoqi@1 3603 // since the parameter_size includes it.
aoqi@1 3604 Register tmp = SSR;
aoqi@1 3605 __ push(tmp);
aoqi@1 3606 __ move(tmp, index);
aoqi@1 3607 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
aoqi@1 3608 __ load_resolved_reference_at_index(index, tmp);
aoqi@1 3609 __ pop(tmp);
aoqi@1 3610 __ push(index); // push appendix (MethodType, CallSite, etc.)
aoqi@1 3611 __ bind(L_no_push);
aoqi@1 3612
aoqi@1 3613 }
aoqi@1 3614
aoqi@1 3615 // load receiver if needed (after appendix is pushed so parameter size is correct)
aoqi@1 3616 // Note: no return address pushed yet
aoqi@1 3617 if (load_receiver) {
aoqi@1 3618 __ move(AT, ConstantPoolCacheEntry::parameter_size_mask);
aoqi@1 3619 __ andr(recv, flags, AT);
aoqi@1 3620 // 2014/07/31 Fu: Since we won't push RA on stack, no_return_pc_pushed_yet should be 0.
aoqi@1 3621 const int no_return_pc_pushed_yet = 0; // argument slot correction before we push return address
aoqi@1 3622 const int receiver_is_at_end = -1; // back off one slot to get receiver
aoqi@1 3623 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
aoqi@1 3624
aoqi@1 3625 __ ld(recv, recv_addr);
aoqi@1 3626 __ verify_oop(recv);
aoqi@1 3627 }
aoqi@1 3628 if(save_flags) {
aoqi@1 3629 //__ movl(r13, flags);
aoqi@1 3630 __ move(BCP, flags);
aoqi@1 3631 }
aoqi@1 3632 // compute return type
aoqi@1 3633 __ dsrl(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
aoqi@1 3634 __ andi(flags, flags, 0xf);
aoqi@1 3635
aoqi@1 3636 // Make sure we don't need to mask flags for tos_state_shift after the above shift
aoqi@1 3637 ConstantPoolCacheEntry::verify_tos_state_shift();
aoqi@1 3638 // load return address
aoqi@1 3639 {
aoqi@1 3640 const address table = (address) Interpreter::invoke_return_entry_table_for(code);
aoqi@1 3641 __ li(AT, (long)table);
aoqi@1 3642 __ dsll(flags, flags, LogBytesPerWord);
aoqi@1 3643 __ dadd(AT, AT, flags);
aoqi@1 3644 __ ld(RA, AT, 0);
aoqi@1 3645 }
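// In effect (a sketch of the table lookup just generated):
//   RA = ((address*)Interpreter::invoke_return_entry_table_for(code))[tos_state];
// where tos_state is the return-type state extracted into 'flags' above.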
aoqi@1 3646
aoqi@1 3647 if (save_flags) {
aoqi@1 3648 __ move(flags, BCP);
aoqi@1 3649 __ restore_bcp();
aoqi@1 3650 }
aoqi@1 3651 }
aoqi@1 3652
aoqi@1 3653 // used registers : T0, T3, T1, T2
aoqi@1 3654 // T3 : recv; these two registers follow the convention set up by prepare_invoke
aoqi@1 3655 // T1 : flags, klass
aoqi@1 3656 // Rmethod : method & index (the index must be passed in Rmethod)
aoqi@1 3657 void TemplateTable::invokevirtual_helper(Register index, Register recv,
aoqi@1 3658 Register flags) {
aoqi@1 3659
aoqi@1 3660 assert_different_registers(index, recv, flags, T2);
aoqi@1 3661
aoqi@1 3662 // Test for an invoke of a final method
aoqi@1 3663 Label notFinal;
aoqi@1 3664 __ move(AT, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
aoqi@1 3665 __ andr(AT, flags, AT);
aoqi@1 3666 __ beq(AT, R0, notFinal);
aoqi@1 3667 __ delayed()->nop();
aoqi@1 3668
aoqi@1 3669 Register method = index; // method must be Rmethod
aoqi@1 3670 assert(method == Rmethod, "methodOop must be Rmethod for interpreter calling convention");
aoqi@1 3671
aoqi@1 3672 // do the call - the index is actually the method to call
aoqi@1 3673 // the index is indeed methodOop, for this is vfinal,
aoqi@1 3674 // see ConstantPoolCacheEntry::set_method for more info
aoqi@1 3675
aoqi@1 3676 __ verify_oop(method);
aoqi@1 3677
aoqi@1 3678 // It's final, need a null check here!
aoqi@1 3679 __ null_check(recv);
aoqi@1 3680
aoqi@1 3681 // profile this call
aoqi@1 3682 __ profile_final_call(T2);
aoqi@1 3683
aoqi@1 3684 // 2014/11/24 Fu
aoqi@1 3685 // T2: tmp, used for mdp
aoqi@1 3686 // method: callee
aoqi@1 3687 // T9: tmp
aoqi@1 3688 // is_virtual: true
aoqi@1 3689 __ profile_arguments_type(T2, method, T9, true);
aoqi@1 3690
aoqi@1 3691 // __ move(T0, recv);
aoqi@1 3692 __ jump_from_interpreted(method, T2);
aoqi@1 3693
aoqi@1 3694 __ bind(notFinal);
aoqi@1 3695
aoqi@1 3696 // get receiver klass
aoqi@1 3697 __ null_check(recv, oopDesc::klass_offset_in_bytes());
aoqi@1 3698   // Keep recv in T3 because the callee expects it there
aoqi@1 3699 __ load_klass(T2, recv);
aoqi@1 3700 __ verify_oop(T2);
aoqi@1 3701 // profile this call
aoqi@1 3702 __ profile_virtual_call(T2, T0, T1);
aoqi@1 3703
aoqi@1 3704 // get target methodOop & entry point
aoqi@1 3705 const int base = InstanceKlass::vtable_start_offset() * wordSize;
aoqi@1 3706 assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below");
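// The vtable lookup below is, roughly (a C-style sketch of the generated code):
//   method = *(Method**)((address)recv_klass                        // klass in T2
//              + InstanceKlass::vtable_start_offset() * wordSize    // 'base'
//              + index * vtableEntry::size() * wordSize
//              + vtableEntry::method_offset_in_bytes());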
aoqi@1 3707 __ dsll(AT, index, Address::times_8);
aoqi@1 3708 __ dadd(AT, T2, AT);
aoqi@1 3709   // this is an unaligned read
aoqi@1 3710 __ ld(method, AT, base + vtableEntry::method_offset_in_bytes());
aoqi@1 3711 __ jump_from_interpreted(method, T2);
aoqi@1 3712
aoqi@1 3713 }
aoqi@1 3714
aoqi@1 3715 void TemplateTable::invokevirtual(int byte_no) {
aoqi@1 3716 transition(vtos, vtos);
aoqi@1 3717 assert(byte_no == f2_byte, "use this argument");
aoqi@1 3718 prepare_invoke(byte_no, Rmethod, NOREG, T3, T1);
aoqi@1 3719 // now recv & flags in T3, T1
aoqi@1 3720 invokevirtual_helper(Rmethod, T3, T1);
aoqi@1 3721 }
aoqi@1 3722
aoqi@1 3723 // T9 : entry
aoqi@1 3724 // Rmethod : method
aoqi@1 3725 void TemplateTable::invokespecial(int byte_no) {
aoqi@1 3726 transition(vtos, vtos);
aoqi@1 3727 assert(byte_no == f1_byte, "use this argument");
aoqi@1 3728 prepare_invoke(byte_no, Rmethod, NOREG, T3);
aoqi@1 3729 // now recv & flags in T3, T1
aoqi@1 3730 __ verify_oop(T3);
aoqi@1 3731 __ null_check(T3);
aoqi@1 3732 __ profile_call(T9);
aoqi@1 3733
aoqi@1 3734 // 2014/11/24 Fu
aoqi@1 3735 // T8: tmp, used for mdp
aoqi@1 3736 // Rmethod: callee
aoqi@1 3737 // T9: tmp
aoqi@1 3738 // is_virtual: false
aoqi@1 3739 __ profile_arguments_type(T8, Rmethod, T9, false);
aoqi@1 3740
aoqi@1 3741 __ jump_from_interpreted(Rmethod, T9);
aoqi@1 3742 __ move(T0, T3);//aoqi ?
aoqi@1 3743 }
aoqi@1 3744
aoqi@1 3745 void TemplateTable::invokestatic(int byte_no) {
aoqi@1 3746 transition(vtos, vtos);
aoqi@1 3747 assert(byte_no == f1_byte, "use this argument");
aoqi@1 3748 prepare_invoke(byte_no, Rmethod, NOREG);
aoqi@1 3749 __ verify_oop(Rmethod);
aoqi@1 3750
aoqi@1 3751 __ profile_call(T9);
aoqi@1 3752
aoqi@1 3753 // 2014/11/24 Fu
aoqi@1 3754 // T8: tmp, used for mdp
aoqi@1 3755 // Rmethod: callee
aoqi@1 3756 // T9: tmp
aoqi@1 3757 // is_virtual: false
aoqi@1 3758 __ profile_arguments_type(T8, Rmethod, T9, false);
aoqi@1 3759
aoqi@1 3760 __ jump_from_interpreted(Rmethod, T9);
aoqi@1 3761 }
aoqi@1 3762
aoqi@1 3763 // not implemented on this port yet; left as-is for a future change. FIXME.
aoqi@1 3764 void TemplateTable::fast_invokevfinal(int byte_no) {
aoqi@1 3765 transition(vtos, vtos);
aoqi@1 3766 assert(byte_no == f2_byte, "use this argument");
aoqi@1 3767   __ stop("fast_invokevfinal not used on mips64");
aoqi@1 3768 }
aoqi@1 3769
aoqi@1 3770 // used registers : T0, T1, T2, T3, A7
aoqi@1 3771 // T0 : itable, vtable, entry
aoqi@1 3772 // T2 : interface
aoqi@1 3773 // T3 : receiver
aoqi@1 3774 // T1 : flags, klass
aoqi@1 3775 // Rmethod : index & method; this is required by the interpreter entry
aoqi@1 3776 void TemplateTable::invokeinterface(int byte_no) {
aoqi@1 3777 transition(vtos, vtos);
aoqi@1 3778 //this method will use T1-T4 and T0
aoqi@1 3779 assert(byte_no == f1_byte, "use this argument");
aoqi@1 3780 prepare_invoke(byte_no, T2, Rmethod, T3, T1);
aoqi@1 3781 // T2: Interface
aoqi@1 3782 // Rmethod: index
aoqi@1 3783 // T3: receiver
aoqi@1 3784 // T1: flags
aoqi@1 3785 Label notMethod;
aoqi@1 3786 __ move(AT, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
aoqi@1 3787 __ andr(AT, T1, AT);
aoqi@1 3788 __ beq(AT, R0, notMethod);
aoqi@1 3789 __ delayed()->nop();
aoqi@1 3790
aoqi@1 3791 // Special case of invokeinterface called for virtual method of
aoqi@1 3792 // java.lang.Object. See cpCacheOop.cpp for details.
aoqi@1 3793 // This code isn't produced by javac, but could be produced by
aoqi@1 3794 // another compliant java compiler.
aoqi@1 3795 invokevirtual_helper(Rmethod, T3, T1);
aoqi@1 3796
aoqi@1 3797 __ bind(notMethod);
aoqi@1 3798 // Get receiver klass into T1 - also a null check
aoqi@1 3799 //__ ld(T1, T3, oopDesc::klass_offset_in_bytes());
aoqi@1 3800 //add for compressedoops
aoqi@1 3801 //__ restore_locals();
aoqi@1 3802 //__ null_check(T3, oopDesc::klass_offset_in_bytes());
aoqi@1 3803 __ load_klass(T1, T3);
aoqi@1 3804 __ verify_oop(T1);
aoqi@1 3805
aoqi@1 3806 // profile this call
aoqi@1 3807 __ profile_virtual_call(T1, T0, FSR);
aoqi@1 3808
aoqi@1 3809 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
aoqi@1 3810 // TODO: x86 add a new method lookup_interface_method // LEE
aoqi@1 3811 const int base = InstanceKlass::vtable_start_offset() * wordSize;
aoqi@1 3812 assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below");
aoqi@1 3813 __ lw(AT, T1, InstanceKlass::vtable_length_offset() * wordSize);
aoqi@1 3814 __ dsll(AT, AT, Address::times_8);
aoqi@1 3815 __ dadd(T0, T1, AT);
aoqi@1 3816 __ daddi(T0, T0, base);
aoqi@1 3817 if (HeapWordsPerLong > 1) {
aoqi@1 3818 // Round up to align_object_offset boundary
aoqi@1 3819 __ round_to(T0, BytesPerLong);
aoqi@1 3820 }
aoqi@1 3821   // now T0 points to the beginning of the itable
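// The loop below walks the itable; roughly (a sketch, accessor names illustrative):
//   itableOffsetEntry* e = /* T0, start of itable computed above */;
//   while (e->interface() != NULL && e->interface() != resolved_interface /* T2 */)
//     e++;                                   // step is itableOffsetEntry::size() * wordSize
//   if (e->interface() == NULL) throw IncompatibleClassChangeError;
//   Method* m = *(Method**)((address)recv_klass /* T1 */ + e->offset()
//               + itable_index /* Rmethod */ * itableMethodEntry::size() * wordSize);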
aoqi@1 3822
aoqi@1 3823 Label entry, search, interface_ok;
aoqi@1 3824
aoqi@1 3825 ///__ jmp(entry);
aoqi@1 3826 __ b(entry);
aoqi@1 3827 __ delayed()->nop();
aoqi@1 3828
aoqi@1 3829 __ bind(search);
aoqi@1 3830 __ increment(T0, itableOffsetEntry::size() * wordSize);
aoqi@1 3831
aoqi@1 3832 __ bind(entry);
aoqi@1 3833
aoqi@1 3834 // Check that the entry is non-null. A null entry means that the receiver
aoqi@1 3835 // class doesn't implement the interface, and wasn't the same as the
aoqi@1 3836 // receiver class checked when the interface was resolved.
aoqi@1 3837 __ ld(AT, T0, itableOffsetEntry::interface_offset_in_bytes());
aoqi@1 3838 __ bne(AT, R0, interface_ok);
aoqi@1 3839 __ delayed()->nop();
aoqi@1 3840 // throw exception
aoqi@1 3841 // the call_VM checks for exception, so we should never return here.
aoqi@1 3842
aoqi@1 3843 //__ pop();//FIXME here,
aoqi@1 3844     // On x86 the return address (pushed by prepare_invoke) would be popped here;
aoqi@1 3845     // not needed on this port, since the return address is kept in RA.
aoqi@1 3846
aoqi@1 3847 __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
aoqi@1 3848 __ should_not_reach_here();
aoqi@1 3849
aoqi@1 3850 __ bind(interface_ok);
aoqi@1 3851   // NOTE: unlike x86, no pop is needed here
aoqi@1 3852 //__ lw(AT, T0, itableOffsetEntry::interface_offset_in_bytes());
aoqi@1 3853 __ bne(AT, T2, search);
aoqi@1 3854 __ delayed()->nop();
aoqi@1 3855
aoqi@1 3856 // now we get vtable of the interface
aoqi@1 3857 __ ld(T0, T0, itableOffsetEntry::offset_offset_in_bytes());
aoqi@1 3858 __ daddu(T0, T1, T0);
aoqi@1 3859 assert(itableMethodEntry::size() * wordSize == 8, "adjust the scaling in the code below");
aoqi@1 3860 __ dsll(AT, Rmethod, Address::times_8);
aoqi@1 3861 __ daddu(AT, T0, AT);
aoqi@1 3862 // now we get the method
aoqi@1 3863 __ ld(Rmethod, AT, 0);
aoqi@1 3864   // Rmethod: methodOop to call
aoqi@1 3865 // T3: receiver
aoqi@1 3866 // Check for abstract method error
aoqi@1 3867 // Note: This should be done more efficiently via a throw_abstract_method_error
aoqi@1 3868 // interpreter entry point and a conditional jump to it in case of a null
aoqi@1 3869 // method.
aoqi@1 3870 {
aoqi@1 3871 Label L;
aoqi@1 3872 ///__ testl(ebx, ebx);
aoqi@1 3873 ///__ jcc(Assembler::notZero, L);
aoqi@1 3874 __ bne(Rmethod, R0, L);
aoqi@1 3875 __ delayed()->nop();
aoqi@1 3876
aoqi@1 3877 // throw exception
aoqi@1 3878 // note: must restore interpreter registers to canonical
aoqi@1 3879 // state for exception handling to work correctly!
aoqi@1 3880 ///__ popl(ebx); // pop return address (pushed by prepare_invoke)
aoqi@1 3881 //__ restore_bcp(); // esi must be correct for exception handler
aoqi@1 3882 //(was destroyed)
aoqi@1 3883 //__ restore_locals(); // make sure locals pointer
aoqi@1 3884 //is correct as well (was destroyed)
aoqi@1 3885 ///__ call_VM(noreg, CAST_FROM_FN_PTR(address,
aoqi@1 3886 //InterpreterRuntime::throw_AbstractMethodError));
aoqi@1 3887 __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
aoqi@1 3888 // the call_VM checks for exception, so we should never return here.
aoqi@1 3889 __ should_not_reach_here();
aoqi@1 3890 __ bind(L);
aoqi@1 3891 }
aoqi@1 3892
aoqi@1 3893 // 2014/11/24 Fu
aoqi@1 3894 // T8: tmp, used for mdp
aoqi@1 3895 // Rmethod: callee
aoqi@1 3896 // T9: tmp
aoqi@1 3897 // is_virtual: true
aoqi@1 3898 __ profile_arguments_type(T8, Rmethod, T9, true);
aoqi@1 3899
aoqi@1 3900 __ jump_from_interpreted(Rmethod, T9);
aoqi@1 3901 }
aoqi@1 3902
aoqi@1 3903 void TemplateTable::invokehandle(int byte_no) {
aoqi@1 3904 transition(vtos, vtos);
aoqi@1 3905 assert(byte_no == f1_byte, "use this argument");
aoqi@1 3906 const Register T2_method = Rmethod;
aoqi@1 3907 const Register FSR_mtype = FSR;
aoqi@1 3908 const Register T3_recv = T3;
aoqi@1 3909
aoqi@1 3910 if (!EnableInvokeDynamic) {
aoqi@1 3911 // rewriter does not generate this bytecode
aoqi@1 3912 __ should_not_reach_here();
aoqi@1 3913 return;
aoqi@1 3914 }
aoqi@1 3915
aoqi@1 3916 prepare_invoke(byte_no, T2_method, FSR_mtype, T3_recv);
aoqi@1 3917 //??__ verify_method_ptr(T2_method);
aoqi@1 3918 __ verify_oop(T3_recv);
aoqi@1 3919 __ null_check(T3_recv);
aoqi@1 3920
aoqi@1 3921   // FSR_mtype: MethodType object (from cpool->resolved_references[f1], if necessary)
aoqi@1 3922   // T2_method: MH.invokeExact_MT method (from f2)
aoqi@1 3923 
aoqi@1 3924   // Note: FSR_mtype is already pushed (if necessary) by prepare_invoke
aoqi@1 3925
aoqi@1 3926 // FIXME: profile the LambdaForm also
aoqi@1 3927 __ profile_final_call(T9);
aoqi@1 3928
aoqi@1 3929 // 2014/11/24 Fu
aoqi@1 3930 // T8: tmp, used for mdp
aoqi@1 3931 // T2_method: callee
aoqi@1 3932 // T9: tmp
aoqi@1 3933 // is_virtual: true
aoqi@1 3934 __ profile_arguments_type(T8, T2_method, T9, true);
aoqi@1 3935
aoqi@1 3936 __ jump_from_interpreted(T2_method, T9);
aoqi@1 3937 }
aoqi@1 3938
aoqi@1 3939 void TemplateTable::invokedynamic(int byte_no) {
aoqi@1 3940 transition(vtos, vtos);
aoqi@1 3941 assert(byte_no == f1_byte, "use this argument");
aoqi@1 3942
aoqi@1 3943 if (!EnableInvokeDynamic) {
aoqi@1 3944 // We should not encounter this bytecode if !EnableInvokeDynamic.
aoqi@1 3945 // The verifier will stop it. However, if we get past the verifier,
aoqi@1 3946 // this will stop the thread in a reasonable way, without crashing the JVM.
aoqi@1 3947 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
aoqi@1 3948 InterpreterRuntime::throw_IncompatibleClassChangeError));
aoqi@1 3949 // the call_VM checks for exception, so we should never return here.
aoqi@1 3950 __ should_not_reach_here();
aoqi@1 3951 return;
aoqi@1 3952 }
aoqi@1 3953
aoqi@1 3954 //const Register Rmethod = T2;
aoqi@1 3955 const Register T2_callsite = T2;
aoqi@1 3956
aoqi@1 3957 prepare_invoke(byte_no, Rmethod, T2_callsite);
aoqi@1 3958
aoqi@1 3959   // T2_callsite: CallSite object (from cpool->resolved_references[f1])
aoqi@1 3960   // Rmethod: MH.linkToCallSite method (from f2)
aoqi@1 3961 
aoqi@1 3962   // Note: T2_callsite is already pushed by prepare_invoke
aoqi@1 3963 // %%% should make a type profile for any invokedynamic that takes a ref argument
aoqi@1 3964 // profile this call
aoqi@1 3965 __ profile_call(T9);
aoqi@1 3966
aoqi@1 3967 // 2014/11/24 Fu
aoqi@1 3968 // T8: tmp, used for mdp
aoqi@1 3969 // Rmethod: callee
aoqi@1 3970 // T9: tmp
aoqi@1 3971 // is_virtual: false
aoqi@1 3972 __ profile_arguments_type(T8, Rmethod, T9, false);
aoqi@1 3973
aoqi@1 3974 __ verify_oop(T2_callsite);
aoqi@1 3975
aoqi@1 3976 __ jump_from_interpreted(Rmethod, T9);
aoqi@1 3977 }
aoqi@1 3978
aoqi@1 3979 //----------------------------------------------------------------------------------------------------
aoqi@1 3980 // Allocation
aoqi@1 3981 // T1 : tags & buffer end & thread
aoqi@1 3982 // T2 : object end
aoqi@1 3983 // T3 : klass
aoqi@1 3984 // T0 : object size
aoqi@1 3985 // A1 : cpool
aoqi@1 3986 // A2 : cp index
aoqi@1 3987 // return object in FSR
aoqi@1 3988 void TemplateTable::_new() {
aoqi@1 3989 transition(vtos, atos);
aoqi@16 3990 __ get_2_byte_integer_at_bcp(A2, AT, 1);
aoqi@1 3991 __ huswap(A2);
aoqi@1 3992
aoqi@1 3993 Label slow_case;
aoqi@1 3994 Label done;
aoqi@1 3995 Label initialize_header;
aoqi@1 3996 Label initialize_object; // including clearing the fields
aoqi@1 3997 Label allocate_shared;
aoqi@1 3998
aoqi@1 3999 // get InstanceKlass in T3
aoqi@1 4000 __ get_cpool_and_tags(A1, T1);
aoqi@1 4001 __ dsll(AT, A2, Address::times_8);
aoqi@1 4002 __ dadd(AT, A1, AT);
aoqi@1 4003 __ ld(T3, AT, sizeof(ConstantPool));
aoqi@1 4004
aoqi@1 4005 // make sure the class we're about to instantiate has been resolved.
aoqi@1 4006 // Note: slow_case does a pop of stack, which is why we loaded class/pushed above
aoqi@1 4007 const int tags_offset = Array<u1>::base_offset_in_bytes();
aoqi@1 4008 __ dadd(T1, T1, A2);
aoqi@1 4009 __ lb(AT, T1, tags_offset);
aoqi@1 4010 //__ addiu(AT, AT, - (int)JVM_CONSTANT_UnresolvedClass);
aoqi@1 4011 __ daddiu(AT, AT, - (int)JVM_CONSTANT_Class);
aoqi@1 4012 //__ beq(AT, R0, slow_case);
aoqi@1 4013 __ bne(AT, R0, slow_case);
aoqi@1 4014 __ delayed()->nop();
aoqi@1 4015
aoqi@1 4016 /*make sure klass is initialized & doesn't have finalizer*/
aoqi@1 4017
aoqi@1 4018 // make sure klass is fully initialized
Jin@2 4019 __ lhu(T1, T3, in_bytes(InstanceKlass::init_state_offset()));
aoqi@1 4020 __ daddiu(AT, T1, - (int)InstanceKlass::fully_initialized);
aoqi@1 4021 __ bne(AT, R0, slow_case);
aoqi@1 4022 __ delayed()->nop();
aoqi@1 4023
aoqi@1 4024 // has_finalizer
aoqi@1 4025 //__ lw(T1, T3, Klass::access_flags_offset() + sizeof(oopDesc));
aoqi@1 4026 //__ move(AT, JVM_ACC_CAN_BE_FASTPATH_ALLOCATED);
aoqi@1 4027 //__ andr(AT, T1, AT);
aoqi@1 4028 __ lw(T1, T3, in_bytes(Klass::layout_helper_offset()) );
aoqi@1 4029 __ andi(AT, T1, Klass::_lh_instance_slow_path_bit);
aoqi@1 4030 __ bne(AT, R0, slow_case);
aoqi@1 4031 __ delayed()->nop();
aoqi@1 4032
aoqi@1 4033 // get instance_size in InstanceKlass (already aligned) in T0,
aoqi@1 4034 // be sure to preserve this value
aoqi@1 4035 //__ lw(T0, T3, Klass::size_helper_offset_in_bytes() + sizeof(oopDesc));
aoqi@1 4036 //Klass::_size_helper is renamed Klass::_layout_helper. aoqi
aoqi@1 4037 __ lw(T0, T3, in_bytes(Klass::layout_helper_offset()) );
aoqi@1 4038
aoqi@1 4039 //
aoqi@1 4040 // Allocate the instance
aoqi@1 4041 // 1) Try to allocate in the TLAB
aoqi@1 4042 // 2) if fail and the object is large allocate in the shared Eden
aoqi@1 4043 // 3) if the above fails (or is not applicable), go to a slow case
aoqi@1 4044 // (creates a new TLAB, etc.)
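// A sketch of the fast path generated below (C-like pseudocode; the helper
// names are illustrative, not the actual VM API):
//   obj = thread->tlab_top();
//   if (obj + size <= thread->tlab_end())
//     thread->set_tlab_top(obj + size);                       // 1) TLAB bump-pointer
//   else if (allow_shared_alloc) {
//     retry: obj = *heap_top;
//            if (obj + size > *heap_end)        goto slow_case;
//            if (!CAS(heap_top, obj, obj + size)) goto retry;  // 2) shared Eden
//   } else
//     slow_case: obj = InterpreterRuntime::_new(cpool, cp_index);  // 3) slow path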
aoqi@1 4045
aoqi@1 4046 const bool allow_shared_alloc =
aoqi@1 4047 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
aoqi@1 4048
aoqi@1 4049 if (UseTLAB) {
aoqi@1 4050 #ifndef OPT_THREAD
aoqi@1 4051 const Register thread = T8;
aoqi@1 4052 __ get_thread(thread);
aoqi@1 4053 #else
aoqi@1 4054 const Register thread = TREG;
aoqi@1 4055 #endif
aoqi@1 4056 // get tlab_top
aoqi@1 4057 __ ld(FSR, thread, in_bytes(JavaThread::tlab_top_offset()));
aoqi@1 4058 __ dadd(T2, FSR, T0);
aoqi@1 4059 // get tlab_end
aoqi@1 4060 __ ld(AT, thread, in_bytes(JavaThread::tlab_end_offset()));
aoqi@1 4061 __ slt(AT, AT, T2);
aoqi@1 4062 // __ bne(AT, R0, allocate_shared);
aoqi@1 4063 __ bne(AT, R0, allow_shared_alloc ? allocate_shared : slow_case);
aoqi@1 4064 __ delayed()->nop();
aoqi@1 4065 __ sd(T2, thread, in_bytes(JavaThread::tlab_top_offset()));
aoqi@1 4066
aoqi@1 4067 if (ZeroTLAB) {
aoqi@1 4068 // the fields have been already cleared
aoqi@1 4069 __ b_far(initialize_header);
aoqi@1 4070 } else {
aoqi@1 4071 // initialize both the header and fields
aoqi@1 4072 __ b_far(initialize_object);
aoqi@1 4073 }
aoqi@1 4074 __ delayed()->nop();
aoqi@1 4075 /*
aoqi@1 4076
aoqi@1 4077 if (CMSIncrementalMode) {
aoqi@1 4078 // No allocation in shared eden.
aoqi@1 4079 ///__ jmp(slow_case);
aoqi@1 4080 __ b(slow_case);
aoqi@1 4081 __ delayed()->nop();
aoqi@1 4082 }
aoqi@1 4083 */
aoqi@1 4084 }
aoqi@1 4085
aoqi@1 4086 // Allocation in the shared Eden , if allowed
aoqi@1 4087 // T0 : instance size in words
aoqi@1 4088 if(allow_shared_alloc){
aoqi@1 4089 __ bind(allocate_shared);
aoqi@1 4090 Label retry;
aoqi@1 4091 //Address heap_top(T1, (int)Universe::heap()->top_addr());
aoqi@1 4092 Address heap_top(T1);
aoqi@1 4093 //__ lui(T1, Assembler::split_high((int)Universe::heap()->top_addr()));
aoqi@1 4094 __ li(T1, (long)Universe::heap()->top_addr());
aoqi@1 4095
aoqi@1 4096 __ ld(FSR, heap_top);
aoqi@1 4097 __ bind(retry);
aoqi@1 4098 __ dadd(T2, FSR, T0);
aoqi@1 4099 //__ lui(AT, Assembler::split_high((int)Universe::heap()->end_addr()));
aoqi@1 4100 //__ lw(AT, AT, Assembler::split_low((int)Universe::heap()->end_addr()));
aoqi@1 4101 __ li(AT, (long)Universe::heap()->end_addr());
aoqi@1 4102 __ ld(AT, AT, 0);
aoqi@1 4103 __ slt(AT, AT, T2);
aoqi@1 4104 __ bne(AT, R0, slow_case);
aoqi@1 4105 __ delayed()->nop();
aoqi@1 4106
aoqi@1 4107     // Compare FSR with the heap top and, if still equal, store the new
aoqi@1 4108     // top (T2) through the top-addr pointer; the cmpxchg/beq pair below
aoqi@1 4109     // retries the allocation if another thread updated the top first.
aoqi@1 4110 //
aoqi@1 4111 // FSR: object begin
aoqi@1 4112 // T2: object end
aoqi@1 4113 // T0: instance size in words
aoqi@1 4114
aoqi@1 4115 // if someone beat us on the allocation, try again, otherwise continue
aoqi@1 4116 //__ lui(T1, Assembler::split_high((int)Universe::heap()->top_addr()));
aoqi@1 4117 __ cmpxchg(T2, heap_top, FSR);
aoqi@1 4118 __ beq(AT, R0, retry);
aoqi@1 4119 __ delayed()->nop();
aoqi@1 4120 }
aoqi@1 4121
aoqi@1 4122 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
aoqi@1 4123 // The object is initialized before the header. If the object size is
aoqi@1 4124 // zero, go directly to the header initialization.
aoqi@1 4125 __ bind(initialize_object);
aoqi@1 4126 __ li(AT, - sizeof(oopDesc));
aoqi@1 4127 __ daddu(T0, T0, AT);
aoqi@1 4128 __ beq_far(T0, R0, initialize_header);
aoqi@1 4129 __ delayed()->nop();
aoqi@1 4130
aoqi@1 4131
aoqi@1 4132     // T0 must be a multiple of 2
aoqi@1 4133 #ifdef ASSERT
aoqi@1 4134     // make sure T0 is a multiple of 2
aoqi@1 4135 Label L;
aoqi@1 4136 __ andi(AT, T0, 1);
aoqi@1 4137 __ beq(AT, R0, L);
aoqi@1 4138 __ delayed()->nop();
aoqi@1 4139 __ stop("object size is not multiple of 2 - adjust this code");
aoqi@1 4140 __ bind(L);
aoqi@1 4141     // T0 must be > 0, no extra check needed here
aoqi@1 4142 #endif
aoqi@1 4143
aoqi@1 4144 // initialize remaining object fields: T0 is a multiple of 2
aoqi@1 4145 {
aoqi@1 4146 Label loop;
aoqi@1 4147 __ dadd(T1, FSR, T0);
aoqi@1 4148 __ daddi(T1, T1, -oopSize);
aoqi@1 4149
aoqi@1 4150 __ bind(loop);
aoqi@1 4151 __ sd(R0, T1, sizeof(oopDesc) + 0 * oopSize);
aoqi@1 4152 // __ sd(R0, T1, sizeof(oopDesc) + 1 * oopSize);
aoqi@1 4153       __ bne(T1, FSR, loop); // don't clear the header
aoqi@1 4154 __ delayed()->daddi(T1, T1, -oopSize);
aoqi@1 4155       // since sizeof(oopDesc) == 8 here, the decrement can be moved into the
aoqi@1 4156       // delay slot and FSR compared with T1 for the loop test
aoqi@1 4157 }
aoqi@1 4158 //klass in T3,
aoqi@1 4159 // initialize object header only.
aoqi@1 4160 __ bind(initialize_header);
aoqi@1 4161 if (UseBiasedLocking) {
aoqi@1 4162 // __ popl(ecx); // get saved klass back in the register.
aoqi@1 4163 // __ movl(ebx, Address(ecx, Klass::prototype_header_offset_in_bytes()
aoqi@1 4164 // + klassOopDesc::klass_part_offset_in_bytes()));
aoqi@1 4165 __ ld(AT, T3, in_bytes(Klass::prototype_header_offset()));
aoqi@1 4166 // __ movl(Address(eax, oopDesc::mark_offset_in_bytes ()), ebx);
aoqi@1 4167 __ sd(AT, FSR, oopDesc::mark_offset_in_bytes ());
aoqi@1 4168 } else {
aoqi@1 4169 __ li(AT, (long)markOopDesc::prototype());
aoqi@1 4170 __ sd(AT, FSR, oopDesc::mark_offset_in_bytes());
aoqi@1 4171 }
aoqi@1 4172
aoqi@1 4173 //__ sd(T3, FSR, oopDesc::klass_offset_in_bytes());
aoqi@1 4174 __ store_klass_gap(FSR, R0);
aoqi@1 4175 __ store_klass(FSR, T3);
aoqi@1 4176
aoqi@1 4177 {
aoqi@1 4178 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
aoqi@1 4179 // Trigger dtrace event for fastpath
aoqi@1 4180 __ push(atos);
aoqi@1 4181 __ call_VM_leaf(
aoqi@1 4182 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), FSR);
aoqi@1 4183 __ pop(atos);
aoqi@1 4184 }
aoqi@1 4185 __ b(done);
aoqi@1 4186 __ delayed()->nop();
aoqi@1 4187 }
aoqi@1 4188 // slow case
aoqi@1 4189 __ bind(slow_case);
aoqi@1 4190 call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), A1, A2);
aoqi@1 4191
aoqi@1 4192 // continue
aoqi@1 4193 __ bind(done);
fujie@32 4194 __ sync();
aoqi@1 4195 }
aoqi@1 4196
aoqi@1 4197 void TemplateTable::newarray() {
aoqi@1 4198 transition(itos, atos);
aoqi@1 4199 __ lbu(A1, at_bcp(1));
aoqi@1 4200 //type, count
aoqi@1 4201 call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), A1, FSR);
fujie@32 4202 __ sync();
aoqi@1 4203 }
aoqi@1 4204
aoqi@1 4205 void TemplateTable::anewarray() {
aoqi@1 4206 transition(itos, atos);
aoqi@16 4207 __ get_2_byte_integer_at_bcp(A2, AT, 1);
aoqi@1 4208 __ huswap(A2);
aoqi@1 4209 __ get_constant_pool(A1);
aoqi@1 4210 // cp, index, count
aoqi@1 4211 call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), A1, A2, FSR);
fujie@32 4212 __ sync();
aoqi@1 4213 }
aoqi@1 4214
aoqi@1 4215 void TemplateTable::arraylength() {
aoqi@1 4216 transition(atos, itos);
aoqi@1 4217 __ null_check(FSR, arrayOopDesc::length_offset_in_bytes());
aoqi@1 4218 __ lw(FSR, FSR, arrayOopDesc::length_offset_in_bytes());
aoqi@1 4219 }
aoqi@1 4220
aoqi@1 4221 // T2, T3 and T1 play the roles of ebx, ecx and edx in the x86 version
aoqi@1 4222 // when invoking gen_subtype_check: super in T3, sub in T2, object always in FSR
aoqi@1 4223 // T2 : sub klass
aoqi@1 4224 // T3 : cpool, then super klass
aoqi@1 4226 void TemplateTable::checkcast() {
aoqi@1 4227 transition(atos, atos);
aoqi@1 4228 Label done, is_null, ok_is_subtype, quicked, resolved;
aoqi@1 4229 __ beq(FSR, R0, is_null);
aoqi@1 4230 __ delayed()->nop();
aoqi@1 4231
aoqi@1 4232 // Get cpool & tags index
aoqi@1 4233 __ get_cpool_and_tags(T3, T1);
aoqi@16 4234 __ get_2_byte_integer_at_bcp(T2, AT, 1);
aoqi@1 4235 __ huswap(T2);
aoqi@1 4236
aoqi@1 4237 // See if bytecode has already been quicked
aoqi@1 4238 __ dadd(AT, T1, T2);
aoqi@1 4239 __ lb(AT, AT, Array<u1>::base_offset_in_bytes());
aoqi@1 4240 __ daddiu(AT, AT, - (int)JVM_CONSTANT_Class);
aoqi@1 4241 __ beq(AT, R0, quicked);
aoqi@1 4242 __ delayed()->nop();
aoqi@1 4243
aoqi@1 4244   /* 2012/6/2 Jin: In InterpreterRuntime::quicken_io_cc, lots of new classes may be loaded.
aoqi@1 4245    * GC may then move the object in V0 to another place in the heap.
aoqi@1 4246    * Therefore, we should never keep such an object in a register.
aoqi@1 4247    * Instead, we save it on the stack, where it can be updated automatically by the GC thread.
aoqi@1 4248    * After GC, the object address in FSR is changed to the new place.
aoqi@1 4249    */
aoqi@1 4250 __ push(atos);
aoqi@1 4251 const Register thread = TREG;
aoqi@1 4252 #ifndef OPT_THREAD
aoqi@1 4253 __ get_thread(thread);
aoqi@1 4254 #endif
aoqi@1 4255 call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
aoqi@1 4256 __ get_vm_result_2(T3, thread);
aoqi@1 4257 __ pop_ptr(FSR);
aoqi@1 4258 __ b(resolved);
aoqi@1 4259 __ delayed()->nop();
aoqi@1 4260
aoqi@1 4261 // klass already in cp, get superklass in T3
aoqi@1 4262 __ bind(quicked);
aoqi@1 4263 __ dsll(AT, T2, Address::times_8);
aoqi@1 4264 __ dadd(AT, T3, AT);
aoqi@1 4265 __ ld(T3, AT, sizeof(ConstantPool));
aoqi@1 4266
aoqi@1 4267 __ bind(resolved);
aoqi@1 4268
aoqi@1 4269 // get subklass in T2
aoqi@1 4270 //__ ld(T2, FSR, oopDesc::klass_offset_in_bytes());
aoqi@1 4271 //add for compressedoops
aoqi@1 4272 __ load_klass(T2, FSR);
aoqi@1 4273 // Superklass in T3. Subklass in T2.
aoqi@1 4274 __ gen_subtype_check(T3, T2, ok_is_subtype);
aoqi@1 4275
aoqi@1 4276 // Come here on failure
aoqi@1 4277 // object is at FSR
aoqi@1 4278 __ jmp(Interpreter::_throw_ClassCastException_entry);
aoqi@1 4279 __ delayed()->nop();
aoqi@1 4280
aoqi@1 4281 // Come here on success
aoqi@1 4282 __ bind(ok_is_subtype);
aoqi@1 4283
aoqi@1 4284 // Collect counts on whether this check-cast sees NULLs a lot or not.
aoqi@1 4285 if (ProfileInterpreter) {
aoqi@1 4286 __ b(done);
aoqi@1 4287 __ delayed()->nop();
aoqi@1 4288 __ bind(is_null);
aoqi@1 4289 __ profile_null_seen(T3);
aoqi@1 4290 } else {
aoqi@1 4291 __ bind(is_null);
aoqi@1 4292 }
aoqi@1 4293 __ bind(done);
aoqi@1 4294 }
aoqi@1 4295
aoqi@1 4296 // i use T3 as cpool, T1 as tags, T2 as index
aoqi@1 4297 // object always in FSR, superklass in T3, subklass in T2
aoqi@1 4298 void TemplateTable::instanceof() {
aoqi@1 4299 transition(atos, itos);
aoqi@1 4300 Label done, is_null, ok_is_subtype, quicked, resolved;
aoqi@1 4301
aoqi@1 4302 __ beq(FSR, R0, is_null);
aoqi@1 4303 __ delayed()->nop();
aoqi@1 4304
aoqi@1 4305 // Get cpool & tags index
aoqi@1 4306 __ get_cpool_and_tags(T3, T1);
aoqi@1 4307 // get index
aoqi@16 4308 __ get_2_byte_integer_at_bcp(T2, AT, 1);
aoqi@1 4309 __ hswap(T2);
aoqi@1 4310
aoqi@1 4311 // See if bytecode has already been quicked
aoqi@1 4312 // quicked
aoqi@1 4313 __ daddu(AT, T1, T2);
aoqi@1 4314 __ lb(AT, AT, Array<u1>::base_offset_in_bytes());
aoqi@1 4315 __ daddiu(AT, AT, - (int)JVM_CONSTANT_Class);
aoqi@1 4316 __ beq(AT, R0, quicked);
aoqi@1 4317 __ delayed()->nop();
aoqi@1 4318
aoqi@1 4319 // get superklass in T3
aoqi@1 4320 //__ move(TSR, FSR);
aoqi@1 4321 // sometimes S2 may be changed during the call,
aoqi@1 4322   // be careful if you use TSR as a saving place
aoqi@1 4323 //__ push(FSR);
aoqi@1 4324 __ push(atos);
aoqi@1 4325 const Register thread = TREG;
aoqi@1 4326 #ifndef OPT_THREAD
aoqi@1 4327 __ get_thread(thread);
aoqi@1 4328 #endif
aoqi@1 4329 call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
aoqi@1 4330 __ get_vm_result_2(T3, thread);
aoqi@1 4331 //__ lw(FSR, SP, 0);
aoqi@1 4332 __ pop_ptr(FSR);
aoqi@1 4333 __ b(resolved);
aoqi@1 4334 __ delayed()->nop();
aoqi@1 4335 //__ move(FSR, TSR);
aoqi@1 4336
aoqi@1 4337 // get superklass in T3, subklass in T2
aoqi@1 4338 __ bind(quicked);
aoqi@1 4339 __ dsll(AT, T2, Address::times_8);
aoqi@1 4340 __ daddu(AT, T3, AT);
aoqi@1 4341 __ ld(T3, AT, sizeof(ConstantPool));
aoqi@1 4342
aoqi@1 4343 __ bind(resolved);
aoqi@1 4344 // get subklass in T2
aoqi@1 4345 //__ ld(T2, FSR, oopDesc::klass_offset_in_bytes());
aoqi@1 4346 //add for compressedoops
aoqi@1 4347 __ load_klass(T2, FSR);
aoqi@1 4348
aoqi@1 4349 // Superklass in T3. Subklass in T2.
aoqi@1 4350 __ gen_subtype_check(T3, T2, ok_is_subtype);
aoqi@1 4351 // Come here on failure
aoqi@1 4352 __ b(done);
aoqi@1 4353 __ delayed(); __ move(FSR, R0);
aoqi@1 4354
aoqi@1 4355 // Come here on success
aoqi@1 4356 __ bind(ok_is_subtype);
aoqi@1 4357 __ move(FSR, 1);
aoqi@1 4358
aoqi@1 4359 // Collect counts on whether this test sees NULLs a lot or not.
aoqi@1 4360 if (ProfileInterpreter) {
aoqi@1 4361 __ beq(R0, R0, done);
aoqi@1 4362 __ nop();
aoqi@1 4363 __ bind(is_null);
aoqi@1 4364 __ profile_null_seen(T3);
aoqi@1 4365 } else {
aoqi@1 4366 __ bind(is_null); // same as 'done'
aoqi@1 4367 }
aoqi@1 4368 __ bind(done);
aoqi@1 4369 // FSR = 0: obj == NULL or obj is not an instanceof the specified klass
aoqi@1 4370 // FSR = 1: obj != NULL and obj is an instanceof the specified klass
aoqi@1 4371 }
aoqi@1 4372
aoqi@1 4373 //--------------------------------------------------------
aoqi@1 4374 //--------------------------------------------
aoqi@1 4375 // Breakpoints
aoqi@1 4376 void TemplateTable::_breakpoint() {
aoqi@1 4377
aoqi@1 4378   // Note: We get here even if we are single stepping.
aoqi@1 4379   // jbug insists on setting breakpoints at every bytecode
aoqi@1 4380   // even if we are in single step mode.
aoqi@1 4381
aoqi@1 4382 transition(vtos, vtos);
aoqi@1 4383
aoqi@1 4384 // get the unpatched byte code
aoqi@1 4385 ///__ get_method(ecx);
aoqi@1 4386 ///__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at)
aoqi@1 4387 //, ecx, esi);
aoqi@1 4388 ///__ movl(ebx, eax);
aoqi@1 4389 __ get_method(A1);
aoqi@1 4390 __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at),
aoqi@1 4391 A1, BCP);
aoqi@1 4392 __ move(Rnext, V0); // Jin: Rnext will be used in dispatch_only_normal
aoqi@1 4393
aoqi@1 4394 // post the breakpoint event
aoqi@1 4395 ///__ get_method(ecx);
aoqi@1 4396 ///__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), ecx, esi);
aoqi@1 4397 __ get_method(A1);
aoqi@1 4398 __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), A1, BCP);
aoqi@1 4399
aoqi@1 4400 // complete the execution of original bytecode
aoqi@1 4401 __ dispatch_only_normal(vtos);
aoqi@1 4402 }
aoqi@1 4403
aoqi@1 4404 //----------------------------------------------------------------------------------------------------
aoqi@1 4405 // Exceptions
aoqi@1 4406
aoqi@1 4407 void TemplateTable::athrow() {
aoqi@1 4408 transition(atos, vtos);
aoqi@1 4409 __ null_check(FSR);
aoqi@1 4410 __ jmp(Interpreter::throw_exception_entry());
aoqi@1 4411 __ delayed()->nop();
aoqi@1 4412 }
aoqi@1 4413
aoqi@1 4414 //----------------------------------------------------------------------------------------------------
aoqi@1 4415 // Synchronization
aoqi@1 4416 //
aoqi@1 4417 // Note: monitorenter & exit are symmetric routines; which is reflected
aoqi@1 4418 // in the assembly code structure as well
aoqi@1 4419 //
aoqi@1 4420 // Stack layout:
aoqi@1 4421 //
aoqi@1 4422 // [expressions ] <--- SP = expression stack top
aoqi@1 4423 // ..
aoqi@1 4424 // [expressions ]
aoqi@1 4425 // [monitor entry] <--- monitor block top = expression stack bot
aoqi@1 4426 // ..
aoqi@1 4427 // [monitor entry]
aoqi@1 4428 // [frame data ] <--- monitor block bot
aoqi@1 4429 // ...
aoqi@1 4430 // [return addr ] <--- FP
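// monitorenter below first searches this block for a reusable slot; roughly
// (a sketch, field accessors illustrative):
//   BasicObjectLock* free = NULL;
//   for (BasicObjectLock* e = monitor_block_top; e != monitor_block_bot; e++) {
//     if (e->obj() == NULL)     free = e;   // remember a free slot
//     if (e->obj() == lock_obj) break;      // object already has an entry
//   }
//   if (free == NULL) { /* grow the block by entry_size and shift the expression stack down */ }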
aoqi@1 4431
aoqi@1 4432 // we use T2 as monitor entry pointer, T3 as monitor top pointer, c_rarg0 as free slot pointer
aoqi@1 4433 // object always in FSR
aoqi@1 4434 void TemplateTable::monitorenter() {
aoqi@1 4435 transition(atos, vtos);
aoqi@1 4436 // check for NULL object
aoqi@1 4437 __ null_check(FSR);
aoqi@1 4438
aoqi@1 4439 const Address monitor_block_top(FP, frame::interpreter_frame_monitor_block_top_offset
aoqi@1 4440 * wordSize);
aoqi@1 4441 const int entry_size = (frame::interpreter_frame_monitor_size()* wordSize);
aoqi@1 4442 Label allocated;
aoqi@1 4443
aoqi@1 4444 // initialize entry pointer
aoqi@1 4445 __ move(c_rarg0, R0);
aoqi@1 4446
aoqi@1 4447   // find a free slot in the monitor block (result in c_rarg0)
aoqi@1 4448 {
aoqi@1 4449 Label entry, loop, exit, next;
aoqi@1 4450 __ ld(T2, monitor_block_top);
aoqi@1 4451 __ b(entry);
aoqi@1 4452 __ delayed()->daddi(T3, FP, frame::interpreter_frame_initial_sp_offset * wordSize);
aoqi@1 4453
aoqi@1 4454 // free slot?
aoqi@1 4455 __ bind(loop);
aoqi@1 4456 __ ld(AT, T2, BasicObjectLock::obj_offset_in_bytes());
aoqi@1 4457 __ bne(AT, R0, next);
aoqi@1 4458 __ delayed()->nop();
aoqi@1 4459 __ move(c_rarg0, T2);
aoqi@1 4460
aoqi@1 4461 __ bind(next);
aoqi@1 4462 __ beq(FSR, AT, exit);
aoqi@1 4463 __ delayed()->nop();
aoqi@1 4464 __ daddi(T2, T2, entry_size);
aoqi@1 4465
aoqi@1 4466 __ bind(entry);
aoqi@1 4467 __ bne(T3, T2, loop);
aoqi@1 4468 __ delayed()->nop();
aoqi@1 4469 __ bind(exit);
aoqi@1 4470 }
aoqi@1 4471
aoqi@1 4472 __ bne(c_rarg0, R0, allocated);
aoqi@1 4473 __ delayed()->nop();
aoqi@1 4474
aoqi@1 4475 // allocate one if there's no free slot
aoqi@1 4476 {
aoqi@1 4477 Label entry, loop;
aoqi@1 4478 // 1. compute new pointers // SP: old expression stack top
aoqi@1 4479 __ ld(c_rarg0, monitor_block_top);
aoqi@1 4480 __ daddi(SP, SP, - entry_size);
aoqi@1 4481 __ daddi(c_rarg0, c_rarg0, - entry_size);
aoqi@1 4482 __ sd(c_rarg0, monitor_block_top);
aoqi@1 4483 __ b(entry);
aoqi@1 4484 __ delayed(); __ move(T3, SP);
aoqi@1 4485
aoqi@1 4486 // 2. move expression stack contents
aoqi@1 4487 __ bind(loop);
aoqi@1 4488 __ ld(AT, T3, entry_size);
aoqi@1 4489 __ sd(AT, T3, 0);
aoqi@1 4490 __ daddi(T3, T3, wordSize);
aoqi@1 4491 __ bind(entry);
aoqi@1 4492 __ bne(T3, c_rarg0, loop);
aoqi@1 4493 __ delayed()->nop();
aoqi@1 4494 }
aoqi@1 4495
aoqi@1 4496 __ bind(allocated);
aoqi@1 4497 // Increment bcp to point to the next bytecode,
aoqi@1 4498   // so exception handling for async. exceptions works correctly.
aoqi@1 4499   // The object has already been popped from the stack, so the
aoqi@1 4500   // expression stack looks correct.
aoqi@1 4501 __ daddi(BCP, BCP, 1);
aoqi@1 4502 __ sd(FSR, c_rarg0, BasicObjectLock::obj_offset_in_bytes());
aoqi@1 4503 __ lock_object(c_rarg0);
aoqi@1 4504 // check to make sure this monitor doesn't cause stack overflow after locking
aoqi@1 4505 __ save_bcp(); // in case of exception
aoqi@1 4506 __ generate_stack_overflow_check(0);
aoqi@1 4507 // The bcp has already been incremented. Just need to dispatch to next instruction.
aoqi@1 4508
aoqi@1 4509 __ dispatch_next(vtos);
aoqi@1 4510 }
aoqi@1 4511
aoqi@1 4512 // T2 : top
aoqi@1 4513 // c_rarg0 : entry
aoqi@1 4514 void TemplateTable::monitorexit() {
aoqi@1 4515 transition(atos, vtos);
aoqi@1 4516
aoqi@1 4517 __ null_check(FSR);
aoqi@1 4518
aoqi@1 4519 const int entry_size =(frame::interpreter_frame_monitor_size()* wordSize);
aoqi@1 4520 Label found;
aoqi@1 4521
aoqi@1 4522 // find matching slot
aoqi@1 4523 {
aoqi@1 4524 Label entry, loop;
aoqi@1 4525 __ ld(c_rarg0, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize);
aoqi@1 4526 __ b(entry);
aoqi@1 4527 __ delayed()->daddiu(T2, FP, frame::interpreter_frame_initial_sp_offset * wordSize);
aoqi@1 4528
aoqi@1 4529 __ bind(loop);
aoqi@1 4530 __ ld(AT, c_rarg0, BasicObjectLock::obj_offset_in_bytes());
aoqi@1 4531 __ beq(FSR, AT, found);
aoqi@1 4532 __ delayed()->nop();
aoqi@1 4533 __ daddiu(c_rarg0, c_rarg0, entry_size);
aoqi@1 4534 __ bind(entry);
aoqi@1 4535 __ bne(T2, c_rarg0, loop);
aoqi@1 4536 __ delayed()->nop();
aoqi@1 4537 }
aoqi@1 4538
aoqi@1 4539 // error handling. Unlocking was not block-structured
aoqi@1 4540 Label end;
aoqi@1 4541 __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
aoqi@1 4542 InterpreterRuntime::throw_illegal_monitor_state_exception));
aoqi@1 4543 __ should_not_reach_here();
aoqi@1 4544
aoqi@1 4545 // call run-time routine
aoqi@1 4546 // c_rarg0: points to monitor entry
aoqi@1 4547 __ bind(found);
aoqi@1 4548 __ move(TSR, FSR);
aoqi@1 4549 __ unlock_object(c_rarg0);
aoqi@1 4550 __ move(FSR, TSR);
aoqi@1 4551 __ bind(end);
aoqi@1 4552 }
aoqi@1 4553
aoqi@1 4554 //--------------------------------------------------------------------------------------------------// Wide instructions
aoqi@1 4555
aoqi@1 4556 void TemplateTable::wide() {
aoqi@1 4557 transition(vtos, vtos);
aoqi@1 4558   // Note: the BCP increment step is part of the individual wide bytecode implementations
aoqi@1 4559 __ lbu(Rnext, at_bcp(1));
aoqi@1 4560 __ dsll(T9, Rnext, Address::times_8);
aoqi@1 4561 __ li(AT, (long)Interpreter::_wentry_point);
aoqi@1 4562 __ dadd(AT, T9, AT);
aoqi@1 4563 __ ld(T9, AT, 0);
aoqi@1 4564 __ jr(T9);
aoqi@1 4565 __ delayed()->nop();
aoqi@1 4566 }
aoqi@1 4567
aoqi@1 4568 //--------------------------------------------------------------------------------------------------// Multi arrays
aoqi@1 4569
aoqi@1 4570 void TemplateTable::multianewarray() {
aoqi@1 4571 transition(vtos, atos);
aoqi@1 4572 // last dim is on top of stack; we want address of first one:
aoqi@1 4573 // first_addr = last_addr + (ndims - 1) * wordSize
aoqi@1 4574 __ lbu(A1, at_bcp(3)); // dimension
aoqi@1 4575 __ daddi(A1, A1, -1);
aoqi@1 4576 __ dsll(A1, A1, Address::times_8);
aoqi@1 4577 __ dadd(A1, SP, A1); // now A1 pointer to the count array on the stack
aoqi@1 4578 call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), A1);
aoqi@1 4579 __ lbu(AT, at_bcp(3));
aoqi@1 4580 __ dsll(AT, AT, Address::times_8);
aoqi@1 4581 __ dadd(SP, SP, AT);
fujie@32 4582 __ sync();
aoqi@1 4583 }
aoqi@1 4584
aoqi@1 4585 #endif // !CC_INTERP
