src/cpu/sparc/vm/assembler_sparc.cpp

author:      jrose
date:        Fri, 20 Mar 2009 23:19:36 -0700
changeset:   1100:c89f86385056
parent:      1079:c517646eef23
child:       1115:a80d48f6fde1
permissions: -rw-r--r--

6814659: separable cleanups and subroutines for 6655638
Summary: preparatory but separable changes for method handles
Reviewed-by: kvn, never

duke@435 1 /*
jrose@1100 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_assembler_sparc.cpp.incl"
duke@435 27
duke@435 28 // Implementation of Address
duke@435 29
duke@435 30 Address::Address( addr_type t, int which ) {
duke@435 31 switch (t) {
duke@435 32 case extra_in_argument:
duke@435 33 case extra_out_argument:
duke@435 34 _base = t == extra_in_argument ? FP : SP;
duke@435 35 _hi = 0;
duke@435 36 // Warning: In LP64 mode, _disp will occupy more than 10 bits.
duke@435 37 // This is inconsistent with the other constructors, but opcodes
duke@435 38 // such as ld or ldx only access disp() to get their
duke@435 39 // simm13 argument.
duke@435 40 _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
duke@435 41 break;
duke@435 42 default:
duke@435 43 ShouldNotReachHere();
duke@435 44 break;
duke@435 45 }
duke@435 46 }
duke@435 47
duke@435 48 static const char* argumentNames[][2] = {
duke@435 49 {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
duke@435 50 {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
duke@435 51 {"A(n>9)","P(n>9)"}
duke@435 52 };
duke@435 53
duke@435 54 const char* Argument::name() const {
duke@435 55 int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
duke@435 56 int num = number();
duke@435 57 if (num >= nofArgs) num = nofArgs - 1;
duke@435 58 return argumentNames[num][is_in() ? 1 : 0];
duke@435 59 }
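// Illustrative note (added commentary, not part of the original source): with
// the table above, an Argument whose number() is 3 prints as "P3" when
// is_in() is true and as "A3" when it is false; any argument numbered above 9
// is clamped to the last entry, "A(n>9)"/"P(n>9)".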
duke@435 60
duke@435 61 void Assembler::print_instruction(int inst) {
duke@435 62 const char* s;
duke@435 63 switch (inv_op(inst)) {
duke@435 64 default: s = "????"; break;
duke@435 65 case call_op: s = "call"; break;
duke@435 66 case branch_op:
duke@435 67 switch (inv_op2(inst)) {
duke@435 68 case bpr_op2: s = "bpr"; break;
duke@435 69 case fb_op2: s = "fb"; break;
duke@435 70 case fbp_op2: s = "fbp"; break;
duke@435 71 case br_op2: s = "br"; break;
duke@435 72 case bp_op2: s = "bp"; break;
duke@435 73 case cb_op2: s = "cb"; break;
duke@435 74 default: s = "????"; break;
duke@435 75 }
duke@435 76 }
duke@435 77 ::tty->print("%s", s);
duke@435 78 }
duke@435 79
duke@435 80
duke@435 81 // Patch instruction inst at offset inst_pos to refer to dest_pos
duke@435 82 // and return the resulting instruction.
duke@435 83 // We should have pcs, not offsets, but since all is relative, it will work out
duke@435 84 // OK.
duke@435 85 int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
duke@435 86
duke@435 87 int m; // mask for displacement field
duke@435 88 int v; // new value for displacement field
duke@435 89 const int word_aligned_ones = -4;
duke@435 90 switch (inv_op(inst)) {
duke@435 91 default: ShouldNotReachHere();
duke@435 92 case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
duke@435 93 case branch_op:
duke@435 94 switch (inv_op2(inst)) {
duke@435 95 case bpr_op2: m = wdisp16(word_aligned_ones, 0); v = wdisp16(dest_pos, inst_pos); break;
duke@435 96 case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
duke@435 97 case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
duke@435 98 case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
duke@435 99 case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
duke@435 100 case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
duke@435 101 default: ShouldNotReachHere();
duke@435 102 }
duke@435 103 }
duke@435 104 return inst & ~m | v;
duke@435 105 }
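// Illustrative sketch of the field surgery above (added commentary, not part
// of the original source): for a bp-format branch the displacement field is
// 19 bits, so
//   m = wdisp(word_aligned_ones, 0, 19)   // all ones in the displacement field
//   v = wdisp(dest_pos, inst_pos, 19)     // the new word-aligned displacement
// and "inst & ~m | v" clears the old displacement and ORs in the new one while
// leaving the opcode, condition and prediction bits untouched.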
duke@435 106
duke@435 107 // Return the offset of the branch destination of instruction inst
duke@435 108 // at offset pos.
duke@435 109 // Should have pcs, but since all is relative, it works out.
duke@435 110 int Assembler::branch_destination(int inst, int pos) {
duke@435 111 int r;
duke@435 112 switch (inv_op(inst)) {
duke@435 113 default: ShouldNotReachHere();
duke@435 114 case call_op: r = inv_wdisp(inst, pos, 30); break;
duke@435 115 case branch_op:
duke@435 116 switch (inv_op2(inst)) {
duke@435 117 case bpr_op2: r = inv_wdisp16(inst, pos); break;
duke@435 118 case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
duke@435 119 case bp_op2: r = inv_wdisp( inst, pos, 19); break;
duke@435 120 case fb_op2: r = inv_wdisp( inst, pos, 22); break;
duke@435 121 case br_op2: r = inv_wdisp( inst, pos, 22); break;
duke@435 122 case cb_op2: r = inv_wdisp( inst, pos, 22); break;
duke@435 123 default: ShouldNotReachHere();
duke@435 124 }
duke@435 125 }
duke@435 126 return r;
duke@435 127 }
duke@435 128
duke@435 129 int AbstractAssembler::code_fill_byte() {
duke@435 130 return 0x00; // illegal instruction 0x00000000
duke@435 131 }
duke@435 132
ysr@777 133 Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
ysr@777 134 switch (in) {
ysr@777 135 case rc_z: return equal;
ysr@777 136 case rc_lez: return lessEqual;
ysr@777 137 case rc_lz: return less;
ysr@777 138 case rc_nz: return notEqual;
ysr@777 139 case rc_gz: return greater;
ysr@777 140 case rc_gez: return greaterEqual;
ysr@777 141 default:
ysr@777 142 ShouldNotReachHere();
ysr@777 143 }
ysr@777 144 return equal;
ysr@777 145 }
ysr@777 146
duke@435 147 // Generate a bunch 'o stuff (including v9 instructions)
duke@435 148 #ifndef PRODUCT
duke@435 149 void Assembler::test_v9() {
duke@435 150 add( G0, G1, G2 );
duke@435 151 add( G3, 0, G4 );
duke@435 152
duke@435 153 addcc( G5, G6, G7 );
duke@435 154 addcc( I0, 1, I1 );
duke@435 155 addc( I2, I3, I4 );
duke@435 156 addc( I5, -1, I6 );
duke@435 157 addccc( I7, L0, L1 );
duke@435 158 addccc( L2, (1 << 12) - 2, L3 );
duke@435 159
duke@435 160 Label lbl1, lbl2, lbl3;
duke@435 161
duke@435 162 bind(lbl1);
duke@435 163
duke@435 164 bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
duke@435 165 delayed()->nop();
duke@435 166 bpr( rc_lez, false, pt, L5, lbl1);
duke@435 167 delayed()->nop();
duke@435 168
duke@435 169 fb( f_never, true, pc() + 4, relocInfo::none);
duke@435 170 delayed()->nop();
duke@435 171 fb( f_notEqual, false, lbl2 );
duke@435 172 delayed()->nop();
duke@435 173
duke@435 174 fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
duke@435 175 delayed()->nop();
duke@435 176 fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
duke@435 177 delayed()->nop();
duke@435 178
duke@435 179 br( equal, true, pc() + 1024, relocInfo::none);
duke@435 180 delayed()->nop();
duke@435 181 br( lessEqual, false, lbl1 );
duke@435 182 delayed()->nop();
duke@435 183 br( never, false, lbl1 );
duke@435 184 delayed()->nop();
duke@435 185
duke@435 186 bp( less, true, icc, pn, pc(), relocInfo::none);
duke@435 187 delayed()->nop();
duke@435 188 bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
duke@435 189 delayed()->nop();
duke@435 190
duke@435 191 call( pc(), relocInfo::none);
duke@435 192 delayed()->nop();
duke@435 193 call( lbl3 );
duke@435 194 delayed()->nop();
duke@435 195
duke@435 196
duke@435 197 casa( L6, L7, O0 );
duke@435 198 casxa( O1, O2, O3, 0 );
duke@435 199
duke@435 200 udiv( O4, O5, O7 );
duke@435 201 udiv( G0, (1 << 12) - 1, G1 );
duke@435 202 sdiv( G1, G2, G3 );
duke@435 203 sdiv( G4, -((1 << 12) - 1), G5 );
duke@435 204 udivcc( G6, G7, I0 );
duke@435 205 udivcc( I1, -((1 << 12) - 2), I2 );
duke@435 206 sdivcc( I3, I4, I5 );
duke@435 207 sdivcc( I6, -((1 << 12) - 0), I7 );
duke@435 208
duke@435 209 done();
duke@435 210 retry();
duke@435 211
duke@435 212 fadd( FloatRegisterImpl::S, F0, F1, F2 );
duke@435 213 fsub( FloatRegisterImpl::D, F34, F0, F62 );
duke@435 214
duke@435 215 fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
duke@435 216 fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);
duke@435 217
duke@435 218 ftox( FloatRegisterImpl::D, F2, F4 );
duke@435 219 ftoi( FloatRegisterImpl::Q, F4, F8 );
duke@435 220
duke@435 221 ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );
duke@435 222
duke@435 223 fxtof( FloatRegisterImpl::S, F4, F5 );
duke@435 224 fitof( FloatRegisterImpl::D, F6, F8 );
duke@435 225
duke@435 226 fmov( FloatRegisterImpl::Q, F16, F20 );
duke@435 227 fneg( FloatRegisterImpl::S, F6, F7 );
duke@435 228 fabs( FloatRegisterImpl::D, F10, F12 );
duke@435 229
duke@435 230 fmul( FloatRegisterImpl::Q, F24, F28, F32 );
duke@435 231 fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
duke@435 232 fdiv( FloatRegisterImpl::S, F10, F11, F12 );
duke@435 233
duke@435 234 fsqrt( FloatRegisterImpl::S, F13, F14 );
duke@435 235
duke@435 236 flush( L0, L1 );
duke@435 237 flush( L2, -1 );
duke@435 238
duke@435 239 flushw();
duke@435 240
duke@435 241 illtrap( (1 << 22) - 2);
duke@435 242
duke@435 243 impdep1( 17, (1 << 19) - 1 );
duke@435 244 impdep2( 3, 0 );
duke@435 245
duke@435 246 jmpl( L3, L4, L5 );
duke@435 247 delayed()->nop();
duke@435 248 jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
duke@435 249 delayed()->nop();
duke@435 250
duke@435 251
duke@435 252 ldf( FloatRegisterImpl::S, O0, O1, F15 );
duke@435 253 ldf( FloatRegisterImpl::D, O2, -1, F14 );
duke@435 254
duke@435 255
duke@435 256 ldfsr( O3, O4 );
duke@435 257 ldfsr( O5, -1 );
duke@435 258 ldxfsr( O6, O7 );
duke@435 259 ldxfsr( I0, -1 );
duke@435 260
duke@435 261 ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
duke@435 262 ldfa( FloatRegisterImpl::Q, I3, -1, F36 );
duke@435 263
duke@435 264 ldsb( I4, I5, I6 );
duke@435 265 ldsb( I7, -1, G0 );
duke@435 266 ldsh( G1, G3, G4 );
duke@435 267 ldsh( G5, -1, G6 );
duke@435 268 ldsw( G7, L0, L1 );
duke@435 269 ldsw( L2, -1, L3 );
duke@435 270 ldub( L4, L5, L6 );
duke@435 271 ldub( L7, -1, O0 );
duke@435 272 lduh( O1, O2, O3 );
duke@435 273 lduh( O4, -1, O5 );
duke@435 274 lduw( O6, O7, G0 );
duke@435 275 lduw( G1, -1, G2 );
duke@435 276 ldx( G3, G4, G5 );
duke@435 277 ldx( G6, -1, G7 );
duke@435 278 ldd( I0, I1, I2 );
duke@435 279 ldd( I3, -1, I4 );
duke@435 280
duke@435 281 ldsba( I5, I6, 2, I7 );
duke@435 282 ldsba( L0, -1, L1 );
duke@435 283 ldsha( L2, L3, 3, L4 );
duke@435 284 ldsha( L5, -1, L6 );
duke@435 285 ldswa( L7, O0, (1 << 8) - 1, O1 );
duke@435 286 ldswa( O2, -1, O3 );
duke@435 287 lduba( O4, O5, 0, O6 );
duke@435 288 lduba( O7, -1, I0 );
duke@435 289 lduha( I1, I2, 1, I3 );
duke@435 290 lduha( I4, -1, I5 );
duke@435 291 lduwa( I6, I7, 2, L0 );
duke@435 292 lduwa( L1, -1, L2 );
duke@435 293 ldxa( L3, L4, 3, L5 );
duke@435 294 ldxa( L6, -1, L7 );
duke@435 295 ldda( G0, G1, 4, G2 );
duke@435 296 ldda( G3, -1, G4 );
duke@435 297
duke@435 298 ldstub( G5, G6, G7 );
duke@435 299 ldstub( O0, -1, O1 );
duke@435 300
duke@435 301 ldstuba( O2, O3, 5, O4 );
duke@435 302 ldstuba( O5, -1, O6 );
duke@435 303
duke@435 304 and3( I0, L0, O0 );
duke@435 305 and3( G7, -1, O7 );
duke@435 306 andcc( L2, I2, G2 );
duke@435 307 andcc( L4, -1, G4 );
duke@435 308 andn( I5, I6, I7 );
duke@435 309 andn( I6, -1, I7 );
duke@435 310 andncc( I5, I6, I7 );
duke@435 311 andncc( I7, -1, I6 );
duke@435 312 or3( I5, I6, I7 );
duke@435 313 or3( I7, -1, I6 );
duke@435 314 orcc( I5, I6, I7 );
duke@435 315 orcc( I7, -1, I6 );
duke@435 316 orn( I5, I6, I7 );
duke@435 317 orn( I7, -1, I6 );
duke@435 318 orncc( I5, I6, I7 );
duke@435 319 orncc( I7, -1, I6 );
duke@435 320 xor3( I5, I6, I7 );
duke@435 321 xor3( I7, -1, I6 );
duke@435 322 xorcc( I5, I6, I7 );
duke@435 323 xorcc( I7, -1, I6 );
duke@435 324 xnor( I5, I6, I7 );
duke@435 325 xnor( I7, -1, I6 );
duke@435 326 xnorcc( I5, I6, I7 );
duke@435 327 xnorcc( I7, -1, I6 );
duke@435 328
duke@435 329 membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
duke@435 330 membar( StoreStore );
duke@435 331 membar( LoadStore );
duke@435 332 membar( StoreLoad );
duke@435 333 membar( LoadLoad );
duke@435 334 membar( Sync );
duke@435 335 membar( MemIssue );
duke@435 336 membar( Lookaside );
duke@435 337
duke@435 338 fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
duke@435 339 fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );
duke@435 340
duke@435 341 movcc( overflowClear, false, icc, I6, L4 );
duke@435 342 movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );
duke@435 343
duke@435 344 movr( rc_nz, I5, I6, I7 );
duke@435 345 movr( rc_gz, L1, -1, L2 );
duke@435 346
duke@435 347 mulx( I5, I6, I7 );
duke@435 348 mulx( I7, -1, I6 );
duke@435 349 sdivx( I5, I6, I7 );
duke@435 350 sdivx( I7, -1, I6 );
duke@435 351 udivx( I5, I6, I7 );
duke@435 352 udivx( I7, -1, I6 );
duke@435 353
duke@435 354 umul( I5, I6, I7 );
duke@435 355 umul( I7, -1, I6 );
duke@435 356 smul( I5, I6, I7 );
duke@435 357 smul( I7, -1, I6 );
duke@435 358 umulcc( I5, I6, I7 );
duke@435 359 umulcc( I7, -1, I6 );
duke@435 360 smulcc( I5, I6, I7 );
duke@435 361 smulcc( I7, -1, I6 );
duke@435 362
duke@435 363 mulscc( I5, I6, I7 );
duke@435 364 mulscc( I7, -1, I6 );
duke@435 365
duke@435 366 nop();
duke@435 367
duke@435 368
duke@435 369 popc( G0, G1);
duke@435 370 popc( -1, G2);
duke@435 371
duke@435 372 prefetch( L1, L2, severalReads );
duke@435 373 prefetch( L3, -1, oneRead );
duke@435 374 prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
duke@435 375 prefetcha( G2, -1, oneWrite );
duke@435 376
duke@435 377 rett( I7, I7);
duke@435 378 delayed()->nop();
duke@435 379 rett( G0, -1, relocInfo::none);
duke@435 380 delayed()->nop();
duke@435 381
duke@435 382 save( I5, I6, I7 );
duke@435 383 save( I7, -1, I6 );
duke@435 384 restore( I5, I6, I7 );
duke@435 385 restore( I7, -1, I6 );
duke@435 386
duke@435 387 saved();
duke@435 388 restored();
duke@435 389
duke@435 390 sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));
duke@435 391
duke@435 392 sll( I5, I6, I7 );
duke@435 393 sll( I7, 31, I6 );
duke@435 394 srl( I5, I6, I7 );
duke@435 395 srl( I7, 0, I6 );
duke@435 396 sra( I5, I6, I7 );
duke@435 397 sra( I7, 30, I6 );
duke@435 398 sllx( I5, I6, I7 );
duke@435 399 sllx( I7, 63, I6 );
duke@435 400 srlx( I5, I6, I7 );
duke@435 401 srlx( I7, 0, I6 );
duke@435 402 srax( I5, I6, I7 );
duke@435 403 srax( I7, 62, I6 );
duke@435 404
duke@435 405 sir( -1 );
duke@435 406
duke@435 407 stbar();
duke@435 408
duke@435 409 stf( FloatRegisterImpl::Q, F40, G0, I7 );
duke@435 410 stf( FloatRegisterImpl::S, F18, I3, -1 );
duke@435 411
duke@435 412 stfsr( L1, L2 );
duke@435 413 stfsr( I7, -1 );
duke@435 414 stxfsr( I6, I5 );
duke@435 415 stxfsr( L4, -1 );
duke@435 416
duke@435 417 stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
duke@435 418 stfa( FloatRegisterImpl::Q, F44, G0, -1 );
duke@435 419
duke@435 420 stb( L5, O2, I7 );
duke@435 421 stb( I7, I6, -1 );
duke@435 422 sth( L5, O2, I7 );
duke@435 423 sth( I7, I6, -1 );
duke@435 424 stw( L5, O2, I7 );
duke@435 425 stw( I7, I6, -1 );
duke@435 426 stx( L5, O2, I7 );
duke@435 427 stx( I7, I6, -1 );
duke@435 428 std( L5, O2, I7 );
duke@435 429 std( I7, I6, -1 );
duke@435 430
duke@435 431 stba( L5, O2, I7, 8 );
duke@435 432 stba( I7, I6, -1 );
duke@435 433 stha( L5, O2, I7, 9 );
duke@435 434 stha( I7, I6, -1 );
duke@435 435 stwa( L5, O2, I7, 0 );
duke@435 436 stwa( I7, I6, -1 );
duke@435 437 stxa( L5, O2, I7, 11 );
duke@435 438 stxa( I7, I6, -1 );
duke@435 439 stda( L5, O2, I7, 12 );
duke@435 440 stda( I7, I6, -1 );
duke@435 441
duke@435 442 sub( I5, I6, I7 );
duke@435 443 sub( I7, -1, I6 );
duke@435 444 subcc( I5, I6, I7 );
duke@435 445 subcc( I7, -1, I6 );
duke@435 446 subc( I5, I6, I7 );
duke@435 447 subc( I7, -1, I6 );
duke@435 448 subccc( I5, I6, I7 );
duke@435 449 subccc( I7, -1, I6 );
duke@435 450
duke@435 451 swap( I5, I6, I7 );
duke@435 452 swap( I7, -1, I6 );
duke@435 453
duke@435 454 swapa( G0, G1, 13, G2 );
duke@435 455 swapa( I7, -1, I6 );
duke@435 456
duke@435 457 taddcc( I5, I6, I7 );
duke@435 458 taddcc( I7, -1, I6 );
duke@435 459 taddcctv( I5, I6, I7 );
duke@435 460 taddcctv( I7, -1, I6 );
duke@435 461
duke@435 462 tsubcc( I5, I6, I7 );
duke@435 463 tsubcc( I7, -1, I6 );
duke@435 464 tsubcctv( I5, I6, I7 );
duke@435 465 tsubcctv( I7, -1, I6 );
duke@435 466
duke@435 467 trap( overflowClear, xcc, G0, G1 );
duke@435 468 trap( lessEqual, icc, I7, 17 );
duke@435 469
duke@435 470 bind(lbl2);
duke@435 471 bind(lbl3);
duke@435 472
duke@435 473 code()->decode();
duke@435 474 }
duke@435 475
duke@435 476 // Generate a bunch 'o stuff unique to V8
duke@435 477 void Assembler::test_v8_onlys() {
duke@435 478 Label lbl1;
duke@435 479
duke@435 480 cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
duke@435 481 delayed()->nop();
duke@435 482 cb( cp_never, true, lbl1);
duke@435 483 delayed()->nop();
duke@435 484
duke@435 485 cpop1(1, 2, 3, 4);
duke@435 486 cpop2(5, 6, 7, 8);
duke@435 487
duke@435 488 ldc( I0, I1, 31);
duke@435 489 ldc( I2, -1, 0);
duke@435 490
duke@435 491 lddc( I4, I4, 30);
duke@435 492 lddc( I6, 0, 1 );
duke@435 493
duke@435 494 ldcsr( L0, L1, 0);
duke@435 495 ldcsr( L1, (1 << 12) - 1, 17 );
duke@435 496
duke@435 497 stc( 31, L4, L5);
duke@435 498 stc( 30, L6, -(1 << 12) );
duke@435 499
duke@435 500 stdc( 0, L7, G0);
duke@435 501 stdc( 1, G1, 0 );
duke@435 502
duke@435 503 stcsr( 16, G2, G3);
duke@435 504 stcsr( 17, G4, 1 );
duke@435 505
duke@435 506 stdcq( 4, G5, G6);
duke@435 507 stdcq( 5, G7, -1 );
duke@435 508
duke@435 509 bind(lbl1);
duke@435 510
duke@435 511 code()->decode();
duke@435 512 }
duke@435 513 #endif
duke@435 514
duke@435 515 // Implementation of MacroAssembler
duke@435 516
duke@435 517 void MacroAssembler::null_check(Register reg, int offset) {
duke@435 518 if (needs_explicit_null_check((intptr_t)offset)) {
duke@435 519 // provoke OS NULL exception if reg = NULL by
duke@435 520 // accessing M[reg] w/o changing any registers
duke@435 521 ld_ptr(reg, 0, G0);
duke@435 522 }
duke@435 523 else {
duke@435 524 // nothing to do, (later) access of M[reg + offset]
duke@435 525 // will provoke OS NULL exception if reg = NULL
duke@435 526 }
duke@435 527 }
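// Illustrative usage (an assumption for illustration, not taken from this
// file): a caller about to load a field at a small offset can simply call
// null_check(Rreceiver, field_offset) -- the Rreceiver name is hypothetical --
// and rely on the later access to fault if the receiver is NULL; only offsets
// that needs_explicit_null_check() rejects cause the explicit
// "ld_ptr(reg, 0, G0)" probe above to be emitted.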
duke@435 528
duke@435 529 // Ring buffer jumps
duke@435 530
duke@435 531 #ifndef PRODUCT
duke@435 532 void MacroAssembler::ret( bool trace ) { if (trace) {
duke@435 533 mov(I7, O7); // traceable register
duke@435 534 JMP(O7, 2 * BytesPerInstWord);
duke@435 535 } else {
duke@435 536 jmpl( I7, 2 * BytesPerInstWord, G0 );
duke@435 537 }
duke@435 538 }
duke@435 539
duke@435 540 void MacroAssembler::retl( bool trace ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
duke@435 541 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
duke@435 542 #endif /* PRODUCT */
duke@435 543
duke@435 544
duke@435 545 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
duke@435 546 assert_not_delayed();
duke@435 547 // This can only be traceable if r1 & r2 are visible after a window save
duke@435 548 if (TraceJumps) {
duke@435 549 #ifndef PRODUCT
duke@435 550 save_frame(0);
duke@435 551 verify_thread();
duke@435 552 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
duke@435 553 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
duke@435 554 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
duke@435 555 add(O2, O1, O1);
duke@435 556
duke@435 557 add(r1->after_save(), r2->after_save(), O2);
duke@435 558 set((intptr_t)file, O3);
duke@435 559 set(line, O4);
duke@435 560 Label L;
duke@435 561 // get nearby pc, store jmp target
duke@435 562 call(L, relocInfo::none); // No relocation for call to pc+0x8
duke@435 563 delayed()->st(O2, O1, 0);
duke@435 564 bind(L);
duke@435 565
duke@435 566 // store nearby pc
duke@435 567 st(O7, O1, sizeof(intptr_t));
duke@435 568 // store file
duke@435 569 st(O3, O1, 2*sizeof(intptr_t));
duke@435 570 // store line
duke@435 571 st(O4, O1, 3*sizeof(intptr_t));
duke@435 572 add(O0, 1, O0);
duke@435 573 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
duke@435 574 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
duke@435 575 restore();
duke@435 576 #endif /* PRODUCT */
duke@435 577 }
duke@435 578 jmpl(r1, r2, G0);
duke@435 579 }
duke@435 580 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
duke@435 581 assert_not_delayed();
duke@435 582 // This can only be traceable if r1 is visible after a window save
duke@435 583 if (TraceJumps) {
duke@435 584 #ifndef PRODUCT
duke@435 585 save_frame(0);
duke@435 586 verify_thread();
duke@435 587 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
duke@435 588 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
duke@435 589 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
duke@435 590 add(O2, O1, O1);
duke@435 591
duke@435 592 add(r1->after_save(), offset, O2);
duke@435 593 set((intptr_t)file, O3);
duke@435 594 set(line, O4);
duke@435 595 Label L;
duke@435 596 // get nearby pc, store jmp target
duke@435 597 call(L, relocInfo::none); // No relocation for call to pc+0x8
duke@435 598 delayed()->st(O2, O1, 0);
duke@435 599 bind(L);
duke@435 600
duke@435 601 // store nearby pc
duke@435 602 st(O7, O1, sizeof(intptr_t));
duke@435 603 // store file
duke@435 604 st(O3, O1, 2*sizeof(intptr_t));
duke@435 605 // store line
duke@435 606 st(O4, O1, 3*sizeof(intptr_t));
duke@435 607 add(O0, 1, O0);
duke@435 608 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
duke@435 609 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
duke@435 610 restore();
duke@435 611 #endif /* PRODUCT */
duke@435 612 }
duke@435 613 jmp(r1, offset);
duke@435 614 }
duke@435 615
duke@435 616 // This code sequence is relocatable to any address, even on LP64.
duke@435 617 void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
duke@435 618 assert_not_delayed();
duke@435 619 // Force fixed length sethi because NativeJump and NativeFarCall don't handle
duke@435 620 // variable length instruction streams.
duke@435 621 sethi(a, /*ForceRelocatable=*/ true);
duke@435 622 if (TraceJumps) {
duke@435 623 #ifndef PRODUCT
duke@435 624 // Must do the add here so relocation can find the remainder of the
duke@435 625 // value to be relocated.
duke@435 626 add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
duke@435 627 save_frame(0);
duke@435 628 verify_thread();
duke@435 629 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
duke@435 630 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
duke@435 631 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
duke@435 632 add(O2, O1, O1);
duke@435 633
duke@435 634 set((intptr_t)file, O3);
duke@435 635 set(line, O4);
duke@435 636 Label L;
duke@435 637
duke@435 638 // get nearby pc, store jmp target
duke@435 639 call(L, relocInfo::none); // No relocation for call to pc+0x8
duke@435 640 delayed()->st(a.base()->after_save(), O1, 0);
duke@435 641 bind(L);
duke@435 642
duke@435 643 // store nearby pc
duke@435 644 st(O7, O1, sizeof(intptr_t));
duke@435 645 // store file
duke@435 646 st(O3, O1, 2*sizeof(intptr_t));
duke@435 647 // store line
duke@435 648 st(O4, O1, 3*sizeof(intptr_t));
duke@435 649 add(O0, 1, O0);
duke@435 650 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
duke@435 651 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
duke@435 652 restore();
duke@435 653 jmpl(a.base(), G0, d);
duke@435 654 #else
duke@435 655 jmpl(a, d, offset);
duke@435 656 #endif /* PRODUCT */
duke@435 657 } else {
duke@435 658 jmpl(a, d, offset);
duke@435 659 }
duke@435 660 }
duke@435 661
duke@435 662 void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
duke@435 663 jumpl( a, G0, offset, file, line );
duke@435 664 }
duke@435 665
duke@435 666
duke@435 667 // Convert to C varargs format
duke@435 668 void MacroAssembler::set_varargs( Argument inArg, Register d ) {
duke@435 669 // spill register-resident args to their memory slots
duke@435 670 // (SPARC calling convention requires callers to have already preallocated these)
duke@435 671 // Note that the inArg might in fact be an outgoing argument,
duke@435 672 // if a leaf routine or stub does some tricky argument shuffling.
duke@435 673 // This routine must work even though one of the saved arguments
duke@435 674 // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
duke@435 675 for (Argument savePtr = inArg;
duke@435 676 savePtr.is_register();
duke@435 677 savePtr = savePtr.successor()) {
duke@435 678 st_ptr(savePtr.as_register(), savePtr.address_in_frame());
duke@435 679 }
duke@435 680 // return the address of the first memory slot
duke@435 681 add(inArg.address_in_frame(), d);
duke@435 682 }
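// Illustrative note (added commentary, not part of the original source): after
// set_varargs() runs, every register-resident argument from inArg onward has
// been spilled to its caller-preallocated stack slot, and d holds the address
// of inArg's slot -- the start of a contiguous, memory-based argument list
// that C varargs code can walk.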
duke@435 683
duke@435 684 // Conditional breakpoint (for assertion checks in assembly code)
duke@435 685 void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
duke@435 686 trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
duke@435 687 }
duke@435 688
duke@435 689 // We want to use ST_BREAKPOINT here, but the debugger is confused by it.
duke@435 690 void MacroAssembler::breakpoint_trap() {
duke@435 691 trap(ST_RESERVED_FOR_USER_0);
duke@435 692 }
duke@435 693
duke@435 694 // flush windows (except current) using flushw instruction if avail.
duke@435 695 void MacroAssembler::flush_windows() {
duke@435 696 if (VM_Version::v9_instructions_work()) flushw();
duke@435 697 else flush_windows_trap();
duke@435 698 }
duke@435 699
duke@435 700 // Write serialization page so VM thread can do a pseudo remote membar
duke@435 701 // We use the current thread pointer to calculate a thread specific
duke@435 702 // offset to write to within the page. This minimizes bus traffic
duke@435 703 // due to cache line collision.
duke@435 704 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
duke@435 705 Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
duke@435 706 srl(thread, os::get_serialize_page_shift_count(), tmp2);
duke@435 707 if (Assembler::is_simm13(os::vm_page_size())) {
duke@435 708 and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
duke@435 709 }
duke@435 710 else {
duke@435 711 set((os::vm_page_size() - sizeof(int)), tmp1);
duke@435 712 and3(tmp2, tmp1, tmp2);
duke@435 713 }
duke@435 714 load_address(mem_serialize_page);
duke@435 715 st(G0, tmp1, tmp2);
duke@435 716 }
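// Worked note on the offset computation above (added commentary, not part of
// the original source): the thread pointer is shifted right by the serialize
// page shift count and masked to (vm_page_size - sizeof(int)), giving an
// int-aligned, thread-specific offset into the serialization page; the final
// st(G0, tmp1, tmp2) then tends to hit different cache lines for different
// threads, which is what the "minimizes bus traffic" comment refers to.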
duke@435 717
duke@435 718
duke@435 719
duke@435 720 void MacroAssembler::enter() {
duke@435 721 Unimplemented();
duke@435 722 }
duke@435 723
duke@435 724 void MacroAssembler::leave() {
duke@435 725 Unimplemented();
duke@435 726 }
duke@435 727
duke@435 728 void MacroAssembler::mult(Register s1, Register s2, Register d) {
duke@435 729 if(VM_Version::v9_instructions_work()) {
duke@435 730 mulx (s1, s2, d);
duke@435 731 } else {
duke@435 732 smul (s1, s2, d);
duke@435 733 }
duke@435 734 }
duke@435 735
duke@435 736 void MacroAssembler::mult(Register s1, int simm13a, Register d) {
duke@435 737 if(VM_Version::v9_instructions_work()) {
duke@435 738 mulx (s1, simm13a, d);
duke@435 739 } else {
duke@435 740 smul (s1, simm13a, d);
duke@435 741 }
duke@435 742 }
duke@435 743
duke@435 744
duke@435 745 #ifdef ASSERT
duke@435 746 void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
duke@435 747 const Register s1 = G3_scratch;
duke@435 748 const Register s2 = G4_scratch;
duke@435 749 Label get_psr_test;
duke@435 750 // Get the condition codes the V8 way.
duke@435 751 read_ccr_trap(s1);
duke@435 752 mov(ccr_save, s2);
duke@435 753 // This is a test of V8 which has icc but not xcc
duke@435 754 // so mask off the xcc bits
duke@435 755 and3(s2, 0xf, s2);
duke@435 756 // Compare condition codes from the V8 and V9 ways.
duke@435 757 subcc(s2, s1, G0);
duke@435 758 br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
duke@435 759 delayed()->breakpoint_trap();
duke@435 760 bind(get_psr_test);
duke@435 761 }
duke@435 762
duke@435 763 void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
duke@435 764 const Register s1 = G3_scratch;
duke@435 765 const Register s2 = G4_scratch;
duke@435 766 Label set_psr_test;
duke@435 767 // Write out the saved condition codes the V8 way
duke@435 768 write_ccr_trap(ccr_save, s1, s2);
duke@435 769 // Read back the condition codes using the V9 instruction
duke@435 770 rdccr(s1);
duke@435 771 mov(ccr_save, s2);
duke@435 772 // This is a test of V8 which has icc but not xcc
duke@435 773 // so mask off the xcc bits
duke@435 774 and3(s2, 0xf, s2);
duke@435 775 and3(s1, 0xf, s1);
duke@435 776 // Compare the V8 way with the V9 way.
duke@435 777 subcc(s2, s1, G0);
duke@435 778 br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
duke@435 779 delayed()->breakpoint_trap();
duke@435 780 bind(set_psr_test);
duke@435 781 }
duke@435 782 #else
duke@435 783 #define read_ccr_v8_assert(x)
duke@435 784 #define write_ccr_v8_assert(x)
duke@435 785 #endif // ASSERT
duke@435 786
duke@435 787 void MacroAssembler::read_ccr(Register ccr_save) {
duke@435 788 if (VM_Version::v9_instructions_work()) {
duke@435 789 rdccr(ccr_save);
duke@435 790 // Test code sequence used on V8. Do not move above rdccr.
duke@435 791 read_ccr_v8_assert(ccr_save);
duke@435 792 } else {
duke@435 793 read_ccr_trap(ccr_save);
duke@435 794 }
duke@435 795 }
duke@435 796
duke@435 797 void MacroAssembler::write_ccr(Register ccr_save) {
duke@435 798 if (VM_Version::v9_instructions_work()) {
duke@435 799 // Test code sequence used on V8. Do not move below wrccr.
duke@435 800 write_ccr_v8_assert(ccr_save);
duke@435 801 wrccr(ccr_save);
duke@435 802 } else {
duke@435 803 const Register temp_reg1 = G3_scratch;
duke@435 804 const Register temp_reg2 = G4_scratch;
duke@435 805 write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
duke@435 806 }
duke@435 807 }
duke@435 808
duke@435 809
duke@435 810 // Calls to C land
duke@435 811
duke@435 812 #ifdef ASSERT
duke@435 813 // a hook for debugging
duke@435 814 static Thread* reinitialize_thread() {
duke@435 815 return ThreadLocalStorage::thread();
duke@435 816 }
duke@435 817 #else
duke@435 818 #define reinitialize_thread ThreadLocalStorage::thread
duke@435 819 #endif
duke@435 820
duke@435 821 #ifdef ASSERT
duke@435 822 address last_get_thread = NULL;
duke@435 823 #endif
duke@435 824
duke@435 825 // call this when G2_thread is not known to be valid
duke@435 826 void MacroAssembler::get_thread() {
duke@435 827 save_frame(0); // to avoid clobbering O0
duke@435 828 mov(G1, L0); // avoid clobbering G1
duke@435 829 mov(G5_method, L1); // avoid clobbering G5
duke@435 830 mov(G3, L2); // avoid clobbering G3 also
duke@435 831 mov(G4, L5); // avoid clobbering G4
duke@435 832 #ifdef ASSERT
duke@435 833 Address last_get_thread_addr(L3, (address)&last_get_thread);
duke@435 834 sethi(last_get_thread_addr);
duke@435 835 inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
duke@435 836 st_ptr(L4, last_get_thread_addr);
duke@435 837 #endif
duke@435 838 call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
duke@435 839 delayed()->nop();
duke@435 840 mov(L0, G1);
duke@435 841 mov(L1, G5_method);
duke@435 842 mov(L2, G3);
duke@435 843 mov(L5, G4);
duke@435 844 restore(O0, 0, G2_thread);
duke@435 845 }
duke@435 846
duke@435 847 static Thread* verify_thread_subroutine(Thread* gthread_value) {
duke@435 848 Thread* correct_value = ThreadLocalStorage::thread();
duke@435 849 guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
duke@435 850 return correct_value;
duke@435 851 }
duke@435 852
duke@435 853 void MacroAssembler::verify_thread() {
duke@435 854 if (VerifyThread) {
duke@435 855 // NOTE: this chops off the heads of the 64-bit O registers.
duke@435 856 #ifdef CC_INTERP
duke@435 857 save_frame(0);
duke@435 858 #else
duke@435 859 // make sure G2_thread contains the right value
duke@435 860 save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
duke@435 861 mov(G1, L1); // avoid clobbering G1
duke@435 862 // G2 saved below
duke@435 863 mov(G3, L3); // avoid clobbering G3
duke@435 864 mov(G4, L4); // avoid clobbering G4
duke@435 865 mov(G5_method, L5); // avoid clobbering G5_method
duke@435 866 #endif /* CC_INTERP */
duke@435 867 #if defined(COMPILER2) && !defined(_LP64)
duke@435 868 // Save & restore possible 64-bit Long arguments in G-regs
duke@435 869 srlx(G1,32,L0);
duke@435 870 srlx(G4,32,L6);
duke@435 871 #endif
duke@435 872 call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
duke@435 873 delayed()->mov(G2_thread, O0);
duke@435 874
duke@435 875 mov(L1, G1); // Restore G1
duke@435 876 // G2 restored below
duke@435 877 mov(L3, G3); // restore G3
duke@435 878 mov(L4, G4); // restore G4
duke@435 879 mov(L5, G5_method); // restore G5_method
duke@435 880 #if defined(COMPILER2) && !defined(_LP64)
duke@435 881 // Save & restore possible 64-bit Long arguments in G-regs
duke@435 882 sllx(L0,32,G2); // Move old high G1 bits high in G2
duke@435 883 sllx(G1, 0,G1); // Clear current high G1 bits
duke@435 884 or3 (G1,G2,G1); // Recover 64-bit G1
duke@435 885 sllx(L6,32,G2); // Move old high G4 bits high in G2
duke@435 886 sllx(G4, 0,G4); // Clear current high G4 bits
duke@435 887 or3 (G4,G2,G4); // Recover 64-bit G4
duke@435 888 #endif
duke@435 889 restore(O0, 0, G2_thread);
duke@435 890 }
duke@435 891 }
duke@435 892
duke@435 893
duke@435 894 void MacroAssembler::save_thread(const Register thread_cache) {
duke@435 895 verify_thread();
duke@435 896 if (thread_cache->is_valid()) {
duke@435 897 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
duke@435 898 mov(G2_thread, thread_cache);
duke@435 899 }
duke@435 900 if (VerifyThread) {
duke@435 901 // smash G2_thread, as if the VM were about to anyway
duke@435 902 set(0x67676767, G2_thread);
duke@435 903 }
duke@435 904 }
duke@435 905
duke@435 906
duke@435 907 void MacroAssembler::restore_thread(const Register thread_cache) {
duke@435 908 if (thread_cache->is_valid()) {
duke@435 909 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
duke@435 910 mov(thread_cache, G2_thread);
duke@435 911 verify_thread();
duke@435 912 } else {
duke@435 913 // do it the slow way
duke@435 914 get_thread();
duke@435 915 }
duke@435 916 }
duke@435 917
duke@435 918
duke@435 919 // %%% maybe get rid of [re]set_last_Java_frame
duke@435 920 void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
duke@435 921 assert_not_delayed();
duke@435 922 Address flags(G2_thread,
duke@435 923 0,
duke@435 924 in_bytes(JavaThread::frame_anchor_offset()) +
duke@435 925 in_bytes(JavaFrameAnchor::flags_offset()));
duke@435 926 Address pc_addr(G2_thread,
duke@435 927 0,
duke@435 928 in_bytes(JavaThread::last_Java_pc_offset()));
duke@435 929
duke@435 930 // Always set last_Java_pc and flags first because once last_Java_sp is visible,
duke@435 931 // has_last_Java_frame is true and users will look at the rest of the fields.
duke@435 932 // (Note: flags should always be zero before we get here so doesn't need to be set.)
duke@435 933
duke@435 934 #ifdef ASSERT
duke@435 935 // Verify that flags was zeroed on return to Java
duke@435 936 Label PcOk;
duke@435 937 save_frame(0); // to avoid clobbering O0
duke@435 938 ld_ptr(pc_addr, L0);
duke@435 939 tst(L0);
duke@435 940 #ifdef _LP64
duke@435 941 brx(Assembler::zero, false, Assembler::pt, PcOk);
duke@435 942 #else
duke@435 943 br(Assembler::zero, false, Assembler::pt, PcOk);
duke@435 944 #endif // _LP64
duke@435 945 delayed() -> nop();
duke@435 946 stop("last_Java_pc not zeroed before leaving Java");
duke@435 947 bind(PcOk);
duke@435 948
duke@435 949 // Verify that flags was zeroed on return to Java
duke@435 950 Label FlagsOk;
duke@435 951 ld(flags, L0);
duke@435 952 tst(L0);
duke@435 953 br(Assembler::zero, false, Assembler::pt, FlagsOk);
duke@435 954 delayed() -> restore();
duke@435 955 stop("flags not zeroed before leaving Java");
duke@435 956 bind(FlagsOk);
duke@435 957 #endif /* ASSERT */
duke@435 958 //
duke@435 959 // When returning from calling out from Java mode the frame anchor's last_Java_pc
duke@435 960 // will always be set to NULL. It is set here so that if we are doing a call to
duke@435 961 // native (not VM) we capture the known pc and don't have to rely on the
duke@435 962 // native call having a standard frame linkage where we can find the pc.
duke@435 963
duke@435 964 if (last_Java_pc->is_valid()) {
duke@435 965 st_ptr(last_Java_pc, pc_addr);
duke@435 966 }
duke@435 967
duke@435 968 #ifdef _LP64
duke@435 969 #ifdef ASSERT
duke@435 970 // Make sure that we have an odd stack
duke@435 971 Label StackOk;
duke@435 972 andcc(last_java_sp, 0x01, G0);
duke@435 973 br(Assembler::notZero, false, Assembler::pt, StackOk);
duke@435 974 delayed() -> nop();
duke@435 975 stop("Stack Not Biased in set_last_Java_frame");
duke@435 976 bind(StackOk);
duke@435 977 #endif // ASSERT
duke@435 978 assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
duke@435 979 add( last_java_sp, STACK_BIAS, G4_scratch );
duke@435 980 st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
duke@435 981 #else
duke@435 982 st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
duke@435 983 #endif // _LP64
duke@435 984 }
duke@435 985
duke@435 986 void MacroAssembler::reset_last_Java_frame(void) {
duke@435 987 assert_not_delayed();
duke@435 988
duke@435 989 Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
duke@435 990 Address pc_addr(G2_thread,
duke@435 991 0,
duke@435 992 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
duke@435 993 Address flags(G2_thread,
duke@435 994 0,
duke@435 995 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
duke@435 996
duke@435 997 #ifdef ASSERT
duke@435 998 // check that it WAS previously set
duke@435 999 #ifdef CC_INTERP
duke@435 1000 save_frame(0);
duke@435 1001 #else
duke@435 1002 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
duke@435 1003 #endif /* CC_INTERP */
duke@435 1004 ld_ptr(sp_addr, L0);
duke@435 1005 tst(L0);
duke@435 1006 breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
duke@435 1007 restore();
duke@435 1008 #endif // ASSERT
duke@435 1009
duke@435 1010 st_ptr(G0, sp_addr);
duke@435 1011 // Always return last_Java_pc to zero
duke@435 1012 st_ptr(G0, pc_addr);
duke@435 1013 // Always null flags after return to Java
duke@435 1014 st(G0, flags);
duke@435 1015 }
duke@435 1016
duke@435 1017
duke@435 1018 void MacroAssembler::call_VM_base(
duke@435 1019 Register oop_result,
duke@435 1020 Register thread_cache,
duke@435 1021 Register last_java_sp,
duke@435 1022 address entry_point,
duke@435 1023 int number_of_arguments,
duke@435 1024 bool check_exceptions)
duke@435 1025 {
duke@435 1026 assert_not_delayed();
duke@435 1027
duke@435 1028 // determine last_java_sp register
duke@435 1029 if (!last_java_sp->is_valid()) {
duke@435 1030 last_java_sp = SP;
duke@435 1031 }
duke@435 1032 // debugging support
duke@435 1033 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
duke@435 1034
duke@435 1035 // 64-bit last_java_sp is biased!
duke@435 1036 set_last_Java_frame(last_java_sp, noreg);
duke@435 1037 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
duke@435 1038 save_thread(thread_cache);
duke@435 1039 // do the call
duke@435 1040 call(entry_point, relocInfo::runtime_call_type);
duke@435 1041 if (!VerifyThread)
duke@435 1042 delayed()->mov(G2_thread, O0); // pass thread as first argument
duke@435 1043 else
duke@435 1044 delayed()->nop(); // (thread already passed)
duke@435 1045 restore_thread(thread_cache);
duke@435 1046 reset_last_Java_frame();
duke@435 1047
duke@435 1048 // check for pending exceptions. use Gtemp as scratch register.
duke@435 1049 if (check_exceptions) {
duke@435 1050 check_and_forward_exception(Gtemp);
duke@435 1051 }
duke@435 1052
duke@435 1053 // get oop result if there is one and reset the value in the thread
duke@435 1054 if (oop_result->is_valid()) {
duke@435 1055 get_vm_result(oop_result);
duke@435 1056 }
duke@435 1057 }
duke@435 1058
duke@435 1059 void MacroAssembler::check_and_forward_exception(Register scratch_reg)
duke@435 1060 {
duke@435 1061 Label L;
duke@435 1062
duke@435 1063 check_and_handle_popframe(scratch_reg);
duke@435 1064 check_and_handle_earlyret(scratch_reg);
duke@435 1065
duke@435 1066 Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
duke@435 1067 ld_ptr(exception_addr, scratch_reg);
duke@435 1068 br_null(scratch_reg,false,pt,L);
duke@435 1069 delayed()->nop();
duke@435 1070 // we use O7 linkage so that forward_exception_entry has the issuing PC
duke@435 1071 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
duke@435 1072 delayed()->nop();
duke@435 1073 bind(L);
duke@435 1074 }
duke@435 1075
duke@435 1076
duke@435 1077 void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
duke@435 1078 }
duke@435 1079
duke@435 1080
duke@435 1081 void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
duke@435 1082 }
duke@435 1083
duke@435 1084
duke@435 1085 void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
duke@435 1086 call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
duke@435 1087 }
duke@435 1088
duke@435 1089
duke@435 1090 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
duke@435 1091 // O0 is reserved for the thread
duke@435 1092 mov(arg_1, O1);
duke@435 1093 call_VM(oop_result, entry_point, 1, check_exceptions);
duke@435 1094 }
duke@435 1095
duke@435 1096
duke@435 1097 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
duke@435 1098 // O0 is reserved for the thread
duke@435 1099 mov(arg_1, O1);
duke@435 1100 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
duke@435 1101 call_VM(oop_result, entry_point, 2, check_exceptions);
duke@435 1102 }
duke@435 1103
duke@435 1104
duke@435 1105 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
duke@435 1106 // O0 is reserved for the thread
duke@435 1107 mov(arg_1, O1);
duke@435 1108 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
duke@435 1109 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
duke@435 1110 call_VM(oop_result, entry_point, 3, check_exceptions);
duke@435 1111 }
duke@435 1112
duke@435 1113
duke@435 1114
duke@435 1115 // Note: The following call_VM overloadings are useful when a "save"
duke@435 1116 // has already been performed by a stub, and the last Java frame is
duke@435 1117 // the previous one. In that case, last_java_sp must be passed as FP
duke@435 1118 // instead of SP.
duke@435 1119
duke@435 1120
duke@435 1121 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
duke@435 1122 call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
duke@435 1123 }
duke@435 1124
duke@435 1125
duke@435 1126 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
duke@435 1127 // O0 is reserved for the thread
duke@435 1128 mov(arg_1, O1);
duke@435 1129 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
duke@435 1130 }
duke@435 1131
duke@435 1132
duke@435 1133 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
duke@435 1134 // O0 is reserved for the thread
duke@435 1135 mov(arg_1, O1);
duke@435 1136 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
duke@435 1137 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
duke@435 1138 }
duke@435 1139
duke@435 1140
duke@435 1141 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
duke@435 1142 // O0 is reserved for the thread
duke@435 1143 mov(arg_1, O1);
duke@435 1144 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
duke@435 1145 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
duke@435 1146 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
duke@435 1147 }
duke@435 1148
duke@435 1149
duke@435 1150
duke@435 1151 void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
duke@435 1152 assert_not_delayed();
duke@435 1153 save_thread(thread_cache);
duke@435 1154 // do the call
duke@435 1155 call(entry_point, relocInfo::runtime_call_type);
duke@435 1156 delayed()->nop();
duke@435 1157 restore_thread(thread_cache);
duke@435 1158 }
duke@435 1159
duke@435 1160
duke@435 1161 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
duke@435 1162 call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
duke@435 1163 }
duke@435 1164
duke@435 1165
duke@435 1166 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
duke@435 1167 mov(arg_1, O0);
duke@435 1168 call_VM_leaf(thread_cache, entry_point, 1);
duke@435 1169 }
duke@435 1170
duke@435 1171
duke@435 1172 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
duke@435 1173 mov(arg_1, O0);
duke@435 1174 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
duke@435 1175 call_VM_leaf(thread_cache, entry_point, 2);
duke@435 1176 }
duke@435 1177
duke@435 1178
duke@435 1179 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
duke@435 1180 mov(arg_1, O0);
duke@435 1181 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
duke@435 1182 mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
duke@435 1183 call_VM_leaf(thread_cache, entry_point, 3);
duke@435 1184 }
duke@435 1185
duke@435 1186
duke@435 1187 void MacroAssembler::get_vm_result(Register oop_result) {
duke@435 1188 verify_thread();
duke@435 1189 Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
duke@435 1190 ld_ptr( vm_result_addr, oop_result);
duke@435 1191 st_ptr(G0, vm_result_addr);
duke@435 1192 verify_oop(oop_result);
duke@435 1193 }
duke@435 1194
duke@435 1195
duke@435 1196 void MacroAssembler::get_vm_result_2(Register oop_result) {
duke@435 1197 verify_thread();
duke@435 1198 Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
duke@435 1199 ld_ptr(vm_result_addr_2, oop_result);
duke@435 1200 st_ptr(G0, vm_result_addr_2);
duke@435 1201 verify_oop(oop_result);
duke@435 1202 }
duke@435 1203
duke@435 1204
duke@435 1205 // We require that C code which does not return a value in vm_result will
duke@435 1206 // leave it undisturbed.
duke@435 1207 void MacroAssembler::set_vm_result(Register oop_result) {
duke@435 1208 verify_thread();
duke@435 1209 Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
duke@435 1210 verify_oop(oop_result);
duke@435 1211
duke@435 1212 # ifdef ASSERT
duke@435 1213 // Check that we are not overwriting any other oop.
duke@435 1214 #ifdef CC_INTERP
duke@435 1215 save_frame(0);
duke@435 1216 #else
duke@435 1217 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
duke@435 1218 #endif /* CC_INTERP */
duke@435 1219 ld_ptr(vm_result_addr, L0);
duke@435 1220 tst(L0);
duke@435 1221 restore();
duke@435 1222 breakpoint_trap(notZero, Assembler::ptr_cc);
duke@435 1223 // }
duke@435 1224 # endif
duke@435 1225
duke@435 1226 st_ptr(oop_result, vm_result_addr);
duke@435 1227 }
duke@435 1228
duke@435 1229
ysr@777 1230 void MacroAssembler::card_table_write(jbyte* byte_map_base,
ysr@777 1231 Register tmp, Register obj) {
duke@435 1232 #ifdef _LP64
duke@435 1233 srlx(obj, CardTableModRefBS::card_shift, obj);
duke@435 1234 #else
duke@435 1235 srl(obj, CardTableModRefBS::card_shift, obj);
duke@435 1236 #endif
duke@435 1237 assert( tmp != obj, "need separate temp reg");
ysr@777 1238 Address rs(tmp, (address)byte_map_base);
duke@435 1239 load_address(rs);
duke@435 1240 stb(G0, rs.base(), obj);
duke@435 1241 }
duke@435 1242
duke@435 1243 // %%% Note: The following six instructions have been moved,
duke@435 1244 // unchanged, from assembler_sparc.inline.hpp.
duke@435 1245 // They will be refactored at a later date.
duke@435 1246
duke@435 1247 void MacroAssembler::sethi(intptr_t imm22a,
duke@435 1248 Register d,
duke@435 1249 bool ForceRelocatable,
duke@435 1250 RelocationHolder const& rspec) {
duke@435 1251 Address adr( d, (address)imm22a, rspec );
duke@435 1252 MacroAssembler::sethi( adr, ForceRelocatable );
duke@435 1253 }
duke@435 1254
duke@435 1255
duke@435 1256 void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
duke@435 1257 address save_pc;
duke@435 1258 int shiftcnt;
duke@435 1259 // if addr of local, do not need to load it
duke@435 1260 assert(a.base() != FP && a.base() != SP, "just use ld or st for locals");
duke@435 1261 #ifdef _LP64
duke@435 1262 # ifdef CHECK_DELAY
duke@435 1263 assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
duke@435 1264 # endif
duke@435 1265 v9_dep();
duke@435 1266 // ForceRelocatable = 1;
duke@435 1267 save_pc = pc();
duke@435 1268 if (a.hi32() == 0 && a.low32() >= 0) {
duke@435 1269 Assembler::sethi(a.low32(), a.base(), a.rspec());
duke@435 1270 }
duke@435 1271 else if (a.hi32() == -1) {
duke@435 1272 Assembler::sethi(~a.low32(), a.base(), a.rspec());
duke@435 1273 xor3(a.base(), ~low10(~0), a.base());
duke@435 1274 }
duke@435 1275 else {
duke@435 1276 Assembler::sethi(a.hi32(), a.base(), a.rspec() ); // 22
duke@435 1277 if ( a.hi32() & 0x3ff ) // Any bits?
duke@435 1278 or3( a.base(), a.hi32() & 0x3ff ,a.base() ); // High 32 bits are now in low 32
duke@435 1279 if ( a.low32() & 0xFFFFFC00 ) { // done?
duke@435 1280 if( (a.low32() >> 20) & 0xfff ) { // Any bits set?
duke@435 1281 sllx(a.base(), 12, a.base()); // Make room for next 12 bits
duke@435 1282 or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12
duke@435 1283 shiftcnt = 0; // We already shifted
duke@435 1284 }
duke@435 1285 else
duke@435 1286 shiftcnt = 12;
duke@435 1287 if( (a.low32() >> 10) & 0x3ff ) {
duke@435 1288 sllx(a.base(), shiftcnt+10, a.base());// Make room for last 10 bits
duke@435 1289 or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10
duke@435 1290 shiftcnt = 0;
duke@435 1291 }
duke@435 1292 else
duke@435 1293 shiftcnt = 10;
duke@435 1294 sllx(a.base(), shiftcnt+10 , a.base()); // Shift leaving disp field 0'd
duke@435 1295 }
duke@435 1296 else
duke@435 1297 sllx( a.base(), 32, a.base() );
duke@435 1298 }
duke@435 1299 // Pad out the instruction sequence so it can be
duke@435 1300 // patched later.
duke@435 1301 if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
duke@435 1302 a.rtype() != relocInfo::runtime_call_type) ) {
duke@435 1303 while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
duke@435 1304 nop();
duke@435 1305 }
duke@435 1306 #else
duke@435 1307 Assembler::sethi(a.hi(), a.base(), a.rspec());
duke@435 1308 #endif
duke@435 1309
duke@435 1310 }
duke@435 1311
duke@435 1312 int MacroAssembler::size_of_sethi(address a, bool worst_case) {
duke@435 1313 #ifdef _LP64
duke@435 1314 if (worst_case) return 7;
duke@435 1315 intptr_t iaddr = (intptr_t)a;
duke@435 1316 int hi32 = (int)(iaddr >> 32);
duke@435 1317 int lo32 = (int)(iaddr);
duke@435 1318 int inst_count;
duke@435 1319 if (hi32 == 0 && lo32 >= 0)
duke@435 1320 inst_count = 1;
duke@435 1321 else if (hi32 == -1)
duke@435 1322 inst_count = 2;
duke@435 1323 else {
duke@435 1324 inst_count = 2;
duke@435 1325 if ( hi32 & 0x3ff )
duke@435 1326 inst_count++;
duke@435 1327 if ( lo32 & 0xFFFFFC00 ) {
duke@435 1328 if( (lo32 >> 20) & 0xfff ) inst_count += 2;
duke@435 1329 if( (lo32 >> 10) & 0x3ff ) inst_count += 2;
duke@435 1330 }
duke@435 1331 }
duke@435 1332 return BytesPerInstWord * inst_count;
duke@435 1333 #else
duke@435 1334 return BytesPerInstWord;
duke@435 1335 #endif
duke@435 1336 }
duke@435 1337
duke@435 1338 int MacroAssembler::worst_case_size_of_set() {
duke@435 1339 return size_of_sethi(NULL, true) + 1;
duke@435 1340 }
duke@435 1341
duke@435 1342 void MacroAssembler::set(intptr_t value, Register d,
duke@435 1343 RelocationHolder const& rspec) {
duke@435 1344 Address val( d, (address)value, rspec);
duke@435 1345
duke@435 1346 if ( rspec.type() == relocInfo::none ) {
duke@435 1347 // can optimize
duke@435 1348 if (-4096 <= value && value <= 4095) {
duke@435 1349 or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
duke@435 1350 return;
duke@435 1351 }
duke@435 1352 if (inv_hi22(hi22(value)) == value) {
duke@435 1353 sethi(val);
duke@435 1354 return;
duke@435 1355 }
duke@435 1356 }
duke@435 1357 assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
duke@435 1358 sethi( val );
duke@435 1359 if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
duke@435 1360 add( d, value & 0x3ff, d, rspec);
duke@435 1361 }
duke@435 1362 }
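// Illustrative decomposition of set() above (added commentary, not part of the
// original source):
//   value in [-4096, 4095]                        -> a single or3(G0, value, d)
//   value fits a single sethi (low 10 bits zero),
//   no relocation                                 -> sethi only
//   otherwise                                     -> sethi followed by add(d, value & 0x3ff, d)
// Relocated values always take the sethi/add form so the sequence can be
// patched after the fact.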
duke@435 1363
duke@435 1364 void MacroAssembler::setsw(int value, Register d,
duke@435 1365 RelocationHolder const& rspec) {
duke@435 1366 Address val( d, (address)value, rspec);
duke@435 1367 if ( rspec.type() == relocInfo::none ) {
duke@435 1368 // can optimize
duke@435 1369 if (-4096 <= value && value <= 4095) {
duke@435 1370 or3(G0, value, d);
duke@435 1371 return;
duke@435 1372 }
duke@435 1373 if (inv_hi22(hi22(value)) == value) {
duke@435 1374 sethi( val );
duke@435 1375 #ifndef _LP64
duke@435 1376 if ( value < 0 ) {
duke@435 1377 assert_not_delayed();
duke@435 1378 sra (d, G0, d);
duke@435 1379 }
duke@435 1380 #endif
duke@435 1381 return;
duke@435 1382 }
duke@435 1383 }
duke@435 1384 assert_not_delayed();
duke@435 1385 sethi( val );
duke@435 1386 add( d, value & 0x3ff, d, rspec);
duke@435 1387
duke@435 1388 // (A negative value could be loaded in 2 insns with sethi/xor,
duke@435 1389 // but it would take a more complex relocation.)
duke@435 1390 #ifndef _LP64
duke@435 1391 if ( value < 0)
duke@435 1392 sra(d, G0, d);
duke@435 1393 #endif
duke@435 1394 }
duke@435 1395
duke@435 1396 // %%% End of moved six set instructions.
duke@435 1397
duke@435 1398
duke@435 1399 void MacroAssembler::set64(jlong value, Register d, Register tmp) {
duke@435 1400 assert_not_delayed();
duke@435 1401 v9_dep();
duke@435 1402
duke@435 1403 int hi = (int)(value >> 32);
duke@435 1404 int lo = (int)(value & ~0);
duke@435 1405 // (Matcher::isSimpleConstant64 knows about the following optimizations.)
duke@435 1406 if (Assembler::is_simm13(lo) && value == lo) {
duke@435 1407 or3(G0, lo, d);
duke@435 1408 } else if (hi == 0) {
duke@435 1409 Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
duke@435 1410 if (low10(lo) != 0)
duke@435 1411 or3(d, low10(lo), d);
duke@435 1412 }
duke@435 1413 else if (hi == -1) {
duke@435 1414 Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
duke@435 1415 xor3(d, low10(lo) ^ ~low10(~0), d);
duke@435 1416 }
duke@435 1417 else if (lo == 0) {
duke@435 1418 if (Assembler::is_simm13(hi)) {
duke@435 1419 or3(G0, hi, d);
duke@435 1420 } else {
duke@435 1421 Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
duke@435 1422 if (low10(hi) != 0)
duke@435 1423 or3(d, low10(hi), d);
duke@435 1424 }
duke@435 1425 sllx(d, 32, d);
duke@435 1426 }
duke@435 1427 else {
duke@435 1428 Assembler::sethi(hi, tmp);
duke@435 1429 Assembler::sethi(lo, d); // macro assembler version sign-extends
duke@435 1430 if (low10(hi) != 0)
duke@435 1431 or3 (tmp, low10(hi), tmp);
duke@435 1432 if (low10(lo) != 0)
duke@435 1433 or3 ( d, low10(lo), d);
duke@435 1434 sllx(tmp, 32, tmp);
duke@435 1435 or3 (d, tmp, d);
duke@435 1436 }
duke@435 1437 }
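// A sketch of the emitted sequences, assuming d == G1 and tmp == G4 (registers
// chosen arbitrarily for illustration; mnemonics approximate):
//   set64(0x0000000087654321, G1, G4);  // hi == 0 case:
//                                       //   sethi %hi(0x87654321), %g1
//                                       //   or    %g1, 0x321, %g1
//   set64(0x123456789abcdef0, G1, G4);  // general case (6 instructions):
//                                       //   sethi %hi(0x12345678), %g4
//                                       //   sethi %hi(0x9abcdef0), %g1
//                                       //   or    %g4, 0x278, %g4
//                                       //   or    %g1, 0x2f0, %g1
//                                       //   sllx  %g4, 32, %g4
//                                       //   or    %g1, %g4, %g1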
duke@435 1438
duke@435 1439 // compute size in bytes of sparc frame, given
duke@435 1440 // number of extraWords
duke@435 1441 int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
duke@435 1442
duke@435 1443 int nWords = frame::memory_parameter_word_sp_offset;
duke@435 1444
duke@435 1445 nWords += extraWords;
duke@435 1446
duke@435 1447 if (nWords & 1) ++nWords; // round up to double-word
duke@435 1448
duke@435 1449 return nWords * BytesPerWord;
duke@435 1450 }
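// A hedged worked example: frame::memory_parameter_word_sp_offset is a build-dependent
// constant from frame_sparc.hpp (assumed here to be 23 words in the 32-bit build:
// 16 register-save words + 1 aggregate-return word + 6 callee register-argument words).
// Under that assumption, total_frame_size_in_bytes(4) gives 23 + 4 = 27 words,
// rounded up to 28 for doubleword alignment, i.e. 28 * BytesPerWord bytes.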
duke@435 1451
duke@435 1452
duke@435 1453 // save_frame: given number of "extra" words in frame,
duke@435 1454 // issue the appropriate save instruction (p. 200, V8 manual)
duke@435 1455
duke@435 1456 void MacroAssembler::save_frame(int extraWords = 0) {
duke@435 1457 int delta = -total_frame_size_in_bytes(extraWords);
duke@435 1458 if (is_simm13(delta)) {
duke@435 1459 save(SP, delta, SP);
duke@435 1460 } else {
duke@435 1461 set(delta, G3_scratch);
duke@435 1462 save(SP, G3_scratch, SP);
duke@435 1463 }
duke@435 1464 }
duke@435 1465
duke@435 1466
duke@435 1467 void MacroAssembler::save_frame_c1(int size_in_bytes) {
duke@435 1468 if (is_simm13(-size_in_bytes)) {
duke@435 1469 save(SP, -size_in_bytes, SP);
duke@435 1470 } else {
duke@435 1471 set(-size_in_bytes, G3_scratch);
duke@435 1472 save(SP, G3_scratch, SP);
duke@435 1473 }
duke@435 1474 }
duke@435 1475
duke@435 1476
duke@435 1477 void MacroAssembler::save_frame_and_mov(int extraWords,
duke@435 1478 Register s1, Register d1,
duke@435 1479 Register s2, Register d2) {
duke@435 1480 assert_not_delayed();
duke@435 1481
duke@435 1482 // The trick here is to use precisely the same memory word
duke@435 1483 // that trap handlers also use to save the register.
duke@435 1484 // This word cannot be used for any other purpose, but
duke@435 1485 // it works fine to save the register's value, whether or not
duke@435 1486 // an interrupt flushes register windows at any given moment!
duke@435 1487 Address s1_addr;
duke@435 1488 if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
duke@435 1489 s1_addr = s1->address_in_saved_window();
duke@435 1490 st_ptr(s1, s1_addr);
duke@435 1491 }
duke@435 1492
duke@435 1493 Address s2_addr;
duke@435 1494 if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
duke@435 1495 s2_addr = s2->address_in_saved_window();
duke@435 1496 st_ptr(s2, s2_addr);
duke@435 1497 }
duke@435 1498
duke@435 1499 save_frame(extraWords);
duke@435 1500
duke@435 1501 if (s1_addr.base() == SP) {
duke@435 1502 ld_ptr(s1_addr.after_save(), d1);
duke@435 1503 } else if (s1->is_valid()) {
duke@435 1504 mov(s1->after_save(), d1);
duke@435 1505 }
duke@435 1506
duke@435 1507 if (s2_addr.base() == SP) {
duke@435 1508 ld_ptr(s2_addr.after_save(), d2);
duke@435 1509 } else if (s2->is_valid()) {
duke@435 1510 mov(s2->after_save(), d2);
duke@435 1511 }
duke@435 1512 }
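// A typical use (cf. check_klass_subtype below), shown as a sketch: copy two of the
// caller's registers into locals of the fresh window in the same step as the save,
//   save_frame_and_mov(0, sub_klass, L0, super_klass, L1);
// which behaves like save_frame(0) followed by moving the caller's values into L0/L1,
// but also works when the sources are I- or L-registers that a plain 'save' would
// rotate out of reach.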
duke@435 1513
duke@435 1514
duke@435 1515 Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
duke@435 1516 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
duke@435 1517 int oop_index = oop_recorder()->allocate_index(obj);
duke@435 1518 return Address(d, address(obj), oop_Relocation::spec(oop_index));
duke@435 1519 }
duke@435 1520
duke@435 1521
duke@435 1522 Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
duke@435 1523 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
duke@435 1524 int oop_index = oop_recorder()->find_index(obj);
duke@435 1525 return Address(d, address(obj), oop_Relocation::spec(oop_index));
duke@435 1526 }
duke@435 1527
kvn@599 1528 void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
kvn@599 1529 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
kvn@599 1530 int oop_index = oop_recorder()->find_index(obj);
kvn@599 1531 RelocationHolder rspec = oop_Relocation::spec(oop_index);
kvn@599 1532
kvn@599 1533 assert_not_delayed();
kvn@599 1534 // Relocation with special format (see relocInfo_sparc.hpp).
kvn@599 1535 relocate(rspec, 1);
kvn@599 1536 // Assembler::sethi(0x3fffff, d);
kvn@599 1537 emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
kvn@599 1538 // Don't add relocation for 'add'. Do patching during 'sethi' processing.
kvn@599 1539 add(d, 0x3ff, d);
kvn@599 1540
kvn@599 1541 }
kvn@599 1542
duke@435 1543
duke@435 1544 void MacroAssembler::align(int modulus) {
duke@435 1545 while (offset() % modulus != 0) nop();
duke@435 1546 }
duke@435 1547
duke@435 1548
duke@435 1549 void MacroAssembler::safepoint() {
duke@435 1550 relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
duke@435 1551 }
duke@435 1552
duke@435 1553
duke@435 1554 void RegistersForDebugging::print(outputStream* s) {
duke@435 1555 int j;
duke@435 1556 for ( j = 0; j < 8; ++j )
duke@435 1557 if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
duke@435 1558 else s->print_cr( "fp = 0x%.16lx", i[j]);
duke@435 1559 s->cr();
duke@435 1560
duke@435 1561 for ( j = 0; j < 8; ++j )
duke@435 1562 s->print_cr("l%d = 0x%.16lx", j, l[j]);
duke@435 1563 s->cr();
duke@435 1564
duke@435 1565 for ( j = 0; j < 8; ++j )
duke@435 1566 if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
duke@435 1567 else s->print_cr( "sp = 0x%.16lx", o[j]);
duke@435 1568 s->cr();
duke@435 1569
duke@435 1570 for ( j = 0; j < 8; ++j )
duke@435 1571 s->print_cr("g%d = 0x%.16lx", j, g[j]);
duke@435 1572 s->cr();
duke@435 1573
duke@435 1574 // print out floats with compression
duke@435 1575 for (j = 0; j < 32; ) {
duke@435 1576 jfloat val = f[j];
duke@435 1577 int last = j;
duke@435 1578 for ( ; last+1 < 32; ++last ) {
duke@435 1579 char b1[1024], b2[1024];
duke@435 1580 sprintf(b1, "%f", val);
duke@435 1581 sprintf(b2, "%f", f[last+1]);
duke@435 1582 if (strcmp(b1, b2))
duke@435 1583 break;
duke@435 1584 }
duke@435 1585 s->print("f%d", j);
duke@435 1586 if ( j != last ) s->print(" - f%d", last);
duke@435 1587 s->print(" = %f", val);
duke@435 1588 s->fill_to(25);
duke@435 1589     s->print_cr(" (0x%x)", *(int*)&val);  // print the raw float bits, not the float through %x
duke@435 1590 j = last + 1;
duke@435 1591 }
duke@435 1592 s->cr();
duke@435 1593
duke@435 1594 // and doubles (evens only)
duke@435 1595 for (j = 0; j < 32; ) {
duke@435 1596 jdouble val = d[j];
duke@435 1597 int last = j;
duke@435 1598 for ( ; last+1 < 32; ++last ) {
duke@435 1599 char b1[1024], b2[1024];
duke@435 1600 sprintf(b1, "%f", val);
duke@435 1601 sprintf(b2, "%f", d[last+1]);
duke@435 1602 if (strcmp(b1, b2))
duke@435 1603 break;
duke@435 1604 }
duke@435 1605 s->print("d%d", 2 * j);
duke@435 1606     if ( j != last )  s->print(" - d%d", 2 * last);
duke@435 1607 s->print(" = %f", val);
duke@435 1608 s->fill_to(30);
duke@435 1609 s->print("(0x%x)", *(int*)&val);
duke@435 1610 s->fill_to(42);
duke@435 1611 s->print_cr("(0x%x)", *(1 + (int*)&val));
duke@435 1612 j = last + 1;
duke@435 1613 }
duke@435 1614 s->cr();
duke@435 1615 }
duke@435 1616
duke@435 1617 void RegistersForDebugging::save_registers(MacroAssembler* a) {
duke@435 1618 a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
duke@435 1619 a->flush_windows();
duke@435 1620 int i;
duke@435 1621 for (i = 0; i < 8; ++i) {
duke@435 1622 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
duke@435 1623 a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
duke@435 1624 a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
duke@435 1625 a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
duke@435 1626 }
duke@435 1627 for (i = 0; i < 32; ++i) {
duke@435 1628 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
duke@435 1629 }
duke@435 1630 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
duke@435 1631 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
duke@435 1632 }
duke@435 1633 }
duke@435 1634
duke@435 1635 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
duke@435 1636 for (int i = 1; i < 8; ++i) {
duke@435 1637 a->ld_ptr(r, g_offset(i), as_gRegister(i));
duke@435 1638 }
duke@435 1639 for (int j = 0; j < 32; ++j) {
duke@435 1640 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
duke@435 1641 }
duke@435 1642 for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
duke@435 1643 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
duke@435 1644 }
duke@435 1645 }
duke@435 1646
duke@435 1647
duke@435 1648 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
duke@435 1649 void MacroAssembler::push_fTOS() {
duke@435 1650 // %%%%%% need to implement this
duke@435 1651 }
duke@435 1652
duke@435 1653 // pops double TOS element from CPU stack and pushes on FPU stack
duke@435 1654 void MacroAssembler::pop_fTOS() {
duke@435 1655 // %%%%%% need to implement this
duke@435 1656 }
duke@435 1657
duke@435 1658 void MacroAssembler::empty_FPU_stack() {
duke@435 1659 // %%%%%% need to implement this
duke@435 1660 }
duke@435 1661
duke@435 1662 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
duke@435 1663 // plausibility check for oops
duke@435 1664 if (!VerifyOops) return;
duke@435 1665
duke@435 1666 if (reg == G0) return; // always NULL, which is always an oop
duke@435 1667
ysr@777 1668 char buffer[64];
ysr@777 1669 #ifdef COMPILER1
ysr@777 1670 if (CommentedAssembly) {
ysr@777 1671 snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
ysr@777 1672 block_comment(buffer);
ysr@777 1673 }
ysr@777 1674 #endif
ysr@777 1675
ysr@777 1676 int len = strlen(file) + strlen(msg) + 1 + 4;
duke@435 1677 sprintf(buffer, "%d", line);
ysr@777 1678 len += strlen(buffer);
ysr@777 1679 sprintf(buffer, " at offset %d ", offset());
ysr@777 1680 len += strlen(buffer);
duke@435 1681 char * real_msg = new char[len];
ysr@777 1682 sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
duke@435 1683
duke@435 1684 // Call indirectly to solve generation ordering problem
duke@435 1685 Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
duke@435 1686
duke@435 1687 // Make some space on stack above the current register window.
duke@435 1688 // Enough to hold 8 64-bit registers.
duke@435 1689 add(SP,-8*8,SP);
duke@435 1690
duke@435 1691 // Save some 64-bit registers; a normal 'save' chops the heads off
duke@435 1692 // of 64-bit longs in the 32-bit build.
duke@435 1693 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
duke@435 1694 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
duke@435 1695 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
duke@435 1696 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
duke@435 1697
duke@435 1698 set((intptr_t)real_msg, O1);
duke@435 1699 // Load address to call to into O7
duke@435 1700 load_ptr_contents(a, O7);
duke@435 1701 // Register call to verify_oop_subroutine
duke@435 1702 callr(O7, G0);
duke@435 1703 delayed()->nop();
duke@435 1704 // recover frame size
duke@435 1705 add(SP, 8*8,SP);
duke@435 1706 }
duke@435 1707
duke@435 1708 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
duke@435 1709 // plausibility check for oops
duke@435 1710 if (!VerifyOops) return;
duke@435 1711
duke@435 1712 char buffer[64];
duke@435 1713 sprintf(buffer, "%d", line);
duke@435 1714 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
duke@435 1715 sprintf(buffer, " at SP+%d ", addr.disp());
duke@435 1716 len += strlen(buffer);
duke@435 1717 char * real_msg = new char[len];
duke@435 1718 sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
duke@435 1719
duke@435 1720 // Call indirectly to solve generation ordering problem
duke@435 1721 Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
duke@435 1722
duke@435 1723 // Make some space on stack above the current register window.
duke@435 1724 // Enough to hold 8 64-bit registers.
duke@435 1725 add(SP,-8*8,SP);
duke@435 1726
duke@435 1727 // Save some 64-bit registers; a normal 'save' chops the heads off
duke@435 1728 // of 64-bit longs in the 32-bit build.
duke@435 1729 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
duke@435 1730 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
duke@435 1731 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
duke@435 1732 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
duke@435 1733
duke@435 1734 set((intptr_t)real_msg, O1);
duke@435 1735 // Load address to call to into O7
duke@435 1736 load_ptr_contents(a, O7);
duke@435 1737 // Register call to verify_oop_subroutine
duke@435 1738 callr(O7, G0);
duke@435 1739 delayed()->nop();
duke@435 1740 // recover frame size
duke@435 1741 add(SP, 8*8,SP);
duke@435 1742 }
duke@435 1743
duke@435 1744 // side-door communication with signalHandler in os_solaris.cpp
duke@435 1745 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
duke@435 1746
duke@435 1747 // This macro is expanded just once; it creates shared code. Contract:
duke@435 1748 // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
duke@435 1749 // registers, including flags. May not use a register 'save', as this blows
duke@435 1750 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
duke@435 1751 // call.
duke@435 1752 void MacroAssembler::verify_oop_subroutine() {
duke@435 1753 assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
duke@435 1754
duke@435 1755 // Leaf call; no frame.
duke@435 1756 Label succeed, fail, null_or_fail;
duke@435 1757
duke@435 1758 // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
duke@435 1759 // O0 is now the oop to be checked. O7 is the return address.
duke@435 1760 Register O0_obj = O0;
duke@435 1761
duke@435 1762 // Save some more registers for temps.
duke@435 1763 stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
duke@435 1764 stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
duke@435 1765 stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
duke@435 1766 stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
duke@435 1767
duke@435 1768 // Save flags
duke@435 1769 Register O5_save_flags = O5;
duke@435 1770 rdccr( O5_save_flags );
duke@435 1771
duke@435 1772 { // count number of verifies
duke@435 1773 Register O2_adr = O2;
duke@435 1774 Register O3_accum = O3;
duke@435 1775 Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() );
duke@435 1776 sethi(count_addr);
duke@435 1777 ld(count_addr, O3_accum);
duke@435 1778 inc(O3_accum);
duke@435 1779 st(O3_accum, count_addr);
duke@435 1780 }
duke@435 1781
duke@435 1782 Register O2_mask = O2;
duke@435 1783 Register O3_bits = O3;
duke@435 1784 Register O4_temp = O4;
duke@435 1785
duke@435 1786 // mark lower end of faulting range
duke@435 1787 assert(_verify_oop_implicit_branch[0] == NULL, "set once");
duke@435 1788 _verify_oop_implicit_branch[0] = pc();
duke@435 1789
duke@435 1790 // We can't check the mark oop because it could be in the process of
duke@435 1791 // locking or unlocking while this is running.
duke@435 1792 set(Universe::verify_oop_mask (), O2_mask);
duke@435 1793 set(Universe::verify_oop_bits (), O3_bits);
duke@435 1794
duke@435 1795 // assert((obj & oop_mask) == oop_bits);
duke@435 1796 and3(O0_obj, O2_mask, O4_temp);
duke@435 1797 cmp(O4_temp, O3_bits);
duke@435 1798 brx(notEqual, false, pn, null_or_fail);
duke@435 1799 delayed()->nop();
duke@435 1800
duke@435 1801 if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
duke@435 1802 // the null_or_fail case is useless; must test for null separately
duke@435 1803 br_null(O0_obj, false, pn, succeed);
duke@435 1804 delayed()->nop();
duke@435 1805 }
duke@435 1806
duke@435 1807 // Check the klassOop of this object for being in the right area of memory.
duke@435 1808   // Cannot do the load in the delay slot above in case O0 is null
coleenp@548 1809 load_klass(O0_obj, O0_obj);
duke@435 1810 // assert((klass & klass_mask) == klass_bits);
duke@435 1811 if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
duke@435 1812 set(Universe::verify_klass_mask(), O2_mask);
duke@435 1813 if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
duke@435 1814 set(Universe::verify_klass_bits(), O3_bits);
duke@435 1815 and3(O0_obj, O2_mask, O4_temp);
duke@435 1816 cmp(O4_temp, O3_bits);
duke@435 1817 brx(notEqual, false, pn, fail);
coleenp@548 1818 delayed()->nop();
duke@435 1819 // Check the klass's klass
coleenp@548 1820 load_klass(O0_obj, O0_obj);
duke@435 1821 and3(O0_obj, O2_mask, O4_temp);
duke@435 1822 cmp(O4_temp, O3_bits);
duke@435 1823 brx(notEqual, false, pn, fail);
duke@435 1824 delayed()->wrccr( O5_save_flags ); // Restore CCR's
duke@435 1825
duke@435 1826 // mark upper end of faulting range
duke@435 1827 _verify_oop_implicit_branch[1] = pc();
duke@435 1828
duke@435 1829 //-----------------------
duke@435 1830 // all tests pass
duke@435 1831 bind(succeed);
duke@435 1832
duke@435 1833 // Restore prior 64-bit registers
duke@435 1834 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
duke@435 1835 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
duke@435 1836 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
duke@435 1837 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
duke@435 1838 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
duke@435 1839 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
duke@435 1840
duke@435 1841 retl(); // Leaf return; restore prior O7 in delay slot
duke@435 1842 delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
duke@435 1843
duke@435 1844 //-----------------------
duke@435 1845 bind(null_or_fail); // nulls are less common but OK
duke@435 1846 br_null(O0_obj, false, pt, succeed);
duke@435 1847 delayed()->wrccr( O5_save_flags ); // Restore CCR's
duke@435 1848
duke@435 1849 //-----------------------
duke@435 1850 // report failure:
duke@435 1851 bind(fail);
duke@435 1852 _verify_oop_implicit_branch[2] = pc();
duke@435 1853
duke@435 1854 wrccr( O5_save_flags ); // Restore CCR's
duke@435 1855
duke@435 1856 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
duke@435 1857
duke@435 1858 // stop_subroutine expects message pointer in I1.
duke@435 1859 mov(I1, O1);
duke@435 1860
duke@435 1861 // Restore prior 64-bit registers
duke@435 1862 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
duke@435 1863 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
duke@435 1864 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
duke@435 1865 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
duke@435 1866 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
duke@435 1867 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
duke@435 1868
duke@435 1869 // factor long stop-sequence into subroutine to save space
duke@435 1870 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
duke@435 1871
duke@435 1872 // call indirectly to solve generation ordering problem
duke@435 1873 Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
duke@435 1874 load_ptr_contents(a, O5);
duke@435 1875 jmpl(O5, 0, O7);
duke@435 1876 delayed()->nop();
duke@435 1877 }
duke@435 1878
duke@435 1879
duke@435 1880 void MacroAssembler::stop(const char* msg) {
duke@435 1881 // save frame first to get O7 for return address
duke@435 1882   // add one word to the size in case the struct is an odd number of words long
duke@435 1883 // It must be doubleword-aligned for storing doubles into it.
duke@435 1884
duke@435 1885 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
duke@435 1886
duke@435 1887 // stop_subroutine expects message pointer in I1.
duke@435 1888 set((intptr_t)msg, O1);
duke@435 1889
duke@435 1890 // factor long stop-sequence into subroutine to save space
duke@435 1891 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
duke@435 1892
duke@435 1893 // call indirectly to solve generation ordering problem
duke@435 1894 Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
duke@435 1895 load_ptr_contents(a, O5);
duke@435 1896 jmpl(O5, 0, O7);
duke@435 1897 delayed()->nop();
duke@435 1898
duke@435 1899 breakpoint_trap(); // make stop actually stop rather than writing
duke@435 1900 // unnoticeable results in the output files.
duke@435 1901
duke@435 1902 // restore(); done in callee to save space!
duke@435 1903 }
duke@435 1904
duke@435 1905
duke@435 1906 void MacroAssembler::warn(const char* msg) {
duke@435 1907 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
duke@435 1908 RegistersForDebugging::save_registers(this);
duke@435 1909 mov(O0, L0);
duke@435 1910 set((intptr_t)msg, O0);
duke@435 1911 call( CAST_FROM_FN_PTR(address, warning) );
duke@435 1912 delayed()->nop();
duke@435 1913 // ret();
duke@435 1914 // delayed()->restore();
duke@435 1915 RegistersForDebugging::restore_registers(this, L0);
duke@435 1916 restore();
duke@435 1917 }
duke@435 1918
duke@435 1919
duke@435 1920 void MacroAssembler::untested(const char* what) {
duke@435 1921 // We must be able to turn interactive prompting off
duke@435 1922   // in order to run automated test scripts on the VM.
duke@435 1923   // Use the flag ShowMessageBoxOnError.
duke@435 1924
duke@435 1925 char* b = new char[1024];
duke@435 1926 sprintf(b, "untested: %s", what);
duke@435 1927
duke@435 1928 if ( ShowMessageBoxOnError ) stop(b);
duke@435 1929 else warn(b);
duke@435 1930 }
duke@435 1931
duke@435 1932
duke@435 1933 void MacroAssembler::stop_subroutine() {
duke@435 1934 RegistersForDebugging::save_registers(this);
duke@435 1935
duke@435 1936 // for the sake of the debugger, stick a PC on the current frame
duke@435 1937 // (this assumes that the caller has performed an extra "save")
duke@435 1938 mov(I7, L7);
duke@435 1939 add(O7, -7 * BytesPerInt, I7);
duke@435 1940
duke@435 1941 save_frame(); // one more save to free up another O7 register
duke@435 1942 mov(I0, O1); // addr of reg save area
duke@435 1943
duke@435 1944 // We expect pointer to message in I1. Caller must set it up in O1
duke@435 1945 mov(I1, O0); // get msg
duke@435 1946 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
duke@435 1947 delayed()->nop();
duke@435 1948
duke@435 1949 restore();
duke@435 1950
duke@435 1951 RegistersForDebugging::restore_registers(this, O0);
duke@435 1952
duke@435 1953 save_frame(0);
duke@435 1954 call(CAST_FROM_FN_PTR(address,breakpoint));
duke@435 1955 delayed()->nop();
duke@435 1956 restore();
duke@435 1957
duke@435 1958 mov(L7, I7);
duke@435 1959 retl();
duke@435 1960 delayed()->restore(); // see stop above
duke@435 1961 }
duke@435 1962
duke@435 1963
duke@435 1964 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
duke@435 1965 if ( ShowMessageBoxOnError ) {
duke@435 1966 JavaThreadState saved_state = JavaThread::current()->thread_state();
duke@435 1967 JavaThread::current()->set_thread_state(_thread_in_vm);
duke@435 1968 {
duke@435 1969       // In order to get locks to work, we need to fake an in_VM state
duke@435 1970 ttyLocker ttyl;
duke@435 1971 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
duke@435 1972 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
duke@435 1973 ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
duke@435 1974 }
duke@435 1975 if (os::message_box(msg, "Execution stopped, print registers?"))
duke@435 1976 regs->print(::tty);
duke@435 1977 }
duke@435 1978 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
duke@435 1979 }
duke@435 1980 else
duke@435 1981 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
duke@435 1982 assert(false, "error");
duke@435 1983 }
duke@435 1984
duke@435 1985
duke@435 1986 #ifndef PRODUCT
duke@435 1987 void MacroAssembler::test() {
duke@435 1988 ResourceMark rm;
duke@435 1989
duke@435 1990 CodeBuffer cb("test", 10000, 10000);
duke@435 1991 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 1992 VM_Version::allow_all();
duke@435 1993 a->test_v9();
duke@435 1994 a->test_v8_onlys();
duke@435 1995 VM_Version::revert();
duke@435 1996
duke@435 1997 StubRoutines::Sparc::test_stop_entry()();
duke@435 1998 }
duke@435 1999 #endif
duke@435 2000
duke@435 2001
duke@435 2002 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
duke@435 2003 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
duke@435 2004 Label no_extras;
duke@435 2005 br( negative, true, pt, no_extras ); // if neg, clear reg
duke@435 2006   delayed()->set( 0, Rresult);      // annulled, so only if taken
duke@435 2007 bind( no_extras );
duke@435 2008 }
duke@435 2009
duke@435 2010
duke@435 2011 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
duke@435 2012 #ifdef _LP64
duke@435 2013 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
duke@435 2014 #else
duke@435 2015 add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
duke@435 2016 #endif
duke@435 2017 bclr(1, Rresult);
duke@435 2018 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
duke@435 2019 }
duke@435 2020
duke@435 2021
duke@435 2022 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
duke@435 2023 calc_frame_size(Rextra_words, Rresult);
duke@435 2024 neg(Rresult);
duke@435 2025 save(SP, Rresult, SP);
duke@435 2026 }
duke@435 2027
duke@435 2028
duke@435 2029 // ---------------------------------------------------------
duke@435 2030 Assembler::RCondition cond2rcond(Assembler::Condition c) {
duke@435 2031 switch (c) {
duke@435 2032 /*case zero: */
duke@435 2033 case Assembler::equal: return Assembler::rc_z;
duke@435 2034 case Assembler::lessEqual: return Assembler::rc_lez;
duke@435 2035 case Assembler::less: return Assembler::rc_lz;
duke@435 2036 /*case notZero:*/
duke@435 2037 case Assembler::notEqual: return Assembler::rc_nz;
duke@435 2038 case Assembler::greater: return Assembler::rc_gz;
duke@435 2039 case Assembler::greaterEqual: return Assembler::rc_gez;
duke@435 2040 }
duke@435 2041 ShouldNotReachHere();
duke@435 2042 return Assembler::rc_z;
duke@435 2043 }
duke@435 2044
duke@435 2045 // compares register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
duke@435 2046 void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
duke@435 2047 tst(s1);
duke@435 2048 br (c, a, p, L);
duke@435 2049 }
duke@435 2050
duke@435 2051
duke@435 2052 // Compares a pointer register with zero and branches on null.
duke@435 2053 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
duke@435 2054 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
duke@435 2055 assert_not_delayed();
duke@435 2056 #ifdef _LP64
duke@435 2057 bpr( rc_z, a, p, s1, L );
duke@435 2058 #else
duke@435 2059 tst(s1);
duke@435 2060 br ( zero, a, p, L );
duke@435 2061 #endif
duke@435 2062 }
duke@435 2063
duke@435 2064 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
duke@435 2065 assert_not_delayed();
duke@435 2066 #ifdef _LP64
duke@435 2067 bpr( rc_nz, a, p, s1, L );
duke@435 2068 #else
duke@435 2069 tst(s1);
duke@435 2070 br ( notZero, a, p, L );
duke@435 2071 #endif
duke@435 2072 }
duke@435 2073
ysr@777 2074 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
ysr@777 2075 Register s1, address d,
ysr@777 2076 relocInfo::relocType rt ) {
ysr@777 2077 if (VM_Version::v9_instructions_work()) {
ysr@777 2078 bpr(rc, a, p, s1, d, rt);
ysr@777 2079 } else {
ysr@777 2080 tst(s1);
ysr@777 2081 br(reg_cond_to_cc_cond(rc), a, p, d, rt);
ysr@777 2082 }
ysr@777 2083 }
ysr@777 2084
ysr@777 2085 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
ysr@777 2086 Register s1, Label& L ) {
ysr@777 2087 if (VM_Version::v9_instructions_work()) {
ysr@777 2088 bpr(rc, a, p, s1, L);
ysr@777 2089 } else {
ysr@777 2090 tst(s1);
ysr@777 2091 br(reg_cond_to_cc_cond(rc), a, p, L);
ysr@777 2092 }
ysr@777 2093 }
ysr@777 2094
duke@435 2095
duke@435 2096 // instruction sequences factored across compiler & interpreter
duke@435 2097
duke@435 2098
duke@435 2099 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
duke@435 2100 Register Rb_hi, Register Rb_low,
duke@435 2101 Register Rresult) {
duke@435 2102
duke@435 2103 Label check_low_parts, done;
duke@435 2104
duke@435 2105 cmp(Ra_hi, Rb_hi ); // compare hi parts
duke@435 2106 br(equal, true, pt, check_low_parts);
duke@435 2107 delayed()->cmp(Ra_low, Rb_low); // test low parts
duke@435 2108
duke@435 2109 // And, with an unsigned comparison, it does not matter if the numbers
duke@435 2110 // are negative or not.
duke@435 2111 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
duke@435 2112 // The second one is bigger (unsignedly).
duke@435 2113
duke@435 2114 // Other notes: The first move in each triplet can be unconditional
duke@435 2115 // (and therefore probably prefetchable).
duke@435 2116 // And the equals case for the high part does not need testing,
duke@435 2117 // since that triplet is reached only after finding the high halves differ.
duke@435 2118
duke@435 2119 if (VM_Version::v9_instructions_work()) {
duke@435 2120
duke@435 2121 mov ( -1, Rresult);
duke@435 2122 ba( false, done ); delayed()-> movcc(greater, false, icc, 1, Rresult);
duke@435 2123 }
duke@435 2124 else {
duke@435 2125 br(less, true, pt, done); delayed()-> set(-1, Rresult);
duke@435 2126 br(greater, true, pt, done); delayed()-> set( 1, Rresult);
duke@435 2127 }
duke@435 2128
duke@435 2129 bind( check_low_parts );
duke@435 2130
duke@435 2131 if (VM_Version::v9_instructions_work()) {
duke@435 2132 mov( -1, Rresult);
duke@435 2133 movcc(equal, false, icc, 0, Rresult);
duke@435 2134 movcc(greaterUnsigned, false, icc, 1, Rresult);
duke@435 2135 }
duke@435 2136 else {
duke@435 2137 set(-1, Rresult);
duke@435 2138 br(equal, true, pt, done); delayed()->set( 0, Rresult);
duke@435 2139 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
duke@435 2140 }
duke@435 2141 bind( done );
duke@435 2142 }
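// A worked example, for illustration: Ra = 0x0000000100000000 (Ra_hi = 1, Ra_low = 0)
// and Rb = 0xffffffffffffffff (Rb_hi = -1, Rb_low = -1).  The signed compare of the
// high halves already decides the result (1 > -1), so the branch to check_low_parts
// is not taken, its annulled delay slot is skipped, and Rresult ends up as 1.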
duke@435 2143
duke@435 2144 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
duke@435 2145 subcc( G0, Rlow, Rlow );
duke@435 2146 subc( G0, Rhi, Rhi );
duke@435 2147 }
duke@435 2148
duke@435 2149 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
duke@435 2150 Register Rcount,
duke@435 2151 Register Rout_high, Register Rout_low,
duke@435 2152 Register Rtemp ) {
duke@435 2153
duke@435 2154
duke@435 2155 Register Ralt_count = Rtemp;
duke@435 2156 Register Rxfer_bits = Rtemp;
duke@435 2157
duke@435 2158 assert( Ralt_count != Rin_high
duke@435 2159 && Ralt_count != Rin_low
duke@435 2160 && Ralt_count != Rcount
duke@435 2161 && Rxfer_bits != Rin_low
duke@435 2162 && Rxfer_bits != Rin_high
duke@435 2163 && Rxfer_bits != Rcount
duke@435 2164 && Rxfer_bits != Rout_low
duke@435 2165 && Rout_low != Rin_high,
duke@435 2166 "register alias checks");
duke@435 2167
duke@435 2168 Label big_shift, done;
duke@435 2169
duke@435 2170 // This code can be optimized to use the 64 bit shifts in V9.
duke@435 2171 // Here we use the 32 bit shifts.
duke@435 2172
duke@435 2173 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
duke@435 2174 subcc(Rcount, 31, Ralt_count);
duke@435 2175 br(greater, true, pn, big_shift);
duke@435 2176 delayed()->
duke@435 2177 dec(Ralt_count);
duke@435 2178
duke@435 2179 // shift < 32 bits, Ralt_count = Rcount-31
duke@435 2180
duke@435 2181   // We get the transfer bits by shifting the low register right by 32-count.
duke@435 2182   // This is done by shifting right by 31-count and then by one
duke@435 2183 // more to take care of the special (rare) case where count is zero
duke@435 2184 // (shifting by 32 would not work).
duke@435 2185
duke@435 2186 neg( Ralt_count );
duke@435 2187
duke@435 2188 // The order of the next two instructions is critical in the case where
duke@435 2189 // Rin and Rout are the same and should not be reversed.
duke@435 2190
duke@435 2191 srl( Rin_low, Ralt_count, Rxfer_bits ); // shift right by 31-count
duke@435 2192 if (Rcount != Rout_low) {
duke@435 2193 sll( Rin_low, Rcount, Rout_low ); // low half
duke@435 2194 }
duke@435 2195 sll( Rin_high, Rcount, Rout_high );
duke@435 2196 if (Rcount == Rout_low) {
duke@435 2197 sll( Rin_low, Rcount, Rout_low ); // low half
duke@435 2198 }
duke@435 2199 srl( Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
duke@435 2200 ba (false, done);
duke@435 2201 delayed()->
duke@435 2202 or3( Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
duke@435 2203
duke@435 2204 // shift >= 32 bits, Ralt_count = Rcount-32
duke@435 2205 bind(big_shift);
duke@435 2206 sll( Rin_low, Ralt_count, Rout_high );
duke@435 2207 clr( Rout_low );
duke@435 2208
duke@435 2209 bind(done);
duke@435 2210 }
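// A worked example for the small-shift path (illustrative): with Rcount == 4 the
// branch to big_shift is not taken and its annulled delay slot skips the dec, so
// Ralt_count holds 4 - 31 = -27 and becomes 27 after neg().  Then:
//   Rxfer_bits = (Rin_low srl 27) srl 1   // == Rin_low srl 28, the top 4 bits of the low word
//   Rout_low   = Rin_low  sll 4
//   Rout_high  = (Rin_high sll 4) | Rxfer_bits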
duke@435 2211
duke@435 2212
duke@435 2213 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
duke@435 2214 Register Rcount,
duke@435 2215 Register Rout_high, Register Rout_low,
duke@435 2216 Register Rtemp ) {
duke@435 2217
duke@435 2218 Register Ralt_count = Rtemp;
duke@435 2219 Register Rxfer_bits = Rtemp;
duke@435 2220
duke@435 2221 assert( Ralt_count != Rin_high
duke@435 2222 && Ralt_count != Rin_low
duke@435 2223 && Ralt_count != Rcount
duke@435 2224 && Rxfer_bits != Rin_low
duke@435 2225 && Rxfer_bits != Rin_high
duke@435 2226 && Rxfer_bits != Rcount
duke@435 2227 && Rxfer_bits != Rout_high
duke@435 2228 && Rout_high != Rin_low,
duke@435 2229 "register alias checks");
duke@435 2230
duke@435 2231 Label big_shift, done;
duke@435 2232
duke@435 2233 // This code can be optimized to use the 64 bit shifts in V9.
duke@435 2234 // Here we use the 32 bit shifts.
duke@435 2235
duke@435 2236 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
duke@435 2237 subcc(Rcount, 31, Ralt_count);
duke@435 2238 br(greater, true, pn, big_shift);
duke@435 2239 delayed()->dec(Ralt_count);
duke@435 2240
duke@435 2241 // shift < 32 bits, Ralt_count = Rcount-31
duke@435 2242
duke@435 2243   // We get the transfer bits by shifting the high register left by 32-count.
duke@435 2244   // This is done by shifting left by 31-count and then by one
duke@435 2245 // more to take care of the special (rare) case where count is zero
duke@435 2246 // (shifting by 32 would not work).
duke@435 2247
duke@435 2248 neg( Ralt_count );
duke@435 2249 if (Rcount != Rout_low) {
duke@435 2250 srl( Rin_low, Rcount, Rout_low );
duke@435 2251 }
duke@435 2252
duke@435 2253 // The order of the next two instructions is critical in the case where
duke@435 2254 // Rin and Rout are the same and should not be reversed.
duke@435 2255
duke@435 2256 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count
duke@435 2257 sra( Rin_high, Rcount, Rout_high ); // high half
duke@435 2258 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more
duke@435 2259 if (Rcount == Rout_low) {
duke@435 2260 srl( Rin_low, Rcount, Rout_low );
duke@435 2261 }
duke@435 2262 ba (false, done);
duke@435 2263 delayed()->
duke@435 2264 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high
duke@435 2265
duke@435 2266 // shift >= 32 bits, Ralt_count = Rcount-32
duke@435 2267 bind(big_shift);
duke@435 2268
duke@435 2269 sra( Rin_high, Ralt_count, Rout_low );
duke@435 2270 sra( Rin_high, 31, Rout_high ); // sign into hi
duke@435 2271
duke@435 2272 bind( done );
duke@435 2273 }
duke@435 2274
duke@435 2275
duke@435 2276
duke@435 2277 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
duke@435 2278 Register Rcount,
duke@435 2279 Register Rout_high, Register Rout_low,
duke@435 2280 Register Rtemp ) {
duke@435 2281
duke@435 2282 Register Ralt_count = Rtemp;
duke@435 2283 Register Rxfer_bits = Rtemp;
duke@435 2284
duke@435 2285 assert( Ralt_count != Rin_high
duke@435 2286 && Ralt_count != Rin_low
duke@435 2287 && Ralt_count != Rcount
duke@435 2288 && Rxfer_bits != Rin_low
duke@435 2289 && Rxfer_bits != Rin_high
duke@435 2290 && Rxfer_bits != Rcount
duke@435 2291 && Rxfer_bits != Rout_high
duke@435 2292 && Rout_high != Rin_low,
duke@435 2293 "register alias checks");
duke@435 2294
duke@435 2295 Label big_shift, done;
duke@435 2296
duke@435 2297 // This code can be optimized to use the 64 bit shifts in V9.
duke@435 2298 // Here we use the 32 bit shifts.
duke@435 2299
duke@435 2300 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
duke@435 2301 subcc(Rcount, 31, Ralt_count);
duke@435 2302 br(greater, true, pn, big_shift);
duke@435 2303 delayed()->dec(Ralt_count);
duke@435 2304
duke@435 2305 // shift < 32 bits, Ralt_count = Rcount-31
duke@435 2306
duke@435 2307   // We get the transfer bits by shifting the high register left by 32-count.
duke@435 2308   // This is done by shifting left by 31-count and then by one
duke@435 2309 // more to take care of the special (rare) case where count is zero
duke@435 2310 // (shifting by 32 would not work).
duke@435 2311
duke@435 2312 neg( Ralt_count );
duke@435 2313 if (Rcount != Rout_low) {
duke@435 2314 srl( Rin_low, Rcount, Rout_low );
duke@435 2315 }
duke@435 2316
duke@435 2317 // The order of the next two instructions is critical in the case where
duke@435 2318 // Rin and Rout are the same and should not be reversed.
duke@435 2319
duke@435 2320 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count
duke@435 2321 srl( Rin_high, Rcount, Rout_high ); // high half
duke@435 2322 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more
duke@435 2323 if (Rcount == Rout_low) {
duke@435 2324 srl( Rin_low, Rcount, Rout_low );
duke@435 2325 }
duke@435 2326 ba (false, done);
duke@435 2327 delayed()->
duke@435 2328 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high
duke@435 2329
duke@435 2330 // shift >= 32 bits, Ralt_count = Rcount-32
duke@435 2331 bind(big_shift);
duke@435 2332
duke@435 2333 srl( Rin_high, Ralt_count, Rout_low );
duke@435 2334 clr( Rout_high );
duke@435 2335
duke@435 2336 bind( done );
duke@435 2337 }
duke@435 2338
duke@435 2339 #ifdef _LP64
duke@435 2340 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
duke@435 2341 cmp(Ra, Rb);
duke@435 2342 mov( -1, Rresult);
duke@435 2343 movcc(equal, false, xcc, 0, Rresult);
duke@435 2344 movcc(greater, false, xcc, 1, Rresult);
duke@435 2345 }
duke@435 2346 #endif
duke@435 2347
duke@435 2348
duke@435 2349 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
duke@435 2350 FloatRegister Fa, FloatRegister Fb,
duke@435 2351 Register Rresult) {
duke@435 2352
duke@435 2353 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
duke@435 2354
duke@435 2355 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
duke@435 2356 Condition eq = f_equal;
duke@435 2357 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
duke@435 2358
duke@435 2359 if (VM_Version::v9_instructions_work()) {
duke@435 2360
duke@435 2361 mov( -1, Rresult );
duke@435 2362 movcc( eq, true, fcc0, 0, Rresult );
duke@435 2363 movcc( gt, true, fcc0, 1, Rresult );
duke@435 2364
duke@435 2365 } else {
duke@435 2366 Label done;
duke@435 2367
duke@435 2368 set( -1, Rresult );
duke@435 2369 //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
duke@435 2370 fb( eq, true, pn, done); delayed()->set( 0, Rresult );
duke@435 2371 fb( gt, true, pn, done); delayed()->set( 1, Rresult );
duke@435 2372
duke@435 2373 bind (done);
duke@435 2374 }
duke@435 2375 }
duke@435 2376
duke@435 2377
duke@435 2378 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
duke@435 2379 {
duke@435 2380 if (VM_Version::v9_instructions_work()) {
duke@435 2381 Assembler::fneg(w, s, d);
duke@435 2382 } else {
duke@435 2383 if (w == FloatRegisterImpl::S) {
duke@435 2384 Assembler::fneg(w, s, d);
duke@435 2385 } else if (w == FloatRegisterImpl::D) {
duke@435 2386 // number() does a sanity check on the alignment.
duke@435 2387 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
duke@435 2388 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
duke@435 2389
duke@435 2390 Assembler::fneg(FloatRegisterImpl::S, s, d);
duke@435 2391 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
duke@435 2392 } else {
duke@435 2393 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
duke@435 2394
duke@435 2395 // number() does a sanity check on the alignment.
duke@435 2396 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
duke@435 2397 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
duke@435 2398
duke@435 2399 Assembler::fneg(FloatRegisterImpl::S, s, d);
duke@435 2400 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
duke@435 2401 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
duke@435 2402 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
duke@435 2403 }
duke@435 2404 }
duke@435 2405 }
duke@435 2406
duke@435 2407 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
duke@435 2408 {
duke@435 2409 if (VM_Version::v9_instructions_work()) {
duke@435 2410 Assembler::fmov(w, s, d);
duke@435 2411 } else {
duke@435 2412 if (w == FloatRegisterImpl::S) {
duke@435 2413 Assembler::fmov(w, s, d);
duke@435 2414 } else if (w == FloatRegisterImpl::D) {
duke@435 2415 // number() does a sanity check on the alignment.
duke@435 2416 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
duke@435 2417 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
duke@435 2418
duke@435 2419 Assembler::fmov(FloatRegisterImpl::S, s, d);
duke@435 2420 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
duke@435 2421 } else {
duke@435 2422 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
duke@435 2423
duke@435 2424 // number() does a sanity check on the alignment.
duke@435 2425 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
duke@435 2426 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
duke@435 2427
duke@435 2428 Assembler::fmov(FloatRegisterImpl::S, s, d);
duke@435 2429 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
duke@435 2430 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
duke@435 2431 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
duke@435 2432 }
duke@435 2433 }
duke@435 2434 }
duke@435 2435
duke@435 2436 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
duke@435 2437 {
duke@435 2438 if (VM_Version::v9_instructions_work()) {
duke@435 2439 Assembler::fabs(w, s, d);
duke@435 2440 } else {
duke@435 2441 if (w == FloatRegisterImpl::S) {
duke@435 2442 Assembler::fabs(w, s, d);
duke@435 2443 } else if (w == FloatRegisterImpl::D) {
duke@435 2444 // number() does a sanity check on the alignment.
duke@435 2445 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
duke@435 2446 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
duke@435 2447
duke@435 2448 Assembler::fabs(FloatRegisterImpl::S, s, d);
duke@435 2449 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
duke@435 2450 } else {
duke@435 2451 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
duke@435 2452
duke@435 2453 // number() does a sanity check on the alignment.
duke@435 2454 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
duke@435 2455 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
duke@435 2456
duke@435 2457 Assembler::fabs(FloatRegisterImpl::S, s, d);
duke@435 2458 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
duke@435 2459 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
duke@435 2460 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
duke@435 2461 }
duke@435 2462 }
duke@435 2463 }
duke@435 2464
duke@435 2465 void MacroAssembler::save_all_globals_into_locals() {
duke@435 2466 mov(G1,L1);
duke@435 2467 mov(G2,L2);
duke@435 2468 mov(G3,L3);
duke@435 2469 mov(G4,L4);
duke@435 2470 mov(G5,L5);
duke@435 2471 mov(G6,L6);
duke@435 2472 mov(G7,L7);
duke@435 2473 }
duke@435 2474
duke@435 2475 void MacroAssembler::restore_globals_from_locals() {
duke@435 2476 mov(L1,G1);
duke@435 2477 mov(L2,G2);
duke@435 2478 mov(L3,G3);
duke@435 2479 mov(L4,G4);
duke@435 2480 mov(L5,G5);
duke@435 2481 mov(L6,G6);
duke@435 2482 mov(L7,G7);
duke@435 2483 }
duke@435 2484
duke@435 2485 // Use for 64-bit operations.
duke@435 2486 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
duke@435 2487 {
duke@435 2488 // store ptr_reg as the new top value
duke@435 2489 #ifdef _LP64
duke@435 2490 casx(top_ptr_reg, top_reg, ptr_reg);
duke@435 2491 #else
duke@435 2492 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
duke@435 2493 #endif // _LP64
duke@435 2494 }
duke@435 2495
duke@435 2496 // [RGV] This routine does not handle 64 bit operations.
duke@435 2497 // use casx_under_lock() or casx directly!!!
duke@435 2498 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
duke@435 2499 {
duke@435 2500 // store ptr_reg as the new top value
duke@435 2501 if (VM_Version::v9_instructions_work()) {
duke@435 2502 cas(top_ptr_reg, top_reg, ptr_reg);
duke@435 2503 } else {
duke@435 2504
duke@435 2505     // If the register is neither an out nor a global register, it is not visible
duke@435 2506 // after the save. Allocate a register for it, save its
duke@435 2507 // value in the register save area (the save may not flush
duke@435 2508 // registers to the save area).
duke@435 2509
duke@435 2510 Register top_ptr_reg_after_save;
duke@435 2511 Register top_reg_after_save;
duke@435 2512 Register ptr_reg_after_save;
duke@435 2513
duke@435 2514 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
duke@435 2515 top_ptr_reg_after_save = top_ptr_reg->after_save();
duke@435 2516 } else {
duke@435 2517 Address reg_save_addr = top_ptr_reg->address_in_saved_window();
duke@435 2518 top_ptr_reg_after_save = L0;
duke@435 2519 st(top_ptr_reg, reg_save_addr);
duke@435 2520 }
duke@435 2521
duke@435 2522 if (top_reg->is_out() || top_reg->is_global()) {
duke@435 2523 top_reg_after_save = top_reg->after_save();
duke@435 2524 } else {
duke@435 2525 Address reg_save_addr = top_reg->address_in_saved_window();
duke@435 2526 top_reg_after_save = L1;
duke@435 2527 st(top_reg, reg_save_addr);
duke@435 2528 }
duke@435 2529
duke@435 2530 if (ptr_reg->is_out() || ptr_reg->is_global()) {
duke@435 2531 ptr_reg_after_save = ptr_reg->after_save();
duke@435 2532 } else {
duke@435 2533 Address reg_save_addr = ptr_reg->address_in_saved_window();
duke@435 2534 ptr_reg_after_save = L2;
duke@435 2535 st(ptr_reg, reg_save_addr);
duke@435 2536 }
duke@435 2537
duke@435 2538 const Register& lock_reg = L3;
duke@435 2539 const Register& lock_ptr_reg = L4;
duke@435 2540 const Register& value_reg = L5;
duke@435 2541 const Register& yield_reg = L6;
duke@435 2542 const Register& yieldall_reg = L7;
duke@435 2543
duke@435 2544 save_frame();
duke@435 2545
duke@435 2546 if (top_ptr_reg_after_save == L0) {
duke@435 2547 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
duke@435 2548 }
duke@435 2549
duke@435 2550 if (top_reg_after_save == L1) {
duke@435 2551 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
duke@435 2552 }
duke@435 2553
duke@435 2554 if (ptr_reg_after_save == L2) {
duke@435 2555 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
duke@435 2556 }
duke@435 2557
duke@435 2558     Label retry_get_lock;
duke@435 2559     Label not_same;
duke@435 2560     Label dont_yield;
duke@435 2561
duke@435 2562 assert(lock_addr, "lock_address should be non null for v8");
duke@435 2563 set((intptr_t)lock_addr, lock_ptr_reg);
duke@435 2564 // Initialize yield counter
duke@435 2565 mov(G0,yield_reg);
duke@435 2566 mov(G0, yieldall_reg);
duke@435 2567 set(StubRoutines::Sparc::locked, lock_reg);
duke@435 2568
duke@435 2569 bind(retry_get_lock);
duke@435 2570 cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
duke@435 2571 br(Assembler::less, false, Assembler::pt, dont_yield);
duke@435 2572 delayed()->nop();
duke@435 2573
duke@435 2574 if(use_call_vm) {
duke@435 2575       Untested("Need to verify global reg consistency");
duke@435 2576 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
duke@435 2577 } else {
duke@435 2578 // Save the regs and make space for a C call
duke@435 2579 save(SP, -96, SP);
duke@435 2580 save_all_globals_into_locals();
duke@435 2581 call(CAST_FROM_FN_PTR(address,os::yield_all));
duke@435 2582 delayed()->mov(yieldall_reg, O0);
duke@435 2583 restore_globals_from_locals();
duke@435 2584 restore();
duke@435 2585 }
duke@435 2586
duke@435 2587 // reset the counter
duke@435 2588 mov(G0,yield_reg);
duke@435 2589 add(yieldall_reg, 1, yieldall_reg);
duke@435 2590
duke@435 2591 bind(dont_yield);
duke@435 2592 // try to get lock
duke@435 2593 swap(lock_ptr_reg, 0, lock_reg);
duke@435 2594
duke@435 2595 // did we get the lock?
duke@435 2596 cmp(lock_reg, StubRoutines::Sparc::unlocked);
duke@435 2597 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
duke@435 2598 delayed()->add(yield_reg,1,yield_reg);
duke@435 2599
duke@435 2600 // yes, got lock. do we have the same top?
duke@435 2601 ld(top_ptr_reg_after_save, 0, value_reg);
duke@435 2602 cmp(value_reg, top_reg_after_save);
duke@435 2603 br(Assembler::notEqual, false, Assembler::pn, not_same);
duke@435 2604 delayed()->nop();
duke@435 2605
duke@435 2606 // yes, same top.
duke@435 2607 st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
duke@435 2608 membar(Assembler::StoreStore);
duke@435 2609
duke@435 2610 bind(not_same);
duke@435 2611 mov(value_reg, ptr_reg_after_save);
duke@435 2612 st(lock_reg, lock_ptr_reg, 0); // unlock
duke@435 2613
duke@435 2614 restore();
duke@435 2615 }
duke@435 2616 }
duke@435 2617
jrose@1100 2618 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
jrose@1100 2619 Register tmp,
jrose@1100 2620 int offset) {
jrose@1057 2621 intptr_t value = *delayed_value_addr;
jrose@1057 2622 if (value != 0)
jrose@1100 2623 return RegisterOrConstant(value + offset);
jrose@1057 2624
jrose@1057 2625 // load indirectly to solve generation ordering problem
jrose@1057 2626 Address a(tmp, (address) delayed_value_addr);
jrose@1057 2627 load_ptr_contents(a, tmp);
jrose@1057 2628
jrose@1057 2629 #ifdef ASSERT
jrose@1057 2630 tst(tmp);
jrose@1057 2631 breakpoint_trap(zero, xcc);
jrose@1057 2632 #endif
jrose@1057 2633
jrose@1057 2634 if (offset != 0)
jrose@1057 2635 add(tmp, offset, tmp);
jrose@1057 2636
jrose@1100 2637 return RegisterOrConstant(tmp);
jrose@1057 2638 }
jrose@1057 2639
jrose@1057 2640
jrose@1100 2641 void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
jrose@1058 2642 assert(dest.register_or_noreg() != G0, "lost side effect");
jrose@1058 2643 if ((src.is_constant() && src.as_constant() == 0) ||
jrose@1058 2644 (src.is_register() && src.as_register() == G0)) {
jrose@1058 2645 // do nothing
jrose@1058 2646 } else if (dest.is_register()) {
jrose@1058 2647 add(dest.as_register(), ensure_rs2(src, temp), dest.as_register());
jrose@1058 2648 } else if (src.is_constant()) {
jrose@1058 2649 intptr_t res = dest.as_constant() + src.as_constant();
jrose@1100 2650 dest = RegisterOrConstant(res); // side effect seen by caller
jrose@1058 2651 } else {
jrose@1058 2652 assert(temp != noreg, "cannot handle constant += register");
jrose@1058 2653 add(src.as_register(), ensure_rs2(dest, temp), temp);
jrose@1100 2654 dest = RegisterOrConstant(temp); // side effect seen by caller
jrose@1058 2655 }
jrose@1058 2656 }
jrose@1058 2657
jrose@1100 2658 void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
jrose@1058 2659 assert(dest.register_or_noreg() != G0, "lost side effect");
jrose@1058 2660 if (!is_simm13(src.constant_or_zero()))
jrose@1058 2661 src = (src.as_constant() & 0xFF);
jrose@1058 2662 if ((src.is_constant() && src.as_constant() == 0) ||
jrose@1058 2663 (src.is_register() && src.as_register() == G0)) {
jrose@1058 2664 // do nothing
jrose@1058 2665 } else if (dest.is_register()) {
jrose@1058 2666 sll_ptr(dest.as_register(), src, dest.as_register());
jrose@1058 2667 } else if (src.is_constant()) {
jrose@1058 2668 intptr_t res = dest.as_constant() << src.as_constant();
jrose@1100 2669 dest = RegisterOrConstant(res); // side effect seen by caller
jrose@1058 2670 } else {
jrose@1058 2671 assert(temp != noreg, "cannot handle constant <<= register");
jrose@1058 2672 set(dest.as_constant(), temp);
jrose@1058 2673 sll_ptr(temp, src, temp);
jrose@1100 2674 dest = RegisterOrConstant(temp); // side effect seen by caller
jrose@1058 2675 }
jrose@1058 2676 }
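// Illustrative use (cf. lookup_interface_method below): starting from a
// RegisterOrConstant index, scale it and then add a byte offset, folding all the
// arithmetic at assembly time whenever the index is a compile-time constant:
//   RegisterOrConstant off = itable_index;
//   regcon_sll_ptr(off, exact_log2(itableMethodEntry::size() * wordSize));
//   regcon_inc_ptr(off, itableMethodEntry::method_offset_in_bytes());
// If 'off' started as a constant it is still a constant afterwards; if it named a
// register, the shift/add instructions are emitted and 'off' is updated in place.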
jrose@1058 2677
jrose@1058 2678
jrose@1058 2679 // Look up the method for a megamorphic invokeinterface call.
jrose@1058 2680 // The target method is determined by <intf_klass, itable_index>.
jrose@1058 2681 // The receiver klass is in recv_klass.
jrose@1058 2682 // On success, the result will be in method_result, and execution falls through.
jrose@1058 2683 // On failure, execution transfers to the given label.
jrose@1058 2684 void MacroAssembler::lookup_interface_method(Register recv_klass,
jrose@1058 2685 Register intf_klass,
jrose@1100 2686 RegisterOrConstant itable_index,
jrose@1058 2687 Register method_result,
jrose@1058 2688 Register scan_temp,
jrose@1058 2689 Register sethi_temp,
jrose@1058 2690 Label& L_no_such_interface) {
jrose@1058 2691 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
jrose@1058 2692 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
jrose@1058 2693 "caller must use same register for non-constant itable index as for method");
jrose@1058 2694
jrose@1058 2695 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
jrose@1058 2696 int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
jrose@1058 2697 int scan_step = itableOffsetEntry::size() * wordSize;
jrose@1058 2698 int vte_size = vtableEntry::size() * wordSize;
jrose@1058 2699
jrose@1058 2700 lduw(recv_klass, instanceKlass::vtable_length_offset() * wordSize, scan_temp);
jrose@1058 2701 // %%% We should store the aligned, prescaled offset in the klassoop.
jrose@1058 2702 // Then the next several instructions would fold away.
jrose@1058 2703
jrose@1058 2704 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
jrose@1058 2705 int itb_offset = vtable_base;
jrose@1058 2706 if (round_to_unit != 0) {
jrose@1058 2707 // hoist first instruction of round_to(scan_temp, BytesPerLong):
jrose@1058 2708 itb_offset += round_to_unit - wordSize;
jrose@1058 2709 }
jrose@1058 2710 int itb_scale = exact_log2(vtableEntry::size() * wordSize);
jrose@1058 2711 sll(scan_temp, itb_scale, scan_temp);
jrose@1058 2712 add(scan_temp, itb_offset, scan_temp);
jrose@1058 2713 if (round_to_unit != 0) {
jrose@1058 2714 // Round up to align_object_offset boundary
jrose@1058 2715 // see code for instanceKlass::start_of_itable!
jrose@1058 2716 // Was: round_to(scan_temp, BytesPerLong);
jrose@1058 2717 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
jrose@1058 2718 and3(scan_temp, -round_to_unit, scan_temp);
jrose@1058 2719 }
jrose@1058 2720 add(recv_klass, scan_temp, scan_temp);
jrose@1058 2721
jrose@1058 2722 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
jrose@1100 2723 RegisterOrConstant itable_offset = itable_index;
jrose@1058 2724 regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
jrose@1058 2725 regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
jrose@1058 2726 add(recv_klass, ensure_rs2(itable_offset, sethi_temp), recv_klass);
jrose@1058 2727
jrose@1058 2728 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
jrose@1058 2729 // if (scan->interface() == intf) {
jrose@1058 2730 // result = (klass + scan->offset() + itable_index);
jrose@1058 2731 // }
jrose@1058 2732 // }
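// The scan below is peeled once: the first itable entry is tested straight-line
// (peel == 1), so the common case of an immediate hit falls through to
// found_method without a backward branch; later entries loop via 'search'.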
jrose@1058 2733 Label search, found_method;
jrose@1058 2734
jrose@1058 2735 for (int peel = 1; peel >= 0; peel--) {
jrose@1058 2736 // %%%% Could load both offset and interface in one ldx, if they were
jrose@1058 2737 // in the opposite order. This would save a load.
jrose@1058 2738 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
jrose@1058 2739
jrose@1058 2740 // Check that this entry is non-null. A null entry means that
jrose@1058 2741 // the receiver class doesn't implement the interface, and wasn't the
jrose@1058 2742 // same as when the caller was compiled.
jrose@1058 2743 bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface);
jrose@1058 2744 delayed()->cmp(method_result, intf_klass);
jrose@1058 2745
jrose@1058 2746 if (peel) {
jrose@1058 2747 brx(Assembler::equal, false, Assembler::pt, found_method);
jrose@1058 2748 } else {
jrose@1058 2749 brx(Assembler::notEqual, false, Assembler::pn, search);
jrose@1058 2750 // (invert the test to fall through to found_method...)
jrose@1058 2751 }
jrose@1058 2752 delayed()->add(scan_temp, scan_step, scan_temp);
jrose@1058 2753
jrose@1058 2754 if (!peel) break;
jrose@1058 2755
jrose@1058 2756 bind(search);
jrose@1058 2757 }
jrose@1058 2758
jrose@1058 2759 bind(found_method);
jrose@1058 2760
jrose@1058 2761 // Got a hit.
jrose@1058 2762 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
jrose@1058 2763 // scan_temp[-scan_step] points to the vtable offset we need
jrose@1058 2764 ito_offset -= scan_step;
jrose@1058 2765 lduw(scan_temp, ito_offset, scan_temp);
jrose@1058 2766 ld_ptr(recv_klass, scan_temp, method_result);
jrose@1058 2767 }
jrose@1058 2768
jrose@1058 2769
jrose@1079 2770 void MacroAssembler::check_klass_subtype(Register sub_klass,
jrose@1079 2771 Register super_klass,
jrose@1079 2772 Register temp_reg,
jrose@1079 2773 Register temp2_reg,
jrose@1079 2774 Label& L_success) {
jrose@1079 2775 Label L_failure, L_pop_to_failure;
jrose@1079 2776 check_klass_subtype_fast_path(sub_klass, super_klass,
jrose@1079 2777 temp_reg, temp2_reg,
jrose@1079 2778 &L_success, &L_failure, NULL);
jrose@1079 2779 Register sub_2 = sub_klass;
jrose@1079 2780 Register sup_2 = super_klass;
jrose@1079 2781 if (!sub_2->is_global()) sub_2 = L0;
jrose@1079 2782 if (!sup_2->is_global()) sup_2 = L1;
jrose@1079 2783
jrose@1079 2784 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
jrose@1079 2785 check_klass_subtype_slow_path(sub_2, sup_2,
jrose@1079 2786 L2, L3, L4, L5,
jrose@1079 2787 NULL, &L_pop_to_failure);
jrose@1079 2788
jrose@1079 2789 // on success:
jrose@1079 2790 restore();
jrose@1079 2791 ba(false, L_success);
jrose@1079 2792 delayed()->nop();
jrose@1079 2793
jrose@1079 2794 // on failure:
jrose@1079 2795 bind(L_pop_to_failure);
jrose@1079 2796 restore();
jrose@1079 2797 bind(L_failure);
jrose@1079 2798 }
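// (Sketch of the division of labor above: the fast path resolves the
//  self-check, a primary-super display hit, and most failures in-line; only
//  when the answer depends on the secondary-supers array does control reach
//  the slow path, which runs in a fresh register window set up by
//  save_frame_and_mov so the local L registers can serve as temps.)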
jrose@1079 2799
jrose@1079 2800
jrose@1079 2801 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
jrose@1079 2802 Register super_klass,
jrose@1079 2803 Register temp_reg,
jrose@1079 2804 Register temp2_reg,
jrose@1079 2805 Label* L_success,
jrose@1079 2806 Label* L_failure,
jrose@1079 2807 Label* L_slow_path,
jrose@1100 2808 RegisterOrConstant super_check_offset,
jrose@1079 2809 Register instanceof_hack) {
jrose@1079 2810 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
jrose@1079 2811 Klass::secondary_super_cache_offset_in_bytes());
jrose@1079 2812 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
jrose@1079 2813 Klass::super_check_offset_offset_in_bytes());
jrose@1079 2814
jrose@1079 2815 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
jrose@1079 2816 bool need_slow_path = (must_load_sco ||
jrose@1079 2817 super_check_offset.constant_or_zero() == sco_offset);
jrose@1079 2818
jrose@1079 2819 assert_different_registers(sub_klass, super_klass, temp_reg);
jrose@1079 2820 if (super_check_offset.is_register()) {
jrose@1079 2821 assert_different_registers(sub_klass, super_klass,
jrose@1079 2822 super_check_offset.as_register());
jrose@1079 2823 } else if (must_load_sco) {
jrose@1079 2824 assert(temp2_reg != noreg, "supply either a temp or a register offset");
jrose@1079 2825 }
jrose@1079 2826
jrose@1079 2827 Label L_fallthrough;
jrose@1079 2828 int label_nulls = 0;
jrose@1079 2829 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
jrose@1079 2830 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
jrose@1079 2831 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
jrose@1079 2832 assert(label_nulls <= 1 || instanceof_hack != noreg ||
jrose@1079 2833 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
jrose@1079 2834 "at most one NULL in the batch, usually");
jrose@1079 2835
jrose@1079 2836 // Support for the instanceof hack, which uses delay slots to
jrose@1079 2837 // set a destination register to zero or one.
jrose@1079 2838 bool do_bool_sets = (instanceof_hack != noreg);
jrose@1079 2839 #define BOOL_SET(bool_value) \
jrose@1079 2840 if (do_bool_sets && bool_value >= 0) \
jrose@1079 2841 set(bool_value, instanceof_hack)
jrose@1079 2842 #define DELAYED_BOOL_SET(bool_value) \
jrose@1079 2843 if (do_bool_sets && bool_value >= 0) \
jrose@1079 2844 delayed()->set(bool_value, instanceof_hack); \
jrose@1079 2845 else delayed()->nop()
jrose@1079 2846 // Hacked ba(), which may only be used just before L_fallthrough.
jrose@1079 2847 #define FINAL_JUMP(label, bool_value) \
jrose@1079 2848 if (&(label) == &L_fallthrough) { \
jrose@1079 2849 BOOL_SET(bool_value); \
jrose@1079 2850 } else { \
jrose@1079 2851 ba((do_bool_sets && bool_value >= 0), label); \
jrose@1079 2852 DELAYED_BOOL_SET(bool_value); \
jrose@1079 2853 }
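// (With instanceof_hack supplied, these helpers fold the 0/1 result writes
//  into the branch delay slots; a bool_value of -1 leaves the slot as a plain
//  nop, i.e. a vanilla delay slot.)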
jrose@1079 2854
jrose@1079 2855 // If the pointers are equal, we are done (e.g., String[] elements).
jrose@1079 2856 // This self-check enables sharing of secondary supertype arrays among
jrose@1079 2857 // non-primary types such as array-of-interface. Otherwise, each such
jrose@1079 2858 // type would need its own customized SSA.
jrose@1079 2859 // We move this check to the front of the fast path because many
jrose@1079 2860 // type checks are in fact trivially successful in this manner,
jrose@1079 2861 // so we get a nicely predicted branch right at the start of the check.
jrose@1079 2862 cmp(super_klass, sub_klass);
jrose@1079 2863 brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
jrose@1079 2864 DELAYED_BOOL_SET(1);
jrose@1079 2865
jrose@1079 2866 // Check the supertype display:
jrose@1079 2867 if (must_load_sco) {
jrose@1079 2868 // The super check offset is always positive...
jrose@1079 2869 lduw(super_klass, sco_offset, temp2_reg);
jrose@1100 2870 super_check_offset = RegisterOrConstant(temp2_reg);
jrose@1079 2871 }
jrose@1079 2872 ld_ptr(sub_klass, super_check_offset, temp_reg);
jrose@1079 2873 cmp(super_klass, temp_reg);
jrose@1079 2874
jrose@1079 2875 // This check has worked decisively for primary supers.
jrose@1079 2876 // Secondary supers are sought in the super_cache ('super_cache_addr').
jrose@1079 2877 // (Secondary supers are interfaces and very deeply nested subtypes.)
jrose@1079 2878 // This works in the same check above because of a tricky aliasing
jrose@1079 2879 // between the super_cache and the primary super display elements.
jrose@1079 2880 // (The 'super_check_addr' can address either, as the case requires.)
jrose@1079 2881 // Note that the cache is updated below if it does not help us find
jrose@1079 2882 // what we need immediately.
jrose@1079 2883 // So if it was a primary super, we can just fail immediately.
jrose@1079 2884 // Otherwise, it's the slow path for us (no success at this point).
jrose@1079 2885
jrose@1079 2886 if (super_check_offset.is_register()) {
jrose@1079 2887 brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
jrose@1079 2888 delayed(); if (do_bool_sets) BOOL_SET(1);
jrose@1079 2889 // if !do_bool_sets, sneak the next cmp into the delay slot:
jrose@1079 2890 cmp(super_check_offset.as_register(), sc_offset);
jrose@1079 2891
jrose@1079 2892 if (L_failure == &L_fallthrough) {
jrose@1079 2893 brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_slow_path);
jrose@1079 2894 delayed()->nop();
jrose@1079 2895 BOOL_SET(0); // fallthrough on failure
jrose@1079 2896 } else {
jrose@1079 2897 brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
jrose@1079 2898 DELAYED_BOOL_SET(0);
jrose@1079 2899 FINAL_JUMP(*L_slow_path, -1); // -1 => vanilla delay slot
jrose@1079 2900 }
jrose@1079 2901 } else if (super_check_offset.as_constant() == sc_offset) {
jrose@1079 2902 // Need a slow path; fast failure is impossible.
jrose@1079 2903 if (L_slow_path == &L_fallthrough) {
jrose@1079 2904 brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
jrose@1079 2905 DELAYED_BOOL_SET(1);
jrose@1079 2906 } else {
jrose@1079 2907 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
jrose@1079 2908 delayed()->nop();
jrose@1079 2909 FINAL_JUMP(*L_success, 1);
jrose@1079 2910 }
jrose@1079 2911 } else {
jrose@1079 2912 // No slow path; it's a fast decision.
jrose@1079 2913 if (L_failure == &L_fallthrough) {
jrose@1079 2914 brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
jrose@1079 2915 DELAYED_BOOL_SET(1);
jrose@1079 2916 BOOL_SET(0);
jrose@1079 2917 } else {
jrose@1079 2918 brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
jrose@1079 2919 DELAYED_BOOL_SET(0);
jrose@1079 2920 FINAL_JUMP(*L_success, 1);
jrose@1079 2921 }
jrose@1079 2922 }
jrose@1079 2923
jrose@1079 2924 bind(L_fallthrough);
jrose@1079 2925
jrose@1079 2926 #undef FINAL_JUMP
jrose@1079 2927 #undef BOOL_SET
jrose@1079 2928 #undef DELAYED_BOOL_SET
jrose@1079 2930 }
jrose@1079 2931
jrose@1079 2932
jrose@1079 2933 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
jrose@1079 2934 Register super_klass,
jrose@1079 2935 Register count_temp,
jrose@1079 2936 Register scan_temp,
jrose@1079 2937 Register scratch_reg,
jrose@1079 2938 Register coop_reg,
jrose@1079 2939 Label* L_success,
jrose@1079 2940 Label* L_failure) {
jrose@1079 2941 assert_different_registers(sub_klass, super_klass,
jrose@1079 2942 count_temp, scan_temp, scratch_reg, coop_reg);
jrose@1079 2943
jrose@1079 2944 Label L_fallthrough, L_loop;
jrose@1079 2945 int label_nulls = 0;
jrose@1079 2946 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
jrose@1079 2947 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
jrose@1079 2948 assert(label_nulls <= 1, "at most one NULL in the batch");
jrose@1079 2949
jrose@1079 2950 // a couple of useful fields in sub_klass:
jrose@1079 2951 int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
jrose@1079 2952 Klass::secondary_supers_offset_in_bytes());
jrose@1079 2953 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
jrose@1079 2954 Klass::secondary_super_cache_offset_in_bytes());
jrose@1079 2955
jrose@1079 2956 // Do a linear scan of the secondary super-klass chain.
jrose@1079 2957 // This code is rarely used, so simplicity is a virtue here.
jrose@1079 2958
jrose@1079 2959 #ifndef PRODUCT
jrose@1079 2960 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
jrose@1079 2961 inc_counter((address) pst_counter, count_temp, scan_temp);
jrose@1079 2962 #endif
jrose@1079 2963
jrose@1079 2964 // We will consult the secondary-super array.
jrose@1079 2965 ld_ptr(sub_klass, ss_offset, scan_temp);
jrose@1079 2966
jrose@1079 2967 // Compress superclass if necessary.
jrose@1079 2968 Register search_key = super_klass;
jrose@1079 2969 bool decode_super_klass = false;
jrose@1079 2970 if (UseCompressedOops) {
jrose@1079 2971 if (coop_reg != noreg) {
jrose@1079 2972 encode_heap_oop_not_null(super_klass, coop_reg);
jrose@1079 2973 search_key = coop_reg;
jrose@1079 2974 } else {
jrose@1079 2975 encode_heap_oop_not_null(super_klass);
jrose@1079 2976 decode_super_klass = true; // scarce temps!
jrose@1079 2977 }
jrose@1079 2978 // The superclass is never null; it would be a basic system error if a null
jrose@1079 2979 // pointer were to sneak in here. Note that we have already loaded the
jrose@1079 2980 // Klass::super_check_offset from the super_klass in the fast path,
jrose@1079 2981 // so if there is a null in that register, we are already in the afterlife.
jrose@1079 2982 }
jrose@1079 2983
jrose@1079 2984 // Load the array length. (The unsigned lduw does the right thing on LP64.)
jrose@1079 2985 lduw(scan_temp, arrayOopDesc::length_offset_in_bytes(), count_temp);
jrose@1079 2986
jrose@1079 2987 // Check for empty secondary super list
jrose@1079 2988 tst(count_temp);
jrose@1079 2989
jrose@1079 2990 // Top of search loop
jrose@1079 2991 bind(L_loop);
jrose@1079 2992 br(Assembler::equal, false, Assembler::pn, *L_failure);
jrose@1079 2993 delayed()->add(scan_temp, heapOopSize, scan_temp);
jrose@1079 2994 assert(heapOopSize != 0, "heapOopSize should be initialized");
jrose@1079 2995
jrose@1079 2996 // Skip the array header in all array accesses.
jrose@1079 2997 int elem_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
jrose@1079 2998 elem_offset -= heapOopSize; // the scan pointer was pre-incremented also
jrose@1079 2999
jrose@1079 3000 // Load next super to check
jrose@1079 3001 if (UseCompressedOops) {
jrose@1079 3002 // Don't use load_heap_oop; we don't want to decode the element.
jrose@1079 3003 lduw( scan_temp, elem_offset, scratch_reg );
jrose@1079 3004 } else {
jrose@1079 3005 ld_ptr( scan_temp, elem_offset, scratch_reg );
jrose@1079 3006 }
jrose@1079 3007
jrose@1079 3008 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
jrose@1079 3009 cmp(scratch_reg, search_key);
jrose@1079 3010
jrose@1079 3011 // A miss means we are NOT a subtype and need to keep looping
jrose@1079 3012 brx(Assembler::notEqual, false, Assembler::pn, L_loop);
jrose@1079 3013 delayed()->deccc(count_temp); // decrement trip counter in delay slot
jrose@1079 3014
jrose@1079 3015 // Falling out the bottom means we found a hit; we ARE a subtype
jrose@1079 3016 if (decode_super_klass) decode_heap_oop(super_klass);
jrose@1079 3017
jrose@1079 3018 // Success. Cache the super we found and proceed in triumph.
jrose@1079 3019 st_ptr(super_klass, sub_klass, sc_offset);
jrose@1079 3020
jrose@1079 3021 if (L_success != &L_fallthrough) {
jrose@1079 3022 ba(false, *L_success);
jrose@1079 3023 delayed()->nop();
jrose@1079 3024 }
jrose@1079 3025
jrose@1079 3026 bind(L_fallthrough);
jrose@1079 3027 }
jrose@1079 3028
jrose@1079 3029
jrose@1079 3030
jrose@1079 3031
kvn@855 3032 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
kvn@855 3033 Register temp_reg,
duke@435 3034 Label& done, Label* slow_case,
duke@435 3035 BiasedLockingCounters* counters) {
duke@435 3036 assert(UseBiasedLocking, "why call this otherwise?");
duke@435 3037
duke@435 3038 if (PrintBiasedLockingStatistics) {
duke@435 3039 assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
duke@435 3040 if (counters == NULL)
duke@435 3041 counters = BiasedLocking::counters();
duke@435 3042 }
duke@435 3043
duke@435 3044 Label cas_label;
duke@435 3045
duke@435 3046 // Biased locking
duke@435 3047 // See whether the lock is currently biased toward our thread and
duke@435 3048 // whether the epoch is still valid
duke@435 3049 // Note that the runtime guarantees sufficient alignment of JavaThread
duke@435 3050 // pointers to allow age to be placed into low bits
duke@435 3051 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
duke@435 3052 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
duke@435 3053 cmp(temp_reg, markOopDesc::biased_lock_pattern);
duke@435 3054 brx(Assembler::notEqual, false, Assembler::pn, cas_label);
coleenp@548 3055 delayed()->nop();
coleenp@548 3056
coleenp@548 3057 load_klass(obj_reg, temp_reg);
duke@435 3058 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
duke@435 3059 or3(G2_thread, temp_reg, temp_reg);
duke@435 3060 xor3(mark_reg, temp_reg, temp_reg);
duke@435 3061 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
duke@435 3062 if (counters != NULL) {
duke@435 3063 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
duke@435 3064 // Reload mark_reg as we may need it later
duke@435 3065 ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
duke@435 3066 }
duke@435 3067 brx(Assembler::equal, true, Assembler::pt, done);
duke@435 3068 delayed()->nop();
duke@435 3069
duke@435 3070 Label try_revoke_bias;
duke@435 3071 Label try_rebias;
duke@435 3072 Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
duke@435 3073 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 3074
duke@435 3075 // At this point we know that the header has the bias pattern and
duke@435 3076 // that we are not the bias owner in the current epoch. We need to
duke@435 3077 // figure out more details about the state of the header in order to
duke@435 3078 // know what operations can be legally performed on the object's
duke@435 3079 // header.
duke@435 3080
duke@435 3081 // If the low three bits in the xor result aren't clear, that means
duke@435 3082 // the prototype header is no longer biased and we have to revoke
duke@435 3083 // the bias on this object.
duke@435 3084 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
duke@435 3085 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
duke@435 3086
duke@435 3087 // Biasing is still enabled for this data type. See whether the
duke@435 3088 // epoch of the current bias is still valid, meaning that the epoch
duke@435 3089 // bits of the mark word are equal to the epoch bits of the
duke@435 3090 // prototype header. (Note that the prototype header's epoch bits
duke@435 3091 // only change at a safepoint.) If not, attempt to rebias the object
duke@435 3092 // toward the current thread. Note that we must be absolutely sure
duke@435 3093 // that the current epoch is invalid in order to do this because
duke@435 3094 // otherwise the manipulations it performs on the mark word are
duke@435 3095 // illegal.
duke@435 3096 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
duke@435 3097 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
duke@435 3098
duke@435 3099 // The epoch of the current bias is still valid but we know nothing
duke@435 3100 // about the owner; it might be set or it might be clear. Try to
duke@435 3101 // acquire the bias of the object using an atomic operation. If this
duke@435 3102 // fails we will go in to the runtime to revoke the object's bias.
duke@435 3103 // Note that we first construct the presumed unbiased header so we
duke@435 3104 // don't accidentally blow away another thread's valid bias.
duke@435 3105 delayed()->and3(mark_reg,
duke@435 3106 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
duke@435 3107 mark_reg);
duke@435 3108 or3(G2_thread, mark_reg, temp_reg);
kvn@855 3109 casn(mark_addr.base(), mark_reg, temp_reg);
duke@435 3110 // If the biasing toward our thread failed, this means that
duke@435 3111 // another thread succeeded in biasing it toward itself and we
duke@435 3112 // need to revoke that bias. The revocation will occur in the
duke@435 3113 // interpreter runtime in the slow case.
duke@435 3114 cmp(mark_reg, temp_reg);
duke@435 3115 if (counters != NULL) {
duke@435 3116 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
duke@435 3117 }
duke@435 3118 if (slow_case != NULL) {
duke@435 3119 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
duke@435 3120 delayed()->nop();
duke@435 3121 }
duke@435 3122 br(Assembler::always, false, Assembler::pt, done);
duke@435 3123 delayed()->nop();
duke@435 3124
duke@435 3125 bind(try_rebias);
duke@435 3126 // At this point we know the epoch has expired, meaning that the
duke@435 3127 // current "bias owner", if any, is actually invalid. Under these
duke@435 3128 // circumstances _only_, we are allowed to use the current header's
duke@435 3129 // value as the comparison value when doing the cas to acquire the
duke@435 3130 // bias in the current epoch. In other words, we allow transfer of
duke@435 3131 // the bias from one thread to another directly in this situation.
duke@435 3132 //
duke@435 3133 // FIXME: due to a lack of registers we currently blow away the age
duke@435 3134 // bits in this situation. Should attempt to preserve them.
coleenp@548 3135 load_klass(obj_reg, temp_reg);
duke@435 3136 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
duke@435 3137 or3(G2_thread, temp_reg, temp_reg);
kvn@855 3138 casn(mark_addr.base(), mark_reg, temp_reg);
duke@435 3139 // If the biasing toward our thread failed, this means that
duke@435 3140 // another thread succeeded in biasing it toward itself and we
duke@435 3141 // need to revoke that bias. The revocation will occur in the
duke@435 3142 // interpreter runtime in the slow case.
duke@435 3143 cmp(mark_reg, temp_reg);
duke@435 3144 if (counters != NULL) {
duke@435 3145 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
duke@435 3146 }
duke@435 3147 if (slow_case != NULL) {
duke@435 3148 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
duke@435 3149 delayed()->nop();
duke@435 3150 }
duke@435 3151 br(Assembler::always, false, Assembler::pt, done);
duke@435 3152 delayed()->nop();
duke@435 3153
duke@435 3154 bind(try_revoke_bias);
duke@435 3155 // The prototype mark in the klass doesn't have the bias bit set any
duke@435 3156 // more, indicating that objects of this data type are not supposed
duke@435 3157 // to be biased any more. We are going to try to reset the mark of
duke@435 3158 // this object to the prototype value and fall through to the
duke@435 3159 // CAS-based locking scheme. Note that if our CAS fails, it means
duke@435 3160 // that another thread raced us for the privilege of revoking the
duke@435 3161 // bias of this particular object, so it's okay to continue in the
duke@435 3162 // normal locking code.
duke@435 3163 //
duke@435 3164 // FIXME: due to a lack of registers we currently blow away the age
duke@435 3165 // bits in this situation. Should attempt to preserve them.
coleenp@548 3166 load_klass(obj_reg, temp_reg);
duke@435 3167 ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
kvn@855 3168 casn(mark_addr.base(), mark_reg, temp_reg);
duke@435 3169 // Fall through to the normal CAS-based lock, because no matter what
duke@435 3170 // the result of the above CAS, some thread must have succeeded in
duke@435 3171 // removing the bias bit from the object's header.
duke@435 3172 if (counters != NULL) {
duke@435 3173 cmp(mark_reg, temp_reg);
duke@435 3174 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
duke@435 3175 }
duke@435 3176
duke@435 3177 bind(cas_label);
duke@435 3178 }
duke@435 3179
duke@435 3180 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
duke@435 3181 bool allow_delay_slot_filling) {
duke@435 3182 // Check for biased locking unlock case, which is a no-op
duke@435 3183 // Note: we do not have to check the thread ID for two reasons.
duke@435 3184 // First, the interpreter checks for IllegalMonitorStateException at
duke@435 3185 // a higher level. Second, if the bias was revoked while we held the
duke@435 3186 // lock, the object could not be rebiased toward another thread, so
duke@435 3187 // the bias bit would be clear.
duke@435 3188 ld_ptr(mark_addr, temp_reg);
duke@435 3189 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
duke@435 3190 cmp(temp_reg, markOopDesc::biased_lock_pattern);
duke@435 3191 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
duke@435 3192 delayed();
duke@435 3193 if (!allow_delay_slot_filling) {
duke@435 3194 nop();
duke@435 3195 }
duke@435 3196 }
duke@435 3197
duke@435 3198
duke@435 3199 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
duke@435 3200 // Solaris/SPARC's "as". Another apt name would be cas_ptr()
duke@435 3201
duke@435 3202 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
duke@435 3203 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ;
duke@435 3204 }
duke@435 3205
duke@435 3206
duke@435 3207
duke@435 3208 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
duke@435 3209 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
duke@435 3210 // The code could be tightened up considerably.
duke@435 3211 //
duke@435 3212 // box->dhw disposition - post-conditions at DONE_LABEL.
duke@435 3213 // - Successful inflated lock: box->dhw != 0.
duke@435 3214 // Any non-zero value suffices.
duke@435 3215 // Consider G2_thread, rsp, boxReg, or unused_mark()
duke@435 3216 // - Successful Stack-lock: box->dhw == mark.
duke@435 3217 // box->dhw must contain the displaced mark word value
duke@435 3218 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
duke@435 3219 // The slow-path fast_enter() and slow_enter() operators
duke@435 3220 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
duke@435 3221 // - Biased: box->dhw is undefined
duke@435 3222 //
duke@435 3223 // SPARC refworkload performance - specifically jetstream and scimark - is
duke@435 3224 // extremely sensitive to the size of the code emitted by compiler_lock_object
duke@435 3225 // and compiler_unlock_object. Critically, the key factor is code size, not path
duke@435 3226 // length. (Simple experiments padding CLO with unexecuted NOPs demonstrate the
duke@435 3227 // effect).
duke@435 3228
duke@435 3229
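// (EmitSync is a diagnostic flag treated as a bit mask: the (EmitSync & 1),
//  (EmitSync & 2) and (EmitSync & 256) arms below select alternative,
//  experimental emission strategies; the final 'else' clause is the default
//  fast-lock sequence.)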
kvn@855 3230 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
kvn@855 3231 Register Rbox, Register Rscratch,
kvn@855 3232 BiasedLockingCounters* counters,
kvn@855 3233 bool try_bias) {
duke@435 3234 Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
duke@435 3235
duke@435 3236 verify_oop(Roop);
duke@435 3237 Label done ;
duke@435 3238
duke@435 3239 if (counters != NULL) {
duke@435 3240 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
duke@435 3241 }
duke@435 3242
duke@435 3243 if (EmitSync & 1) {
duke@435 3244 mov (3, Rscratch) ;
duke@435 3245 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3246 cmp (SP, G0) ;
duke@435 3247 return ;
duke@435 3248 }
duke@435 3249
duke@435 3250 if (EmitSync & 2) {
duke@435 3251
duke@435 3252 // Fetch object's markword
duke@435 3253 ld_ptr(mark_addr, Rmark);
duke@435 3254
kvn@855 3255 if (try_bias) {
duke@435 3256 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
duke@435 3257 }
duke@435 3258
duke@435 3259 // Save Rbox in Rscratch to be used for the cas operation
duke@435 3260 mov(Rbox, Rscratch);
duke@435 3261
duke@435 3262 // set Rmark to markOop | markOopDesc::unlocked_value
duke@435 3263 or3(Rmark, markOopDesc::unlocked_value, Rmark);
duke@435 3264
duke@435 3265 // Initialize the box. (Must happen before we update the object mark!)
duke@435 3266 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3267
duke@435 3268 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
duke@435 3269 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 3270 casx_under_lock(mark_addr.base(), Rmark, Rscratch,
duke@435 3271 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
duke@435 3272
duke@435 3273 // if compare/exchange succeeded we found an unlocked object and we now have locked it
duke@435 3274 // hence we are done
duke@435 3275 cmp(Rmark, Rscratch);
duke@435 3276 #ifdef _LP64
duke@435 3277 sub(Rscratch, STACK_BIAS, Rscratch);
duke@435 3278 #endif
duke@435 3279 brx(Assembler::equal, false, Assembler::pt, done);
duke@435 3280 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
duke@435 3281
duke@435 3282 // we did not find an unlocked object so see if this is a recursive case
duke@435 3283 // sub(Rscratch, SP, Rscratch);
duke@435 3284 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
duke@435 3285 andcc(Rscratch, 0xfffff003, Rscratch);
duke@435 3286 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3287 bind (done) ;
duke@435 3288 return ;
duke@435 3289 }
duke@435 3290
duke@435 3291 Label Egress ;
duke@435 3292
duke@435 3293 if (EmitSync & 256) {
duke@435 3294 Label IsInflated ;
duke@435 3295
duke@435 3296 ld_ptr (mark_addr, Rmark); // fetch obj->mark
duke@435 3297 // Triage: biased, stack-locked, neutral, inflated
kvn@855 3298 if (try_bias) {
duke@435 3299 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
duke@435 3300 // Invariant: if control reaches this point in the emitted stream
duke@435 3301 // then Rmark has not been modified.
duke@435 3302 }
duke@435 3303
duke@435 3304 // Store mark into displaced mark field in the on-stack basic-lock "box"
duke@435 3305 // Critically, this must happen before the CAS
duke@435 3306 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
duke@435 3307 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3308 andcc (Rmark, 2, G0) ;
duke@435 3309 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
duke@435 3310 delayed() ->
duke@435 3311
duke@435 3312 // Try stack-lock acquisition.
duke@435 3313 // Beware: the 1st instruction is in a delay slot
duke@435 3314 mov (Rbox, Rscratch);
duke@435 3315 or3 (Rmark, markOopDesc::unlocked_value, Rmark);
duke@435 3316 assert (mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 3317 casn (mark_addr.base(), Rmark, Rscratch) ;
duke@435 3318 cmp (Rmark, Rscratch);
duke@435 3319 brx (Assembler::equal, false, Assembler::pt, done);
duke@435 3320 delayed()->sub(Rscratch, SP, Rscratch);
duke@435 3321
duke@435 3322 // Stack-lock attempt failed - check for recursive stack-lock.
duke@435 3323 // See the comments below about how we might remove this case.
duke@435 3324 #ifdef _LP64
duke@435 3325 sub (Rscratch, STACK_BIAS, Rscratch);
duke@435 3326 #endif
duke@435 3327 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
duke@435 3328 andcc (Rscratch, 0xfffff003, Rscratch);
duke@435 3329 br (Assembler::always, false, Assembler::pt, done) ;
duke@435 3330 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3331
duke@435 3332 bind (IsInflated) ;
duke@435 3333 if (EmitSync & 64) {
duke@435 3334 // If m->owner != null goto IsLocked
duke@435 3335 // Pessimistic form: Test-and-CAS vs CAS
duke@435 3336 // The optimistic form avoids RTS->RTO cache line upgrades.
duke@435 3337 ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
duke@435 3338 andcc (Rscratch, Rscratch, G0) ;
duke@435 3339 brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435 3340 delayed()->nop() ;
duke@435 3341 // m->owner == null : it's unlocked.
duke@435 3342 }
duke@435 3343
duke@435 3344 // Try to CAS m->owner from null to Self
duke@435 3345 // Invariant: if we acquire the lock then _recursions should be 0.
duke@435 3346 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
duke@435 3347 mov (G2_thread, Rscratch) ;
duke@435 3348 casn (Rmark, G0, Rscratch) ;
duke@435 3349 cmp (Rscratch, G0) ;
duke@435 3350 // Intentional fall-through into done
duke@435 3351 } else {
duke@435 3352 // Aggressively avoid the Store-before-CAS penalty
duke@435 3353 // Defer the store into box->dhw until after the CAS
duke@435 3354 Label IsInflated, Recursive ;
duke@435 3355
duke@435 3356 // Anticipate CAS -- Avoid RTS->RTO upgrade
duke@435 3357 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
duke@435 3358
duke@435 3359 ld_ptr (mark_addr, Rmark); // fetch obj->mark
duke@435 3360 // Triage: biased, stack-locked, neutral, inflated
duke@435 3361
kvn@855 3362 if (try_bias) {
duke@435 3363 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
duke@435 3364 // Invariant: if control reaches this point in the emitted stream
duke@435 3365 // then Rmark has not been modified.
duke@435 3366 }
duke@435 3367 andcc (Rmark, 2, G0) ;
duke@435 3368 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
duke@435 3369 delayed()-> // Beware - dangling delay-slot
duke@435 3370
duke@435 3371 // Try stack-lock acquisition.
duke@435 3372 // Transiently install BUSY (0) encoding in the mark word.
duke@435 3373 // if the CAS of 0 into the mark was successful then we execute:
duke@435 3374 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
duke@435 3375 // ST obj->mark = box -- overwrite transient 0 value
duke@435 3376 // This presumes TSO, of course.
duke@435 3377
duke@435 3378 mov (0, Rscratch) ;
duke@435 3379 or3 (Rmark, markOopDesc::unlocked_value, Rmark);
duke@435 3380 assert (mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 3381 casn (mark_addr.base(), Rmark, Rscratch) ;
duke@435 3382 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
duke@435 3383 cmp (Rscratch, Rmark) ;
duke@435 3384 brx (Assembler::notZero, false, Assembler::pn, Recursive) ;
duke@435 3385 delayed() ->
duke@435 3386 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3387 if (counters != NULL) {
duke@435 3388 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
duke@435 3389 }
duke@435 3390 br (Assembler::always, false, Assembler::pt, done);
duke@435 3391 delayed() ->
duke@435 3392 st_ptr (Rbox, mark_addr) ;
duke@435 3393
duke@435 3394 bind (Recursive) ;
duke@435 3395 // Stack-lock attempt failed - check for recursive stack-lock.
duke@435 3396 // Tests show that we can remove the recursive case with no impact
duke@435 3397 // on refworkload 0.83. If we need to reduce the size of the code
duke@435 3398 // emitted by compiler_lock_object() the recursive case is perfect
duke@435 3399 // candidate.
duke@435 3400 //
duke@435 3401 // A more extreme idea is to always inflate on stack-lock recursion.
duke@435 3402 // This lets us eliminate the recursive checks in compiler_lock_object
duke@435 3403 // and compiler_unlock_object and the (box->dhw == 0) encoding.
duke@435 3404 // A brief experiment - requiring changes to synchronizer.cpp and the
duke@435 3405 // interpreter - showed a performance *increase*. In the same experiment I eliminated
duke@435 3406 // the fast-path stack-lock code from the interpreter and always passed
duke@435 3407 // control to the "slow" operators in synchronizer.cpp.
duke@435 3408
duke@435 3409 // RScratch contains the fetched obj->mark value from the failed CASN.
duke@435 3410 #ifdef _LP64
duke@435 3411 sub (Rscratch, STACK_BIAS, Rscratch);
duke@435 3412 #endif
duke@435 3413 sub(Rscratch, SP, Rscratch);
duke@435 3414 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
duke@435 3415 andcc (Rscratch, 0xfffff003, Rscratch);
duke@435 3416 if (counters != NULL) {
duke@435 3417 // Accounting needs the Rscratch register
duke@435 3418 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3419 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
duke@435 3420 br (Assembler::always, false, Assembler::pt, done) ;
duke@435 3421 delayed()->nop() ;
duke@435 3422 } else {
duke@435 3423 br (Assembler::always, false, Assembler::pt, done) ;
duke@435 3424 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3425 }
duke@435 3426
duke@435 3427 bind (IsInflated) ;
duke@435 3428 if (EmitSync & 64) {
duke@435 3429 // If m->owner != null goto IsLocked
duke@435 3430 // Test-and-CAS vs CAS
duke@435 3431 // Pessimistic form avoids futile (doomed) CAS attempts
duke@435 3432 // The optimistic form avoids RTS->RTO cache line upgrades.
duke@435 3433 ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
duke@435 3434 andcc (Rscratch, Rscratch, G0) ;
duke@435 3435 brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435 3436 delayed()->nop() ;
duke@435 3437 // m->owner == null : it's unlocked.
duke@435 3438 }
duke@435 3439
duke@435 3440 // Try to CAS m->owner from null to Self
duke@435 3441 // Invariant: if we acquire the lock then _recursions should be 0.
duke@435 3442 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
duke@435 3443 mov (G2_thread, Rscratch) ;
duke@435 3444 casn (Rmark, G0, Rscratch) ;
duke@435 3445 cmp (Rscratch, G0) ;
duke@435 3446 // ST box->displaced_header = NonZero.
duke@435 3447 // Any non-zero value suffices:
duke@435 3448 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
duke@435 3449 st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435 3450 // Intentional fall-through into done
duke@435 3451 }
duke@435 3452
duke@435 3453 bind (done) ;
duke@435 3454 }
duke@435 3455
kvn@855 3456 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
kvn@855 3457 Register Rbox, Register Rscratch,
kvn@855 3458 bool try_bias) {
duke@435 3459 Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
duke@435 3460
duke@435 3461 Label done ;
duke@435 3462
duke@435 3463 if (EmitSync & 4) {
duke@435 3464 cmp (SP, G0) ;
duke@435 3465 return ;
duke@435 3466 }
duke@435 3467
duke@435 3468 if (EmitSync & 8) {
kvn@855 3469 if (try_bias) {
duke@435 3470 biased_locking_exit(mark_addr, Rscratch, done);
duke@435 3471 }
duke@435 3472
duke@435 3473 // Test first if it is a fast recursive unlock
duke@435 3474 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
duke@435 3475 cmp(Rmark, G0);
duke@435 3476 brx(Assembler::equal, false, Assembler::pt, done);
duke@435 3477 delayed()->nop();
duke@435 3478
duke@435 3479 // Check if it is still a lightweight lock; this is true if we see
duke@435 3480 // the stack address of the basicLock in the markOop of the object
duke@435 3481 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 3482 casx_under_lock(mark_addr.base(), Rbox, Rmark,
duke@435 3483 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
duke@435 3484 br (Assembler::always, false, Assembler::pt, done);
duke@435 3485 delayed()->cmp(Rbox, Rmark);
duke@435 3486 bind (done) ;
duke@435 3487 return ;
duke@435 3488 }
duke@435 3489
duke@435 3490 // Beware ... If the aggregate size of the code emitted by CLO and CUO
duke@435 3491 // is too large, performance rolls abruptly off a cliff.
duke@435 3492 // This could be related to inlining policies, code cache management, or
duke@435 3493 // I$ effects.
duke@435 3494 Label LStacked ;
duke@435 3495
kvn@855 3496 if (try_bias) {
duke@435 3497 // TODO: eliminate redundant LDs of obj->mark
duke@435 3498 biased_locking_exit(mark_addr, Rscratch, done);
duke@435 3499 }
duke@435 3500
duke@435 3501 ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ;
duke@435 3502 ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
duke@435 3503 andcc (Rscratch, Rscratch, G0);
duke@435 3504 brx (Assembler::zero, false, Assembler::pn, done);
duke@435 3505 delayed()-> nop() ; // consider: relocate fetch of mark, above, into this DS
duke@435 3506 andcc (Rmark, 2, G0) ;
duke@435 3507 brx (Assembler::zero, false, Assembler::pt, LStacked) ;
duke@435 3508 delayed()-> nop() ;
duke@435 3509
duke@435 3510 // It's inflated
duke@435 3511 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
duke@435 3512 // the ST of 0 into _owner which releases the lock. This prevents loads
duke@435 3513 // and stores within the critical section from reordering (floating)
duke@435 3514 // past the store that releases the lock. But TSO is a strong memory model
duke@435 3515 // and that particular flavor of barrier is a noop, so we can safely elide it.
duke@435 3516 // Note that we use 1-0 locking by default for the inflated case. We
duke@435 3517 // close the resultant (and rare) race by having contended threads in
duke@435 3518 // monitorenter periodically poll _owner.
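// (This is the 1-0 exit: the owner field is simply stored to zero below; an
//  explicit successor wakeup is attempted only in the (EmitSync & 65536)
//  variant via LSucc.)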
duke@435 3519 ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
duke@435 3520 ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ;
duke@435 3521 xor3 (Rscratch, G2_thread, Rscratch) ;
duke@435 3522 orcc (Rbox, Rscratch, Rbox) ;
duke@435 3523 brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435 3524 delayed()->
duke@435 3525 ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ;
duke@435 3526 ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ;
duke@435 3527 orcc (Rbox, Rscratch, G0) ;
duke@435 3528 if (EmitSync & 65536) {
duke@435 3529 Label LSucc ;
duke@435 3530 brx (Assembler::notZero, false, Assembler::pn, LSucc) ;
duke@435 3531 delayed()->nop() ;
duke@435 3532 br (Assembler::always, false, Assembler::pt, done) ;
duke@435 3533 delayed()->
duke@435 3534 st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@435 3535
duke@435 3536 bind (LSucc) ;
duke@435 3537 st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@435 3538 if (os::is_MP()) { membar (StoreLoad) ; }
duke@435 3539 ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ;
duke@435 3540 andcc (Rscratch, Rscratch, G0) ;
duke@435 3541 brx (Assembler::notZero, false, Assembler::pt, done) ;
duke@435 3542 delayed()-> andcc (G0, G0, G0) ;
duke@435 3543 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
duke@435 3544 mov (G2_thread, Rscratch) ;
duke@435 3545 casn (Rmark, G0, Rscratch) ;
duke@435 3546 cmp (Rscratch, G0) ;
duke@435 3547 // invert icc.zf and goto done
duke@435 3548 brx (Assembler::notZero, false, Assembler::pt, done) ;
duke@435 3549 delayed() -> cmp (G0, G0) ;
duke@435 3550 br (Assembler::always, false, Assembler::pt, done);
duke@435 3551 delayed() -> cmp (G0, 1) ;
duke@435 3552 } else {
duke@435 3553 brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435 3554 delayed()->nop() ;
duke@435 3555 br (Assembler::always, false, Assembler::pt, done) ;
duke@435 3556 delayed()->
duke@435 3557 st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@435 3558 }
duke@435 3559
duke@435 3560 bind (LStacked) ;
duke@435 3561 // Consider: we could replace the expensive CAS in the exit
duke@435 3562 // path with a simple ST of the displaced mark value fetched from
duke@435 3563 // the on-stack basiclock box. That admits a race where a thread T2
duke@435 3564 // in the slow lock path -- inflating with monitor M -- could race a
duke@435 3565 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
duke@435 3566 // More precisely T1 in the stack-lock unlock path could "stomp" the
duke@435 3567 // inflated mark value M installed by T2, resulting in an orphan
duke@435 3568 // object monitor M and T2 becoming stranded. We can remedy that situation
duke@435 3569 // by having T2 periodically poll the object's mark word using timed wait
duke@435 3570 // operations. If T2 discovers that a stomp has occurred it vacates
duke@435 3571 // the monitor M and wakes any other threads stranded on the now-orphan M.
duke@435 3572 // In addition the monitor scavenger, which performs deflation,
duke@435 3573 // would also need to check for orphan monitors and stranded threads.
duke@435 3574 //
duke@435 3575 // Finally, inflation is also used when T2 needs to assign a hashCode
duke@435 3576 // to O and O is stack-locked by T1. The "stomp" race could cause
duke@435 3577 // an assigned hashCode value to be lost. We can avoid that condition
duke@435 3578 // and provide the necessary hashCode stability invariants by ensuring
duke@435 3579 // that hashCode generation is idempotent between copying GCs.
duke@435 3580 // For example we could compute the hashCode of an object O as
duke@435 3581 // O's heap address XOR some high quality RNG value that is refreshed
duke@435 3582 // at GC-time. The monitor scavenger would install the hashCode
duke@435 3583 // found in any orphan monitors. Again, the mechanism admits a
duke@435 3584 // lost-update "stomp" WAW race but detects and recovers as needed.
duke@435 3585 //
duke@435 3586 // A prototype implementation showed excellent results, although
duke@435 3587 // the scavenger and timeout code was rather involved.
duke@435 3588
duke@435 3589 casn (mark_addr.base(), Rbox, Rscratch) ;
duke@435 3590 cmp (Rbox, Rscratch);
duke@435 3591 // Intentional fall through into done ...
duke@435 3592
duke@435 3593 bind (done) ;
duke@435 3594 }
duke@435 3595
duke@435 3596
duke@435 3597
duke@435 3598 void MacroAssembler::print_CPU_state() {
duke@435 3599 // %%%%% need to implement this
duke@435 3600 }
duke@435 3601
duke@435 3602 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
duke@435 3603 // %%%%% need to implement this
duke@435 3604 }
duke@435 3605
duke@435 3606 void MacroAssembler::push_IU_state() {
duke@435 3607 // %%%%% need to implement this
duke@435 3608 }
duke@435 3609
duke@435 3610
duke@435 3611 void MacroAssembler::pop_IU_state() {
duke@435 3612 // %%%%% need to implement this
duke@435 3613 }
duke@435 3614
duke@435 3615
duke@435 3616 void MacroAssembler::push_FPU_state() {
duke@435 3617 // %%%%% need to implement this
duke@435 3618 }
duke@435 3619
duke@435 3620
duke@435 3621 void MacroAssembler::pop_FPU_state() {
duke@435 3622 // %%%%% need to implement this
duke@435 3623 }
duke@435 3624
duke@435 3625
duke@435 3626 void MacroAssembler::push_CPU_state() {
duke@435 3627 // %%%%% need to implement this
duke@435 3628 }
duke@435 3629
duke@435 3630
duke@435 3631 void MacroAssembler::pop_CPU_state() {
duke@435 3632 // %%%%% need to implement this
duke@435 3633 }
duke@435 3634
duke@435 3635
duke@435 3636
duke@435 3637 void MacroAssembler::verify_tlab() {
duke@435 3638 #ifdef ASSERT
duke@435 3639 if (UseTLAB && VerifyOops) {
duke@435 3640 Label next, next2, ok;
duke@435 3641 Register t1 = L0;
duke@435 3642 Register t2 = L1;
duke@435 3643 Register t3 = L2;
duke@435 3644
duke@435 3645 save_frame(0);
duke@435 3646 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
duke@435 3647 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
duke@435 3648 or3(t1, t2, t3);
duke@435 3649 cmp(t1, t2);
duke@435 3650 br(Assembler::greaterEqual, false, Assembler::pn, next);
duke@435 3651 delayed()->nop();
duke@435 3652 stop("assert(top >= start)");
duke@435 3653 should_not_reach_here();
duke@435 3654
duke@435 3655 bind(next);
duke@435 3656 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
duke@435 3657 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
duke@435 3658 or3(t3, t2, t3);
duke@435 3659 cmp(t1, t2);
duke@435 3660 br(Assembler::lessEqual, false, Assembler::pn, next2);
duke@435 3661 delayed()->nop();
duke@435 3662 stop("assert(top <= end)");
duke@435 3663 should_not_reach_here();
duke@435 3664
duke@435 3665 bind(next2);
duke@435 3666 and3(t3, MinObjAlignmentInBytesMask, t3);
duke@435 3667 cmp(t3, 0);
duke@435 3668 br(Assembler::lessEqual, false, Assembler::pn, ok);
duke@435 3669 delayed()->nop();
duke@435 3670 stop("assert(aligned)");
duke@435 3671 should_not_reach_here();
duke@435 3672
duke@435 3673 bind(ok);
duke@435 3674 restore();
duke@435 3675 }
duke@435 3676 #endif
duke@435 3677 }
duke@435 3678
duke@435 3679
duke@435 3680 void MacroAssembler::eden_allocate(
duke@435 3681 Register obj, // result: pointer to object after successful allocation
duke@435 3682 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
duke@435 3683 int con_size_in_bytes, // object size in bytes if known at compile time
duke@435 3684 Register t1, // temp register
duke@435 3685 Register t2, // temp register
duke@435 3686 Label& slow_case // continuation point if fast allocation fails
duke@435 3687 ){
duke@435 3688 // make sure arguments make sense
duke@435 3689 assert_different_registers(obj, var_size_in_bytes, t1, t2);
duke@435 3690 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
duke@435 3691 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
duke@435 3692
ysr@777 3693 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
ysr@777 3694 // No allocation in the shared eden.
ysr@777 3695 br(Assembler::always, false, Assembler::pt, slow_case);
ysr@777 3696 delayed()->nop();
ysr@777 3697 } else {
ysr@777 3698 // get eden boundaries
ysr@777 3699 // note: we need both top & top_addr!
ysr@777 3700 const Register top_addr = t1;
ysr@777 3701 const Register end = t2;
ysr@777 3702
ysr@777 3703 CollectedHeap* ch = Universe::heap();
ysr@777 3704 set((intx)ch->top_addr(), top_addr);
ysr@777 3705 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
ysr@777 3706 ld_ptr(top_addr, delta, end);
ysr@777 3707 ld_ptr(top_addr, 0, obj);
ysr@777 3708
ysr@777 3709 // try to allocate
ysr@777 3710 Label retry;
ysr@777 3711 bind(retry);
duke@435 3712 #ifdef ASSERT
ysr@777 3713 // make sure eden top is properly aligned
ysr@777 3714 {
ysr@777 3715 Label L;
ysr@777 3716 btst(MinObjAlignmentInBytesMask, obj);
ysr@777 3717 br(Assembler::zero, false, Assembler::pt, L);
ysr@777 3718 delayed()->nop();
ysr@777 3719 stop("eden top is not properly aligned");
ysr@777 3720 bind(L);
ysr@777 3721 }
ysr@777 3722 #endif // ASSERT
ysr@777 3723 const Register free = end;
ysr@777 3724 sub(end, obj, free); // compute amount of free space
ysr@777 3725 if (var_size_in_bytes->is_valid()) {
ysr@777 3726 // size is unknown at compile time
ysr@777 3727 cmp(free, var_size_in_bytes);
ysr@777 3728 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
ysr@777 3729 delayed()->add(obj, var_size_in_bytes, end);
ysr@777 3730 } else {
ysr@777 3731 // size is known at compile time
ysr@777 3732 cmp(free, con_size_in_bytes);
ysr@777 3733 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
ysr@777 3734 delayed()->add(obj, con_size_in_bytes, end);
ysr@777 3735 }
ysr@777 3736 // Compare obj with the value at top_addr; if still equal, swap the value of
ysr@777 3737 // end with the value at top_addr. If not equal, read the value at top_addr
ysr@777 3738 // into end.
ysr@777 3739 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
ysr@777 3740 // if someone beat us on the allocation, try again, otherwise continue
ysr@777 3741 cmp(obj, end);
ysr@777 3742 brx(Assembler::notEqual, false, Assembler::pn, retry);
ysr@777 3743 delayed()->mov(end, obj); // nop if successful since obj == end
ysr@777 3744
ysr@777 3745 #ifdef ASSERT
ysr@777 3746 // make sure eden top is properly aligned
ysr@777 3747 {
ysr@777 3748 Label L;
ysr@777 3749 const Register top_addr = t1;
ysr@777 3750
ysr@777 3751 set((intx)ch->top_addr(), top_addr);
ysr@777 3752 ld_ptr(top_addr, 0, top_addr);
ysr@777 3753 btst(MinObjAlignmentInBytesMask, top_addr);
ysr@777 3754 br(Assembler::zero, false, Assembler::pt, L);
ysr@777 3755 delayed()->nop();
ysr@777 3756 stop("eden top is not properly aligned");
ysr@777 3757 bind(L);
ysr@777 3758 }
ysr@777 3759 #endif // ASSERT
duke@435 3760 }
duke@435 3761 }
duke@435 3762
duke@435 3763
duke@435 3764 void MacroAssembler::tlab_allocate(
duke@435 3765 Register obj, // result: pointer to object after successful allocation
duke@435 3766 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
duke@435 3767 int con_size_in_bytes, // object size in bytes if known at compile time
duke@435 3768 Register t1, // temp register
duke@435 3769 Label& slow_case // continuation point if fast allocation fails
duke@435 3770 ){
duke@435 3771 // make sure arguments make sense
duke@435 3772 assert_different_registers(obj, var_size_in_bytes, t1);
duke@435 3773 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
duke@435 3774 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
duke@435 3775
duke@435 3776 const Register free = t1;
duke@435 3777
duke@435 3778 verify_tlab();
duke@435 3779
duke@435 3780 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
duke@435 3781
duke@435 3782 // calculate amount of free space
duke@435 3783 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
duke@435 3784 sub(free, obj, free);
duke@435 3785
duke@435 3786 Label done;
duke@435 3787 if (var_size_in_bytes == noreg) {
duke@435 3788 cmp(free, con_size_in_bytes);
duke@435 3789 } else {
duke@435 3790 cmp(free, var_size_in_bytes);
duke@435 3791 }
duke@435 3792 br(Assembler::less, false, Assembler::pn, slow_case);
duke@435 3793 // calculate the new top pointer
duke@435 3794 if (var_size_in_bytes == noreg) {
duke@435 3795 delayed()->add(obj, con_size_in_bytes, free);
duke@435 3796 } else {
duke@435 3797 delayed()->add(obj, var_size_in_bytes, free);
duke@435 3798 }
duke@435 3799
duke@435 3800 bind(done);
duke@435 3801
duke@435 3802 #ifdef ASSERT
duke@435 3803 // make sure new free pointer is properly aligned
duke@435 3804 {
duke@435 3805 Label L;
duke@435 3806 btst(MinObjAlignmentInBytesMask, free);
duke@435 3807 br(Assembler::zero, false, Assembler::pt, L);
duke@435 3808 delayed()->nop();
duke@435 3809 stop("updated TLAB free is not properly aligned");
duke@435 3810 bind(L);
duke@435 3811 }
duke@435 3812 #endif // ASSERT
duke@435 3813
duke@435 3814 // update the tlab top pointer
duke@435 3815 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
duke@435 3816 verify_tlab();
duke@435 3817 }
duke@435 3818
duke@435 3819
duke@435 3820 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
duke@435 3821 Register top = O0;
duke@435 3822 Register t1 = G1;
duke@435 3823 Register t2 = G3;
duke@435 3824 Register t3 = O1;
duke@435 3825 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
duke@435 3826 Label do_refill, discard_tlab;
duke@435 3827
duke@435 3828 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
duke@435 3829 // No allocation in the shared eden.
duke@435 3830 br(Assembler::always, false, Assembler::pt, slow_case);
duke@435 3831 delayed()->nop();
duke@435 3832 }
duke@435 3833
duke@435 3834 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
duke@435 3835 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
duke@435 3836 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
duke@435 3837
duke@435 3838 // calculate amount of free space
duke@435 3839 sub(t1, top, t1);
duke@435 3840 srl_ptr(t1, LogHeapWordSize, t1);
duke@435 3841
duke@435 3842 // Retain tlab and allocate object in shared space if
duke@435 3843 // the amount free in the tlab is too large to discard.
duke@435 3844 cmp(t1, t2);
duke@435 3845 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
duke@435 3846
duke@435 3847 // increment waste limit to prevent getting stuck on this slow path
duke@435 3848 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
duke@435 3849 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
duke@435 3850 if (TLABStats) {
duke@435 3851 // increment number of slow_allocations
duke@435 3852 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
duke@435 3853 add(t2, 1, t2);
duke@435 3854 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
duke@435 3855 }
duke@435 3856 br(Assembler::always, false, Assembler::pt, try_eden);
duke@435 3857 delayed()->nop();
duke@435 3858
duke@435 3859 bind(discard_tlab);
duke@435 3860 if (TLABStats) {
duke@435 3861 // increment number of refills
duke@435 3862 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
duke@435 3863 add(t2, 1, t2);
duke@435 3864 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
duke@435 3865 // accumulate wastage
duke@435 3866 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
duke@435 3867 add(t2, t1, t2);
duke@435 3868 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
duke@435 3869 }
duke@435 3870
duke@435 3871 // if tlab is currently allocated (top or end != null) then
duke@435 3872 // fill [top, end + alignment_reserve) with array object
duke@435 3873 br_null(top, false, Assembler::pn, do_refill);
duke@435 3874 delayed()->nop();
duke@435 3875
duke@435 3876 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
duke@435 3877 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
duke@435 3878   // set up the length and klass so the old TLAB looks like an int[] filler
duke@435 3879 sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
duke@435 3880 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
duke@435 3881 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
duke@435 3882 st(t1, top, arrayOopDesc::length_offset_in_bytes());
coleenp@602 3883 set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
coleenp@602 3884 ld_ptr(t2, 0, t2);
coleenp@602 3885   // store the klass last: concurrent GCs assume the length is valid whenever
coleenp@602 3886   // the klass field is non-null.
coleenp@602 3887 store_klass(t2, top);
duke@435 3888 verify_oop(top);
duke@435 3889
duke@435 3890 // refill the tlab with an eden allocation
duke@435 3891 bind(do_refill);
duke@435 3892 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
duke@435 3893 sll_ptr(t1, LogHeapWordSize, t1);
duke@435 3894 // add object_size ??
duke@435 3895 eden_allocate(top, t1, 0, t2, t3, slow_case);
duke@435 3896
duke@435 3897 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
duke@435 3898 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
duke@435 3899 #ifdef ASSERT
duke@435 3900 // check that tlab_size (t1) is still valid
duke@435 3901 {
duke@435 3902 Label ok;
duke@435 3903 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
duke@435 3904 sll_ptr(t2, LogHeapWordSize, t2);
duke@435 3905 cmp(t1, t2);
duke@435 3906 br(Assembler::equal, false, Assembler::pt, ok);
duke@435 3907 delayed()->nop();
duke@435 3908 stop("assert(t1 == tlab_size)");
duke@435 3909 should_not_reach_here();
duke@435 3910
duke@435 3911 bind(ok);
duke@435 3912 }
duke@435 3913 #endif // ASSERT
duke@435 3914 add(top, t1, top); // t1 is tlab_size
duke@435 3915 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
duke@435 3916 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
duke@435 3917 verify_tlab();
duke@435 3918 br(Assembler::always, false, Assembler::pt, retry);
duke@435 3919 delayed()->nop();
duke@435 3920 }
duke@435 3921
duke@435 3922 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
duke@435 3923 switch (cond) {
duke@435 3924 // Note some conditions are synonyms for others
duke@435 3925 case Assembler::never: return Assembler::always;
duke@435 3926 case Assembler::zero: return Assembler::notZero;
duke@435 3927 case Assembler::lessEqual: return Assembler::greater;
duke@435 3928 case Assembler::less: return Assembler::greaterEqual;
duke@435 3929 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
duke@435 3930 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
duke@435 3931 case Assembler::negative: return Assembler::positive;
duke@435 3932 case Assembler::overflowSet: return Assembler::overflowClear;
duke@435 3933 case Assembler::always: return Assembler::never;
duke@435 3934 case Assembler::notZero: return Assembler::zero;
duke@435 3935 case Assembler::greater: return Assembler::lessEqual;
duke@435 3936 case Assembler::greaterEqual: return Assembler::less;
duke@435 3937 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
duke@435 3938 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
duke@435 3939 case Assembler::positive: return Assembler::negative;
duke@435 3940 case Assembler::overflowClear: return Assembler::overflowSet;
duke@435 3941 }
duke@435 3942
duke@435 3943 ShouldNotReachHere(); return Assembler::overflowClear;
duke@435 3944 }
duke@435 3945
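// cond_inc below simply branches around the counter bump using the negated
// condition; roughly: if (!cond) goto L; *counter_ptr += 1; L: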
duke@435 3946 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
duke@435 3947 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
duke@435 3948 Condition negated_cond = negate_condition(cond);
duke@435 3949 Label L;
duke@435 3950 brx(negated_cond, false, Assembler::pt, L);
duke@435 3951 delayed()->nop();
duke@435 3952 inc_counter(counter_ptr, Rtmp1, Rtmp2);
duke@435 3953 bind(L);
duke@435 3954 }
duke@435 3955
duke@435 3956 void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
duke@435 3957 Address counter_addr(Rtmp1, counter_ptr);
duke@435 3958 load_contents(counter_addr, Rtmp2);
duke@435 3959 inc(Rtmp2);
duke@435 3960 store_contents(Rtmp2, counter_addr);
duke@435 3961 }
duke@435 3962
duke@435 3963 SkipIfEqual::SkipIfEqual(
duke@435 3964 MacroAssembler* masm, Register temp, const bool* flag_addr,
duke@435 3965 Assembler::Condition condition) {
duke@435 3966 _masm = masm;
duke@435 3967 Address flag(temp, (address)flag_addr, relocInfo::none);
duke@435 3968 _masm->sethi(flag);
duke@435 3969 _masm->ldub(flag, temp);
duke@435 3970 _masm->tst(temp);
duke@435 3971 _masm->br(condition, false, Assembler::pt, _label);
duke@435 3972 _masm->delayed()->nop();
duke@435 3973 }
duke@435 3974
duke@435 3975 SkipIfEqual::~SkipIfEqual() {
duke@435 3976 _masm->bind(_label);
duke@435 3977 }
duke@435 3978
duke@435 3979
duke@435 3980 // Writes to successive stack pages until the requested size has been banged,
duke@435 3981 // to check for stack overflow plus shadow pages. Clobbers Rtsp and Rscratch.
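// In outline (pseudo-code only; the emitted instruction sequence is below):
//
//   tsp = SP;
//   do {
//     *(tsp - page_size + STACK_BIAS) = 0;      // touch one page
//     size -= page_size;
//     tsp  -= page_size;
//   } while (size > 0);
//   for (i = 0; i < StackShadowPages - 1; i++)
//     *(tsp - i*page_size + STACK_BIAS) = 0;    // touch the shadow pages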
duke@435 3982 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
duke@435 3983 Register Rscratch) {
duke@435 3984 // Use stack pointer in temp stack pointer
duke@435 3985 mov(SP, Rtsp);
duke@435 3986
duke@435 3987 // Bang stack for total size given plus stack shadow page size.
duke@435 3988 // Bang one page at a time because a large size can overflow yellow and
duke@435 3989 // red zones (the bang will fail but stack overflow handling can't tell that
duke@435 3990 // it was a stack overflow bang vs a regular segv).
duke@435 3991 int offset = os::vm_page_size();
duke@435 3992 Register Roffset = Rscratch;
duke@435 3993
duke@435 3994 Label loop;
duke@435 3995 bind(loop);
duke@435 3996 set((-offset)+STACK_BIAS, Rscratch);
duke@435 3997 st(G0, Rtsp, Rscratch);
duke@435 3998 set(offset, Roffset);
duke@435 3999 sub(Rsize, Roffset, Rsize);
duke@435 4000 cmp(Rsize, G0);
duke@435 4001 br(Assembler::greater, false, Assembler::pn, loop);
duke@435 4002 delayed()->sub(Rtsp, Roffset, Rtsp);
duke@435 4003
duke@435 4004 // Bang down shadow pages too.
duke@435 4005 // The -1 because we already subtracted 1 page.
duke@435 4006 for (int i = 0; i< StackShadowPages-1; i++) {
duke@435 4007 set((-i*offset)+STACK_BIAS, Rscratch);
duke@435 4008 st(G0, Rtsp, Rscratch);
duke@435 4009 }
duke@435 4010 }
coleenp@548 4011
ysr@777 4012 ///////////////////////////////////////////////////////////////////////////////////
ysr@777 4013 #ifndef SERIALGC
ysr@777 4014
ysr@777 4015 static uint num_stores = 0;
ysr@777 4016 static uint num_null_pre_stores = 0;
ysr@777 4017
ysr@777 4018 static void count_null_pre_vals(void* pre_val) {
ysr@777 4019 num_stores++;
ysr@777 4020 if (pre_val == NULL) num_null_pre_stores++;
ysr@777 4021 if ((num_stores % 1000000) == 0) {
ysr@777 4022 tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
ysr@777 4023 num_stores, num_null_pre_stores,
ysr@777 4024 100.0*(float)num_null_pre_stores/(float)num_stores);
ysr@777 4025 }
ysr@777 4026 }
ysr@777 4027
ysr@777 4028 static address satb_log_enqueue_with_frame = 0;
ysr@777 4029 static u_char* satb_log_enqueue_with_frame_end = 0;
ysr@777 4030
ysr@777 4031 static address satb_log_enqueue_frameless = 0;
ysr@777 4032 static u_char* satb_log_enqueue_frameless_end = 0;
ysr@777 4033
ysr@777 4034 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
ysr@777 4035
ysr@777 4036 // Calls to this are currently broken; making them work would take a fair
ysr@777 4037 // amount of effort.
ysr@777 4038 static void check_index(int ind) {
ysr@777 4039 assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
ysr@777 4040          "Invariants.");
ysr@777 4041 }
ysr@777 4042
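// The stub generated below implements, roughly, the SATB enqueue fast path
// (pseudo-code only; field names are illustrative, the code uses the
// PtrQueue byte-offset accessors):
//
//   index = thread->satb_mark_queue.index;       // byte offset into the buffer
//   if (index == 0) goto refill;                 // queue full, call the runtime
//   index -= oopSize;
//   *(thread->satb_mark_queue.buf + index) = pre_val;
//   thread->satb_mark_queue.index = index;
//
// The with_frame variant saves a register window first, so pre_val arrives in
// I0 rather than O0; the frameless variant returns with retl. The refill path
// saves the caller's scratch registers and calls
// SATBMarkQueueSet::handle_zero_index_for_thread, then retries.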
ysr@777 4043 static void generate_satb_log_enqueue(bool with_frame) {
ysr@777 4044 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
ysr@777 4045 CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
ysr@777 4046 MacroAssembler masm(&buf);
ysr@777 4047 address start = masm.pc();
ysr@777 4048 Register pre_val;
ysr@777 4049
ysr@777 4050 Label refill, restart;
ysr@777 4051 if (with_frame) {
ysr@777 4052 masm.save_frame(0);
ysr@777 4053 pre_val = I0; // Was O0 before the save.
ysr@777 4054 } else {
ysr@777 4055 pre_val = O0;
ysr@777 4056 }
ysr@777 4057 int satb_q_index_byte_offset =
ysr@777 4058 in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777 4059 PtrQueue::byte_offset_of_index());
ysr@777 4060 int satb_q_buf_byte_offset =
ysr@777 4061 in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777 4062 PtrQueue::byte_offset_of_buf());
ysr@777 4063 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
ysr@777 4064 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
ysr@777 4065 "check sizes in assembly below");
ysr@777 4066
ysr@777 4067 masm.bind(restart);
ysr@777 4068 masm.ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
ysr@777 4069
ysr@777 4070 masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
ysr@777 4071 // If the branch is taken, no harm in executing this in the delay slot.
ysr@777 4072 masm.delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
ysr@777 4073 masm.sub(L0, oopSize, L0);
ysr@777 4074
ysr@777 4075   masm.st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val
ysr@777 4076 if (!with_frame) {
ysr@777 4077 // Use return-from-leaf
ysr@777 4078 masm.retl();
ysr@777 4079 masm.delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
ysr@777 4080 } else {
ysr@777 4081 // Not delayed.
ysr@777 4082 masm.st_ptr(L0, G2_thread, satb_q_index_byte_offset);
ysr@777 4083 }
ysr@777 4084 if (with_frame) {
ysr@777 4085 masm.ret();
ysr@777 4086 masm.delayed()->restore();
ysr@777 4087 }
ysr@777 4088 masm.bind(refill);
ysr@777 4089
ysr@777 4090 address handle_zero =
ysr@777 4091 CAST_FROM_FN_PTR(address,
ysr@777 4092 &SATBMarkQueueSet::handle_zero_index_for_thread);
ysr@777 4093 // This should be rare enough that we can afford to save all the
ysr@777 4094 // scratch registers that the calling context might be using.
ysr@777 4095 masm.mov(G1_scratch, L0);
ysr@777 4096 masm.mov(G3_scratch, L1);
ysr@777 4097 masm.mov(G4, L2);
ysr@777 4098 // We need the value of O0 above (for the write into the buffer), so we
ysr@777 4099 // save and restore it.
ysr@777 4100 masm.mov(O0, L3);
ysr@777 4101 // Since the call will overwrite O7, we save and restore that, as well.
ysr@777 4102 masm.mov(O7, L4);
ysr@777 4103 masm.call_VM_leaf(L5, handle_zero, G2_thread);
ysr@777 4104 masm.mov(L0, G1_scratch);
ysr@777 4105 masm.mov(L1, G3_scratch);
ysr@777 4106 masm.mov(L2, G4);
ysr@777 4107 masm.mov(L3, O0);
ysr@777 4108 masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
ysr@777 4109 masm.delayed()->mov(L4, O7);
ysr@777 4110
ysr@777 4111 if (with_frame) {
ysr@777 4112 satb_log_enqueue_with_frame = start;
ysr@777 4113 satb_log_enqueue_with_frame_end = masm.pc();
ysr@777 4114 } else {
ysr@777 4115 satb_log_enqueue_frameless = start;
ysr@777 4116 satb_log_enqueue_frameless_end = masm.pc();
ysr@777 4117 }
ysr@777 4118 }
ysr@777 4119
ysr@777 4120 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
ysr@777 4121 if (with_frame) {
ysr@777 4122 if (satb_log_enqueue_with_frame == 0) {
ysr@777 4123 generate_satb_log_enqueue(with_frame);
ysr@777 4124 assert(satb_log_enqueue_with_frame != 0, "postcondition.");
ysr@777 4125 if (G1SATBPrintStubs) {
ysr@777 4126 tty->print_cr("Generated with-frame satb enqueue:");
ysr@777 4127 Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
ysr@777 4128 satb_log_enqueue_with_frame_end,
ysr@777 4129 tty);
ysr@777 4130 }
ysr@777 4131 }
ysr@777 4132 } else {
ysr@777 4133 if (satb_log_enqueue_frameless == 0) {
ysr@777 4134 generate_satb_log_enqueue(with_frame);
ysr@777 4135 assert(satb_log_enqueue_frameless != 0, "postcondition.");
ysr@777 4136 if (G1SATBPrintStubs) {
ysr@777 4137 tty->print_cr("Generated frameless satb enqueue:");
ysr@777 4138 Disassembler::decode((u_char*)satb_log_enqueue_frameless,
ysr@777 4139 satb_log_enqueue_frameless_end,
ysr@777 4140 tty);
ysr@777 4141 }
ysr@777 4142 }
ysr@777 4143 }
ysr@777 4144 }
ysr@777 4145
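// G1 pre-barrier (SATB), in outline:
//   1. If the thread's SATB queue is not active, do nothing.
//   2. Load the previous value of the field at obj+offset (or obj+index).
//   3. If that previous value is null, do nothing.
//   4. Otherwise enqueue it via one of the stubs generated above, choosing the
//      with-frame or frameless variant depending on where pre_val lives and
//      whether the O registers must be preserved.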
ysr@777 4146 void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
ysr@777 4147 assert(offset == 0 || index == noreg, "choose one");
ysr@777 4148
ysr@777 4149 if (G1DisablePreBarrier) return;
ysr@777 4150 // satb_log_barrier(tmp, obj, offset, preserve_o_regs);
ysr@777 4151 Label filtered;
ysr@777 4152 // satb_log_barrier_work0(tmp, filtered);
ysr@777 4153 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
ysr@777 4154 ld(G2,
ysr@777 4155 in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777 4156 PtrQueue::byte_offset_of_active()),
ysr@777 4157 tmp);
ysr@777 4158 } else {
ysr@777 4159 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
ysr@777 4160 "Assumption");
ysr@777 4161 ldsb(G2,
ysr@777 4162 in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777 4163 PtrQueue::byte_offset_of_active()),
ysr@777 4164 tmp);
ysr@777 4165 }
ysr@777 4166 // Check on whether to annul.
ysr@777 4167 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
ysr@777 4168 delayed() -> nop();
ysr@777 4169
ysr@777 4170 // satb_log_barrier_work1(tmp, offset);
ysr@777 4171 if (index == noreg) {
ysr@777 4172 if (Assembler::is_simm13(offset)) {
ysr@777 4173 ld_ptr(obj, offset, tmp);
ysr@777 4174 } else {
ysr@777 4175 set(offset, tmp);
ysr@777 4176 ld_ptr(obj, tmp, tmp);
ysr@777 4177 }
ysr@777 4178 } else {
ysr@777 4179 ld_ptr(obj, index, tmp);
ysr@777 4180 }
ysr@777 4181
ysr@777 4182 // satb_log_barrier_work2(obj, tmp, offset);
ysr@777 4183
ysr@777 4184 // satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
ysr@777 4185
ysr@777 4186 const Register pre_val = tmp;
ysr@777 4187
ysr@777 4188 if (G1SATBBarrierPrintNullPreVals) {
ysr@777 4189 save_frame(0);
ysr@777 4190 mov(pre_val, O0);
ysr@777 4191 // Save G-regs that target may use.
ysr@777 4192 mov(G1, L1);
ysr@777 4193 mov(G2, L2);
ysr@777 4194 mov(G3, L3);
ysr@777 4195 mov(G4, L4);
ysr@777 4196 mov(G5, L5);
ysr@777 4197 call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
ysr@777 4198 delayed()->nop();
ysr@777 4199 // Restore G-regs that target may have used.
ysr@777 4200 mov(L1, G1);
ysr@777 4201 mov(L2, G2);
ysr@777 4202 mov(L3, G3);
ysr@777 4203 mov(L4, G4);
ysr@777 4204 mov(L5, G5);
ysr@777 4205 restore(G0, G0, G0);
ysr@777 4206 }
ysr@777 4207
ysr@777 4208 // Check on whether to annul.
ysr@777 4209 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
ysr@777 4210 delayed() -> nop();
ysr@777 4211
ysr@777 4212 // OK, it's not filtered, so we'll need to call enqueue. In the normal
ysr@777 4213   // case, pre_val will be a scratch G-reg, but there are cases in which
ysr@777 4214 // it's an O-reg. In the first case, do a normal call. In the latter,
ysr@777 4215 // do a save here and call the frameless version.
ysr@777 4216
ysr@777 4217 guarantee(pre_val->is_global() || pre_val->is_out(),
ysr@777 4218 "Or we need to think harder.");
ysr@777 4219 if (pre_val->is_global() && !preserve_o_regs) {
ysr@777 4220 generate_satb_log_enqueue_if_necessary(true); // with frame.
ysr@777 4221 call(satb_log_enqueue_with_frame);
ysr@777 4222 delayed()->mov(pre_val, O0);
ysr@777 4223 } else {
ysr@777 4224     generate_satb_log_enqueue_if_necessary(false); // frameless.
ysr@777 4225 save_frame(0);
ysr@777 4226 call(satb_log_enqueue_frameless);
ysr@777 4227 delayed()->mov(pre_val->after_save(), O0);
ysr@777 4228 restore();
ysr@777 4229 }
ysr@777 4230
ysr@777 4231 bind(filtered);
ysr@777 4232 }
ysr@777 4233
ysr@777 4234 static jint num_ct_writes = 0;
ysr@777 4235 static jint num_ct_writes_filtered_in_hr = 0;
ysr@777 4236 static jint num_ct_writes_filtered_null = 0;
ysr@777 4237 static jint num_ct_writes_filtered_pop = 0;
ysr@777 4238 static G1CollectedHeap* g1 = NULL;
ysr@777 4239
ysr@777 4240 static Thread* count_ct_writes(void* filter_val, void* new_val) {
ysr@777 4241 Atomic::inc(&num_ct_writes);
ysr@777 4242 if (filter_val == NULL) {
ysr@777 4243 Atomic::inc(&num_ct_writes_filtered_in_hr);
ysr@777 4244 } else if (new_val == NULL) {
ysr@777 4245 Atomic::inc(&num_ct_writes_filtered_null);
ysr@777 4246 } else {
ysr@777 4247 if (g1 == NULL) {
ysr@777 4248 g1 = G1CollectedHeap::heap();
ysr@777 4249 }
ysr@777 4250 if ((HeapWord*)new_val < g1->popular_object_boundary()) {
ysr@777 4251 Atomic::inc(&num_ct_writes_filtered_pop);
ysr@777 4252 }
ysr@777 4253 }
ysr@777 4254 if ((num_ct_writes % 1000000) == 0) {
ysr@777 4255 jint num_ct_writes_filtered =
ysr@777 4256 num_ct_writes_filtered_in_hr +
ysr@777 4257 num_ct_writes_filtered_null +
ysr@777 4258 num_ct_writes_filtered_pop;
ysr@777 4259
ysr@777 4260 tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
ysr@777 4261 " (%5.2f%% intra-HR, %5.2f%% null, %5.2f%% popular).",
ysr@777 4262 num_ct_writes,
ysr@777 4263 100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
ysr@777 4264 100.0*(float)num_ct_writes_filtered_in_hr/
ysr@777 4265 (float)num_ct_writes,
ysr@777 4266 100.0*(float)num_ct_writes_filtered_null/
ysr@777 4267 (float)num_ct_writes,
ysr@777 4268 100.0*(float)num_ct_writes_filtered_pop/
ysr@777 4269 (float)num_ct_writes);
ysr@777 4270 }
ysr@777 4271 return Thread::current();
ysr@777 4272 }
ysr@777 4273
ysr@777 4274 static address dirty_card_log_enqueue = 0;
ysr@777 4275 static u_char* dirty_card_log_enqueue_end = 0;
ysr@777 4276
ysr@777 4277 // This stub assumes that O0 contains the object address.
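// In outline (pseudo-code only):
//   card = byte_map_base + (O0 >> card_shift);
//   if (*card != 0) {                      // not yet dirty (dirty == 0)
//     *card = 0;                           // dirty it
//     push the card address onto the thread's dirty card queue, refilling
//     the queue via DirtyCardQueueSet::handle_zero_index_for_thread when
//     its index reaches zero;
//   }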
ysr@777 4278 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
ysr@777 4279 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
ysr@777 4280 CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
ysr@777 4281 MacroAssembler masm(&buf);
ysr@777 4282 address start = masm.pc();
ysr@777 4283
ysr@777 4284 Label not_already_dirty, restart, refill;
ysr@777 4285
ysr@777 4286 #ifdef _LP64
ysr@777 4287 masm.srlx(O0, CardTableModRefBS::card_shift, O0);
ysr@777 4288 #else
ysr@777 4289 masm.srl(O0, CardTableModRefBS::card_shift, O0);
ysr@777 4290 #endif
ysr@777 4291 Address rs(O1, (address)byte_map_base);
ysr@777 4292 masm.load_address(rs); // O1 := <card table base>
ysr@777 4293 masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
ysr@777 4294
ysr@777 4295 masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
ysr@777 4296 O2, not_already_dirty);
ysr@777 4297   // Get O0 + O1 (the card address) into a reg by itself -- useful in the
ysr@777 4298   // take-the-branch case, harmless if not.
ysr@777 4299 masm.delayed()->add(O0, O1, O3);
ysr@777 4300
ysr@777 4301 // We didn't take the branch, so we're already dirty: return.
ysr@777 4302 // Use return-from-leaf
ysr@777 4303 masm.retl();
ysr@777 4304 masm.delayed()->nop();
ysr@777 4305
ysr@777 4306 // Not dirty.
ysr@777 4307 masm.bind(not_already_dirty);
ysr@777 4308 // First, dirty it.
ysr@777 4309 masm.stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
ysr@777 4310 int dirty_card_q_index_byte_offset =
ysr@777 4311 in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777 4312 PtrQueue::byte_offset_of_index());
ysr@777 4313 int dirty_card_q_buf_byte_offset =
ysr@777 4314 in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777 4315 PtrQueue::byte_offset_of_buf());
ysr@777 4316 masm.bind(restart);
ysr@777 4317 masm.ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
ysr@777 4318
ysr@777 4319 masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
ysr@777 4320 L0, refill);
ysr@777 4321 // If the branch is taken, no harm in executing this in the delay slot.
ysr@777 4322 masm.delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
ysr@777 4323 masm.sub(L0, oopSize, L0);
ysr@777 4324
ysr@777 4325   masm.st_ptr(O3, L1, L0);  // [_buf + index] := O3 (card address)
ysr@777 4326 // Use return-from-leaf
ysr@777 4327 masm.retl();
ysr@777 4328 masm.delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
ysr@777 4329
ysr@777 4330 masm.bind(refill);
ysr@777 4331 address handle_zero =
ysr@777 4332 CAST_FROM_FN_PTR(address,
ysr@777 4333 &DirtyCardQueueSet::handle_zero_index_for_thread);
ysr@777 4334 // This should be rare enough that we can afford to save all the
ysr@777 4335 // scratch registers that the calling context might be using.
ysr@777 4336 masm.mov(G1_scratch, L3);
ysr@777 4337 masm.mov(G3_scratch, L5);
ysr@777 4338 // We need the value of O3 above (for the write into the buffer), so we
ysr@777 4339 // save and restore it.
ysr@777 4340 masm.mov(O3, L6);
ysr@777 4341 // Since the call will overwrite O7, we save and restore that, as well.
ysr@777 4342 masm.mov(O7, L4);
ysr@777 4343
ysr@777 4344 masm.call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
ysr@777 4345 masm.mov(L3, G1_scratch);
ysr@777 4346 masm.mov(L5, G3_scratch);
ysr@777 4347 masm.mov(L6, O3);
ysr@777 4348 masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
ysr@777 4349 masm.delayed()->mov(L4, O7);
ysr@777 4350
ysr@777 4351 dirty_card_log_enqueue = start;
ysr@777 4352 dirty_card_log_enqueue_end = masm.pc();
ysr@777 4353 // XXX Should have a guarantee here about not going off the end!
ysr@777 4354 // Does it already do so? Do an experiment...
ysr@777 4355 }
ysr@777 4356
ysr@777 4357 static inline void
ysr@777 4358 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
ysr@777 4359 if (dirty_card_log_enqueue == 0) {
ysr@777 4360 generate_dirty_card_log_enqueue(byte_map_base);
ysr@777 4361 assert(dirty_card_log_enqueue != 0, "postcondition.");
ysr@777 4362 if (G1SATBPrintStubs) {
ysr@777 4363 tty->print_cr("Generated dirty_card enqueue:");
ysr@777 4364 Disassembler::decode((u_char*)dirty_card_log_enqueue,
ysr@777 4365 dirty_card_log_enqueue_end,
ysr@777 4366 tty);
ysr@777 4367 }
ysr@777 4368 }
ysr@777 4369 }
ysr@777 4370
ysr@777 4371
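// G1 post-barrier, in outline. Stores of constant null are skipped entirely.
//   1. Optionally filter out same-region stores: if
//      ((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0 the source and
//      destination lie in the same heap region and no card needs dirtying.
//   2. Either call the dirty-card enqueue stub generated above (queue-based
//      barrier), or dirty the card inline via the card table.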
ysr@777 4372 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
ysr@777 4373
ysr@777 4374 Label filtered;
ysr@777 4375 MacroAssembler* post_filter_masm = this;
ysr@777 4376
ysr@777 4377 if (new_val == G0) return;
ysr@777 4378 if (G1DisablePostBarrier) return;
ysr@777 4379
ysr@777 4380 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
ysr@777 4381 assert(bs->kind() == BarrierSet::G1SATBCT ||
ysr@777 4382 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
ysr@777 4383 if (G1RSBarrierRegionFilter) {
ysr@777 4384 xor3(store_addr, new_val, tmp);
ysr@777 4385 #ifdef _LP64
ysr@777 4386 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
ysr@777 4387 #else
ysr@777 4388 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
ysr@777 4389 #endif
ysr@777 4390 if (G1PrintCTFilterStats) {
ysr@777 4391 guarantee(tmp->is_global(), "Or stats won't work...");
ysr@777 4392 // This is a sleazy hack: I'm temporarily hijacking G2, which I
ysr@777 4393 // promise to restore.
ysr@777 4394 mov(new_val, G2);
ysr@777 4395 save_frame(0);
ysr@777 4396 mov(tmp, O0);
ysr@777 4397 mov(G2, O1);
ysr@777 4398 // Save G-regs that target may use.
ysr@777 4399 mov(G1, L1);
ysr@777 4400 mov(G2, L2);
ysr@777 4401 mov(G3, L3);
ysr@777 4402 mov(G4, L4);
ysr@777 4403 mov(G5, L5);
ysr@777 4404 call(CAST_FROM_FN_PTR(address, &count_ct_writes));
ysr@777 4405 delayed()->nop();
ysr@777 4406 mov(O0, G2);
ysr@777 4407 // Restore G-regs that target may have used.
ysr@777 4408 mov(L1, G1);
ysr@777 4409 mov(L3, G3);
ysr@777 4410 mov(L4, G4);
ysr@777 4411 mov(L5, G5);
ysr@777 4412 restore(G0, G0, G0);
ysr@777 4413 }
ysr@777 4414     // XXX Should I predict this taken or not?  Does it matter?
ysr@777 4415 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
ysr@777 4416 delayed()->nop();
ysr@777 4417 }
ysr@777 4418
ysr@777 4419 // Now we decide how to generate the card table write. If we're
ysr@777 4420 // enqueueing, we call out to a generated function. Otherwise, we do it
ysr@777 4421 // inline here.
ysr@777 4422
ysr@777 4423 if (G1RSBarrierUseQueue) {
ysr@777 4424 // If the "store_addr" register is an "in" or "local" register, move it to
ysr@777 4425 // a scratch reg so we can pass it as an argument.
ysr@777 4426 bool use_scr = !(store_addr->is_global() || store_addr->is_out());
ysr@777 4427 // Pick a scratch register different from "tmp".
ysr@777 4428 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
ysr@777 4429 // Make sure we use up the delay slot!
ysr@777 4430 if (use_scr) {
ysr@777 4431 post_filter_masm->mov(store_addr, scr);
ysr@777 4432 } else {
ysr@777 4433 post_filter_masm->nop();
ysr@777 4434 }
ysr@777 4435 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
ysr@777 4436 save_frame(0);
ysr@777 4437 call(dirty_card_log_enqueue);
ysr@777 4438 if (use_scr) {
ysr@777 4439 delayed()->mov(scr, O0);
ysr@777 4440 } else {
ysr@777 4441 delayed()->mov(store_addr->after_save(), O0);
ysr@777 4442 }
ysr@777 4443 restore();
ysr@777 4444
ysr@777 4445 } else {
ysr@777 4446
ysr@777 4447 #ifdef _LP64
ysr@777 4448 post_filter_masm->srlx(store_addr, CardTableModRefBS::card_shift, store_addr);
ysr@777 4449 #else
ysr@777 4450 post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr);
ysr@777 4451 #endif
ysr@777 4452 assert( tmp != store_addr, "need separate temp reg");
ysr@777 4453 Address rs(tmp, (address)bs->byte_map_base);
ysr@777 4454 load_address(rs);
ysr@777 4455 stb(G0, rs.base(), store_addr);
ysr@777 4456 }
ysr@777 4457
ysr@777 4458 bind(filtered);
ysr@777 4459
ysr@777 4460 }
ysr@777 4461
ysr@777 4462 #endif // SERIALGC
ysr@777 4463 ///////////////////////////////////////////////////////////////////////////////////
ysr@777 4464
ysr@777 4465 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
ysr@777 4466 // If we're writing constant NULL, we can skip the write barrier.
ysr@777 4467 if (new_val == G0) return;
ysr@777 4468 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
ysr@777 4469 assert(bs->kind() == BarrierSet::CardTableModRef ||
ysr@777 4470 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
ysr@777 4471 card_table_write(bs->byte_map_base, tmp, store_addr);
ysr@777 4472 }
ysr@777 4473
jrose@1057 4474 // Loading values by size and signed-ness
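// The switch below folds size and signedness into a single key: for signed
// loads the size is complemented (size ^ -1 == ~size), so e.g. 4 selects
// lduw and ~4 selects ldsw.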
jrose@1100 4475 void MacroAssembler::load_sized_value(Register s1, RegisterOrConstant s2, Register d,
jrose@1057 4476 int size_in_bytes, bool is_signed) {
jrose@1057 4477 switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
jrose@1057 4478   case ~8: // fall through
jrose@1057 4479 case 8: ld_long( s1, s2, d ); break;
jrose@1057 4480 case ~4: ldsw( s1, s2, d ); break;
jrose@1057 4481 case 4: lduw( s1, s2, d ); break;
jrose@1057 4482 case ~2: ldsh( s1, s2, d ); break;
jrose@1057 4483 case 2: lduh( s1, s2, d ); break;
jrose@1057 4484 case ~1: ldsb( s1, s2, d ); break;
jrose@1057 4485 case 1: ldub( s1, s2, d ); break;
jrose@1057 4486 default: ShouldNotReachHere();
jrose@1057 4487 }
jrose@1057 4488 }
jrose@1057 4489
jrose@1057 4490
jrose@1057 4491
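// With compressed oops the klass field holds a 32-bit narrow oop; load_klass
// and store_klass below encode/decode it with the heap-oop helpers further
// down in this file.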
kvn@599 4492 void MacroAssembler::load_klass(Register src_oop, Register klass) {
coleenp@548 4493 // The number of bytes in this code is used by
coleenp@548 4494 // MachCallDynamicJavaNode::ret_addr_offset()
coleenp@548 4495 // if this changes, change that.
coleenp@548 4496 if (UseCompressedOops) {
kvn@599 4497 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
kvn@599 4498 decode_heap_oop_not_null(klass);
coleenp@548 4499 } else {
kvn@599 4500 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
coleenp@548 4501 }
coleenp@548 4502 }
coleenp@548 4503
kvn@599 4504 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
coleenp@548 4505 if (UseCompressedOops) {
kvn@599 4506 assert(dst_oop != klass, "not enough registers");
kvn@599 4507 encode_heap_oop_not_null(klass);
coleenp@602 4508 st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
coleenp@548 4509 } else {
kvn@599 4510 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
kvn@559 4511 }
kvn@559 4512 }
kvn@559 4513
coleenp@602 4514 void MacroAssembler::store_klass_gap(Register s, Register d) {
coleenp@602 4515 if (UseCompressedOops) {
coleenp@602 4516 assert(s != d, "not enough registers");
coleenp@602 4517 st(s, d, oopDesc::klass_gap_offset_in_bytes());
coleenp@548 4518 }
coleenp@548 4519 }
coleenp@548 4520
coleenp@548 4521 void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
coleenp@548 4522 if (UseCompressedOops) {
coleenp@548 4523 lduw(s, d, offset);
coleenp@548 4524 decode_heap_oop(d);
coleenp@548 4525 } else {
coleenp@548 4526 ld_ptr(s, d, offset);
coleenp@548 4527 }
coleenp@548 4528 }
coleenp@548 4529
coleenp@548 4530 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
coleenp@548 4531 if (UseCompressedOops) {
coleenp@548 4532 lduw(s1, s2, d);
coleenp@548 4533 decode_heap_oop(d, d);
coleenp@548 4534 } else {
coleenp@548 4535 ld_ptr(s1, s2, d);
coleenp@548 4536 }
coleenp@548 4537 }
coleenp@548 4538
coleenp@548 4539 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
coleenp@548 4540 if (UseCompressedOops) {
coleenp@548 4541 lduw(s1, simm13a, d);
coleenp@548 4542 decode_heap_oop(d, d);
coleenp@548 4543 } else {
coleenp@548 4544 ld_ptr(s1, simm13a, d);
coleenp@548 4545 }
coleenp@548 4546 }
coleenp@548 4547
coleenp@548 4548 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
coleenp@548 4549 if (UseCompressedOops) {
coleenp@548 4550 assert(s1 != d && s2 != d, "not enough registers");
coleenp@548 4551 encode_heap_oop(d);
coleenp@548 4552 st(d, s1, s2);
coleenp@548 4553 } else {
coleenp@548 4554 st_ptr(d, s1, s2);
coleenp@548 4555 }
coleenp@548 4556 }
coleenp@548 4557
coleenp@548 4558 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
coleenp@548 4559 if (UseCompressedOops) {
coleenp@548 4560 assert(s1 != d, "not enough registers");
coleenp@548 4561 encode_heap_oop(d);
coleenp@548 4562 st(d, s1, simm13a);
coleenp@548 4563 } else {
coleenp@548 4564 st_ptr(d, s1, simm13a);
coleenp@548 4565 }
coleenp@548 4566 }
coleenp@548 4567
coleenp@548 4568 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
coleenp@548 4569 if (UseCompressedOops) {
coleenp@548 4570 assert(a.base() != d, "not enough registers");
coleenp@548 4571 encode_heap_oop(d);
coleenp@548 4572 st(d, a, offset);
coleenp@548 4573 } else {
coleenp@548 4574 st_ptr(d, a, offset);
coleenp@548 4575 }
coleenp@548 4576 }
coleenp@548 4577
coleenp@548 4578
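// Compressed-oop encoding, in outline:
//   narrow = (oop - narrow_oop_base) >> LogMinObjAlignmentInBytes,
// with null mapping to null; when the base is null the subtraction (and the
// corresponding addition on decode) is skipped. Decoding is the inverse.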
coleenp@548 4579 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
coleenp@548 4580 assert (UseCompressedOops, "must be compressed");
kvn@1077 4581 assert (Universe::heap() != NULL, "java heap should be initialized");
kvn@1077 4582 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
coleenp@613 4583 verify_oop(src);
kvn@1077 4584 if (Universe::narrow_oop_base() == NULL) {
kvn@1077 4585 srlx(src, LogMinObjAlignmentInBytes, dst);
kvn@1077 4586 return;
kvn@1077 4587 }
coleenp@548 4588 Label done;
coleenp@548 4589 if (src == dst) {
coleenp@548 4590 // optimize for frequent case src == dst
coleenp@548 4591 bpr(rc_nz, true, Assembler::pt, src, done);
coleenp@548 4592     delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
coleenp@548 4593 bind(done);
coleenp@548 4594 srlx(src, LogMinObjAlignmentInBytes, dst);
coleenp@548 4595 } else {
coleenp@548 4596 bpr(rc_z, false, Assembler::pn, src, done);
coleenp@548 4597 delayed() -> mov(G0, dst);
coleenp@548 4598     // could be moved before the branch and annul the delay slot,
coleenp@548 4599     // but that may add unneeded work when decoding null
coleenp@548 4600 sub(src, G6_heapbase, dst);
coleenp@548 4601 srlx(dst, LogMinObjAlignmentInBytes, dst);
coleenp@548 4602 bind(done);
coleenp@548 4603 }
coleenp@548 4604 }
coleenp@548 4605
coleenp@548 4606
coleenp@548 4607 void MacroAssembler::encode_heap_oop_not_null(Register r) {
coleenp@548 4608 assert (UseCompressedOops, "must be compressed");
kvn@1077 4609 assert (Universe::heap() != NULL, "java heap should be initialized");
kvn@1077 4610 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
coleenp@613 4611 verify_oop(r);
kvn@1077 4612 if (Universe::narrow_oop_base() != NULL)
kvn@1077 4613 sub(r, G6_heapbase, r);
coleenp@548 4614 srlx(r, LogMinObjAlignmentInBytes, r);
coleenp@548 4615 }
coleenp@548 4616
kvn@559 4617 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
kvn@559 4618 assert (UseCompressedOops, "must be compressed");
kvn@1077 4619 assert (Universe::heap() != NULL, "java heap should be initialized");
kvn@1077 4620 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
coleenp@613 4621 verify_oop(src);
kvn@1077 4622 if (Universe::narrow_oop_base() == NULL) {
kvn@1077 4623 srlx(src, LogMinObjAlignmentInBytes, dst);
kvn@1077 4624 } else {
kvn@1077 4625 sub(src, G6_heapbase, dst);
kvn@1077 4626 srlx(dst, LogMinObjAlignmentInBytes, dst);
kvn@1077 4627 }
kvn@559 4628 }
kvn@559 4629
coleenp@548 4630 // Same algorithm as oops.inline.hpp decode_heap_oop.
coleenp@548 4631 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
coleenp@548 4632 assert (UseCompressedOops, "must be compressed");
kvn@1077 4633 assert (Universe::heap() != NULL, "java heap should be initialized");
kvn@1077 4634 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
coleenp@548 4635 sllx(src, LogMinObjAlignmentInBytes, dst);
kvn@1077 4636 if (Universe::narrow_oop_base() != NULL) {
kvn@1077 4637 Label done;
kvn@1077 4638 bpr(rc_nz, true, Assembler::pt, dst, done);
kvn@1077 4639     delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
kvn@1077 4640 bind(done);
kvn@1077 4641 }
coleenp@613 4642 verify_oop(dst);
coleenp@548 4643 }
coleenp@548 4644
coleenp@548 4645 void MacroAssembler::decode_heap_oop_not_null(Register r) {
coleenp@548 4646 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
coleenp@548 4647 // pd_code_size_limit.
coleenp@613 4648 // Also do not verify_oop as this is called by verify_oop.
coleenp@548 4649 assert (UseCompressedOops, "must be compressed");
kvn@1077 4650 assert (Universe::heap() != NULL, "java heap should be initialized");
kvn@1077 4651 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
coleenp@548 4652 sllx(r, LogMinObjAlignmentInBytes, r);
kvn@1077 4653 if (Universe::narrow_oop_base() != NULL)
kvn@1077 4654 add(r, G6_heapbase, r);
coleenp@548 4655 }
coleenp@548 4656
kvn@559 4657 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
kvn@559 4658 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
kvn@559 4659 // pd_code_size_limit.
coleenp@613 4660 // Also do not verify_oop as this is called by verify_oop.
kvn@559 4661 assert (UseCompressedOops, "must be compressed");
kvn@1077 4662 assert (Universe::heap() != NULL, "java heap should be initialized");
kvn@1077 4663 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
kvn@559 4664 sllx(src, LogMinObjAlignmentInBytes, dst);
kvn@1077 4665 if (Universe::narrow_oop_base() != NULL)
kvn@1077 4666 add(dst, G6_heapbase, dst);
kvn@559 4667 }
kvn@559 4668
coleenp@548 4669 void MacroAssembler::reinit_heapbase() {
coleenp@548 4670 if (UseCompressedOops) {
coleenp@548 4671 // call indirectly to solve generation ordering problem
kvn@1077 4672 Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr());
coleenp@548 4673 load_ptr_contents(base, G6_heapbase);
coleenp@548 4674 }
coleenp@548 4675 }
