src/cpu/sparc/vm/c1_Runtime1_sparc.cpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2603:1b4e6a5d98e0
child        3037:3d42f82cd811
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
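
A minimal C++ sketch of the SATB logging scheme the summary describes, for orientation only: it is not HotSpot's implementation, and every name in it (SATBBuffer, Thread, read_referent, marking_active) is invented for illustration. The real barrier is emitted as compiled code, for example by the g1_pre_barrier_slow_id stub later in this file.

#include <cstddef>
#include <vector>

struct oopDesc;                     // opaque heap object
typedef oopDesc* oop;

// Per-thread SATB buffer: objects recorded here are treated as live by the
// concurrent marker, even if they were only weakly reachable at mark start.
struct SATBBuffer {
    std::vector<oop> entries;
    void log(oop obj) { entries.push_back(obj); }
};

struct Thread {
    SATBBuffer satb_queue;
};

static bool marking_active = false; // set by the GC when concurrent marking starts

// Conceptual read barrier for java.lang.ref.Reference::referent: when a
// non-null referent is read while marking is active, log it so the marker
// sees it even if it is later re-attached to the strongly reachable graph.
oop read_referent(Thread* self, oop* referent_field) {
    oop referent = *referent_field;
    if (marking_active && referent != NULL) {
        self->satb_queue.log(referent); // the SATB pre-barrier on the read
    }
    return referent;
}

The g1_pre_barrier_slow_id stub below does the analogous work at the machine level: it stores the value into the thread's SATB queue and calls SATBMarkQueueSet::handle_zero_index_for_thread when the queue needs refilling.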

duke@435 1 /*
phh@2423 2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "c1/c1_Defs.hpp"
stefank@2314 27 #include "c1/c1_MacroAssembler.hpp"
stefank@2314 28 #include "c1/c1_Runtime1.hpp"
stefank@2314 29 #include "interpreter/interpreter.hpp"
stefank@2314 30 #include "nativeInst_sparc.hpp"
stefank@2314 31 #include "oops/compiledICHolderOop.hpp"
stefank@2314 32 #include "oops/oop.inline.hpp"
stefank@2314 33 #include "prims/jvmtiExport.hpp"
stefank@2314 34 #include "register_sparc.hpp"
stefank@2314 35 #include "runtime/sharedRuntime.hpp"
stefank@2314 36 #include "runtime/signature.hpp"
stefank@2314 37 #include "runtime/vframeArray.hpp"
stefank@2314 38 #include "vmreg_sparc.inline.hpp"
duke@435 39
duke@435 40 // Implementation of StubAssembler
duke@435 41
duke@435 42 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry_point, int number_of_arguments) {
duke@435 43 // For SPARC, changing the number of arguments doesn't change
duke@435 44 // anything about the frame size, so we'll always lie and claim that
duke@435 45 // we are only passing 1 argument.
duke@435 46 set_num_rt_args(1);
duke@435 47
duke@435 48 assert_not_delayed();
duke@435 49 // bang stack before going to runtime
duke@435 50 set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
duke@435 51 st(G0, SP, G3_scratch);
duke@435 52
duke@435 53 // debugging support
duke@435 54 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
duke@435 55
duke@435 56 set_last_Java_frame(SP, noreg);
duke@435 57 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
duke@435 58 save_thread(L7_thread_cache);
duke@435 59 // do the call
duke@435 60 call(entry_point, relocInfo::runtime_call_type);
duke@435 61 if (!VerifyThread) {
duke@435 62 delayed()->mov(G2_thread, O0); // pass thread as first argument
duke@435 63 } else {
duke@435 64 delayed()->nop(); // (thread already passed)
duke@435 65 }
duke@435 66 int call_offset = offset(); // offset of return address
duke@435 67 restore_thread(L7_thread_cache);
duke@435 68 reset_last_Java_frame();
duke@435 69
duke@435 70 // check for pending exceptions
duke@435 71 { Label L;
twisti@1162 72 Address exception_addr(G2_thread, Thread::pending_exception_offset());
duke@435 73 ld_ptr(exception_addr, Gtemp);
duke@435 74 br_null(Gtemp, false, pt, L);
duke@435 75 delayed()->nop();
twisti@1162 76 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
duke@435 77 st_ptr(G0, vm_result_addr);
twisti@1162 78 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
duke@435 79 st_ptr(G0, vm_result_addr_2);
duke@435 80
duke@435 81 if (frame_size() == no_frame_size) {
duke@435 82 // we use O7 linkage so that forward_exception_entry has the issuing PC
duke@435 83 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
duke@435 84 delayed()->restore();
duke@435 85 } else if (_stub_id == Runtime1::forward_exception_id) {
duke@435 86 should_not_reach_here();
duke@435 87 } else {
twisti@1162 88 AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
twisti@1162 89 jump_to(exc, G4);
duke@435 90 delayed()->nop();
duke@435 91 }
duke@435 92 bind(L);
duke@435 93 }
duke@435 94
duke@435 95 // get oop result if there is one and reset the value in the thread
duke@435 96 if (oop_result1->is_valid()) {
duke@435 97 get_vm_result (oop_result1);
duke@435 98 } else {
duke@435 99 // be a little paranoid and clear the result
twisti@1162 100 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
duke@435 101 st_ptr(G0, vm_result_addr);
duke@435 102 }
duke@435 103
duke@435 104 if (oop_result2->is_valid()) {
duke@435 105 get_vm_result_2(oop_result2);
duke@435 106 } else {
duke@435 107 // be a little paranoid and clear the result
twisti@1162 108 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
duke@435 109 st_ptr(G0, vm_result_addr_2);
duke@435 110 }
duke@435 111
duke@435 112 return call_offset;
duke@435 113 }
duke@435 114
duke@435 115
duke@435 116 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
duke@435 117 // O0 is reserved for the thread
duke@435 118 mov(arg1, O1);
duke@435 119 return call_RT(oop_result1, oop_result2, entry, 1);
duke@435 120 }
duke@435 121
duke@435 122
duke@435 123 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
duke@435 124 // O0 is reserved for the thread
duke@435 125 mov(arg1, O1);
duke@435 126 mov(arg2, O2); assert(arg2 != O1, "smashed argument");
duke@435 127 return call_RT(oop_result1, oop_result2, entry, 2);
duke@435 128 }
duke@435 129
duke@435 130
duke@435 131 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
duke@435 132 // O0 is reserved for the thread
duke@435 133 mov(arg1, O1);
duke@435 134 mov(arg2, O2); assert(arg2 != O1, "smashed argument");
duke@435 135 mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
duke@435 136 return call_RT(oop_result1, oop_result2, entry, 3);
duke@435 137 }
duke@435 138
duke@435 139
duke@435 140 // Implementation of Runtime1
duke@435 141
duke@435 142 #define __ sasm->
duke@435 143
duke@435 144 static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
duke@435 145 static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
duke@435 146 static int reg_save_size_in_words;
duke@435 147 static int frame_size_in_bytes = -1;
duke@435 148
duke@435 149 static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
duke@435 150 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
twisti@2603 151 "mismatch in calculation");
duke@435 152 sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
duke@435 153 int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
duke@435 154 OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
duke@435 155
duke@435 156 int i;
duke@435 157 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
duke@435 158 Register r = as_Register(i);
duke@435 159 if (r == G1 || r == G3 || r == G4 || r == G5) {
duke@435 160 int sp_offset = cpu_reg_save_offsets[i];
duke@435 161 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
duke@435 162 r->as_VMReg());
duke@435 163 }
duke@435 164 }
duke@435 165
duke@435 166 if (save_fpu_registers) {
duke@435 167 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
duke@435 168 FloatRegister r = as_FloatRegister(i);
duke@435 169 int sp_offset = fpu_reg_save_offsets[i];
duke@435 170 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
duke@435 171 r->as_VMReg());
duke@435 172 }
duke@435 173 }
duke@435 174 return oop_map;
duke@435 175 }
duke@435 176
duke@435 177 static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
duke@435 178 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
twisti@2603 179 "mismatch in calculation");
duke@435 180 __ save_frame_c1(frame_size_in_bytes);
duke@435 181
duke@435 182 // Record volatile registers as callee-save values in an OopMap so their save locations will be
duke@435 183 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
duke@435 184 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
duke@435 185 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
duke@435 186 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
duke@435 187 // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))
duke@435 188
duke@435 189 int i;
duke@435 190 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
duke@435 191 Register r = as_Register(i);
duke@435 192 if (r == G1 || r == G3 || r == G4 || r == G5) {
duke@435 193 int sp_offset = cpu_reg_save_offsets[i];
duke@435 194 __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
duke@435 195 }
duke@435 196 }
duke@435 197
duke@435 198 if (save_fpu_registers) {
duke@435 199 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
duke@435 200 FloatRegister r = as_FloatRegister(i);
duke@435 201 int sp_offset = fpu_reg_save_offsets[i];
duke@435 202 __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
duke@435 203 }
duke@435 204 }
duke@435 205
duke@435 206 return generate_oop_map(sasm, save_fpu_registers);
duke@435 207 }
duke@435 208
duke@435 209 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
duke@435 210 for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
duke@435 211 Register r = as_Register(i);
duke@435 212 if (r == G1 || r == G3 || r == G4 || r == G5) {
duke@435 213 __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
duke@435 214 }
duke@435 215 }
duke@435 216
duke@435 217 if (restore_fpu_registers) {
duke@435 218 for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
duke@435 219 FloatRegister r = as_FloatRegister(i);
duke@435 220 __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
duke@435 221 }
duke@435 222 }
duke@435 223 }
duke@435 224
duke@435 225
duke@435 226 void Runtime1::initialize_pd() {
duke@435 227 // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
duke@435 228 //
duke@435 229 // A stub routine will have a frame that is at least large enough to hold
duke@435 230 // a register window save area (obviously) and the volatile g registers
duke@435 231 // and floating registers. A user of save_live_registers can have a frame
duke@435 232 // that has more scratch area in it (although typically they will use L-regs).
duke@435 233 // In that case the frame will look like this (stack growing down):
duke@435 234 //
duke@435 235 // FP -> | |
duke@435 236 // | scratch mem |
duke@435 237 // | " " |
duke@435 238 // --------------
duke@435 239 // | float regs |
duke@435 240 // | " " |
duke@435 241 // ---------------
duke@435 242 // | G regs |
duke@435 243 // | " " |
duke@435 244 // ---------------
duke@435 245 // | abi reg. |
duke@435 246 // | window save |
duke@435 247 // | area |
duke@435 248 // SP -> ---------------
duke@435 249 //
duke@435 250 int i;
duke@435 251 int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned
duke@435 252
duke@435 253 // only G int registers are saved explicitly; others are found in register windows
duke@435 254 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
duke@435 255 Register r = as_Register(i);
duke@435 256 if (r == G1 || r == G3 || r == G4 || r == G5) {
duke@435 257 cpu_reg_save_offsets[i] = sp_offset;
duke@435 258 sp_offset++;
duke@435 259 }
duke@435 260 }
duke@435 261
duke@435 262 // all float registers are saved explicitly
duke@435 263 assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
duke@435 264 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
duke@435 265 fpu_reg_save_offsets[i] = sp_offset;
duke@435 266 sp_offset++;
duke@435 267 }
duke@435 268 reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
duke@435 269 // this should match assembler::total_frame_size_in_bytes, which
duke@435 270 // isn't callable from this context. It's checked by an assert when
duke@435 271 // it's used though.
duke@435 272 frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
duke@435 273 }
duke@435 274
duke@435 275
duke@435 276 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
duke@435 277 // make a frame and preserve the caller's caller-save registers
duke@435 278 OopMap* oop_map = save_live_registers(sasm);
duke@435 279 int call_offset;
duke@435 280 if (!has_argument) {
duke@435 281 call_offset = __ call_RT(noreg, noreg, target);
duke@435 282 } else {
duke@435 283 call_offset = __ call_RT(noreg, noreg, target, G4);
duke@435 284 }
duke@435 285 OopMapSet* oop_maps = new OopMapSet();
duke@435 286 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 287
duke@435 288 __ should_not_reach_here();
duke@435 289 return oop_maps;
duke@435 290 }
duke@435 291
duke@435 292
duke@435 293 OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
duke@435 294 Register arg1, Register arg2, Register arg3) {
duke@435 295 // make a frame and preserve the caller's caller-save registers
duke@435 296 OopMap* oop_map = save_live_registers(sasm);
duke@435 297
duke@435 298 int call_offset;
duke@435 299 if (arg1 == noreg) {
duke@435 300 call_offset = __ call_RT(result, noreg, target);
duke@435 301 } else if (arg2 == noreg) {
duke@435 302 call_offset = __ call_RT(result, noreg, target, arg1);
duke@435 303 } else if (arg3 == noreg) {
duke@435 304 call_offset = __ call_RT(result, noreg, target, arg1, arg2);
duke@435 305 } else {
duke@435 306 call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
duke@435 307 }
duke@435 308 OopMapSet* oop_maps = NULL;
duke@435 309
duke@435 310 oop_maps = new OopMapSet();
duke@435 311 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 312 restore_live_registers(sasm);
duke@435 313
duke@435 314 __ ret();
duke@435 315 __ delayed()->restore();
duke@435 316
duke@435 317 return oop_maps;
duke@435 318 }
duke@435 319
duke@435 320
duke@435 321 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
duke@435 322 // make a frame and preserve the caller's caller-save registers
duke@435 323 OopMap* oop_map = save_live_registers(sasm);
duke@435 324
duke@435 325 // call the runtime patching routine, returns non-zero if nmethod got deopted.
duke@435 326 int call_offset = __ call_RT(noreg, noreg, target);
duke@435 327 OopMapSet* oop_maps = new OopMapSet();
duke@435 328 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 329
duke@435 330 // re-execute the patched instruction or, if the nmethod was deoptimized, return to the
duke@435 331 // deoptimization handler entry that will cause re-execution of the current bytecode
duke@435 332 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
duke@435 333 assert(deopt_blob != NULL, "deoptimization blob must have been created");
duke@435 334
duke@435 335 Label no_deopt;
duke@435 336 __ tst(O0);
duke@435 337 __ brx(Assembler::equal, false, Assembler::pt, no_deopt);
duke@435 338 __ delayed()->nop();
duke@435 339
duke@435 340 // return to the deoptimization handler entry for unpacking and re-execution;
duke@435 341 // if we simply returned, then we'd deopt as if any call we patched had just
duke@435 342 // returned.
duke@435 343
duke@435 344 restore_live_registers(sasm);
iveresov@2476 345
iveresov@2476 346 AddressLiteral dest(deopt_blob->unpack_with_reexecution());
iveresov@2476 347 __ jump_to(dest, O0);
iveresov@2476 348 __ delayed()->restore();
duke@435 349
duke@435 350 __ bind(no_deopt);
duke@435 351 restore_live_registers(sasm);
duke@435 352 __ ret();
duke@435 353 __ delayed()->restore();
duke@435 354
duke@435 355 return oop_maps;
duke@435 356 }
duke@435 357
duke@435 358 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
duke@435 359
duke@435 360 OopMapSet* oop_maps = NULL;
duke@435 361 // for better readability
duke@435 362 const bool must_gc_arguments = true;
duke@435 363 const bool dont_gc_arguments = false;
duke@435 364
duke@435 365 // stub code & info for the different stubs
duke@435 366 switch (id) {
duke@435 367 case forward_exception_id:
duke@435 368 {
twisti@2603 369 oop_maps = generate_handle_exception(id, sasm);
duke@435 370 }
duke@435 371 break;
duke@435 372
duke@435 373 case new_instance_id:
duke@435 374 case fast_new_instance_id:
duke@435 375 case fast_new_instance_init_check_id:
duke@435 376 {
duke@435 377 Register G5_klass = G5; // Incoming
duke@435 378 Register O0_obj = O0; // Outgoing
duke@435 379
duke@435 380 if (id == new_instance_id) {
duke@435 381 __ set_info("new_instance", dont_gc_arguments);
duke@435 382 } else if (id == fast_new_instance_id) {
duke@435 383 __ set_info("fast new_instance", dont_gc_arguments);
duke@435 384 } else {
duke@435 385 assert(id == fast_new_instance_init_check_id, "bad StubID");
duke@435 386 __ set_info("fast new_instance init check", dont_gc_arguments);
duke@435 387 }
duke@435 388
duke@435 389 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
duke@435 390 UseTLAB && FastTLABRefill) {
duke@435 391 Label slow_path;
duke@435 392 Register G1_obj_size = G1;
duke@435 393 Register G3_t1 = G3;
duke@435 394 Register G4_t2 = G4;
duke@435 395 assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);
duke@435 396
duke@435 397 // Push a frame since we may do dtrace notification for the
duke@435 398 // allocation which requires calling out and we don't want
duke@435 399 // to stomp the real return address.
duke@435 400 __ save_frame(0);
duke@435 401
duke@435 402 if (id == fast_new_instance_init_check_id) {
duke@435 403 // make sure the klass is initialized
duke@435 404 __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
duke@435 405 __ cmp(G3_t1, instanceKlass::fully_initialized);
duke@435 406 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
duke@435 407 __ delayed()->nop();
duke@435 408 }
duke@435 409 #ifdef ASSERT
duke@435 410 // assert object can be fast path allocated
duke@435 411 {
duke@435 412 Label ok, not_ok;
duke@435 413 __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
duke@435 414 __ cmp(G1_obj_size, 0); // make sure it's an instance (LH > 0)
duke@435 415 __ br(Assembler::lessEqual, false, Assembler::pn, not_ok);
duke@435 416 __ delayed()->nop();
duke@435 417 __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
duke@435 418 __ br(Assembler::zero, false, Assembler::pn, ok);
duke@435 419 __ delayed()->nop();
duke@435 420 __ bind(not_ok);
duke@435 421 __ stop("assert(can be fast path allocated)");
duke@435 422 __ should_not_reach_here();
duke@435 423 __ bind(ok);
duke@435 424 }
duke@435 425 #endif // ASSERT
duke@435 426 // if we got here then the TLAB allocation failed, so try
duke@435 427 // refilling the TLAB or allocating directly from eden.
duke@435 428 Label retry_tlab, try_eden;
duke@435 429 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass
duke@435 430
duke@435 431 __ bind(retry_tlab);
duke@435 432
duke@435 433 // get the instance size
duke@435 434 __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
phh@2423 435
duke@435 436 __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
phh@2423 437
duke@435 438 __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
duke@435 439 __ verify_oop(O0_obj);
duke@435 440 __ mov(O0, I0);
duke@435 441 __ ret();
duke@435 442 __ delayed()->restore();
duke@435 443
duke@435 444 __ bind(try_eden);
duke@435 445 // get the instance size
duke@435 446 __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
duke@435 447 __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
phh@2447 448 __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
phh@2423 449
duke@435 450 __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
duke@435 451 __ verify_oop(O0_obj);
duke@435 452 __ mov(O0, I0);
duke@435 453 __ ret();
duke@435 454 __ delayed()->restore();
duke@435 455
duke@435 456 __ bind(slow_path);
duke@435 457
duke@435 458 // pop this frame so generate_stub_call can push its own
duke@435 459 __ restore();
duke@435 460 }
duke@435 461
duke@435 462 oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
duke@435 463 // I0->O0: new instance
duke@435 464 }
duke@435 465
duke@435 466 break;
duke@435 467
duke@435 468 case counter_overflow_id:
iveresov@2138 469 // G4 contains bci, G5 contains method
iveresov@2138 470 oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
duke@435 471 break;
duke@435 472
duke@435 473 case new_type_array_id:
duke@435 474 case new_object_array_id:
duke@435 475 {
duke@435 476 Register G5_klass = G5; // Incoming
duke@435 477 Register G4_length = G4; // Incoming
duke@435 478 Register O0_obj = O0; // Outgoing
duke@435 479
twisti@1162 480 Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
twisti@1162 481 + Klass::layout_helper_offset_in_bytes()));
duke@435 482 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
duke@435 483 assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
duke@435 484 // Use this offset to pick out an individual byte of the layout_helper:
duke@435 485 const int klass_lh_header_size_offset = ((BytesPerInt - 1) // 3 - 2 selects byte {0,1,0,0}
duke@435 486 - Klass::_lh_header_size_shift / BitsPerByte);
duke@435 487
duke@435 488 if (id == new_type_array_id) {
duke@435 489 __ set_info("new_type_array", dont_gc_arguments);
duke@435 490 } else {
duke@435 491 __ set_info("new_object_array", dont_gc_arguments);
duke@435 492 }
duke@435 493
duke@435 494 #ifdef ASSERT
duke@435 495 // assert object type is really an array of the proper kind
duke@435 496 {
duke@435 497 Label ok;
duke@435 498 Register G3_t1 = G3;
duke@435 499 __ ld(klass_lh, G3_t1);
duke@435 500 __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
duke@435 501 int tag = ((id == new_type_array_id)
duke@435 502 ? Klass::_lh_array_tag_type_value
duke@435 503 : Klass::_lh_array_tag_obj_value);
duke@435 504 __ cmp(G3_t1, tag);
duke@435 505 __ brx(Assembler::equal, false, Assembler::pt, ok);
duke@435 506 __ delayed()->nop();
duke@435 507 __ stop("assert(is an array klass)");
duke@435 508 __ should_not_reach_here();
duke@435 509 __ bind(ok);
duke@435 510 }
duke@435 511 #endif // ASSERT
duke@435 512
duke@435 513 if (UseTLAB && FastTLABRefill) {
duke@435 514 Label slow_path;
duke@435 515 Register G1_arr_size = G1;
duke@435 516 Register G3_t1 = G3;
duke@435 517 Register O1_t2 = O1;
duke@435 518 assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);
duke@435 519
duke@435 520 // check that array length is small enough for fast path
duke@435 521 __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
duke@435 522 __ cmp(G4_length, G3_t1);
duke@435 523 __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
duke@435 524 __ delayed()->nop();
duke@435 525
duke@435 526 // if we got here then the TLAB allocation failed, so try
duke@435 527 // refilling the TLAB or allocating directly from eden.
duke@435 528 Label retry_tlab, try_eden;
duke@435 529 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass
duke@435 530
duke@435 531 __ bind(retry_tlab);
duke@435 532
duke@435 533 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
duke@435 534 __ ld(klass_lh, G3_t1);
duke@435 535 __ sll(G4_length, G3_t1, G1_arr_size);
duke@435 536 __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
duke@435 537 __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
duke@435 538 __ add(G1_arr_size, G3_t1, G1_arr_size);
duke@435 539 __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up
duke@435 540 __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
duke@435 541
duke@435 542 __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size
duke@435 543
duke@435 544 __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
duke@435 545 __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
duke@435 546 __ sub(G1_arr_size, G3_t1, O1_t2); // body length
duke@435 547 __ add(O0_obj, G3_t1, G3_t1); // body start
duke@435 548 __ initialize_body(G3_t1, O1_t2);
duke@435 549 __ verify_oop(O0_obj);
duke@435 550 __ retl();
duke@435 551 __ delayed()->nop();
duke@435 552
duke@435 553 __ bind(try_eden);
duke@435 554 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
duke@435 555 __ ld(klass_lh, G3_t1);
duke@435 556 __ sll(G4_length, G3_t1, G1_arr_size);
duke@435 557 __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
duke@435 558 __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
duke@435 559 __ add(G1_arr_size, G3_t1, G1_arr_size);
duke@435 560 __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
duke@435 561 __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);
duke@435 562
duke@435 563 __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path); // preserves G1_arr_size
phh@2447 564 __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);
duke@435 565
duke@435 566 __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
duke@435 567 __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
duke@435 568 __ sub(G1_arr_size, G3_t1, O1_t2); // body length
duke@435 569 __ add(O0_obj, G3_t1, G3_t1); // body start
duke@435 570 __ initialize_body(G3_t1, O1_t2);
duke@435 571 __ verify_oop(O0_obj);
duke@435 572 __ retl();
duke@435 573 __ delayed()->nop();
duke@435 574
duke@435 575 __ bind(slow_path);
duke@435 576 }
duke@435 577
duke@435 578 if (id == new_type_array_id) {
duke@435 579 oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
duke@435 580 } else {
duke@435 581 oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
duke@435 582 }
duke@435 583 // I0 -> O0: new array
duke@435 584 }
duke@435 585 break;
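// A rough C++ restatement of the size computation emitted above (a sketch,
// assuming the layout_helper encoding asserted in this case):
//
//   int    lh        = klass->layout_helper();
//   int    hdr_bytes = (lh >> Klass::_lh_header_size_shift)
//                      & Klass::_lh_header_size_mask;             // header size
//   size_t arr_size  = ((size_t)length << (lh & 0x1F)) + hdr_bytes;
//   arr_size = (arr_size + MinObjAlignmentInBytesMask)
//              & ~MinObjAlignmentInBytesMask;                     // align up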
duke@435 586
duke@435 587 case new_multi_array_id:
duke@435 588 { // O0: klass
duke@435 589 // O1: rank
duke@435 590 // O2: address of 1st dimension
duke@435 591 __ set_info("new_multi_array", dont_gc_arguments);
duke@435 592 oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
duke@435 593 // I0 -> O0: new multi array
duke@435 594 }
duke@435 595 break;
duke@435 596
duke@435 597 case register_finalizer_id:
duke@435 598 {
duke@435 599 __ set_info("register_finalizer", dont_gc_arguments);
duke@435 600
duke@435 601 // load the klass and check the has-finalizer flag
duke@435 602 Label register_finalizer;
duke@435 603 Register t = O1;
iveresov@2344 604 __ load_klass(O0, t);
duke@435 605 __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
duke@435 606 __ set(JVM_ACC_HAS_FINALIZER, G3);
duke@435 607 __ andcc(G3, t, G0);
duke@435 608 __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
duke@435 609 __ delayed()->nop();
duke@435 610
duke@435 611 // do a leaf return
duke@435 612 __ retl();
duke@435 613 __ delayed()->nop();
duke@435 614
duke@435 615 __ bind(register_finalizer);
duke@435 616 OopMap* oop_map = save_live_registers(sasm);
duke@435 617 int call_offset = __ call_RT(noreg, noreg,
duke@435 618 CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
duke@435 619 oop_maps = new OopMapSet();
duke@435 620 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 621
duke@435 622 // Now restore all the live registers
duke@435 623 restore_live_registers(sasm);
duke@435 624
duke@435 625 __ ret();
duke@435 626 __ delayed()->restore();
duke@435 627 }
duke@435 628 break;
duke@435 629
duke@435 630 case throw_range_check_failed_id:
duke@435 631 { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
duke@435 632 // G4: index
duke@435 633 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
duke@435 634 }
duke@435 635 break;
duke@435 636
duke@435 637 case throw_index_exception_id:
duke@435 638 { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
duke@435 639 // G4: index
duke@435 640 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
duke@435 641 }
duke@435 642 break;
duke@435 643
duke@435 644 case throw_div0_exception_id:
duke@435 645 { __ set_info("throw_div0_exception", dont_gc_arguments);
duke@435 646 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
duke@435 647 }
duke@435 648 break;
duke@435 649
duke@435 650 case throw_null_pointer_exception_id:
duke@435 651 { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
duke@435 652 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
duke@435 653 }
duke@435 654 break;
duke@435 655
duke@435 656 case handle_exception_id:
twisti@2603 657 { __ set_info("handle_exception", dont_gc_arguments);
twisti@2603 658 oop_maps = generate_handle_exception(id, sasm);
twisti@2603 659 }
twisti@2603 660 break;
duke@435 661
twisti@2603 662 case handle_exception_from_callee_id:
twisti@2603 663 { __ set_info("handle_exception_from_callee", dont_gc_arguments);
twisti@2603 664 oop_maps = generate_handle_exception(id, sasm);
duke@435 665 }
duke@435 666 break;
duke@435 667
duke@435 668 case unwind_exception_id:
duke@435 669 {
duke@435 670 // O0: exception
duke@435 671 // I7: address of call to this method
duke@435 672
duke@435 673 __ set_info("unwind_exception", dont_gc_arguments);
duke@435 674 __ mov(Oexception, Oexception->after_save());
duke@435 675 __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());
duke@435 676
duke@435 677 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
twisti@1730 678 G2_thread, Oissuing_pc->after_save());
duke@435 679 __ verify_not_null_oop(Oexception->after_save());
twisti@1919 680
twisti@2603 681 // Restore SP from L7 if the exception PC is a method handle call site.
twisti@1919 682 __ mov(O0, G5); // Save the target address.
twisti@1919 683 __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
twisti@1919 684 __ tst(L0); // Condition codes are preserved over the restore.
twisti@1919 685 __ restore();
twisti@1919 686
twisti@1919 687 __ jmp(G5, 0);
twisti@1919 688 __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
duke@435 689 }
duke@435 690 break;
duke@435 691
duke@435 692 case throw_array_store_exception_id:
duke@435 693 {
duke@435 694 __ set_info("throw_array_store_exception", dont_gc_arguments);
never@2488 695 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
duke@435 696 }
duke@435 697 break;
duke@435 698
duke@435 699 case throw_class_cast_exception_id:
duke@435 700 {
duke@435 701 // G4: object
duke@435 702 __ set_info("throw_class_cast_exception", dont_gc_arguments);
duke@435 703 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
duke@435 704 }
duke@435 705 break;
duke@435 706
duke@435 707 case throw_incompatible_class_change_error_id:
duke@435 708 {
duke@435 709 __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
duke@435 710 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
duke@435 711 }
duke@435 712 break;
duke@435 713
duke@435 714 case slow_subtype_check_id:
duke@435 715 { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
duke@435 716 // Arguments :
duke@435 717 //
duke@435 718 // ret : G3
duke@435 719 // sub : G3, argument, destroyed
duke@435 720 // super: G1, argument, not changed
duke@435 721 // raddr: O7, blown by call
jrose@1079 722 Label miss;
duke@435 723
duke@435 724 __ save_frame(0); // Blow no registers!
duke@435 725
jrose@1079 726 __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);
duke@435 727
duke@435 728 __ mov(1, G3);
jrose@1079 729 __ ret(); // Result in G3 is 'true'
duke@435 730 __ delayed()->restore(); // free copy or add can go here
duke@435 731
duke@435 732 __ bind(miss);
duke@435 733 __ mov(0, G3);
jrose@1079 734 __ ret(); // Result in G3 is 'false'
duke@435 735 __ delayed()->restore(); // free copy or add can go here
duke@435 736 }
duke@435 736 break; // do not fall through into the monitorenter stub
duke@435 737
duke@435 738 case monitorenter_nofpu_id:
duke@435 739 case monitorenter_id:
duke@435 740 { // G4: object
duke@435 741 // G5: lock address
duke@435 742 __ set_info("monitorenter", dont_gc_arguments);
duke@435 743
duke@435 744 int save_fpu_registers = (id == monitorenter_id);
duke@435 745 // make a frame and preserve the caller's caller-save registers
duke@435 746 OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
duke@435 747
duke@435 748 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);
duke@435 749
duke@435 750 oop_maps = new OopMapSet();
duke@435 751 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 752 restore_live_registers(sasm, save_fpu_registers);
duke@435 753
duke@435 754 __ ret();
duke@435 755 __ delayed()->restore();
duke@435 756 }
duke@435 757 break;
duke@435 758
duke@435 759 case monitorexit_nofpu_id:
duke@435 760 case monitorexit_id:
duke@435 761 { // G4: lock address
duke@435 762 // note: really a leaf routine but must setup last java sp
duke@435 763 // => use call_RT for now (speed can be improved by
duke@435 764 // doing last java sp setup manually)
duke@435 765 __ set_info("monitorexit", dont_gc_arguments);
duke@435 766
duke@435 767 int save_fpu_registers = (id == monitorexit_id);
duke@435 768 // make a frame and preserve the caller's caller-save registers
duke@435 769 OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
duke@435 770
duke@435 771 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);
duke@435 772
duke@435 773 oop_maps = new OopMapSet();
duke@435 774 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 775 restore_live_registers(sasm, save_fpu_registers);
duke@435 776
duke@435 777 __ ret();
duke@435 778 __ delayed()->restore();
duke@435 779
duke@435 780 }
duke@435 781 break;
duke@435 782
duke@435 783 case access_field_patching_id:
duke@435 784 { __ set_info("access_field_patching", dont_gc_arguments);
duke@435 785 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
duke@435 786 }
duke@435 787 break;
duke@435 788
duke@435 789 case load_klass_patching_id:
duke@435 790 { __ set_info("load_klass_patching", dont_gc_arguments);
duke@435 791 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
duke@435 792 }
duke@435 793 break;
duke@435 794
duke@435 795 case jvmti_exception_throw_id:
duke@435 796 { // Oexception : exception
duke@435 797 __ set_info("jvmti_exception_throw", dont_gc_arguments);
duke@435 798 oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
duke@435 799 }
duke@435 800 break;
duke@435 801
duke@435 802 case dtrace_object_alloc_id:
duke@435 803 { // O0: object
duke@435 804 __ set_info("dtrace_object_alloc", dont_gc_arguments);
duke@435 805 // we can't gc here so skip the oopmap but make sure that all
duke@435 806 // the live registers get saved.
duke@435 807 save_live_registers(sasm);
duke@435 808
duke@435 809 __ save_thread(L7_thread_cache);
duke@435 810 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
duke@435 811 relocInfo::runtime_call_type);
duke@435 812 __ delayed()->mov(I0, O0);
duke@435 813 __ restore_thread(L7_thread_cache);
duke@435 814
duke@435 815 restore_live_registers(sasm);
duke@435 816 __ ret();
duke@435 817 __ delayed()->restore();
duke@435 818 }
duke@435 819 break;
duke@435 820
ysr@777 821 #ifndef SERIALGC
ysr@777 822 case g1_pre_barrier_slow_id:
ysr@777 823 { // G4: previous value of memory
ysr@777 824 BarrierSet* bs = Universe::heap()->barrier_set();
ysr@777 825 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
ysr@777 826 __ save_frame(0);
ysr@777 827 __ set((int)id, O1);
ysr@777 828 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
ysr@777 829 __ should_not_reach_here();
ysr@777 830 break;
ysr@777 831 }
ysr@777 832
ysr@777 833 __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
ysr@777 834
ysr@777 835 Register pre_val = G4;
ysr@777 836 Register tmp = G1_scratch;
ysr@777 837 Register tmp2 = G3_scratch;
ysr@777 838
ysr@777 839 Label refill, restart;
ysr@777 840 bool with_frame = false; // I don't know if we can do with-frame.
ysr@777 841 int satb_q_index_byte_offset =
ysr@777 842 in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777 843 PtrQueue::byte_offset_of_index());
ysr@777 844 int satb_q_buf_byte_offset =
ysr@777 845 in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777 846 PtrQueue::byte_offset_of_buf());
ysr@777 847 __ bind(restart);
ysr@777 848 __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
ysr@777 849
ysr@777 850 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
ysr@777 851 Assembler::pn, tmp, refill);
ysr@777 852
ysr@777 853 // If the branch is taken, no harm in executing this in the delay slot.
ysr@777 854 __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
ysr@777 855 __ sub(tmp, oopSize, tmp);
ysr@777 856
ysr@777 857 __ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := pre_val
ysr@777 858 // Use return-from-leaf
ysr@777 859 __ retl();
ysr@777 860 __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
ysr@777 861
ysr@777 862 __ bind(refill);
ysr@777 863 __ save_frame(0);
ysr@777 864
ysr@777 865 __ mov(pre_val, L0);
ysr@777 866 __ mov(tmp, L1);
ysr@777 867 __ mov(tmp2, L2);
ysr@777 868
ysr@777 869 __ call_VM_leaf(L7_thread_cache,
ysr@777 870 CAST_FROM_FN_PTR(address,
ysr@777 871 SATBMarkQueueSet::handle_zero_index_for_thread),
ysr@777 872 G2_thread);
ysr@777 873
ysr@777 874 __ mov(L0, pre_val);
ysr@777 875 __ mov(L1, tmp);
ysr@777 876 __ mov(L2, tmp2);
ysr@777 877
ysr@777 878 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
ysr@777 879 __ delayed()->restore();
ysr@777 880 }
ysr@777 881 break;
ysr@777 882
ysr@777 883 case g1_post_barrier_slow_id:
ysr@777 884 {
ysr@777 885 BarrierSet* bs = Universe::heap()->barrier_set();
ysr@777 886 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
ysr@777 887 __ save_frame(0);
ysr@777 888 __ set((int)id, O1);
ysr@777 889 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
ysr@777 890 __ should_not_reach_here();
ysr@777 891 break;
ysr@777 892 }
ysr@777 893
ysr@777 894 __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
ysr@777 895
ysr@777 896 Register addr = G4;
ysr@777 897 Register cardtable = G5;
ysr@777 898 Register tmp = G1_scratch;
ysr@777 899 Register tmp2 = G3_scratch;
ysr@777 900 jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
ysr@777 901
ysr@777 902 Label not_already_dirty, restart, refill;
ysr@777 903
ysr@777 904 #ifdef _LP64
ysr@777 905 __ srlx(addr, CardTableModRefBS::card_shift, addr);
ysr@777 906 #else
ysr@777 907 __ srl(addr, CardTableModRefBS::card_shift, addr);
ysr@777 908 #endif
ysr@777 909
twisti@1162 910 AddressLiteral rs(byte_map_base);
twisti@1162 911 __ set(rs, cardtable); // cardtable := <card table base>
ysr@777 912 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
ysr@777 913
ysr@777 914 __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
ysr@777 915 tmp, not_already_dirty);
ysr@777 916 // Get the card address (addr + cardtable) into a reg by itself -- useful in the
ysr@777 917 // take-the-branch case, harmless if not.
ysr@777 918 __ delayed()->add(addr, cardtable, tmp2);
ysr@777 919
ysr@777 920 // We didn't take the branch, so we're already dirty: return.
ysr@777 921 // Use return-from-leaf
ysr@777 922 __ retl();
ysr@777 923 __ delayed()->nop();
ysr@777 924
ysr@777 925 // Not dirty.
ysr@777 926 __ bind(not_already_dirty);
ysr@777 927 // First, dirty it.
ysr@777 928 __ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
ysr@777 929
ysr@777 930 Register tmp3 = cardtable;
ysr@777 931 Register tmp4 = tmp;
ysr@777 932
ysr@777 933 // these registers are now dead
ysr@777 934 addr = cardtable = tmp = noreg;
ysr@777 935
ysr@777 936 int dirty_card_q_index_byte_offset =
ysr@777 937 in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777 938 PtrQueue::byte_offset_of_index());
ysr@777 939 int dirty_card_q_buf_byte_offset =
ysr@777 940 in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777 941 PtrQueue::byte_offset_of_buf());
ysr@777 942 __ bind(restart);
ysr@777 943 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
ysr@777 944
ysr@777 945 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
ysr@777 946 tmp3, refill);
ysr@777 947 // If the branch is taken, no harm in executing this in the delay slot.
ysr@777 948 __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
ysr@777 949 __ sub(tmp3, oopSize, tmp3);
ysr@777 950
ysr@777 951 __ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
ysr@777 952 // Use return-from-leaf
ysr@777 953 __ retl();
ysr@777 954 __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
ysr@777 955
ysr@777 956 __ bind(refill);
ysr@777 957 __ save_frame(0);
ysr@777 958
ysr@777 959 __ mov(tmp2, L0);
ysr@777 960 __ mov(tmp3, L1);
ysr@777 961 __ mov(tmp4, L2);
ysr@777 962
ysr@777 963 __ call_VM_leaf(L7_thread_cache,
ysr@777 964 CAST_FROM_FN_PTR(address,
ysr@777 965 DirtyCardQueueSet::handle_zero_index_for_thread),
ysr@777 966 G2_thread);
ysr@777 967
ysr@777 968 __ mov(L0, tmp2);
ysr@777 969 __ mov(L1, tmp3);
ysr@777 970 __ mov(L2, tmp4);
ysr@777 971
ysr@777 972 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
ysr@777 973 __ delayed()->restore();
ysr@777 974 }
ysr@777 975 break;
ysr@777 976 #endif // !SERIALGC
ysr@777 977
duke@435 978 default:
duke@435 979 { __ set_info("unimplemented entry", dont_gc_arguments);
duke@435 980 __ save_frame(0);
duke@435 981 __ set((int)id, O1);
duke@435 982 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
duke@435 983 __ should_not_reach_here();
duke@435 984 }
duke@435 985 break;
duke@435 986 }
duke@435 987 return oop_maps;
duke@435 988 }
duke@435 989
duke@435 990
twisti@2603 991 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
twisti@2603 992 __ block_comment("generate_handle_exception");
twisti@2603 993
twisti@2603 994 // Save registers, if required.
twisti@2603 995 OopMapSet* oop_maps = new OopMapSet();
twisti@2603 996 OopMap* oop_map = NULL;
twisti@2603 997 switch (id) {
twisti@2603 998 case forward_exception_id:
twisti@2603 999 // We're handling an exception in the context of a compiled frame.
twisti@2603 1000 // The registers have been saved in the standard places. Perform
twisti@2603 1001 // an exception lookup in the caller and dispatch to the handler
twisti@2603 1002 // if found. Otherwise unwind and dispatch to the caller's
twisti@2603 1003 // exception handler.
twisti@2603 1004 oop_map = generate_oop_map(sasm, true);
twisti@2603 1005
twisti@2603 1006 // transfer the pending exception to the exception_oop
twisti@2603 1007 __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
twisti@2603 1008 __ ld_ptr(Oexception, 0, G0);
twisti@2603 1009 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
twisti@2603 1010 __ add(I7, frame::pc_return_offset, Oissuing_pc);
twisti@2603 1011 break;
twisti@2603 1012 case handle_exception_id:
twisti@2603 1013 // At this point all registers MAY be live.
twisti@2603 1014 oop_map = save_live_registers(sasm);
twisti@2603 1015 __ mov(Oexception->after_save(), Oexception);
twisti@2603 1016 __ mov(Oissuing_pc->after_save(), Oissuing_pc);
twisti@2603 1017 break;
twisti@2603 1018 case handle_exception_from_callee_id:
twisti@2603 1019 // At this point all registers except exception oop (Oexception)
twisti@2603 1020 // and exception pc (Oissuing_pc) are dead.
twisti@2603 1021 oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
twisti@2603 1022 sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
twisti@2603 1023 __ save_frame_c1(frame_size_in_bytes);
twisti@2603 1024 __ mov(Oexception->after_save(), Oexception);
twisti@2603 1025 __ mov(Oissuing_pc->after_save(), Oissuing_pc);
twisti@2603 1026 break;
twisti@2603 1027 default: ShouldNotReachHere();
twisti@2603 1028 }
duke@435 1029
duke@435 1030 __ verify_not_null_oop(Oexception);
duke@435 1031
duke@435 1032 // save the exception and issuing pc in the thread
twisti@2603 1033 __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
duke@435 1034 __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
duke@435 1035
twisti@2603 1036 // use the throwing pc as the return address to lookup (has bci & oop map)
duke@435 1037 __ mov(Oissuing_pc, I7);
duke@435 1038 __ sub(I7, frame::pc_return_offset, I7);
duke@435 1039 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
twisti@2603 1040 oop_maps->add_gc_map(call_offset, oop_map);
duke@435 1041
duke@435 1042 // Note: if nmethod has been deoptimized then regardless of
duke@435 1043 // whether it had a handler or not we will deoptimize
duke@435 1044 // by entering the deopt blob with a pending exception.
duke@435 1045
twisti@2603 1046 // Restore the registers that were saved at the beginning, remove
twisti@2603 1047 // the frame and jump to the exception handler.
twisti@2603 1048 switch (id) {
twisti@2603 1049 case forward_exception_id:
twisti@2603 1050 case handle_exception_id:
twisti@2603 1051 restore_live_registers(sasm);
twisti@2603 1052 __ jmp(O0, 0);
twisti@2603 1053 __ delayed()->restore();
twisti@2603 1054 break;
twisti@2603 1055 case handle_exception_from_callee_id:
twisti@2603 1056 // Restore SP from L7 if the exception PC is a method handle call site.
twisti@2603 1057 __ mov(O0, G5); // Save the target address.
twisti@2603 1058 __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
twisti@2603 1059 __ tst(L0); // Condition codes are preserved over the restore.
twisti@2603 1060 __ restore();
duke@435 1061
twisti@2603 1062 __ jmp(G5, 0); // jump to the exception handler
twisti@2603 1063 __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
twisti@2603 1064 break;
twisti@2603 1065 default: ShouldNotReachHere();
twisti@2603 1066 }
duke@435 1067
twisti@2603 1068 return oop_maps;
duke@435 1069 }
duke@435 1070
duke@435 1071
duke@435 1072 #undef __
duke@435 1073
bobv@2036 1074 const char *Runtime1::pd_name_for_address(address entry) {
bobv@2036 1075 return "<unknown function>";
bobv@2036 1076 }
