Wed, 27 Nov 2013 16:16:21 -0800
8029015: PPC64 (part 216): opto: trap based null and range checks
Summary: On PPC64 use tdi instruction that does a compare and raises SIGTRAP for NULL and range checks.
Reviewed-by: kvn
goetz@6458 | 1 | /* |
goetz@6458 | 2 | * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. |
goetz@6458 | 3 | * Copyright 2012, 2013 SAP AG. All rights reserved. |
goetz@6458 | 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
goetz@6458 | 5 | * |
goetz@6458 | 6 | * This code is free software; you can redistribute it and/or modify it |
goetz@6458 | 7 | * under the terms of the GNU General Public License version 2 only, as |
goetz@6458 | 8 | * published by the Free Software Foundation. |
goetz@6458 | 9 | * |
goetz@6458 | 10 | * This code is distributed in the hope that it will be useful, but WITHOUT |
goetz@6458 | 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
goetz@6458 | 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
goetz@6458 | 13 | * version 2 for more details (a copy is included in the LICENSE file that |
goetz@6458 | 14 | * accompanied this code). |
goetz@6458 | 15 | * |
goetz@6458 | 16 | * You should have received a copy of the GNU General Public License version |
goetz@6458 | 17 | * 2 along with this work; if not, write to the Free Software Foundation, |
goetz@6458 | 18 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
goetz@6458 | 19 | * |
goetz@6458 | 20 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
goetz@6458 | 21 | * or visit www.oracle.com if you need additional information or have any |
goetz@6458 | 22 | * questions. |
goetz@6458 | 23 | * |
goetz@6458 | 24 | */ |
goetz@6458 | 25 | |
goetz@6458 | 26 | |
goetz@6458 | 27 | #include "precompiled.hpp" |
goetz@6458 | 28 | #include "asm/assembler.hpp" |
goetz@6458 | 29 | #include "asm/macroAssembler.inline.hpp" |
goetz@6458 | 30 | #include "interp_masm_ppc_64.hpp" |
goetz@6458 | 31 | #include "interpreter/interpreterRuntime.hpp" |
goetz@6458 | 32 | |
goetz@6458 | 33 | |
goetz@6458 | 34 | #ifdef PRODUCT |
goetz@6458 | 35 | #define BLOCK_COMMENT(str) // nothing |
goetz@6458 | 36 | #else |
goetz@6458 | 37 | #define BLOCK_COMMENT(str) block_comment(str) |
goetz@6458 | 38 | #endif |
goetz@6458 | 39 | |
goetz@6458 | 40 | // Lock object |
goetz@6458 | 41 | // |
goetz@6458 | 42 | // Registers alive |
goetz@6458 | 43 | // monitor - Address of the BasicObjectLock to be used for locking, |
goetz@6458 | 44 | // which must be initialized with the object to lock. |
goetz@6458 | 45 | // object - Address of the object to be locked. |
goetz@6458 | 46 | // |
goetz@6458 | 47 | void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { |
goetz@6458 | 48 | if (UseHeavyMonitors) { |
goetz@6458 | 49 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), |
goetz@6458 | 50 | monitor, /*check_for_exceptions=*/false); |
goetz@6458 | 51 | } else { |
goetz@6458 | 52 | // template code: |
goetz@6458 | 53 | // |
goetz@6458 | 54 | // markOop displaced_header = obj->mark().set_unlocked(); |
goetz@6458 | 55 | // monitor->lock()->set_displaced_header(displaced_header); |
goetz@6458 | 56 | // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) { |
goetz@6458 | 57 | // // We stored the monitor address into the object's mark word. |
goetz@6458 | 58 | // } else if (THREAD->is_lock_owned((address)displaced_header)) |
goetz@6458 | 59 | // // Simple recursive case. |
goetz@6458 | 60 | // monitor->lock()->set_displaced_header(NULL); |
goetz@6458 | 61 | // } else { |
goetz@6458 | 62 | // // Slow path. |
goetz@6458 | 63 | // InterpreterRuntime::monitorenter(THREAD, monitor); |
goetz@6458 | 64 | // } |
goetz@6458 | 65 | |
goetz@6458 | 66 | const Register displaced_header = R7_ARG5; |
goetz@6458 | 67 | const Register object_mark_addr = R8_ARG6; |
goetz@6458 | 68 | const Register current_header = R9_ARG7; |
goetz@6458 | 69 | const Register tmp = R10_ARG8; |
goetz@6458 | 70 | |
goetz@6458 | 71 | Label done; |
goetz@6458 | 72 | Label slow_case; |
goetz@6458 | 73 | |
goetz@6458 | 74 | assert_different_registers(displaced_header, object_mark_addr, current_header, tmp); |
goetz@6458 | 75 | |
goetz@6458 | 76 | |
goetz@6458 | 77 | // markOop displaced_header = obj->mark().set_unlocked(); |
goetz@6458 | 78 | |
goetz@6458 | 79 | // Load markOop from object into displaced_header. |
goetz@6458 | 80 | ld(displaced_header, oopDesc::mark_offset_in_bytes(), object); |
goetz@6458 | 81 | |
goetz@6458 | 82 | if (UseBiasedLocking) { |
goetz@6458 | 83 | biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case); |
goetz@6458 | 84 | } |
goetz@6458 | 85 | |
goetz@6458 | 86 | // Set displaced_header to be (markOop of object | UNLOCK_VALUE). |
goetz@6458 | 87 | ori(displaced_header, displaced_header, markOopDesc::unlocked_value); |
goetz@6458 | 88 | |
goetz@6458 | 89 | |
goetz@6458 | 90 | // monitor->lock()->set_displaced_header(displaced_header); |
goetz@6458 | 91 | |
goetz@6458 | 92 | // Initialize the box (Must happen before we update the object mark!). |
goetz@6458 | 93 | std(displaced_header, BasicObjectLock::lock_offset_in_bytes() + |
goetz@6458 | 94 | BasicLock::displaced_header_offset_in_bytes(), monitor); |
goetz@6458 | 95 | |
goetz@6458 | 96 | // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) { |
goetz@6458 | 97 | |
goetz@6458 | 98 | // Store stack address of the BasicObjectLock (this is monitor) into object. |
goetz@6458 | 99 | addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes()); |
goetz@6458 | 100 | |
goetz@6458 | 101 | // Must fence, otherwise preceding store(s) may float below cmpxchg. |
goetz@6458 | 102 | // CmpxchgX sets CCR0 to cmpX(current, displaced). |
goetz@6458 | 103 | fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ? |
goetz@6458 | 104 | cmpxchgd(/*flag=*/CCR0, |
goetz@6458 | 105 | /*current_value=*/current_header, |
goetz@6458 | 106 | /*compare_value=*/displaced_header, /*exchange_value=*/monitor, |
goetz@6458 | 107 | /*where=*/object_mark_addr, |
goetz@6458 | 108 | MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, |
goetz@6458 | 109 | MacroAssembler::cmpxchgx_hint_acquire_lock()); |
goetz@6458 | 110 | |
goetz@6458 | 111 | // If the compare-and-exchange succeeded, then we found an unlocked |
goetz@6458 | 112 | // object and we have now locked it. |
goetz@6458 | 113 | beq(CCR0, done); |
goetz@6458 | 114 | |
goetz@6458 | 115 | |
goetz@6458 | 116 | // } else if (THREAD->is_lock_owned((address)displaced_header)) |
goetz@6458 | 117 | // // Simple recursive case. |
goetz@6458 | 118 | // monitor->lock()->set_displaced_header(NULL); |
goetz@6458 | 119 | |
goetz@6458 | 120 | // We did not see an unlocked object so try the fast recursive case. |
goetz@6458 | 121 | |
goetz@6458 | 122 | // Check if owner is self by comparing the value in the markOop of object |
goetz@6458 | 123 | // (current_header) with the stack pointer. |
goetz@6458 | 124 | sub(current_header, current_header, R1_SP); |
goetz@6458 | 125 | |
goetz@6458 | 126 | assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); |
goetz@6458 | 127 | load_const_optimized(tmp, |
goetz@6458 | 128 | (address) (~(os::vm_page_size()-1) | |
goetz@6458 | 129 | markOopDesc::lock_mask_in_place)); |
goetz@6458 | 130 | |
goetz@6458 | 131 | and_(R0/*==0?*/, current_header, tmp); |
goetz@6458 | 132 | // If the masked result is zero, the mark word is a stack address within our |
goetz@6458 | 133 | // page, i.e. we own the lock recursively: store 0 as the displaced header. |
goetz@6458 | 134 | bne(CCR0, slow_case); |
goetz@6458 | 135 | release(); |
goetz@6458 | 136 | std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() + |
goetz@6458 | 137 | BasicLock::displaced_header_offset_in_bytes(), monitor); |
goetz@6458 | 138 | b(done); |
goetz@6458 | 139 | |
goetz@6458 | 140 | |
goetz@6458 | 141 | // } else { |
goetz@6458 | 142 | // // Slow path. |
goetz@6458 | 143 | // InterpreterRuntime::monitorenter(THREAD, monitor); |
goetz@6458 | 144 | |
goetz@6458 | 145 | // None of the above fast optimizations worked so we have to get into the |
goetz@6458 | 146 | // slow case of monitor enter. |
goetz@6458 | 147 | bind(slow_case); |
goetz@6458 | 148 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), |
goetz@6458 | 149 | monitor, /*check_for_exceptions=*/false); |
goetz@6458 | 150 | // } |
goetz@6458 | 151 | |
goetz@6458 | 152 | bind(done); |
goetz@6458 | 153 | } |
goetz@6458 | 154 | } |
goetz@6458 | 155 | |
goetz@6458 | 156 | // Unlocks an object. Used in monitorexit bytecode and remove_activation. |
goetz@6458 | 157 | // |
goetz@6458 | 158 | // Registers alive |
goetz@6458 | 159 | // monitor - Address of the BasicObjectLock to be used for locking, |
goetz@6458 | 160 | // which must be initialized with the object to lock. |
goetz@6458 | 161 | // |
goetz@6458 | 162 | // Throw IllegalMonitorException if object is not locked by current thread. |
goetz@6458 | 163 | void InterpreterMacroAssembler::unlock_object(Register monitor) { |
goetz@6458 | 164 | if (UseHeavyMonitors) { |
goetz@6458 | 165 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), |
goetz@6458 | 166 | monitor, /*check_for_exceptions=*/false); |
goetz@6458 | 167 | } else { |
goetz@6458 | 168 | |
goetz@6458 | 169 | // template code: |
goetz@6458 | 170 | // |
goetz@6458 | 171 | // if ((displaced_header = monitor->displaced_header()) == NULL) { |
goetz@6458 | 172 | // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL. |
goetz@6458 | 173 | // monitor->set_obj(NULL); |
goetz@6458 | 174 | // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) { |
goetz@6458 | 175 | // // We swapped the unlocked mark in displaced_header into the object's mark word. |
goetz@6458 | 176 | // monitor->set_obj(NULL); |
goetz@6458 | 177 | // } else { |
goetz@6458 | 178 | // // Slow path. |
goetz@6458 | 179 | // InterpreterRuntime::monitorexit(THREAD, monitor); |
goetz@6458 | 180 | // } |
goetz@6458 | 181 | |
goetz@6458 | 182 | const Register object = R7_ARG5; |
goetz@6458 | 183 | const Register displaced_header = R8_ARG6; |
goetz@6458 | 184 | const Register object_mark_addr = R9_ARG7; |
goetz@6458 | 185 | const Register current_header = R10_ARG8; |
goetz@6458 | 186 | |
goetz@6458 | 187 | Label no_recursive_unlock; |
goetz@6458 | 188 | Label slow_case; |
goetz@6458 | 189 | Label done; |
goetz@6458 | 190 | |
goetz@6458 | 191 | assert_different_registers(object, displaced_header, object_mark_addr, current_header); |
goetz@6458 | 192 | |
goetz@6458 | 193 | if (UseBiasedLocking) { |
goetz@6458 | 194 | // The object address from the monitor is in object. |
goetz@6458 | 195 | ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor); |
goetz@6458 | 196 | assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); |
goetz@6458 | 197 | biased_locking_exit(CCR0, object, displaced_header, done); |
goetz@6458 | 198 | } |
goetz@6458 | 199 | |
goetz@6458 | 200 | // Test first if we are in the fast recursive case. |
goetz@6458 | 201 | ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() + |
goetz@6458 | 202 | BasicLock::displaced_header_offset_in_bytes(), monitor); |
goetz@6458 | 203 | |
goetz@6458 | 204 | // If the displaced header is zero, we have a recursive unlock. |
goetz@6458 | 205 | cmpdi(CCR0, displaced_header, 0); |
goetz@6458 | 206 | bne(CCR0, no_recursive_unlock); |
goetz@6458 | 207 | // Release in recursive unlock is not necessary. |
goetz@6458 | 208 | // release(); |
goetz@6458 | 209 | std(displaced_header/*==0!*/, BasicObjectLock::obj_offset_in_bytes(), monitor); |
goetz@6458 | 210 | b(done); |
goetz@6458 | 211 | |
goetz@6458 | 212 | bind(no_recursive_unlock); |
goetz@6458 | 213 | |
goetz@6458 | 214 | // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) { |
goetz@6458 | 215 | // // We swapped the unlocked mark in displaced_header into the object's mark word. |
goetz@6458 | 216 | // monitor->set_obj(NULL); |
goetz@6458 | 217 | |
goetz@6458 | 218 | // If we still have a lightweight lock, unlock the object and be done. |
goetz@6458 | 219 | |
goetz@6458 | 220 | // The object address from the monitor is in object. |
goetz@6458 | 221 | ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor); |
goetz@6458 | 222 | addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes()); |
goetz@6458 | 223 | |
goetz@6458 | 224 | // We have the displaced header in displaced_header. If the lock is still |
goetz@6458 | 225 | // lightweight, it will contain the monitor address and we'll store the |
goetz@6458 | 226 | // displaced header back into the object's mark word. |
goetz@6458 | 227 | // CmpxchgX sets CCR0 to cmpX(current, monitor). |
goetz@6458 | 228 | cmpxchgd(/*flag=*/CCR0, |
goetz@6458 | 229 | /*current_value=*/current_header, |
goetz@6458 | 230 | /*compare_value=*/monitor, /*exchange_value=*/displaced_header, |
goetz@6458 | 231 | /*where=*/object_mark_addr, |
goetz@6458 | 232 | MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, |
goetz@6458 | 233 | MacroAssembler::cmpxchgx_hint_release_lock()); |
goetz@6458 | 234 | bne(CCR0, slow_case); |
goetz@6458 | 235 | |
goetz@6458 | 236 | // Exchange worked, do monitor->set_obj(NULL). |
goetz@6458 | 237 | li(R0, 0); |
goetz@6458 | 238 | // Must release earlier (see cmpxchgd above). |
goetz@6458 | 239 | // release(); |
goetz@6458 | 240 | std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor); |
goetz@6458 | 241 | b(done); |
goetz@6458 | 242 | |
goetz@6458 | 243 | |
goetz@6458 | 244 | // } else { |
goetz@6458 | 245 | // // Slow path. |
goetz@6458 | 246 | // InterpreterRuntime::monitorexit(THREAD, monitor); |
goetz@6458 | 247 | |
goetz@6458 | 248 | // The lock has been converted into a heavy lock and hence |
goetz@6458 | 249 | // we need to get into the slow case. |
goetz@6458 | 250 | bind(slow_case); |
goetz@6458 | 251 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), |
goetz@6458 | 252 | monitor, /*check_for_exceptions=*/false); |
goetz@6458 | 253 | // } |
goetz@6458 | 254 | |
goetz@6458 | 255 | bind(done); |
goetz@6458 | 256 | } |
goetz@6458 | 257 | } |
goetz@6458 | 258 | |
// Load the MethodCounters object into Rcounters, allocating it via the runtime
// if the method does not have one yet. Branches to 'skip' if allocation fails.
goetz@6458 | 259 | void InterpreterMacroAssembler::get_method_counters(Register method, |
goetz@6458 | 260 | Register Rcounters, |
goetz@6458 | 261 | Label& skip) { |
goetz@6458 | 262 | BLOCK_COMMENT("Load and ev. allocate counter object {"); |
goetz@6458 | 263 | Label has_counters; |
goetz@6458 | 264 | ld(Rcounters, in_bytes(Method::method_counters_offset()), method); |
goetz@6458 | 265 | cmpdi(CCR0, Rcounters, 0); |
goetz@6458 | 266 | bne(CCR0, has_counters); |
goetz@6458 | 267 | call_VM(noreg, CAST_FROM_FN_PTR(address, |
goetz@6458 | 268 | InterpreterRuntime::build_method_counters), method, false); |
goetz@6458 | 269 | ld(Rcounters, in_bytes(Method::method_counters_offset()), method); |
goetz@6458 | 270 | cmpdi(CCR0, Rcounters, 0); |
goetz@6458 | 271 | beq(CCR0, skip); // Allocation failed (OutOfMemory), no MethodCounters: bail out. |
goetz@6458 | 272 | BLOCK_COMMENT("} Load and ev. allocate counter object"); |
goetz@6458 | 273 | |
goetz@6458 | 274 | bind(has_counters); |
goetz@6458 | 275 | } |
goetz@6458 | 276 | |
// Increment the invocation counter in the MethodCounters object (Rcounters) and
// leave the sum of the (masked) backedge counter and the new invocation counter
// in iv_be_count. Rtmp_r0 is clobbered as a temporary.
goetz@6458 | 277 | void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) { |
goetz@6458 | 278 | assert(UseCompiler, "incrementing must be useful"); |
goetz@6458 | 279 | Register invocation_count = iv_be_count; |
goetz@6458 | 280 | Register backedge_count = Rtmp_r0; |
goetz@6458 | 281 | int delta = InvocationCounter::count_increment; |
goetz@6458 | 282 | |
goetz@6458 | 283 | // Load each counter in a register. |
goetz@6458 | 284 | // ld(inv_counter, Rtmp); |
goetz@6458 | 285 | // ld(be_counter, Rtmp2); |
goetz@6458 | 286 | int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() + |
goetz@6458 | 287 | InvocationCounter::counter_offset()); |
goetz@6458 | 288 | int be_counter_offset = in_bytes(MethodCounters::backedge_counter_offset() + |
goetz@6458 | 289 | InvocationCounter::counter_offset()); |
goetz@6458 | 290 | |
goetz@6458 | 291 | BLOCK_COMMENT("Increment profiling counters {"); |
goetz@6458 | 292 | |
goetz@6458 | 293 | // Load the backedge counter. |
goetz@6458 | 294 | lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int |
goetz@6458 | 295 | // Mask the backedge counter. |
goetz@6458 | 296 | Register tmp = invocation_count; |
goetz@6458 | 297 | li(tmp, InvocationCounter::count_mask_value); |
goetz@6458 | 298 | andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value. |
goetz@6458 | 299 | |
goetz@6458 | 300 | // Load the invocation counter. |
goetz@6458 | 301 | lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int |
goetz@6458 | 302 | // Add the delta to the invocation counter and store the result. |
goetz@6458 | 303 | addi(invocation_count, invocation_count, delta); |
goetz@6458 | 304 | // Store the incremented invocation counter back. |
goetz@6458 | 305 | stw(invocation_count, inv_counter_offset, Rcounters); |
goetz@6458 | 306 | |
goetz@6458 | 307 | // Add invocation counter + backedge counter. |
goetz@6458 | 308 | add(iv_be_count, backedge_count, invocation_count); |
goetz@6458 | 309 | |
goetz@6458 | 310 | // Note that this macro must leave the backedge_count + invocation_count in |
goetz@6458 | 311 | // register iv_be_count! |
goetz@6458 | 312 | BLOCK_COMMENT("} Increment profiling counters"); |
goetz@6458 | 313 | } |
goetz@6458 | 314 | |
// Verify that reg contains a valid oop, but only when the TOS state is atos;
// a no-op for all other states.
goetz@6458 | 315 | void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { |
goetz@6458 | 316 | if (state == atos) { MacroAssembler::verify_oop(reg); } |
goetz@6458 | 317 | } |
goetz@6458 | 318 | |
goetz@6458 | 319 | // Inline assembly for: |
goetz@6458 | 320 | // |
goetz@6458 | 321 | // if (thread is in interp_only_mode) { |
goetz@6458 | 322 | // InterpreterRuntime::post_method_entry(); |
goetz@6458 | 323 | // } |
goetz@6458 | 324 | // if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) || |
goetz@6458 | 325 | // *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2) ) { |
goetz@6458 | 326 | // SharedRuntime::jvmpi_method_entry(method, receiver); |
goetz@6458 | 327 | // } |
// Note: only the JVMTI part of the template above is generated below; the
// JVMPI checks are historical template text.
goetz@6458 | 328 | void InterpreterMacroAssembler::notify_method_entry() { |
goetz@6458 | 329 | // JVMTI |
goetz@6458 | 330 | // Whenever JVMTI puts a thread in interp_only_mode, method |
goetz@6458 | 331 | // entry/exit events are sent for that thread to track stack |
goetz@6458 | 332 | // depth. If it is possible to enter interp_only_mode we add |
goetz@6458 | 333 | // the code to check if the event should be sent. |
goetz@6458 | 334 | if (JvmtiExport::can_post_interpreter_events()) { |
goetz@6458 | 335 | Label jvmti_post_done; |
goetz@6458 | 336 | |
goetz@6458 | 337 | lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread); |
goetz@6458 | 338 | cmpwi(CCR0, R0, 0); |
goetz@6458 | 339 | beq(CCR0, jvmti_post_done); |
goetz@6458 | 340 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry), |
goetz@6458 | 341 | /*check_exceptions=*/false); |
goetz@6458 | 342 | |
goetz@6458 | 343 | bind(jvmti_post_done); |
goetz@6458 | 344 | } |
goetz@6458 | 345 | } |
goetz@6458 | 346 | |
goetz@6458 | 347 | |
goetz@6458 | 348 | // Inline assembly for: |
goetz@6458 | 349 | // |
goetz@6458 | 350 | // if (thread is in interp_only_mode) { |
goetz@6458 | 351 | // // save result |
goetz@6458 | 352 | // InterpreterRuntime::post_method_exit(); |
goetz@6458 | 353 | // // restore result |
goetz@6458 | 354 | // } |
goetz@6458 | 355 | // if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) { |
goetz@6458 | 356 | // // save result |
goetz@6458 | 357 | // SharedRuntime::jvmpi_method_exit(); |
goetz@6458 | 358 | // // restore result |
goetz@6458 | 359 | // } |
goetz@6458 | 360 | // |
goetz@6458 | 361 | // Native methods have their result stored in d_tmp and l_tmp. |
goetz@6458 | 362 | // Java methods have their result stored in the expression stack. |
// Note: only the JVMTI part of the template above is generated below; the
// is_native_method and state parameters are currently unused here.
goetz@6458 | 363 | void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state) { |
goetz@6458 | 364 | // JVMTI |
goetz@6458 | 365 | // Whenever JVMTI puts a thread in interp_only_mode, method |
goetz@6458 | 366 | // entry/exit events are sent for that thread to track stack |
goetz@6458 | 367 | // depth. If it is possible to enter interp_only_mode we add |
goetz@6458 | 368 | // the code to check if the event should be sent. |
goetz@6458 | 369 | if (JvmtiExport::can_post_interpreter_events()) { |
goetz@6458 | 370 | Label jvmti_post_done; |
goetz@6458 | 371 | |
goetz@6458 | 372 | lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread); |
goetz@6458 | 373 | cmpwi(CCR0, R0, 0); |
goetz@6458 | 374 | beq(CCR0, jvmti_post_done); |
goetz@6458 | 375 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), |
goetz@6458 | 376 | /*check_exceptions=*/false); |
goetz@6458 | 377 | |
goetz@6458 | 378 | bind(jvmti_post_done); |
goetz@6458 | 379 | } |
goetz@6458 | 380 | } |
goetz@6458 | 381 | |
goetz@6458 | 382 | // Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME |
goetz@6458 | 383 | // (using parent_frame_resize) and push a new interpreter |
goetz@6458 | 384 | // TOP_IJAVA_FRAME (using frame_size). |
goetz@6458 | 385 | void InterpreterMacroAssembler::push_interpreter_frame(Register top_frame_size, Register parent_frame_resize, |
goetz@6458 | 386 | Register tmp1, Register tmp2, Register tmp3, |
goetz@6458 | 387 | Register tmp4, Register pc) { |
// Note: the 'pc' parameter is unused in this implementation (it is also not
// part of the assert below).
goetz@6458 | 388 | assert_different_registers(top_frame_size, parent_frame_resize, tmp1, tmp2, tmp3, tmp4); |
goetz@6458 | 389 | ld(tmp1, _top_ijava_frame_abi(frame_manager_lr), R1_SP); |
goetz@6458 | 390 | mr(tmp2/*top_frame_sp*/, R1_SP); |
goetz@6458 | 391 | // Move initial_caller_sp. |
goetz@6458 | 392 | ld(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP); |
goetz@6458 | 393 | neg(parent_frame_resize, parent_frame_resize); |
goetz@6458 | 394 | resize_frame(parent_frame_resize/*-parent_frame_resize*/, tmp3); |
goetz@6458 | 395 | |
goetz@6458 | 396 | // Set LR in new parent frame. |
goetz@6458 | 397 | std(tmp1, _abi(lr), R1_SP); |
goetz@6458 | 398 | // Set top_frame_sp info for new parent frame. |
goetz@6458 | 399 | std(tmp2, _parent_ijava_frame_abi(top_frame_sp), R1_SP); |
goetz@6458 | 400 | std(tmp4, _parent_ijava_frame_abi(initial_caller_sp), R1_SP); |
goetz@6458 | 401 | |
goetz@6458 | 402 | // Push new TOP_IJAVA_FRAME. |
goetz@6458 | 403 | push_frame(top_frame_size, tmp2); |
goetz@6458 | 404 | |
goetz@6458 | 405 | get_PC_trash_LR(tmp3); |
goetz@6458 | 406 | std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP); |
goetz@6458 | 407 | // Used for non-initial callers by unextended_sp(). |
goetz@6458 | 408 | std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP); |
goetz@6458 | 409 | } |
goetz@6458 | 410 | |
goetz@6458 | 411 | // Pop the topmost TOP_IJAVA_FRAME and convert the previous |
goetz@6458 | 412 | // PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME. |
goetz@6458 | 413 | void InterpreterMacroAssembler::pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4) { |
goetz@6458 | 414 | assert_different_registers(tmp1, tmp2, tmp3, tmp4); |
goetz@6458 | 415 | |
goetz@6458 | 416 | ld(tmp1/*caller's sp*/, _abi(callers_sp), R1_SP); |
goetz@6458 | 417 | ld(tmp3, _abi(lr), tmp1); |
goetz@6458 | 418 | |
goetz@6458 | 419 | ld(tmp4, _parent_ijava_frame_abi(initial_caller_sp), tmp1); |
goetz@6458 | 420 | |
goetz@6458 | 421 | ld(tmp2/*caller's caller's sp*/, _abi(callers_sp), tmp1); |
goetz@6458 | 422 | // Merge top frame: make our back chain point to the caller's caller, |
goetz@6458 | 423 | // skipping the popped frame. |
goetz@6458 | 424 | std(tmp2, _abi(callers_sp), R1_SP); |
goetz@6458 | 425 | |
goetz@6458 | 426 | ld(tmp2, _parent_ijava_frame_abi(top_frame_sp), tmp1); |
goetz@6458 | 427 | |
goetz@6458 | 428 | // Update C stack pointer to caller's top_abi. |
goetz@6458 | 429 | resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/); |
goetz@6458 | 430 | |
goetz@6458 | 431 | // Update LR in top_frame. |
goetz@6458 | 432 | std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP); |
goetz@6458 | 433 | |
goetz@6458 | 434 | std(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP); |
goetz@6458 | 435 | |
goetz@6458 | 436 | // Store the top-frame stack-pointer for c2i adapters. |
goetz@6458 | 437 | std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP); |
goetz@6458 | 438 | } |
goetz@6458 | 438 | |
goetz@6458 | 439 | #ifdef CC_INTERP |
goetz@6458 | 440 | // Turn state's interpreter frame into the current TOP_IJAVA_FRAME. |
// 'state' must be either R14_state or R15_prev_state (enforced by the
// ShouldNotReachHere below). tmp1-tmp3 are clobbered.
goetz@6458 | 441 | void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) { |
goetz@6458 | 442 | assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3); |
goetz@6458 | 443 | |
goetz@6458 | 444 | if (state == R14_state) { |
goetz@6458 | 445 | ld(tmp1/*state's fp*/, state_(_last_Java_fp)); |
goetz@6458 | 446 | ld(tmp2/*state's sp*/, state_(_last_Java_sp)); |
goetz@6458 | 447 | } else if (state == R15_prev_state) { |
goetz@6458 | 448 | ld(tmp1/*state's fp*/, prev_state_(_last_Java_fp)); |
goetz@6458 | 449 | ld(tmp2/*state's sp*/, prev_state_(_last_Java_sp)); |
goetz@6458 | 450 | } else { |
goetz@6458 | 451 | ShouldNotReachHere(); |
goetz@6458 | 452 | } |
goetz@6458 | 453 | |
goetz@6458 | 454 | // Merge top frames. |
goetz@6458 | 455 | std(tmp1, _abi(callers_sp), R1_SP); |
goetz@6458 | 456 | |
goetz@6458 | 457 | // Tmp2 is new SP. |
goetz@6458 | 458 | // Tmp1 is parent's SP. |
goetz@6458 | 459 | resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/); |
goetz@6458 | 460 | |
goetz@6458 | 461 | // Update LR in top_frame. |
goetz@6458 | 462 | // Must be interpreter frame. |
goetz@6458 | 463 | get_PC_trash_LR(tmp3); |
goetz@6458 | 464 | std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP); |
goetz@6458 | 465 | // Used for non-initial callers by unextended_sp(). |
goetz@6458 | 466 | std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP); |
goetz@6458 | 467 | } |
goetz@6458 | 468 | #endif // CC_INTERP |
goetz@6458 | 469 | |
goetz@6458 | 470 | // Set SP to initial caller's sp; but before resizing, fix the back chain. |
goetz@6458 | 471 | void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) { |
goetz@6458 | 472 | ld(tmp1, _parent_ijava_frame_abi(initial_caller_sp), R1_SP); |
goetz@6458 | 473 | ld(tmp2, _parent_ijava_frame_abi(callers_sp), R1_SP); |
goetz@6458 | 474 | std(tmp2, _parent_ijava_frame_abi(callers_sp), tmp1); // Fix back chain ... |
goetz@6458 | 475 | mr(R1_SP, tmp1); // ... and resize to initial caller. |
goetz@6458 | 476 | } |
goetz@6458 | 477 | |
goetz@6458 | 478 | #ifdef CC_INTERP |
goetz@6458 | 479 | // Pop the current interpreter state (without popping the corresponding |
goetz@6458 | 480 | // frame) and restore R14_state and R15_prev_state accordingly. |
goetz@6458 | 481 | // Use prev_state_may_be_0 to indicate whether prev_state may be 0 |
goetz@6458 | 482 | // in order to generate an extra check before retrieving prev_state_(_prev_link). |
goetz@6458 | 483 | void InterpreterMacroAssembler::pop_interpreter_state(bool prev_state_may_be_0) |
goetz@6458 | 484 | { |
goetz@6458 | 485 | // Move prev_state to state and restore prev_state from state_(_prev_link). |
goetz@6458 | 486 | Label prev_state_is_0; |
goetz@6458 | 487 | mr(R14_state, R15_prev_state); |
goetz@6458 | 488 | |
goetz@6458 | 489 | // Don't retrieve /*state==*/prev_state_(_prev_link) |
goetz@6458 | 490 | // if /*state==*/prev_state is 0. |
goetz@6458 | 491 | if (prev_state_may_be_0) { |
goetz@6458 | 492 | cmpdi(CCR0, R15_prev_state, 0); |
goetz@6458 | 493 | beq(CCR0, prev_state_is_0); |
goetz@6458 | 494 | } |
goetz@6458 | 495 | |
goetz@6458 | 496 | ld(R15_prev_state, /*state==*/prev_state_(_prev_link)); |
goetz@6458 | 497 | bind(prev_state_is_0); |
goetz@6458 | 498 | } |
goetz@6458 | 499 | |
// Reload R15_prev_state from the current state's _prev_link field.
goetz@6458 | 500 | void InterpreterMacroAssembler::restore_prev_state() { |
goetz@6458 | 501 | // _prev_link is private, but cInterpreter is a friend. |
goetz@6458 | 502 | ld(R15_prev_state, state_(_prev_link)); |
goetz@6458 | 503 | } |
goetz@6458 | 504 | #endif // CC_INTERP |