src/cpu/ppc/vm/stubGenerator_ppc.cpp

author:      goetz
date:        Thu, 06 Mar 2014 10:55:28 -0800
changeset:   6511:31e80afe3fed
parent:      6508:c4178a748df9
child:       6512:fd1b9f02cc91
permissions: -rw-r--r--

8035647: PPC64: Support for elf v2 abi.
Summary: ELFv2 ABI used by the little endian PowerPC64 on Linux.
Reviewed-by: kvn
Contributed-by: asmundak@google.com

goetz@6458 1 /*
goetz@6458 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
goetz@6458 3 * Copyright 2012, 2013 SAP AG. All rights reserved.
goetz@6458 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
goetz@6458 5 *
goetz@6458 6 * This code is free software; you can redistribute it and/or modify it
goetz@6458 7 * under the terms of the GNU General Public License version 2 only, as
goetz@6458 8 * published by the Free Software Foundation.
goetz@6458 9 *
goetz@6458 10 * This code is distributed in the hope that it will be useful, but WITHOUT
goetz@6458 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
goetz@6458 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
goetz@6458 13 * version 2 for more details (a copy is included in the LICENSE file that
goetz@6458 14 * accompanied this code).
goetz@6458 15 *
goetz@6458 16 * You should have received a copy of the GNU General Public License version
goetz@6458 17 * 2 along with this work; if not, write to the Free Software Foundation,
goetz@6458 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
goetz@6458 19 *
goetz@6458 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
goetz@6458 21 * or visit www.oracle.com if you need additional information or have any
goetz@6458 22 * questions.
goetz@6458 23 *
goetz@6458 24 */
goetz@6458 25
goetz@6458 26 #include "precompiled.hpp"
goetz@6458 27 #include "asm/assembler.hpp"
goetz@6458 28 #include "asm/macroAssembler.inline.hpp"
goetz@6458 29 #include "interpreter/interpreter.hpp"
goetz@6458 30 #include "nativeInst_ppc.hpp"
goetz@6458 31 #include "oops/instanceOop.hpp"
goetz@6458 32 #include "oops/method.hpp"
goetz@6458 33 #include "oops/objArrayKlass.hpp"
goetz@6458 34 #include "oops/oop.inline.hpp"
goetz@6458 35 #include "prims/methodHandles.hpp"
goetz@6458 36 #include "runtime/frame.inline.hpp"
goetz@6458 37 #include "runtime/handles.inline.hpp"
goetz@6458 38 #include "runtime/sharedRuntime.hpp"
goetz@6458 39 #include "runtime/stubCodeGenerator.hpp"
goetz@6458 40 #include "runtime/stubRoutines.hpp"
goetz@6458 41 #include "utilities/top.hpp"
goetz@6458 42 #ifdef TARGET_OS_FAMILY_aix
goetz@6458 43 # include "thread_aix.inline.hpp"
goetz@6458 44 #endif
goetz@6458 45 #ifdef TARGET_OS_FAMILY_linux
goetz@6458 46 # include "thread_linux.inline.hpp"
goetz@6458 47 #endif
goetz@6458 48 #ifdef COMPILER2
goetz@6458 49 #include "opto/runtime.hpp"
goetz@6458 50 #endif
goetz@6458 51
goetz@6458 52 #define __ _masm->
goetz@6458 53
goetz@6458 54 #ifdef PRODUCT
goetz@6458 55 #define BLOCK_COMMENT(str) // nothing
goetz@6458 56 #else
goetz@6458 57 #define BLOCK_COMMENT(str) __ block_comment(str)
goetz@6458 58 #endif
goetz@6458 59
goetz@6458 60 class StubGenerator: public StubCodeGenerator {
goetz@6458 61 private:
goetz@6458 62
goetz@6458 63 // Call stubs are used to call Java from C
goetz@6458 64 //
goetz@6458 65 // Arguments:
goetz@6458 66 //
goetz@6458 67 // R3 - call wrapper address : address
goetz@6458 68 // R4 - result : intptr_t*
goetz@6458 69 // R5 - result type : BasicType
goetz@6458 70 // R6 - method : Method
goetz@6458 71 // R7 - frame mgr entry point : address
goetz@6458 72 // R8 - parameter block : intptr_t*
goetz@6458 73 // R9 - parameter count in words : int
goetz@6458 74 // R10 - thread : Thread*
goetz@6458 75 //
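// Viewed from C++, the generated entry point behaves roughly like the following
// function pointer type (informal sketch; the authoritative typedef is
// StubRoutines::CallStub in the shared runtime, parameter names here are ours):
//
//   void call_stub(address   call_wrapper,    // R3
//                  intptr_t* result,          // R4
//                  BasicType result_type,     // R5
//                  Method*   method,          // R6
//                  address   entry_point,     // R7
//                  intptr_t* parameters,      // R8
//                  int       parameter_words, // R9
//                  Thread*   thread);         // R10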
goetz@6458 76 address generate_call_stub(address& return_address) {
goetz@6458 77 // Set up a new C frame, copy java arguments, call frame manager or
goetz@6458 78 // native_entry, and process result.
goetz@6458 79
goetz@6458 80 StubCodeMark mark(this, "StubRoutines", "call_stub");
goetz@6458 81
goetz@6511 82 address start = __ function_entry();
goetz@6458 83
goetz@6458 84 // some sanity checks
goetz@6511 85 assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned");
goetz@6511 86 assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned");
goetz@6458 87 assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned");
goetz@6458 88 assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
goetz@6458 89 assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned");
goetz@6458 90
goetz@6458 91 Register r_arg_call_wrapper_addr = R3;
goetz@6458 92 Register r_arg_result_addr = R4;
goetz@6458 93 Register r_arg_result_type = R5;
goetz@6458 94 Register r_arg_method = R6;
goetz@6458 95 Register r_arg_entry = R7;
goetz@6458 96 Register r_arg_thread = R10;
goetz@6458 97
goetz@6458 98 Register r_temp = R24;
goetz@6458 99 Register r_top_of_arguments_addr = R25;
goetz@6458 100 Register r_entryframe_fp = R26;
goetz@6458 101
goetz@6458 102 {
goetz@6458 103 // Stack on entry to call_stub:
goetz@6458 104 //
goetz@6458 105 // F1 [C_FRAME]
goetz@6458 106 // ...
goetz@6458 107
goetz@6458 108 Register r_arg_argument_addr = R8;
goetz@6458 109 Register r_arg_argument_count = R9;
goetz@6458 110 Register r_frame_alignment_in_bytes = R27;
goetz@6458 111 Register r_argument_addr = R28;
goetz@6458 112 Register r_argumentcopy_addr = R29;
goetz@6458 113 Register r_argument_size_in_bytes = R30;
goetz@6458 114 Register r_frame_size = R23;
goetz@6458 115
goetz@6458 116 Label arguments_copied;
goetz@6458 117
goetz@6458 118 // Save LR/CR to caller's C_FRAME.
goetz@6458 119 __ save_LR_CR(R0);
goetz@6458 120
goetz@6458 121 // Zero extend arg_argument_count.
goetz@6458 122 __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
goetz@6458 123
goetz@6458 124 // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
goetz@6458 125 __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
goetz@6458 126
goetz@6458 127 // Keep copy of our frame pointer (caller's SP).
goetz@6458 128 __ mr(r_entryframe_fp, R1_SP);
goetz@6458 129
goetz@6458 130 BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
goetz@6458 131 // Push ENTRY_FRAME including arguments:
goetz@6458 132 //
goetz@6458 133 // F0 [TOP_IJAVA_FRAME_ABI]
goetz@6458 134 // alignment (optional)
goetz@6458 135 // [outgoing Java arguments]
goetz@6458 136 // [ENTRY_FRAME_LOCALS]
goetz@6458 137 // F1 [C_FRAME]
goetz@6458 138 // ...
goetz@6458 139
goetz@6458 140 // calculate frame size
goetz@6458 141
goetz@6458 142 // unaligned size of arguments
goetz@6458 143 __ sldi(r_argument_size_in_bytes,
goetz@6458 144 r_arg_argument_count, Interpreter::logStackElementSize);
goetz@6458 145 // arguments alignment (max 1 slot)
goetz@6458 146 // FIXME: use round_to() here
goetz@6458 147 __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
goetz@6458 148 __ sldi(r_frame_alignment_in_bytes,
goetz@6495 149 r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
goetz@6458 150
goetz@6458 151 // size = unaligned size of arguments + top abi's size
goetz@6458 152 __ addi(r_frame_size, r_argument_size_in_bytes,
goetz@6458 153 frame::top_ijava_frame_abi_size);
goetz@6458 154 // size += arguments alignment
goetz@6458 155 __ add(r_frame_size,
goetz@6495 156 r_frame_size, r_frame_alignment_in_bytes);
goetz@6458 157 // size += size of call_stub locals
goetz@6458 158 __ addi(r_frame_size,
goetz@6458 159 r_frame_size, frame::entry_frame_locals_size);
goetz@6458 160
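// Worked example of the sizing above (illustrative numbers): on a 64-bit VM
// Interpreter::logStackElementSize is 3, so 5 incoming java arguments need
// 5 << 3 = 40 bytes; the odd element count adds one 8-byte alignment slot,
// giving 48 bytes of outgoing argument space on top of
// frame::top_ijava_frame_abi_size and frame::entry_frame_locals_size.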
goetz@6458 161 // push ENTRY_FRAME
goetz@6458 162 __ push_frame(r_frame_size, r_temp);
goetz@6458 163
goetz@6458 164 // initialize call_stub locals (step 1)
goetz@6458 165 __ std(r_arg_call_wrapper_addr,
goetz@6458 166 _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
goetz@6458 167 __ std(r_arg_result_addr,
goetz@6458 168 _entry_frame_locals_neg(result_address), r_entryframe_fp);
goetz@6458 169 __ std(r_arg_result_type,
goetz@6458 170 _entry_frame_locals_neg(result_type), r_entryframe_fp);
goetz@6458 171 // we will save arguments_tos_address later
goetz@6458 172
goetz@6458 173
goetz@6458 174 BLOCK_COMMENT("Copy Java arguments");
goetz@6458 175 // copy Java arguments
goetz@6458 176
goetz@6458 177 // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
goetz@6458 178 // FIXME: why not simply use SP+frame::top_ijava_frame_size?
goetz@6458 179 __ addi(r_top_of_arguments_addr,
goetz@6458 180 R1_SP, frame::top_ijava_frame_abi_size);
goetz@6458 181 __ add(r_top_of_arguments_addr,
goetz@6495 182 r_top_of_arguments_addr, r_frame_alignment_in_bytes);
goetz@6458 183
goetz@6458 184 // any arguments to copy?
goetz@6458 185 __ cmpdi(CCR0, r_arg_argument_count, 0);
goetz@6458 186 __ beq(CCR0, arguments_copied);
goetz@6458 187
goetz@6458 188 // prepare loop and copy arguments in reverse order
goetz@6458 189 {
goetz@6458 190 // init CTR with arg_argument_count
goetz@6458 191 __ mtctr(r_arg_argument_count);
goetz@6458 192
goetz@6458 193 // let r_argumentcopy_addr point to last outgoing Java argument
goetz@6458 194 __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
goetz@6458 195
goetz@6458 196 // let r_argument_addr point to last incoming java argument
goetz@6458 197 __ add(r_argument_addr,
goetz@6458 198 r_arg_argument_addr, r_argument_size_in_bytes);
goetz@6458 199 __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
goetz@6458 200
goetz@6458 201 // now loop while CTR > 0 and copy arguments
goetz@6458 202 {
goetz@6458 203 Label next_argument;
goetz@6458 204 __ bind(next_argument);
goetz@6458 205
goetz@6458 206 __ ld(r_temp, 0, r_argument_addr);
goetz@6458 207 // argument_addr--;
goetz@6458 208 __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
goetz@6458 209 __ std(r_temp, 0, r_argumentcopy_addr);
goetz@6458 210 // argumentcopy_addr++;
goetz@6458 211 __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
goetz@6458 212
goetz@6458 213 __ bdnz(next_argument);
goetz@6458 214 }
goetz@6458 215 }
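// Rough C equivalent of the copy loop above (illustrative, informal names):
// the last incoming argument lands in the first outgoing slot, i.e. the
// parameter array is reversed into the interpreter's expected layout:
//
//   intptr_t* src = arg_argument_addr + arg_argument_count - 1;
//   intptr_t* dst = top_of_arguments_addr;
//   for (int i = 0; i < arg_argument_count; i++) *dst++ = *src--;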
goetz@6458 216
goetz@6458 217 // Arguments copied, continue.
goetz@6458 218 __ bind(arguments_copied);
goetz@6458 219 }
goetz@6458 220
goetz@6458 221 {
goetz@6458 222 BLOCK_COMMENT("Call frame manager or native entry.");
goetz@6458 223 // Call frame manager or native entry.
goetz@6458 224 Register r_new_arg_entry = R14_state;
goetz@6458 225 assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
goetz@6458 226 r_arg_method, r_arg_thread);
goetz@6458 227
goetz@6458 228 __ mr(r_new_arg_entry, r_arg_entry);
goetz@6458 229
goetz@6458 230 // Register state on entry to frame manager / native entry:
goetz@6458 231 //
goetz@6495 232 // tos - intptr_t* sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
goetz@6458 233 // R19_method - Method
goetz@6458 234 // R16_thread - JavaThread*
goetz@6458 235
goetz@6495 236 // Tos must point to last argument - element_size.
goetz@6495 237 const Register tos = R17_tos;
goetz@6495 238 __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
goetz@6458 239
goetz@6458 240 // initialize call_stub locals (step 2)
goetz@6495 241 // now save tos as arguments_tos_address
goetz@6495 242 __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
goetz@6458 243
goetz@6458 244 // load argument registers for call
goetz@6458 245 __ mr(R19_method, r_arg_method);
goetz@6458 246 __ mr(R16_thread, r_arg_thread);
goetz@6495 247 assert(tos != r_arg_method, "trashed r_arg_method");
goetz@6495 248 assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
goetz@6458 249
goetz@6458 250 // Set R15_prev_state to 0 for simplifying checks in callee.
goetz@6458 251 __ li(R15_prev_state, 0);
goetz@6458 252
goetz@6458 253 // Stack on entry to frame manager / native entry:
goetz@6458 254 //
goetz@6458 255 // F0 [TOP_IJAVA_FRAME_ABI]
goetz@6458 256 // alignment (optional)
goetz@6458 257 // [outgoing Java arguments]
goetz@6458 258 // [ENTRY_FRAME_LOCALS]
goetz@6458 259 // F1 [C_FRAME]
goetz@6458 260 // ...
goetz@6458 261 //
goetz@6458 262
goetz@6458 263 // global toc register
goetz@6458 264 __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);
goetz@6458 265
goetz@6458 266 // Load narrow oop base.
goetz@6458 267 __ reinit_heapbase(R30, R11_scratch1);
goetz@6458 268
goetz@6458 269 // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
goetz@6458 270 // when called via a c2i.
goetz@6458 271
goetz@6458 272 // Pass initial_caller_sp to framemanager.
goetz@6458 273 __ mr(R21_tmp1, R1_SP);
goetz@6458 274
goetz@6458 275 // Do a light-weight C-call here, r_new_arg_entry holds the address
goetz@6458 276 // of the interpreter entry point (frame manager or native entry)
goetz@6458 277 // and save runtime-value of LR in return_address.
goetz@6495 278 assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
goetz@6458 279 "trashed r_new_arg_entry");
goetz@6458 280 return_address = __ call_stub(r_new_arg_entry);
goetz@6458 281 }
goetz@6458 282
goetz@6458 283 {
goetz@6458 284 BLOCK_COMMENT("Returned from frame manager or native entry.");
goetz@6458 285 // Returned from frame manager or native entry.
goetz@6458 286 // Now pop frame, process result, and return to caller.
goetz@6458 287
goetz@6458 288 // Stack on exit from frame manager / native entry:
goetz@6458 289 //
goetz@6458 290 // F0 [ABI]
goetz@6458 291 // ...
goetz@6458 292 // [ENTRY_FRAME_LOCALS]
goetz@6458 293 // F1 [C_FRAME]
goetz@6458 294 // ...
goetz@6458 295 //
goetz@6458 296 // Just pop the topmost frame ...
goetz@6458 297 //
goetz@6458 298
goetz@6458 299 Label ret_is_object;
goetz@6458 300 Label ret_is_long;
goetz@6458 301 Label ret_is_float;
goetz@6458 302 Label ret_is_double;
goetz@6458 303
goetz@6458 304 Register r_entryframe_fp = R30;
goetz@6458 305 Register r_lr = R7_ARG5;
goetz@6458 306 Register r_cr = R8_ARG6;
goetz@6458 307
goetz@6458 308 // Reload some volatile registers which we've spilled before the call
goetz@6458 309 // to frame manager / native entry.
goetz@6458 310 // Access all locals via frame pointer, because we know nothing about
goetz@6458 311 // the topmost frame's size.
goetz@6458 312 __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
goetz@6458 313 assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
goetz@6458 314 __ ld(r_arg_result_addr,
goetz@6458 315 _entry_frame_locals_neg(result_address), r_entryframe_fp);
goetz@6458 316 __ ld(r_arg_result_type,
goetz@6458 317 _entry_frame_locals_neg(result_type), r_entryframe_fp);
goetz@6458 318 __ ld(r_cr, _abi(cr), r_entryframe_fp);
goetz@6458 319 __ ld(r_lr, _abi(lr), r_entryframe_fp);
goetz@6458 320
goetz@6458 321 // pop frame and restore non-volatiles, LR and CR
goetz@6458 322 __ mr(R1_SP, r_entryframe_fp);
goetz@6458 323 __ mtcr(r_cr);
goetz@6458 324 __ mtlr(r_lr);
goetz@6458 325
goetz@6458 326 // Store result depending on type. Everything that is not
goetz@6458 327 // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
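// In C terms the dispatch below amounts to (illustrative sketch):
//
//   switch (result_type) {
//     case T_OBJECT:
//     case T_LONG:   *(intptr_t*)result_addr = R3_RET;       break;
//     case T_FLOAT:  *(jfloat*)  result_addr = F1_RET;       break;
//     case T_DOUBLE: *(jdouble*) result_addr = F1_RET;       break;
//     default:       *(jint*)    result_addr = (jint)R3_RET; break; // T_INT and friends
//   }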
goetz@6458 328 __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
goetz@6458 329 __ cmpwi(CCR1, r_arg_result_type, T_LONG);
goetz@6495 330 __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
goetz@6495 331 __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
goetz@6458 332
goetz@6458 333 // restore non-volatile registers
goetz@6458 334 __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
goetz@6458 335
goetz@6458 336
goetz@6458 337 // Stack on exit from call_stub:
goetz@6458 338 //
goetz@6458 339 // 0 [C_FRAME]
goetz@6458 340 // ...
goetz@6458 341 //
goetz@6458 342 // no call_stub frames left.
goetz@6458 343
goetz@6458 344 // All non-volatiles have been restored at this point!!
goetz@6458 345 assert(R3_RET == R3, "R3_RET should be R3");
goetz@6458 346
goetz@6458 347 __ beq(CCR0, ret_is_object);
goetz@6458 348 __ beq(CCR1, ret_is_long);
goetz@6495 349 __ beq(CCR5, ret_is_float);
goetz@6495 350 __ beq(CCR6, ret_is_double);
goetz@6458 351
goetz@6458 352 // default:
goetz@6458 353 __ stw(R3_RET, 0, r_arg_result_addr);
goetz@6458 354 __ blr(); // return to caller
goetz@6458 355
goetz@6458 356 // case T_OBJECT:
goetz@6458 357 __ bind(ret_is_object);
goetz@6458 358 __ std(R3_RET, 0, r_arg_result_addr);
goetz@6458 359 __ blr(); // return to caller
goetz@6458 360
goetz@6458 361 // case T_LONG:
goetz@6458 362 __ bind(ret_is_long);
goetz@6458 363 __ std(R3_RET, 0, r_arg_result_addr);
goetz@6458 364 __ blr(); // return to caller
goetz@6458 365
goetz@6458 366 // case T_FLOAT:
goetz@6458 367 __ bind(ret_is_float);
goetz@6458 368 __ stfs(F1_RET, 0, r_arg_result_addr);
goetz@6458 369 __ blr(); // return to caller
goetz@6458 370
goetz@6458 371 // case T_DOUBLE:
goetz@6458 372 __ bind(ret_is_double);
goetz@6458 373 __ stfd(F1_RET, 0, r_arg_result_addr);
goetz@6458 374 __ blr(); // return to caller
goetz@6458 375 }
goetz@6458 376
goetz@6458 377 return start;
goetz@6458 378 }
goetz@6458 379
goetz@6458 380 // Return point for a Java call if there's an exception thrown in
goetz@6458 381 // Java code. The exception is caught and transformed into a
goetz@6458 382 // pending exception stored in JavaThread that can be tested from
goetz@6458 383 // within the VM.
goetz@6458 384 //
goetz@6458 385 address generate_catch_exception() {
goetz@6458 386 StubCodeMark mark(this, "StubRoutines", "catch_exception");
goetz@6458 387
goetz@6458 388 address start = __ pc();
goetz@6458 389
goetz@6458 390 // Registers alive
goetz@6458 391 //
goetz@6458 392 // R16_thread
goetz@6458 393 // R3_ARG1 - address of pending exception
goetz@6458 394 // R4_ARG2 - return address in call stub
goetz@6458 395
goetz@6458 396 const Register exception_file = R21_tmp1;
goetz@6458 397 const Register exception_line = R22_tmp2;
goetz@6458 398
goetz@6458 399 __ load_const(exception_file, (void*)__FILE__);
goetz@6458 400 __ load_const(exception_line, (void*)__LINE__);
goetz@6458 401
goetz@6458 402 __ std(R3_ARG1, thread_(pending_exception));
goetz@6458 403 // store into `char *'
goetz@6458 404 __ std(exception_file, thread_(exception_file));
goetz@6458 405 // store into `int'
goetz@6458 406 __ stw(exception_line, thread_(exception_line));
goetz@6458 407
goetz@6458 408 // complete return to VM
goetz@6458 409 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
goetz@6458 410
goetz@6458 411 __ mtlr(R4_ARG2);
goetz@6458 412 // continue in call stub
goetz@6458 413 __ blr();
goetz@6458 414
goetz@6458 415 return start;
goetz@6458 416 }
goetz@6458 417
goetz@6458 418 // Continuation point for runtime calls returning with a pending
goetz@6458 419 // exception. The pending exception check happened in the runtime
goetz@6458 420 // or native call stub. The pending exception in Thread is
goetz@6458 421 // converted into a Java-level exception.
goetz@6458 422 //
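// Informal summary of the stub body below:
//   1. handler = SharedRuntime::exception_handler_for_return_address(thread, return_pc)
//   2. R3_ARG1 = thread->pending_exception(), then the pending exception is cleared
//   3. R4_ARG2 = return_pc (the exception pc)
//   4. jump to handler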
goetz@6458 423 address generate_forward_exception() {
goetz@6458 424 StubCodeMark mark(this, "StubRoutines", "forward_exception");
goetz@6458 425 address start = __ pc();
goetz@6458 426
goetz@6458 427 #if !defined(PRODUCT)
goetz@6458 428 if (VerifyOops) {
goetz@6458 429 // Get pending exception oop.
goetz@6458 430 __ ld(R3_ARG1,
goetz@6458 431 in_bytes(Thread::pending_exception_offset()),
goetz@6458 432 R16_thread);
goetz@6458 433 // Make sure that this code is only executed if there is a pending exception.
goetz@6458 434 {
goetz@6458 435 Label L;
goetz@6458 436 __ cmpdi(CCR0, R3_ARG1, 0);
goetz@6458 437 __ bne(CCR0, L);
goetz@6458 438 __ stop("StubRoutines::forward exception: no pending exception (1)");
goetz@6458 439 __ bind(L);
goetz@6458 440 }
goetz@6458 441 __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
goetz@6458 442 }
goetz@6458 443 #endif
goetz@6458 444
goetz@6458 445 // Save LR/CR and copy exception pc (LR) into R4_ARG2.
goetz@6458 446 __ save_LR_CR(R4_ARG2);
goetz@6511 447 __ push_frame_reg_args(0, R0);
goetz@6458 448 // Find exception handler.
goetz@6458 449 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
goetz@6458 450 SharedRuntime::exception_handler_for_return_address),
goetz@6458 451 R16_thread,
goetz@6458 452 R4_ARG2);
goetz@6458 453 // Copy handler's address.
goetz@6458 454 __ mtctr(R3_RET);
goetz@6458 455 __ pop_frame();
goetz@6458 456 __ restore_LR_CR(R0);
goetz@6458 457
goetz@6458 458 // Set up the arguments for the exception handler:
goetz@6458 459 // - R3_ARG1: exception oop
goetz@6458 460 // - R4_ARG2: exception pc.
goetz@6458 461
goetz@6458 462 // Load pending exception oop.
goetz@6458 463 __ ld(R3_ARG1,
goetz@6458 464 in_bytes(Thread::pending_exception_offset()),
goetz@6458 465 R16_thread);
goetz@6458 466
goetz@6458 467 // The exception pc is the return address in the caller.
goetz@6458 468 // Must load it into R4_ARG2.
goetz@6458 469 __ mflr(R4_ARG2);
goetz@6458 470
goetz@6458 471 #ifdef ASSERT
goetz@6458 472 // Make sure exception is set.
goetz@6458 473 {
goetz@6458 474 Label L;
goetz@6458 475 __ cmpdi(CCR0, R3_ARG1, 0);
goetz@6458 476 __ bne(CCR0, L);
goetz@6458 477 __ stop("StubRoutines::forward exception: no pending exception (2)");
goetz@6458 478 __ bind(L);
goetz@6458 479 }
goetz@6458 480 #endif
goetz@6458 481
goetz@6458 482 // Clear the pending exception.
goetz@6458 483 __ li(R0, 0);
goetz@6458 484 __ std(R0,
goetz@6458 485 in_bytes(Thread::pending_exception_offset()),
goetz@6458 486 R16_thread);
goetz@6458 487 // Jump to exception handler.
goetz@6458 488 __ bctr();
goetz@6458 489
goetz@6458 490 return start;
goetz@6458 491 }
goetz@6458 492
goetz@6458 493 #undef __
goetz@6458 494 #define __ masm->
goetz@6458 495 // Continuation point for throwing of implicit exceptions that are
goetz@6458 496 // not handled in the current activation. Fabricates an exception
goetz@6458 497 // oop and initiates normal exception dispatching in this
goetz@6458 498 // frame. Only callee-saved registers are preserved (through the
goetz@6458 499 // normal register window / RegisterMap handling). If the compiler
goetz@6458 500 // needs all registers to be preserved between the fault point and
goetz@6458 501 // the exception handler then it must assume responsibility for that
goetz@6458 502 // in AbstractCompiler::continuation_for_implicit_null_exception or
goetz@6458 503 // continuation_for_implicit_division_by_zero_exception. All other
goetz@6458 504 // implicit exceptions (e.g., NullPointerException or
goetz@6458 505 // AbstractMethodError on entry) are either at call sites or
goetz@6458 506 // otherwise assume that stack unwinding will be initiated, so
goetz@6458 507 // caller saved registers were assumed volatile in the compiler.
goetz@6458 508 //
goetz@6458 509 // Note that we generate only this stub into a RuntimeStub, because
goetz@6458 510 // it needs to be properly traversed and ignored during GC, so we
goetz@6458 511 // change the meaning of the "__" macro within this method.
goetz@6458 512 //
goetz@6458 513 // Note: the routine set_pc_not_at_call_for_caller in
goetz@6458 514 // SharedRuntime.cpp requires that this code be generated into a
goetz@6458 515 // RuntimeStub.
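// Typical use (hypothetical call site, mirroring how other platforms wire up
// their throw stubs; the actual list lives in generate_initial()/generate_all()):
//
//   StubRoutines::_throw_StackOverflowError_entry =
//     generate_throw_exception("StackOverflowError throw_exception",
//                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),
//                              false);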
goetz@6458 516 address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
goetz@6458 517 Register arg1 = noreg, Register arg2 = noreg) {
goetz@6458 518 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
goetz@6458 519 MacroAssembler* masm = new MacroAssembler(&code);
goetz@6458 520
goetz@6458 521 OopMapSet* oop_maps = new OopMapSet();
goetz@6511 522 int frame_size_in_bytes = frame::abi_reg_args_size;
goetz@6458 523 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
goetz@6458 524
goetz@6458 525 StubCodeMark mark(this, "StubRoutines", "throw_exception");
goetz@6458 526
goetz@6458 527 address start = __ pc();
goetz@6458 528
goetz@6458 529 __ save_LR_CR(R11_scratch1);
goetz@6458 530
goetz@6458 531 // Push a frame.
goetz@6511 532 __ push_frame_reg_args(0, R11_scratch1);
goetz@6458 533
goetz@6458 534 address frame_complete_pc = __ pc();
goetz@6458 535
goetz@6458 536 if (restore_saved_exception_pc) {
goetz@6458 537 __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
goetz@6458 538 }
goetz@6458 539
goetz@6458 540 // Note that we always have a runtime stub frame on the top of
goetz@6458 541 // stack by this point. Remember the offset of the instruction
goetz@6458 542 // whose address will be moved to R11_scratch1.
goetz@6458 543 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
goetz@6458 544
goetz@6458 545 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
goetz@6458 546
goetz@6458 547 __ mr(R3_ARG1, R16_thread);
goetz@6458 548 if (arg1 != noreg) {
goetz@6458 549 __ mr(R4_ARG2, arg1);
goetz@6458 550 }
goetz@6458 551 if (arg2 != noreg) {
goetz@6458 552 __ mr(R5_ARG3, arg2);
goetz@6458 553 }
goetz@6511 554 #if defined(ABI_ELFv2)
goetz@6511 555 __ call_c(runtime_entry, relocInfo::none);
goetz@6511 556 #else
goetz@6511 557 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
goetz@6511 558 #endif
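// Background on the two call forms above (informal): under the ELFv1 ABI a C
// function pointer designates a function descriptor, roughly
//
//   struct FunctionDescriptor { address entry; address toc; address env; };
//
// so entry point and TOC must be loaded from it, whereas under ELFv2 the
// pointer is the entry address itself and can be branched to directly.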
goetz@6458 559
goetz@6458 560 // Set an oopmap for the call site.
goetz@6458 561 oop_maps->add_gc_map((int)(gc_map_pc - start), map);
goetz@6458 562
goetz@6458 563 __ reset_last_Java_frame();
goetz@6458 564
goetz@6458 565 #ifdef ASSERT
goetz@6458 566 // Make sure that this code is only executed if there is a pending
goetz@6458 567 // exception.
goetz@6458 568 {
goetz@6458 569 Label L;
goetz@6458 570 __ ld(R0,
goetz@6458 571 in_bytes(Thread::pending_exception_offset()),
goetz@6458 572 R16_thread);
goetz@6458 573 __ cmpdi(CCR0, R0, 0);
goetz@6458 574 __ bne(CCR0, L);
goetz@6458 575 __ stop("StubRoutines::throw_exception: no pending exception");
goetz@6458 576 __ bind(L);
goetz@6458 577 }
goetz@6458 578 #endif
goetz@6458 579
goetz@6458 580 // Pop frame.
goetz@6458 581 __ pop_frame();
goetz@6458 582
goetz@6458 583 __ restore_LR_CR(R11_scratch1);
goetz@6458 584
goetz@6458 585 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
goetz@6458 586 __ mtctr(R11_scratch1);
goetz@6458 587 __ bctr();
goetz@6458 588
goetz@6458 589 // Create runtime stub with OopMap.
goetz@6458 590 RuntimeStub* stub =
goetz@6458 591 RuntimeStub::new_runtime_stub(name, &code,
goetz@6458 592 /*frame_complete=*/ (int)(frame_complete_pc - start),
goetz@6458 593 frame_size_in_bytes/wordSize,
goetz@6458 594 oop_maps,
goetz@6458 595 false);
goetz@6458 596 return stub->entry_point();
goetz@6458 597 }
goetz@6458 598 #undef __
goetz@6458 599 #define __ _masm->
goetz@6458 600
goetz@6458 601 // Generate G1 pre-write barrier for array.
goetz@6458 602 //
goetz@6458 603 // Input:
goetz@6458 604 // from - register containing src address (only needed for spilling)
goetz@6458 605 // to - register containing starting address
goetz@6458 606 // count - register containing element count
goetz@6458 607 // tmp - scratch register
goetz@6458 608 //
goetz@6458 609 // Kills:
goetz@6458 610 // nothing
goetz@6458 611 //
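// Illustrative sketch of the generated logic (informal C, G1 case only):
//
//   if (thread->satb_mark_queue().is_active())            // SATB marking running?
//     BarrierSet::static_write_ref_array_pre(to, count);  // log pre-images
//
// i.e. the runtime call is skipped entirely while concurrent marking is inactive.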
goetz@6458 612 void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
goetz@6458 613 BarrierSet* const bs = Universe::heap()->barrier_set();
goetz@6458 614 switch (bs->kind()) {
goetz@6458 615 case BarrierSet::G1SATBCT:
goetz@6458 616 case BarrierSet::G1SATBCTLogging:
goetz@6458 617 // With G1, don't generate the call if we statically know that the target is uninitialized
goetz@6458 618 if (!dest_uninitialized) {
goetz@6458 619 const int spill_slots = 4 * wordSize;
goetz@6511 620 const int frame_size = frame::abi_reg_args_size + spill_slots;
goetz@6495 621 Label filtered;
goetz@6495 622
goetz@6495 623 // Is marking active?
goetz@6495 624 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
goetz@6495 625 __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
goetz@6495 626 } else {
goetz@6495 627 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
goetz@6495 628 __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
goetz@6495 629 }
goetz@6495 630 __ cmpdi(CCR0, Rtmp1, 0);
goetz@6495 631 __ beq(CCR0, filtered);
goetz@6458 632
goetz@6458 633 __ save_LR_CR(R0);
goetz@6511 634 __ push_frame_reg_args(spill_slots, R0);
goetz@6458 635 __ std(from, frame_size - 1 * wordSize, R1_SP);
goetz@6458 636 __ std(to, frame_size - 2 * wordSize, R1_SP);
goetz@6458 637 __ std(count, frame_size - 3 * wordSize, R1_SP);
goetz@6458 638
goetz@6458 639 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
goetz@6458 640
goetz@6458 641 __ ld(from, frame_size - 1 * wordSize, R1_SP);
goetz@6458 642 __ ld(to, frame_size - 2 * wordSize, R1_SP);
goetz@6458 643 __ ld(count, frame_size - 3 * wordSize, R1_SP);
goetz@6458 644 __ pop_frame();
goetz@6458 645 __ restore_LR_CR(R0);
goetz@6495 646
goetz@6495 647 __ bind(filtered);
goetz@6458 648 }
goetz@6458 649 break;
goetz@6458 650 case BarrierSet::CardTableModRef:
goetz@6458 651 case BarrierSet::CardTableExtension:
goetz@6458 652 case BarrierSet::ModRef:
goetz@6458 653 break;
goetz@6458 654 default:
goetz@6458 655 ShouldNotReachHere();
goetz@6458 656 }
goetz@6458 657 }
goetz@6458 658
goetz@6458 659 // Generate CMS/G1 post-write barrier for array.
goetz@6458 660 //
goetz@6458 661 // Input:
goetz@6458 662 // addr - register containing starting address
goetz@6458 663 // count - register containing element count
goetz@6458 664 // tmp - scratch register
goetz@6458 665 //
goetz@6458 666 // The input registers and R0 are overwritten.
goetz@6458 667 //
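// For the card table kinds the loop below is, in informal C (illustrative;
// 0 is the dirty card value):
//
//   size_t first = (uintptr_t)addr >> CardTableModRefBS::card_shift;
//   size_t last  = ((uintptr_t)addr + count*BytesPerHeapOop - BytesPerHeapOop)
//                  >> CardTableModRefBS::card_shift;
//   for (size_t i = first; i <= last; i++) ct->byte_map_base[i] = 0;  // dirty
//
// The G1 kinds instead call BarrierSet::static_write_ref_array_post(addr, count).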
goetz@6495 668 void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
goetz@6458 669 BarrierSet* const bs = Universe::heap()->barrier_set();
goetz@6458 670
goetz@6458 671 switch (bs->kind()) {
goetz@6458 672 case BarrierSet::G1SATBCT:
goetz@6458 673 case BarrierSet::G1SATBCTLogging:
goetz@6458 674 {
goetz@6495 675 if (branchToEnd) {
goetz@6495 676 __ save_LR_CR(R0);
goetz@6495 677 // We need this frame only to spill LR.
goetz@6511 678 __ push_frame_reg_args(0, R0);
goetz@6495 679 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
goetz@6495 680 __ pop_frame();
goetz@6495 681 __ restore_LR_CR(R0);
goetz@6495 682 } else {
goetz@6495 683 // Tail call: fake call from stub caller by branching without linking.
goetz@6495 684 address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
goetz@6495 685 __ mr_if_needed(R3_ARG1, addr);
goetz@6495 686 __ mr_if_needed(R4_ARG2, count);
goetz@6495 687 __ load_const(R11, entry_point, R0);
goetz@6495 688 __ call_c_and_return_to_caller(R11);
goetz@6495 689 }
goetz@6458 690 }
goetz@6458 691 break;
goetz@6458 692 case BarrierSet::CardTableModRef:
goetz@6458 693 case BarrierSet::CardTableExtension:
goetz@6458 694 {
goetz@6458 695 Label Lskip_loop, Lstore_loop;
goetz@6458 696 if (UseConcMarkSweepGC) {
goetz@6458 697 // TODO PPC port: contribute optimization / requires shared changes
goetz@6458 698 __ release();
goetz@6458 699 }
goetz@6458 700
goetz@6458 701 CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
goetz@6458 702 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
goetz@6458 703 assert_different_registers(addr, count, tmp);
goetz@6458 704
goetz@6458 705 __ sldi(count, count, LogBytesPerHeapOop);
goetz@6458 706 __ addi(count, count, -BytesPerHeapOop);
goetz@6458 707 __ add(count, addr, count);
goetz@6458 708 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
goetz@6458 709 __ srdi(addr, addr, CardTableModRefBS::card_shift);
goetz@6458 710 __ srdi(count, count, CardTableModRefBS::card_shift);
goetz@6458 711 __ subf(count, addr, count);
goetz@6458 712 assert_different_registers(R0, addr, count, tmp);
goetz@6458 713 __ load_const(tmp, (address)ct->byte_map_base);
goetz@6458 714 __ addic_(count, count, 1);
goetz@6458 715 __ beq(CCR0, Lskip_loop);
goetz@6458 716 __ li(R0, 0);
goetz@6458 717 __ mtctr(count);
goetz@6458 718 // Byte store loop
goetz@6458 719 __ bind(Lstore_loop);
goetz@6458 720 __ stbx(R0, tmp, addr);
goetz@6458 721 __ addi(addr, addr, 1);
goetz@6458 722 __ bdnz(Lstore_loop);
goetz@6458 723 __ bind(Lskip_loop);
goetz@6495 724
goetz@6495 725 if (!branchToEnd) __ blr();
goetz@6458 726 }
goetz@6458 727 break;
goetz@6458 728 case BarrierSet::ModRef:
goetz@6495 729 if (!branchToEnd) __ blr();
goetz@6458 730 break;
goetz@6458 731 default:
goetz@6458 732 ShouldNotReachHere();
goetz@6458 733 }
goetz@6458 734 }
goetz@6458 735
goetz@6458 736 // Support for void zero_words_aligned8(HeapWord* to, size_t count)
goetz@6458 737 //
goetz@6458 738 // Arguments:
goetz@6458 739 // to: R3_ARG1, 8-byte aligned start address
goetz@6458 740 // count: R4_ARG2, number of 8-byte words (dwords) to clear
goetz@6458 741 //
goetz@6458 742 // Destroys:
goetz@6458 743 // R3_ARG1..R7_ARG5, CTR, CCR0, CCR1
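// In effect (informal sketch): memset(to, 0, count * BytesPerWord) for an
// 8-byte aligned 'to'; large runs are cleared cache line wise with dcbz
// (data cache block zero), smaller runs dword by dword.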
goetz@6458 744 address generate_zero_words_aligned8() {
goetz@6458 745 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
goetz@6458 746
goetz@6458 747 // Implemented as in ClearArray.
goetz@6511 748 address start = __ function_entry();
goetz@6458 749
goetz@6458 750 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
goetz@6458 751 Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
goetz@6458 752 Register tmp1_reg = R5_ARG3;
goetz@6458 753 Register tmp2_reg = R6_ARG4;
goetz@6458 754 Register zero_reg = R7_ARG5;
goetz@6458 755
goetz@6458 756 // Procedure for large arrays (uses data cache block zero instruction).
goetz@6458 757 Label dwloop, fast, fastloop, restloop, lastdword, done;
goetz@6458 758 int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords);
goetz@6458 759 int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
goetz@6458 760
goetz@6458 761 // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
goetz@6458 762 __ dcbtst(base_ptr_reg); // Indicate write access to first cache line ...
goetz@6458 763 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if number of dwords is even.
goetz@6458 764 __ srdi_(tmp1_reg, cnt_dwords_reg, 1); // number of double dwords
goetz@6458 765 __ load_const_optimized(zero_reg, 0L); // Use as zero register.
goetz@6458 766
goetz@6458 767 __ cmpdi(CCR1, tmp2_reg, 0); // cnt_dwords even?
goetz@6458 768 __ beq(CCR0, lastdword); // size <= 1
goetz@6458 769 __ mtctr(tmp1_reg); // Speculatively preload counter for rest loop (>0).
goetz@6458 770 __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
goetz@6458 771 __ neg(tmp1_reg, base_ptr_reg); // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
goetz@6458 772
goetz@6458 773 __ blt(CCR0, restloop); // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
goetz@6458 774 __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
goetz@6458 775
goetz@6458 776 __ beq(CCR0, fast); // already 128byte aligned
goetz@6458 777 __ mtctr(tmp1_reg); // Set ctr to hit 128byte boundary (0<ctr<cnt).
goetz@6458 778 __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
goetz@6458 779
goetz@6458 780 // Clear in first cache line dword-by-dword if not already 128byte aligned.
goetz@6458 781 __ bind(dwloop);
goetz@6458 782 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block.
goetz@6458 783 __ addi(base_ptr_reg, base_ptr_reg, 8);
goetz@6458 784 __ bdnz(dwloop);
goetz@6458 785
goetz@6458 786 // clear 128byte blocks
goetz@6458 787 __ bind(fast);
goetz@6458 788 __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
goetz@6458 789 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if rest even
goetz@6458 790
goetz@6458 791 __ mtctr(tmp1_reg); // load counter
goetz@6458 792 __ cmpdi(CCR1, tmp2_reg, 0); // rest even?
goetz@6458 793 __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
goetz@6458 794
goetz@6458 795 __ bind(fastloop);
goetz@6458 796 __ dcbz(base_ptr_reg); // Clear 128byte aligned block.
goetz@6458 797 __ addi(base_ptr_reg, base_ptr_reg, cl_size);
goetz@6458 798 __ bdnz(fastloop);
goetz@6458 799
goetz@6458 800 //__ dcbtst(base_ptr_reg); // Indicate write access to last cache line.
goetz@6458 801 __ beq(CCR0, lastdword); // rest<=1
goetz@6458 802 __ mtctr(tmp1_reg); // load counter
goetz@6458 803
goetz@6458 804 // Clear rest.
goetz@6458 805 __ bind(restloop);
goetz@6458 806 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block.
goetz@6458 807 __ std(zero_reg, 8, base_ptr_reg); // Clear 8byte aligned block.
goetz@6458 808 __ addi(base_ptr_reg, base_ptr_reg, 16);
goetz@6458 809 __ bdnz(restloop);
goetz@6458 810
goetz@6458 811 __ bind(lastdword);
goetz@6458 812 __ beq(CCR1, done);
goetz@6458 813 __ std(zero_reg, 0, base_ptr_reg);
goetz@6458 814 __ bind(done);
goetz@6458 815 __ blr(); // return
goetz@6458 816
goetz@6458 817 return start;
goetz@6458 818 }
goetz@6458 819
goetz@6458 820 // The following routine generates a subroutine to throw an asynchronous
goetz@6458 821 // UnknownError when an unsafe access gets a fault that could not be
goetz@6458 822 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
goetz@6458 823 //
goetz@6458 824 address generate_handler_for_unsafe_access() {
goetz@6458 825 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
goetz@6511 826 address start = __ function_entry();
goetz@6458 827 __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
goetz@6458 828 return start;
goetz@6458 829 }
goetz@6458 830
goetz@6458 831 #if !defined(PRODUCT)
goetz@6458 832 // Wrapper which calls oopDesc::is_oop_or_null()
goetz@6458 833 // Only called by MacroAssembler::verify_oop
goetz@6458 834 static void verify_oop_helper(const char* message, oop o) {
goetz@6458 835 if (!o->is_oop_or_null()) {
goetz@6458 836 fatal(message);
goetz@6458 837 }
goetz@6458 838 ++ StubRoutines::_verify_oop_count;
goetz@6458 839 }
goetz@6458 840 #endif
goetz@6458 841
goetz@6458 842 // Return address of code to be called from code generated by
goetz@6458 843 // MacroAssembler::verify_oop.
goetz@6458 844 //
goetz@6458 845 // Don't generate, rather use C++ code.
goetz@6458 846 address generate_verify_oop() {
goetz@6458 847 StubCodeMark mark(this, "StubRoutines", "verify_oop");
goetz@6458 848
goetz@6458 849 // this is actually a `FunctionDescriptor*'.
goetz@6458 850 address start = 0;
goetz@6458 851
goetz@6458 852 #if !defined(PRODUCT)
goetz@6458 853 start = CAST_FROM_FN_PTR(address, verify_oop_helper);
goetz@6458 854 #endif
goetz@6458 855
goetz@6458 856 return start;
goetz@6458 857 }
goetz@6458 858
goetz@6458 859 // Fairer handling of safepoints for native methods.
goetz@6458 860 //
goetz@6458 861 // Generate code which reads from the polling page. This special handling is needed as the
goetz@6458 862 // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
goetz@6458 863 // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
goetz@6458 864 // to read from the safepoint polling page.
goetz@6458 865 address generate_load_from_poll() {
goetz@6458 866 StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
goetz@6511 867 address start = __ function_entry();
goetz@6458 868 __ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port
goetz@6458 869 return start;
goetz@6458 870 }
goetz@6458 871
goetz@6458 872 // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
goetz@6458 873 //
goetz@6458 874 // The code is implemented (ported from sparc) as we believe it benefits JVM98; however,
goetz@6458 875 // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
goetz@6458 876 //
goetz@6495 877 // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
goetz@6458 878 // for turning on loop predication optimization, and hence the behavior of "array range check"
goetz@6458 879 // and "loop invariant check" could be influenced, which potentially boosted JVM98.
goetz@6458 880 //
goetz@6495 881 // Generate stub for disjoint short fill. If "aligned" is true, the
goetz@6495 882 // "to" address is assumed to be heapword aligned.
goetz@6458 883 //
goetz@6458 884 // Arguments for generated stub:
goetz@6495 885 // to: R3_ARG1
goetz@6495 886 // value: R4_ARG2
goetz@6495 887 // count: R5_ARG3 treated as signed
goetz@6458 888 //
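// Semantically the stub performs (informal sketch): for (i = 0; i < count; i++)
// to[i] = value; where 'value' is first replicated into a 64-bit pattern, e.g.
// for T_BYTE 0xAB -> 0xABAB -> 0xABABABAB -> 0xABABABABABABABAB via the rldimi
// sequence below, so that 8- and 32-byte chunks can be stored with std.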
goetz@6458 889 address generate_fill(BasicType t, bool aligned, const char* name) {
goetz@6458 890 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 891 address start = __ function_entry();
goetz@6458 892
goetz@6495 893 const Register to = R3_ARG1; // destination array address
goetz@6495 894 const Register value = R4_ARG2; // fill value
goetz@6495 895 const Register count = R5_ARG3; // elements count
goetz@6495 896 const Register temp = R6_ARG4; // temp register
goetz@6458 897
goetz@6495 898 //assert_clean_int(count, O3); // Make sure 'count' is clean int.
goetz@6458 899
goetz@6458 900 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
goetz@6458 901 Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
goetz@6458 902
goetz@6458 903 int shift = -1;
goetz@6458 904 switch (t) {
goetz@6458 905 case T_BYTE:
goetz@6458 906 shift = 2;
goetz@6495 907 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
goetz@6458 908 __ rldimi(value, value, 8, 48); // 8 bit -> 16 bit
goetz@6495 909 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
goetz@6458 910 __ blt(CCR0, L_fill_elements);
goetz@6458 911 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit
goetz@6458 912 break;
goetz@6458 913 case T_SHORT:
goetz@6458 914 shift = 1;
goetz@6495 915 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
goetz@6458 916 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit
goetz@6495 917 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
goetz@6458 918 __ blt(CCR0, L_fill_elements);
goetz@6458 919 break;
goetz@6458 920 case T_INT:
goetz@6458 921 shift = 0;
goetz@6495 922 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
goetz@6458 923 __ blt(CCR0, L_fill_4_bytes);
goetz@6458 924 break;
goetz@6458 925 default: ShouldNotReachHere();
goetz@6458 926 }
goetz@6458 927
goetz@6458 928 if (!aligned && (t == T_BYTE || t == T_SHORT)) {
goetz@6495 929 // Align 'to' address at a 4-byte boundary.
goetz@6458 930 if (t == T_BYTE) {
goetz@6495 931 // One byte misalignment happens only for byte arrays.
goetz@6458 932 __ andi_(temp, to, 1);
goetz@6458 933 __ beq(CCR0, L_skip_align1);
goetz@6458 934 __ stb(value, 0, to);
goetz@6458 935 __ addi(to, to, 1);
goetz@6458 936 __ addi(count, count, -1);
goetz@6458 937 __ bind(L_skip_align1);
goetz@6458 938 }
goetz@6458 939 // Two bytes misalignment happens only for byte and short (char) arrays.
goetz@6458 940 __ andi_(temp, to, 2);
goetz@6458 941 __ beq(CCR0, L_skip_align2);
goetz@6458 942 __ sth(value, 0, to);
goetz@6458 943 __ addi(to, to, 2);
goetz@6458 944 __ addi(count, count, -(1 << (shift - 1)));
goetz@6458 945 __ bind(L_skip_align2);
goetz@6458 946 }
goetz@6458 947
goetz@6458 948 if (!aligned) {
goetz@6458 949 // Align to 8 bytes, we know we are 4 byte aligned to start.
goetz@6458 950 __ andi_(temp, to, 7);
goetz@6458 951 __ beq(CCR0, L_fill_32_bytes);
goetz@6458 952 __ stw(value, 0, to);
goetz@6458 953 __ addi(to, to, 4);
goetz@6458 954 __ addi(count, count, -(1 << shift));
goetz@6458 955 __ bind(L_fill_32_bytes);
goetz@6458 956 }
goetz@6458 957
goetz@6495 958 __ li(temp, 8<<shift); // Prepare for 32 byte loop.
goetz@6495 959 // Clone bytes int->long as above.
goetz@6495 960 __ rldimi(value, value, 32, 0); // 32 bit -> 64 bit
goetz@6458 961
goetz@6458 962 Label L_check_fill_8_bytes;
goetz@6495 963 // Fill 32-byte chunks.
goetz@6458 964 __ subf_(count, temp, count);
goetz@6458 965 __ blt(CCR0, L_check_fill_8_bytes);
goetz@6458 966
goetz@6458 967 Label L_fill_32_bytes_loop;
goetz@6458 968 __ align(32);
goetz@6458 969 __ bind(L_fill_32_bytes_loop);
goetz@6458 970
goetz@6458 971 __ std(value, 0, to);
goetz@6458 972 __ std(value, 8, to);
goetz@6495 973 __ subf_(count, temp, count); // Update count.
goetz@6458 974 __ std(value, 16, to);
goetz@6458 975 __ std(value, 24, to);
goetz@6458 976
goetz@6458 977 __ addi(to, to, 32);
goetz@6458 978 __ bge(CCR0, L_fill_32_bytes_loop);
goetz@6458 979
goetz@6458 980 __ bind(L_check_fill_8_bytes);
goetz@6458 981 __ add_(count, temp, count);
goetz@6458 982 __ beq(CCR0, L_exit);
goetz@6458 983 __ addic_(count, count, -(2 << shift));
goetz@6458 984 __ blt(CCR0, L_fill_4_bytes);
goetz@6458 985
goetz@6458 986 //
goetz@6458 987 // Length is too short, just fill 8 bytes at a time.
goetz@6458 988 //
goetz@6458 989 Label L_fill_8_bytes_loop;
goetz@6458 990 __ bind(L_fill_8_bytes_loop);
goetz@6458 991 __ std(value, 0, to);
goetz@6458 992 __ addic_(count, count, -(2 << shift));
goetz@6458 993 __ addi(to, to, 8);
goetz@6458 994 __ bge(CCR0, L_fill_8_bytes_loop);
goetz@6458 995
goetz@6495 996 // Fill trailing 4 bytes.
goetz@6458 997 __ bind(L_fill_4_bytes);
goetz@6458 998 __ andi_(temp, count, 1<<shift);
goetz@6458 999 __ beq(CCR0, L_fill_2_bytes);
goetz@6458 1000
goetz@6458 1001 __ stw(value, 0, to);
goetz@6458 1002 if (t == T_BYTE || t == T_SHORT) {
goetz@6458 1003 __ addi(to, to, 4);
goetz@6495 1004 // Fill trailing 2 bytes.
goetz@6458 1005 __ bind(L_fill_2_bytes);
goetz@6458 1006 __ andi_(temp, count, 1<<(shift-1));
goetz@6458 1007 __ beq(CCR0, L_fill_byte);
goetz@6458 1008 __ sth(value, 0, to);
goetz@6458 1009 if (t == T_BYTE) {
goetz@6458 1010 __ addi(to, to, 2);
goetz@6495 1011 // Fill trailing byte.
goetz@6458 1012 __ bind(L_fill_byte);
goetz@6458 1013 __ andi_(count, count, 1);
goetz@6458 1014 __ beq(CCR0, L_exit);
goetz@6458 1015 __ stb(value, 0, to);
goetz@6458 1016 } else {
goetz@6458 1017 __ bind(L_fill_byte);
goetz@6458 1018 }
goetz@6458 1019 } else {
goetz@6458 1020 __ bind(L_fill_2_bytes);
goetz@6458 1021 }
goetz@6458 1022 __ bind(L_exit);
goetz@6458 1023 __ blr();
goetz@6458 1024
goetz@6495 1025 // Handle copies less than 8 bytes. Int is handled elsewhere.
goetz@6458 1026 if (t == T_BYTE) {
goetz@6458 1027 __ bind(L_fill_elements);
goetz@6458 1028 Label L_fill_2, L_fill_4;
goetz@6458 1029 __ andi_(temp, count, 1);
goetz@6458 1030 __ beq(CCR0, L_fill_2);
goetz@6458 1031 __ stb(value, 0, to);
goetz@6458 1032 __ addi(to, to, 1);
goetz@6458 1033 __ bind(L_fill_2);
goetz@6458 1034 __ andi_(temp, count, 2);
goetz@6458 1035 __ beq(CCR0, L_fill_4);
goetz@6458 1036 __ stb(value, 0, to);
goetz@6458 1037 __ stb(value, 1, to);
goetz@6458 1038 __ addi(to, to, 2);
goetz@6458 1039 __ bind(L_fill_4);
goetz@6458 1040 __ andi_(temp, count, 4);
goetz@6458 1041 __ beq(CCR0, L_exit);
goetz@6458 1042 __ stb(value, 0, to);
goetz@6458 1043 __ stb(value, 1, to);
goetz@6458 1044 __ stb(value, 2, to);
goetz@6458 1045 __ stb(value, 3, to);
goetz@6458 1046 __ blr();
goetz@6458 1047 }
goetz@6458 1048
goetz@6458 1049 if (t == T_SHORT) {
goetz@6458 1050 Label L_fill_2;
goetz@6458 1051 __ bind(L_fill_elements);
goetz@6458 1052 __ andi_(temp, count, 1);
goetz@6458 1053 __ beq(CCR0, L_fill_2);
goetz@6458 1054 __ sth(value, 0, to);
goetz@6458 1055 __ addi(to, to, 2);
goetz@6458 1056 __ bind(L_fill_2);
goetz@6458 1057 __ andi_(temp, count, 2);
goetz@6458 1058 __ beq(CCR0, L_exit);
goetz@6458 1059 __ sth(value, 0, to);
goetz@6458 1060 __ sth(value, 2, to);
goetz@6458 1061 __ blr();
goetz@6458 1062 }
goetz@6458 1063 return start;
goetz@6458 1064 }
goetz@6458 1065
goetz@6458 1066
goetz@6495 1067 // Generate overlap test for array copy stubs.
goetz@6458 1068 //
goetz@6458 1069 // Input:
goetz@6458 1070 // R3_ARG1 - from
goetz@6458 1071 // R4_ARG2 - to
goetz@6458 1072 // R5_ARG3 - element count
goetz@6458 1073 //
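// The test branches to no_overlap_target (forward copy) unless, informally,
//
//   (uintptr_t)from < (uintptr_t)to  &&  (to - from) < (count << log2_elem_size)
//
// i.e. the destination range starts inside the source range; only then does it
// fall through so the caller can copy backwards.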
goetz@6458 1074 void array_overlap_test(address no_overlap_target, int log2_elem_size) {
goetz@6458 1075 Register tmp1 = R6_ARG4;
goetz@6458 1076 Register tmp2 = R7_ARG5;
goetz@6458 1077
goetz@6458 1078 Label l_overlap;
goetz@6458 1079 #ifdef ASSERT
goetz@6458 1080 __ srdi_(tmp2, R5_ARG3, 31);
goetz@6458 1081 __ asm_assert_eq("missing zero extend", 0xAFFE);
goetz@6458 1082 #endif
goetz@6458 1083
goetz@6458 1084 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
goetz@6458 1085 __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
goetz@6458 1086 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
goetz@6458 1087 __ cmpld(CCR1, tmp1, tmp2);
goetz@6458 1088 __ crand(/*CCR0 lt*/0, /*CCR1 lt*/4+0, /*CCR0 lt*/0);
goetz@6458 1089 __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size.
goetz@6458 1090
goetz@6458 1091 // need to copy forwards
goetz@6458 1092 if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
goetz@6458 1093 __ b(no_overlap_target);
goetz@6458 1094 } else {
goetz@6458 1095 __ load_const(tmp1, no_overlap_target, tmp2);
goetz@6458 1096 __ mtctr(tmp1);
goetz@6458 1097 __ bctr();
goetz@6458 1098 }
goetz@6458 1099
goetz@6458 1100 __ bind(l_overlap);
goetz@6458 1101 // need to copy backwards
goetz@6458 1102 }
goetz@6458 1103
goetz@6458 1104 // The guideline in the implementations of generate_disjoint_xxx_copy
goetz@6458 1105 // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
goetz@6458 1106 // single instructions, but to avoid alignment interrupts (see subsequent
goetz@6458 1107 // comment). Furthermore, we try to minimize misaligned access, even
goetz@6458 1108 // comment). Furthermore, we try to minimize misaligned accesses, even
goetz@6458 1109 //
goetz@6458 1110 // In Big-Endian mode, the PowerPC architecture requires implementations to
goetz@6458 1111 // handle automatically misaligned integer halfword and word accesses,
goetz@6458 1112 // word-aligned integer doubleword accesses, and word-aligned floating-point
goetz@6458 1113 // accesses. Other accesses may or may not generate an Alignment interrupt
goetz@6458 1114 // depending on the implementation.
goetz@6458 1115 // Alignment interrupt handling may require on the order of hundreds of cycles,
goetz@6458 1116 // so every effort should be made to avoid misaligned memory values.
goetz@6458 1117 //
goetz@6458 1118 //
goetz@6458 1119 // Generate stub for disjoint byte copy. If "aligned" is true, the
goetz@6458 1120 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1121 //
goetz@6458 1122 // Arguments for generated stub:
goetz@6458 1123 // from: R3_ARG1
goetz@6458 1124 // to: R4_ARG2
goetz@6458 1125 // count: R5_ARG3 treated as signed
goetz@6458 1126 //
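// Informal outline of the code below: for short arrays (<= 17 elements) copy
// 4 bytes at a time, then single bytes; otherwise align 'from'/'to' to 4 and
// then 8 bytes where their relative alignment allows it, copy 32 bytes per
// iteration with ld/std, and finish the tail with 4-byte and 1-byte copies.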
goetz@6458 1127 address generate_disjoint_byte_copy(bool aligned, const char * name) {
goetz@6458 1128 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1129 address start = __ function_entry();
goetz@6458 1130
goetz@6458 1131 Register tmp1 = R6_ARG4;
goetz@6458 1132 Register tmp2 = R7_ARG5;
goetz@6458 1133 Register tmp3 = R8_ARG6;
goetz@6458 1134 Register tmp4 = R9_ARG7;
goetz@6458 1135
goetz@6458 1136
goetz@6458 1137 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
goetz@6458 1138 // Don't try anything fancy if arrays don't have many elements.
goetz@6458 1139 __ li(tmp3, 0);
goetz@6458 1140 __ cmpwi(CCR0, R5_ARG3, 17);
goetz@6458 1141 __ ble(CCR0, l_6); // copy 4 at a time
goetz@6458 1142
goetz@6458 1143 if (!aligned) {
goetz@6458 1144 __ xorr(tmp1, R3_ARG1, R4_ARG2);
goetz@6458 1145 __ andi_(tmp1, tmp1, 3);
goetz@6458 1146 __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
goetz@6458 1147
goetz@6458 1148 // Copy elements if necessary to align to 4 bytes.
goetz@6458 1149 __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
goetz@6458 1150 __ andi_(tmp1, tmp1, 3);
goetz@6458 1151 __ beq(CCR0, l_2);
goetz@6458 1152
goetz@6458 1153 __ subf(R5_ARG3, tmp1, R5_ARG3);
goetz@6458 1154 __ bind(l_9);
goetz@6458 1155 __ lbz(tmp2, 0, R3_ARG1);
goetz@6458 1156 __ addic_(tmp1, tmp1, -1);
goetz@6458 1157 __ stb(tmp2, 0, R4_ARG2);
goetz@6458 1158 __ addi(R3_ARG1, R3_ARG1, 1);
goetz@6458 1159 __ addi(R4_ARG2, R4_ARG2, 1);
goetz@6458 1160 __ bne(CCR0, l_9);
goetz@6458 1161
goetz@6458 1162 __ bind(l_2);
goetz@6458 1163 }
goetz@6458 1164
goetz@6458 1165 // copy 8 elements at a time
goetz@6458 1166 __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
goetz@6458 1167 __ andi_(tmp1, tmp2, 7);
goetz@6458 1168 __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
goetz@6458 1169
goetz@6458 1170 // copy a 2-element word if necessary to align to 8 bytes
goetz@6458 1171 __ andi_(R0, R3_ARG1, 7);
goetz@6458 1172 __ beq(CCR0, l_7);
goetz@6458 1173
goetz@6458 1174 __ lwzx(tmp2, R3_ARG1, tmp3);
goetz@6458 1175 __ addi(R5_ARG3, R5_ARG3, -4);
goetz@6458 1176 __ stwx(tmp2, R4_ARG2, tmp3);
goetz@6458 1177 { // FasterArrayCopy
goetz@6458 1178 __ addi(R3_ARG1, R3_ARG1, 4);
goetz@6458 1179 __ addi(R4_ARG2, R4_ARG2, 4);
goetz@6458 1180 }
goetz@6458 1181 __ bind(l_7);
goetz@6458 1182
goetz@6458 1183 { // FasterArrayCopy
goetz@6458 1184 __ cmpwi(CCR0, R5_ARG3, 31);
goetz@6458 1185 __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain
goetz@6458 1186
goetz@6458 1187 __ srdi(tmp1, R5_ARG3, 5);
goetz@6458 1188 __ andi_(R5_ARG3, R5_ARG3, 31);
goetz@6458 1189 __ mtctr(tmp1);
goetz@6458 1190
goetz@6458 1191 __ bind(l_8);
goetz@6458 1192 // Use unrolled version for mass copying (copy 32 elements a time)
goetz@6458 1193 // Load feeding store gets zero latency on Power6, however not on Power5.
goetz@6458 1194 // Therefore, the following sequence is made for the good of both.
goetz@6458 1195 __ ld(tmp1, 0, R3_ARG1);
goetz@6458 1196 __ ld(tmp2, 8, R3_ARG1);
goetz@6458 1197 __ ld(tmp3, 16, R3_ARG1);
goetz@6458 1198 __ ld(tmp4, 24, R3_ARG1);
goetz@6458 1199 __ std(tmp1, 0, R4_ARG2);
goetz@6458 1200 __ std(tmp2, 8, R4_ARG2);
goetz@6458 1201 __ std(tmp3, 16, R4_ARG2);
goetz@6458 1202 __ std(tmp4, 24, R4_ARG2);
goetz@6458 1203 __ addi(R3_ARG1, R3_ARG1, 32);
goetz@6458 1204 __ addi(R4_ARG2, R4_ARG2, 32);
goetz@6458 1205 __ bdnz(l_8);
goetz@6458 1206 }
goetz@6458 1207
goetz@6458 1208 __ bind(l_6);
goetz@6458 1209
goetz@6458 1210 // copy 4 elements at a time
goetz@6458 1211 __ cmpwi(CCR0, R5_ARG3, 4);
goetz@6458 1212 __ blt(CCR0, l_1);
goetz@6458 1213 __ srdi(tmp1, R5_ARG3, 2);
goetz@6458 1214 __ mtctr(tmp1); // is > 0
goetz@6458 1215 __ andi_(R5_ARG3, R5_ARG3, 3);
goetz@6458 1216
goetz@6458 1217 { // FasterArrayCopy
goetz@6458 1218 __ addi(R3_ARG1, R3_ARG1, -4);
goetz@6458 1219 __ addi(R4_ARG2, R4_ARG2, -4);
goetz@6458 1220 __ bind(l_3);
goetz@6458 1221 __ lwzu(tmp2, 4, R3_ARG1);
goetz@6458 1222 __ stwu(tmp2, 4, R4_ARG2);
goetz@6458 1223 __ bdnz(l_3);
goetz@6458 1224 __ addi(R3_ARG1, R3_ARG1, 4);
goetz@6458 1225 __ addi(R4_ARG2, R4_ARG2, 4);
goetz@6458 1226 }
goetz@6458 1227
goetz@6458 1228 // do single element copy
goetz@6458 1229 __ bind(l_1);
goetz@6458 1230 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1231 __ beq(CCR0, l_4);
goetz@6458 1232
goetz@6458 1233 { // FasterArrayCopy
goetz@6458 1234 __ mtctr(R5_ARG3);
goetz@6458 1235 __ addi(R3_ARG1, R3_ARG1, -1);
goetz@6458 1236 __ addi(R4_ARG2, R4_ARG2, -1);
goetz@6458 1237
goetz@6458 1238 __ bind(l_5);
goetz@6458 1239 __ lbzu(tmp2, 1, R3_ARG1);
goetz@6458 1240 __ stbu(tmp2, 1, R4_ARG2);
goetz@6458 1241 __ bdnz(l_5);
goetz@6458 1242 }
goetz@6458 1243
goetz@6458 1244 __ bind(l_4);
goetz@6458 1245 __ blr();
goetz@6458 1246
goetz@6458 1247 return start;
goetz@6458 1248 }
goetz@6458 1249
goetz@6458 1250 // Generate stub for conjoint byte copy. If "aligned" is true, the
goetz@6458 1251 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1252 //
goetz@6458 1253 // Arguments for generated stub:
goetz@6458 1254 // from: R3_ARG1
goetz@6458 1255 // to: R4_ARG2
goetz@6458 1256 // count: R5_ARG3 treated as signed
goetz@6458 1257 //
goetz@6458 1258 address generate_conjoint_byte_copy(bool aligned, const char * name) {
goetz@6458 1259 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1260 address start = __ function_entry();
goetz@6458 1261
goetz@6458 1262 Register tmp1 = R6_ARG4;
goetz@6458 1263 Register tmp2 = R7_ARG5;
goetz@6458 1264 Register tmp3 = R8_ARG6;
goetz@6458 1265
goetz@6511 1266 #if defined(ABI_ELFv2)
goetz@6511 1267 address nooverlap_target = aligned ?
goetz@6511 1268 StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
goetz@6511 1269 StubRoutines::jbyte_disjoint_arraycopy();
goetz@6511 1270 #else
goetz@6458 1271 address nooverlap_target = aligned ?
goetz@6458 1272 ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
goetz@6458 1273 ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
goetz@6511 1274 #endif
goetz@6458 1275
goetz@6458 1276 array_overlap_test(nooverlap_target, 0);
goetz@6458 1277 // Do reverse copy. We assume the case of actual overlap is rare enough
goetz@6458 1278 // that we don't have to optimize it.
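  // Roughly equivalent C (illustrative sketch only, not emitted code), with
  // 'from'/'to'/'count' standing for R3_ARG1/R4_ARG2/R5_ARG3:
  //
  //   for (long i = (long)count - 1; i >= 0; i--)  to[i] = from[i];
  //
  // The conjoint short/int/long stubs below use the same backward pattern.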
goetz@6458 1279 Label l_1, l_2;
goetz@6458 1280
goetz@6458 1281 __ b(l_2);
goetz@6458 1282 __ bind(l_1);
goetz@6458 1283 __ stbx(tmp1, R4_ARG2, R5_ARG3);
goetz@6458 1284 __ bind(l_2);
goetz@6458 1285 __ addic_(R5_ARG3, R5_ARG3, -1);
goetz@6458 1286 __ lbzx(tmp1, R3_ARG1, R5_ARG3);
goetz@6458 1287 __ bge(CCR0, l_1);
goetz@6458 1288
goetz@6458 1289 __ blr();
goetz@6458 1290
goetz@6458 1291 return start;
goetz@6458 1292 }
goetz@6458 1293
goetz@6458 1294 // Generate stub for disjoint short copy. If "aligned" is true, the
goetz@6458 1295 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1296 //
goetz@6458 1297 // Arguments for generated stub:
goetz@6458 1298 // from: R3_ARG1
goetz@6458 1299 // to: R4_ARG2
goetz@6458 1300 // elm.count: R5_ARG3 treated as signed
goetz@6458 1301 //
goetz@6458 1302 // Strategy for aligned==true:
goetz@6458 1303 //
goetz@6458 1304 // If length <= 9:
goetz@6458 1305 // 1. copy 2 elements at a time (l_6)
goetz@6458 1306 // 2. copy last element if original element count was odd (l_1)
goetz@6458 1307 //
goetz@6458 1308 // If length > 9:
goetz@6458 1309 // 1. copy 4 elements at a time until less than 4 elements are left (l_7)
goetz@6458 1310 // 2. copy 2 elements at a time until less than 2 elements are left (l_6)
goetz@6458 1311 // 3. copy last element if one was left in step 2. (l_1)
goetz@6458 1312 //
goetz@6458 1313 //
goetz@6458 1314 // Strategy for aligned==false:
goetz@6458 1315 //
goetz@6458 1316 // If length <= 9: same as aligned==true case, but NOTE: load/stores
goetz@6458 1317 // can be unaligned (see comment below)
goetz@6458 1318 //
goetz@6458 1319 // If length > 9:
goetz@6458 1320 // 1. continue with step 6 if the alignment of from and to mod 4
goetz@6458 1321 //    is different.
goetz@6458 1322 // 2. align from and to to 4 bytes by copying 1 element if necessary
goetz@6458 1323 // 3. at l_2 from and to are 4 byte aligned; continue with
goetz@6458 1324 //    step 5 if they cannot be aligned to 8 bytes because they
goetz@6458 1325 //    have different alignment mod 8.
goetz@6458 1326 // 4. at this point we know that both from and to have the same
goetz@6458 1327 //    alignment mod 8; now copy one element if necessary to get
goetz@6458 1328 //    8 byte alignment of from and to.
goetz@6458 1329 // 5. copy 4 elements at a time until less than 4 elements are
goetz@6458 1330 // left; depending on step 3. all load/stores are aligned or
goetz@6458 1331 // either all loads or all stores are unaligned.
goetz@6458 1332 // 6. copy 2 elements at a time until less than 2 elements are
goetz@6458 1333 // left (l_6); arriving here from step 1., there is a chance
goetz@6458 1334 // that all accesses are unaligned.
goetz@6458 1335 // 7. copy last element if one was left in step 6. (l_1)
goetz@6458 1336 //
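  // Roughly equivalent C for the aligned==false path (illustrative sketch
  // only, not emitted code; from/to are the jshort* arguments, count the
  // signed element count):
  //
  //   if (count > 9 && (((intptr_t)from ^ (intptr_t)to) & 3) == 0) {
  //     if ((intptr_t)from & 3) { *to++ = *from++; count--; }        // step 2
  //     if ((((intptr_t)from ^ (intptr_t)to) & 7) == 0 &&
  //         ((intptr_t)from & 7) != 0) {                             // step 4
  //       *(jint*)to = *(jint*)from;  to += 2;  from += 2;  count -= 2;
  //     }
  //     while (count >= 16) {                                        // step 5
  //       for (int i = 0; i < 16; i++) to[i] = from[i];              // 32 bytes/pass
  //       to += 16;  from += 16;  count -= 16;
  //     }
  //   }
  //   while (count >= 2) {                                           // step 6
  //     *(jint*)to = *(jint*)from;  to += 2;  from += 2;  count -= 2;
  //   }
  //   if (count > 0) *to = *from;                                    // step 7
  //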
goetz@6458 1337 // There are unaligned data accesses using integer load/store
goetz@6458 1338 // instructions in this stub. POWER allows such accesses.
goetz@6458 1339 //
goetz@6458 1340 // According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
goetz@6458 1341 // Chapter 2: Effect of Operand Placement on Performance) unaligned
goetz@6458 1342 // integer load/stores have good performance. Only unaligned
goetz@6458 1343 // floating point load/stores can have poor performance.
goetz@6458 1344 //
goetz@6458 1345 // TODO:
goetz@6458 1346 //
goetz@6458 1347 // 1. check if aligning the backbranch target of loops is beneficial
goetz@6458 1348 //
goetz@6458 1349 address generate_disjoint_short_copy(bool aligned, const char * name) {
goetz@6458 1350 StubCodeMark mark(this, "StubRoutines", name);
goetz@6458 1351
goetz@6458 1352 Register tmp1 = R6_ARG4;
goetz@6458 1353 Register tmp2 = R7_ARG5;
goetz@6458 1354 Register tmp3 = R8_ARG6;
goetz@6458 1355 Register tmp4 = R9_ARG7;
goetz@6458 1356
goetz@6511 1357 address start = __ function_entry();
goetz@6458 1358
goetz@6458 1359 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
goetz@6458 1360 // don't try anything fancy if arrays don't have many elements
goetz@6458 1361 __ li(tmp3, 0);
goetz@6458 1362 __ cmpwi(CCR0, R5_ARG3, 9);
goetz@6458 1363 __ ble(CCR0, l_6); // copy 2 at a time
goetz@6458 1364
goetz@6458 1365 if (!aligned) {
goetz@6458 1366 __ xorr(tmp1, R3_ARG1, R4_ARG2);
goetz@6458 1367 __ andi_(tmp1, tmp1, 3);
goetz@6458 1368 __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
goetz@6458 1369
goetz@6458 1370 // At this point it is guaranteed that both from and to have the same alignment mod 4.
goetz@6458 1371
goetz@6458 1372 // Copy 1 element if necessary to align to 4 bytes.
goetz@6458 1373 __ andi_(tmp1, R3_ARG1, 3);
goetz@6458 1374 __ beq(CCR0, l_2);
goetz@6458 1375
goetz@6458 1376 __ lhz(tmp2, 0, R3_ARG1);
goetz@6458 1377 __ addi(R3_ARG1, R3_ARG1, 2);
goetz@6458 1378 __ sth(tmp2, 0, R4_ARG2);
goetz@6458 1379 __ addi(R4_ARG2, R4_ARG2, 2);
goetz@6458 1380 __ addi(R5_ARG3, R5_ARG3, -1);
goetz@6458 1381 __ bind(l_2);
goetz@6458 1382
goetz@6458 1383 // At this point both from and to are at least 4 byte aligned.
goetz@6458 1384
goetz@6458 1385 // Copy 4 elements at a time.
goetz@6458 1386 // Align to 8 bytes, but only if both from and to have the same alignment mod 8.
goetz@6458 1387 __ xorr(tmp2, R3_ARG1, R4_ARG2);
goetz@6458 1388 __ andi_(tmp1, tmp2, 7);
goetz@6458 1389 __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
goetz@6458 1390
goetz@6458 1391 // Copy a 2-element word if necessary to align to 8 bytes.
goetz@6458 1392 __ andi_(R0, R3_ARG1, 7);
goetz@6458 1393 __ beq(CCR0, l_7);
goetz@6458 1394
goetz@6458 1395 __ lwzx(tmp2, R3_ARG1, tmp3);
goetz@6458 1396 __ addi(R5_ARG3, R5_ARG3, -2);
goetz@6458 1397 __ stwx(tmp2, R4_ARG2, tmp3);
goetz@6458 1398 { // FasterArrayCopy
goetz@6458 1399 __ addi(R3_ARG1, R3_ARG1, 4);
goetz@6458 1400 __ addi(R4_ARG2, R4_ARG2, 4);
goetz@6458 1401 }
goetz@6458 1402 }
goetz@6458 1403
goetz@6458 1404 __ bind(l_7);
goetz@6458 1405
goetz@6458 1406 // Copy 4 elements at a time; either the loads or the stores can
goetz@6458 1407 // be unaligned if aligned == false.
goetz@6458 1408
goetz@6458 1409 { // FasterArrayCopy
goetz@6458 1410 __ cmpwi(CCR0, R5_ARG3, 15);
goetz@6458 1411 __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
goetz@6458 1412
goetz@6458 1413 __ srdi(tmp1, R5_ARG3, 4);
goetz@6458 1414 __ andi_(R5_ARG3, R5_ARG3, 15);
goetz@6458 1415 __ mtctr(tmp1);
goetz@6458 1416
goetz@6458 1417 __ bind(l_8);
goetz@6458 1418 // Use unrolled version for mass copying (copy 16 elements at a time).
goetz@6458 1419 // Load feeding store gets zero latency on Power6, however not on Power5.
goetz@6458 1420 // Therefore, the following sequence is made for the good of both.
goetz@6458 1421 __ ld(tmp1, 0, R3_ARG1);
goetz@6458 1422 __ ld(tmp2, 8, R3_ARG1);
goetz@6458 1423 __ ld(tmp3, 16, R3_ARG1);
goetz@6458 1424 __ ld(tmp4, 24, R3_ARG1);
goetz@6458 1425 __ std(tmp1, 0, R4_ARG2);
goetz@6458 1426 __ std(tmp2, 8, R4_ARG2);
goetz@6458 1427 __ std(tmp3, 16, R4_ARG2);
goetz@6458 1428 __ std(tmp4, 24, R4_ARG2);
goetz@6458 1429 __ addi(R3_ARG1, R3_ARG1, 32);
goetz@6458 1430 __ addi(R4_ARG2, R4_ARG2, 32);
goetz@6458 1431 __ bdnz(l_8);
goetz@6458 1432 }
goetz@6458 1433 __ bind(l_6);
goetz@6458 1434
goetz@6458 1435 // copy 2 elements at a time
goetz@6458 1436 { // FasterArrayCopy
goetz@6458 1437 __ cmpwi(CCR0, R5_ARG3, 2);
goetz@6458 1438 __ blt(CCR0, l_1);
goetz@6458 1439 __ srdi(tmp1, R5_ARG3, 1);
goetz@6458 1440 __ andi_(R5_ARG3, R5_ARG3, 1);
goetz@6458 1441
goetz@6458 1442 __ addi(R3_ARG1, R3_ARG1, -4);
goetz@6458 1443 __ addi(R4_ARG2, R4_ARG2, -4);
goetz@6458 1444 __ mtctr(tmp1);
goetz@6458 1445
goetz@6458 1446 __ bind(l_3);
goetz@6458 1447 __ lwzu(tmp2, 4, R3_ARG1);
goetz@6458 1448 __ stwu(tmp2, 4, R4_ARG2);
goetz@6458 1449 __ bdnz(l_3);
goetz@6458 1450
goetz@6458 1451 __ addi(R3_ARG1, R3_ARG1, 4);
goetz@6458 1452 __ addi(R4_ARG2, R4_ARG2, 4);
goetz@6458 1453 }
goetz@6458 1454
goetz@6458 1455 // do single element copy
goetz@6458 1456 __ bind(l_1);
goetz@6458 1457 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1458 __ beq(CCR0, l_4);
goetz@6458 1459
goetz@6458 1460 { // FasterArrayCopy
goetz@6458 1461 __ mtctr(R5_ARG3);
goetz@6458 1462 __ addi(R3_ARG1, R3_ARG1, -2);
goetz@6458 1463 __ addi(R4_ARG2, R4_ARG2, -2);
goetz@6458 1464
goetz@6458 1465 __ bind(l_5);
goetz@6458 1466 __ lhzu(tmp2, 2, R3_ARG1);
goetz@6458 1467 __ sthu(tmp2, 2, R4_ARG2);
goetz@6458 1468 __ bdnz(l_5);
goetz@6458 1469 }
goetz@6458 1470 __ bind(l_4);
goetz@6458 1471 __ blr();
goetz@6458 1472
goetz@6458 1473 return start;
goetz@6458 1474 }
goetz@6458 1475
goetz@6458 1476 // Generate stub for conjoint short copy. If "aligned" is true, the
goetz@6458 1477 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1478 //
goetz@6458 1479 // Arguments for generated stub:
goetz@6458 1480 // from: R3_ARG1
goetz@6458 1481 // to: R4_ARG2
goetz@6458 1482 // count: R5_ARG3 treated as signed
goetz@6458 1483 //
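  // Roughly equivalent C (illustrative sketch only, not emitted code): the
  // loop below walks a byte offset from 2*count-2 down to 0 in steps of 2:
  //
  //   for (long off = 2*(long)count - 2; off >= 0; off -= 2)
  //     *(jshort*)((char*)to + off) = *(jshort*)((char*)from + off);
  //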
goetz@6458 1484 address generate_conjoint_short_copy(bool aligned, const char * name) {
goetz@6458 1485 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1486 address start = __ function_entry();
goetz@6458 1487
goetz@6458 1488 Register tmp1 = R6_ARG4;
goetz@6458 1489 Register tmp2 = R7_ARG5;
goetz@6458 1490 Register tmp3 = R8_ARG6;
goetz@6458 1491
goetz@6511 1492 #if defined(ABI_ELFv2)
goetz@6511 1493 address nooverlap_target = aligned ?
goetz@6511 1494 StubRoutines::arrayof_jshort_disjoint_arraycopy() :
goetz@6511 1495 StubRoutines::jshort_disjoint_arraycopy();
goetz@6511 1496 #else
goetz@6458 1497 address nooverlap_target = aligned ?
goetz@6458 1498 ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
goetz@6458 1499 ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
goetz@6511 1500 #endif
goetz@6458 1501
goetz@6458 1502 array_overlap_test(nooverlap_target, 1);
goetz@6458 1503
goetz@6458 1504 Label l_1, l_2;
goetz@6458 1505 __ sldi(tmp1, R5_ARG3, 1);
goetz@6458 1506 __ b(l_2);
goetz@6458 1507 __ bind(l_1);
goetz@6458 1508 __ sthx(tmp2, R4_ARG2, tmp1);
goetz@6458 1509 __ bind(l_2);
goetz@6458 1510 __ addic_(tmp1, tmp1, -2);
goetz@6458 1511 __ lhzx(tmp2, R3_ARG1, tmp1);
goetz@6458 1512 __ bge(CCR0, l_1);
goetz@6458 1513
goetz@6458 1514 __ blr();
goetz@6458 1515
goetz@6458 1516 return start;
goetz@6458 1517 }
goetz@6458 1518
goetz@6458 1519 // Generate core code for disjoint int copy (and oop copy on 32-bit). If "aligned"
goetz@6458 1520 // is true, the "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1521 //
goetz@6458 1522 // Arguments:
goetz@6458 1523 // from: R3_ARG1
goetz@6458 1524 // to: R4_ARG2
goetz@6458 1525 // count: R5_ARG3 treated as signed
goetz@6458 1526 //
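  // Roughly equivalent C (illustrative sketch only, not emitted code; from/to
  // are the jint* arguments, count the signed element count):
  //
  //   if (count > 5) {
  //     if (!aligned && (((intptr_t)from ^ (intptr_t)to) & 7) == 0 &&
  //         ((intptr_t)from & 7) != 0) {
  //       *to++ = *from++;  count--;             // align both to 8 bytes
  //     }
  //     while (count >= 8) {                     // 8 ints == 32 bytes per pass
  //       for (int i = 0; i < 8; i++) to[i] = from[i];
  //       to += 8;  from += 8;  count -= 8;
  //     }
  //   }
  //   while (count-- > 0) *to++ = *from++;       // tail, 1 element at a time
  //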
goetz@6458 1527 void generate_disjoint_int_copy_core(bool aligned) {
goetz@6458 1528 Register tmp1 = R6_ARG4;
goetz@6458 1529 Register tmp2 = R7_ARG5;
goetz@6458 1530 Register tmp3 = R8_ARG6;
goetz@6458 1531 Register tmp4 = R0;
goetz@6458 1532
goetz@6458 1533 Label l_1, l_2, l_3, l_4, l_5, l_6;
goetz@6458 1534 // for short arrays, just do single element copy
goetz@6458 1535 __ li(tmp3, 0);
goetz@6458 1536 __ cmpwi(CCR0, R5_ARG3, 5);
goetz@6458 1537 __ ble(CCR0, l_2);
goetz@6458 1538
goetz@6458 1539 if (!aligned) {
goetz@6458 1540 // check if arrays have same alignment mod 8.
goetz@6458 1541 __ xorr(tmp1, R3_ARG1, R4_ARG2);
goetz@6458 1542 __ andi_(R0, tmp1, 7);
goetz@6458 1543 // Not the same alignment, but ld and std just need to be 4 byte aligned.
goetz@6458 1544 __ bne(CCR0, l_4); // only one of to/from is 8 byte aligned -> skip align step, ld/std still copy 2 ints at a time
goetz@6458 1545
goetz@6458 1546 // copy 1 element to align 'to' and 'from' to an 8 byte boundary
goetz@6458 1547 __ andi_(R0, R3_ARG1, 7);
goetz@6458 1548 __ beq(CCR0, l_4);
goetz@6458 1549
goetz@6458 1550 __ lwzx(tmp2, R3_ARG1, tmp3);
goetz@6458 1551 __ addi(R5_ARG3, R5_ARG3, -1);
goetz@6458 1552 __ stwx(tmp2, R4_ARG2, tmp3);
goetz@6458 1553 { // FasterArrayCopy
goetz@6458 1554 __ addi(R3_ARG1, R3_ARG1, 4);
goetz@6458 1555 __ addi(R4_ARG2, R4_ARG2, 4);
goetz@6458 1556 }
goetz@6458 1557 __ bind(l_4);
goetz@6458 1558 }
goetz@6458 1559
goetz@6458 1560 { // FasterArrayCopy
goetz@6458 1561 __ cmpwi(CCR0, R5_ARG3, 7);
goetz@6458 1562 __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
goetz@6458 1563
goetz@6458 1564 __ srdi(tmp1, R5_ARG3, 3);
goetz@6458 1565 __ andi_(R5_ARG3, R5_ARG3, 7);
goetz@6458 1566 __ mtctr(tmp1);
goetz@6458 1567
goetz@6458 1568 __ bind(l_6);
goetz@6458 1569 // Use unrolled version for mass copying (copy 8 elements at a time).
goetz@6458 1570 // Load feeding store gets zero latency on Power6, however not on Power5.
goetz@6458 1571 // Therefore, the following sequence is made for the good of both.
goetz@6458 1572 __ ld(tmp1, 0, R3_ARG1);
goetz@6458 1573 __ ld(tmp2, 8, R3_ARG1);
goetz@6458 1574 __ ld(tmp3, 16, R3_ARG1);
goetz@6458 1575 __ ld(tmp4, 24, R3_ARG1);
goetz@6458 1576 __ std(tmp1, 0, R4_ARG2);
goetz@6458 1577 __ std(tmp2, 8, R4_ARG2);
goetz@6458 1578 __ std(tmp3, 16, R4_ARG2);
goetz@6458 1579 __ std(tmp4, 24, R4_ARG2);
goetz@6458 1580 __ addi(R3_ARG1, R3_ARG1, 32);
goetz@6458 1581 __ addi(R4_ARG2, R4_ARG2, 32);
goetz@6458 1582 __ bdnz(l_6);
goetz@6458 1583 }
goetz@6458 1584
goetz@6458 1585 // copy 1 element at a time
goetz@6458 1586 __ bind(l_2);
goetz@6458 1587 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1588 __ beq(CCR0, l_1);
goetz@6458 1589
goetz@6458 1590 { // FasterArrayCopy
goetz@6458 1591 __ mtctr(R5_ARG3);
goetz@6458 1592 __ addi(R3_ARG1, R3_ARG1, -4);
goetz@6458 1593 __ addi(R4_ARG2, R4_ARG2, -4);
goetz@6458 1594
goetz@6458 1595 __ bind(l_3);
goetz@6458 1596 __ lwzu(tmp2, 4, R3_ARG1);
goetz@6458 1597 __ stwu(tmp2, 4, R4_ARG2);
goetz@6458 1598 __ bdnz(l_3);
goetz@6458 1599 }
goetz@6458 1600
goetz@6458 1601 __ bind(l_1);
goetz@6458 1602 return;
goetz@6458 1603 }
goetz@6458 1604
goetz@6458 1605 // Generate stub for disjoint int copy. If "aligned" is true, the
goetz@6458 1606 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1607 //
goetz@6458 1608 // Arguments for generated stub:
goetz@6458 1609 // from: R3_ARG1
goetz@6458 1610 // to: R4_ARG2
goetz@6458 1611 // count: R5_ARG3 treated as signed
goetz@6458 1612 //
goetz@6458 1613 address generate_disjoint_int_copy(bool aligned, const char * name) {
goetz@6458 1614 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1615 address start = __ function_entry();
goetz@6458 1616 generate_disjoint_int_copy_core(aligned);
goetz@6458 1617 __ blr();
goetz@6458 1618 return start;
goetz@6458 1619 }
goetz@6458 1620
goetz@6458 1621 // Generate core code for conjoint int copy (and oop copy on
goetz@6458 1622 // 32-bit). If "aligned" is true, the "from" and "to" addresses
goetz@6458 1623 // are assumed to be heapword aligned.
goetz@6458 1624 //
goetz@6458 1625 // Arguments:
goetz@6458 1626 // from: R3_ARG1
goetz@6458 1627 // to: R4_ARG2
goetz@6458 1628 // count: R5_ARG3 treated as signed
goetz@6458 1629 //
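  // Roughly equivalent C (illustrative sketch only, not emitted code): copy
  // backwards so overlapping regions are handled, 8 ints (32 bytes) per
  // unrolled pass, then the rest one element at a time:
  //
  //   jint* f = from + count;  jint* t = to + count;   // one past the end
  //   long  n = count;
  //   while (n >= 8) {
  //     f -= 8;  t -= 8;
  //     long a = ((long*)f)[0], b = ((long*)f)[1], c = ((long*)f)[2], d = ((long*)f)[3];
  //     ((long*)t)[0] = a;  ((long*)t)[1] = b;  ((long*)t)[2] = c;  ((long*)t)[3] = d;
  //     n -= 8;
  //   }
  //   while (n-- > 0) *--t = *--f;
  //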
goetz@6458 1630 void generate_conjoint_int_copy_core(bool aligned) {
goetz@6458 1631 // Do reverse copy. We assume the case of actual overlap is rare enough
goetz@6458 1632 // that we don't have to optimize it.
goetz@6458 1633
goetz@6458 1634 Label l_1, l_2, l_3, l_4, l_5, l_6;
goetz@6458 1635
goetz@6458 1636 Register tmp1 = R6_ARG4;
goetz@6458 1637 Register tmp2 = R7_ARG5;
goetz@6458 1638 Register tmp3 = R8_ARG6;
goetz@6458 1639 Register tmp4 = R0;
goetz@6458 1640
goetz@6458 1641 { // FasterArrayCopy
goetz@6458 1642 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1643 __ beq(CCR0, l_6);
goetz@6458 1644
goetz@6458 1645 __ sldi(R5_ARG3, R5_ARG3, 2);
goetz@6458 1646 __ add(R3_ARG1, R3_ARG1, R5_ARG3);
goetz@6458 1647 __ add(R4_ARG2, R4_ARG2, R5_ARG3);
goetz@6458 1648 __ srdi(R5_ARG3, R5_ARG3, 2);
goetz@6458 1649
goetz@6458 1650 __ cmpwi(CCR0, R5_ARG3, 7);
goetz@6458 1651 __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
goetz@6458 1652
goetz@6458 1653 __ srdi(tmp1, R5_ARG3, 3);
goetz@6458 1654 __ andi(R5_ARG3, R5_ARG3, 7);
goetz@6458 1655 __ mtctr(tmp1);
goetz@6458 1656
goetz@6458 1657 __ bind(l_4);
goetz@6458 1658 // Use unrolled version for mass copying (copy 8 elements, i.e. 32 bytes, at a time).
goetz@6458 1659 // Load feeding store gets zero latency on Power6, however not on Power5.
goetz@6458 1660 // Therefore, the following sequence is made for the good of both.
goetz@6458 1661 __ addi(R3_ARG1, R3_ARG1, -32);
goetz@6458 1662 __ addi(R4_ARG2, R4_ARG2, -32);
goetz@6458 1663 __ ld(tmp4, 24, R3_ARG1);
goetz@6458 1664 __ ld(tmp3, 16, R3_ARG1);
goetz@6458 1665 __ ld(tmp2, 8, R3_ARG1);
goetz@6458 1666 __ ld(tmp1, 0, R3_ARG1);
goetz@6458 1667 __ std(tmp4, 24, R4_ARG2);
goetz@6458 1668 __ std(tmp3, 16, R4_ARG2);
goetz@6458 1669 __ std(tmp2, 8, R4_ARG2);
goetz@6458 1670 __ std(tmp1, 0, R4_ARG2);
goetz@6458 1671 __ bdnz(l_4);
goetz@6458 1672
goetz@6458 1673 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1674 __ beq(CCR0, l_6);
goetz@6458 1675
goetz@6458 1676 __ bind(l_5);
goetz@6458 1677 __ mtctr(R5_ARG3);
goetz@6458 1678 __ bind(l_3);
goetz@6458 1679 __ lwz(R0, -4, R3_ARG1);
goetz@6458 1680 __ stw(R0, -4, R4_ARG2);
goetz@6458 1681 __ addi(R3_ARG1, R3_ARG1, -4);
goetz@6458 1682 __ addi(R4_ARG2, R4_ARG2, -4);
goetz@6458 1683 __ bdnz(l_3);
goetz@6458 1684
goetz@6458 1685 __ bind(l_6);
goetz@6458 1686 }
goetz@6458 1687 }
goetz@6458 1688
goetz@6458 1689 // Generate stub for conjoint int copy. If "aligned" is true, the
goetz@6458 1690 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1691 //
goetz@6458 1692 // Arguments for generated stub:
goetz@6458 1693 // from: R3_ARG1
goetz@6458 1694 // to: R4_ARG2
goetz@6458 1695 // count: R5_ARG3 treated as signed
goetz@6458 1696 //
goetz@6458 1697 address generate_conjoint_int_copy(bool aligned, const char * name) {
goetz@6458 1698 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1699 address start = __ function_entry();
goetz@6458 1700
goetz@6511 1701 #if defined(ABI_ELFv2)
goetz@6511 1702 address nooverlap_target = aligned ?
goetz@6511 1703 StubRoutines::arrayof_jint_disjoint_arraycopy() :
goetz@6511 1704 StubRoutines::jint_disjoint_arraycopy();
goetz@6511 1705 #else
goetz@6458 1706 address nooverlap_target = aligned ?
goetz@6458 1707 ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
goetz@6458 1708 ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
goetz@6511 1709 #endif
goetz@6458 1710
goetz@6458 1711 array_overlap_test(nooverlap_target, 2);
goetz@6458 1712
goetz@6458 1713 generate_conjoint_int_copy_core(aligned);
goetz@6458 1714
goetz@6458 1715 __ blr();
goetz@6458 1716
goetz@6458 1717 return start;
goetz@6458 1718 }
goetz@6458 1719
goetz@6458 1720 // Generate core code for disjoint long copy (and oop copy on
goetz@6458 1721 // 64-bit). If "aligned" is true, the "from" and "to" addresses
goetz@6458 1722 // are assumed to be heapword aligned.
goetz@6458 1723 //
goetz@6458 1724 // Arguments:
goetz@6458 1725 // from: R3_ARG1
goetz@6458 1726 // to: R4_ARG2
goetz@6458 1727 // count: R5_ARG3 treated as signed
goetz@6458 1728 //
goetz@6458 1729 void generate_disjoint_long_copy_core(bool aligned) {
goetz@6458 1730 Register tmp1 = R6_ARG4;
goetz@6458 1731 Register tmp2 = R7_ARG5;
goetz@6458 1732 Register tmp3 = R8_ARG6;
goetz@6458 1733 Register tmp4 = R0;
goetz@6458 1734
goetz@6458 1735 Label l_1, l_2, l_3, l_4;
goetz@6458 1736
goetz@6458 1737 { // FasterArrayCopy
goetz@6458 1738 __ cmpwi(CCR0, R5_ARG3, 3);
goetz@6458 1739 __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
goetz@6458 1740
goetz@6458 1741 __ srdi(tmp1, R5_ARG3, 2);
goetz@6458 1742 __ andi_(R5_ARG3, R5_ARG3, 3);
goetz@6458 1743 __ mtctr(tmp1);
goetz@6458 1744
goetz@6458 1745 __ bind(l_4);
goetz@6458 1746 // Use unrolled version for mass copying (copy 4 elements at a time).
goetz@6458 1747 // Load feeding store gets zero latency on Power6, however not on Power5.
goetz@6458 1748 // Therefore, the following sequence is made for the good of both.
goetz@6458 1749 __ ld(tmp1, 0, R3_ARG1);
goetz@6458 1750 __ ld(tmp2, 8, R3_ARG1);
goetz@6458 1751 __ ld(tmp3, 16, R3_ARG1);
goetz@6458 1752 __ ld(tmp4, 24, R3_ARG1);
goetz@6458 1753 __ std(tmp1, 0, R4_ARG2);
goetz@6458 1754 __ std(tmp2, 8, R4_ARG2);
goetz@6458 1755 __ std(tmp3, 16, R4_ARG2);
goetz@6458 1756 __ std(tmp4, 24, R4_ARG2);
goetz@6458 1757 __ addi(R3_ARG1, R3_ARG1, 32);
goetz@6458 1758 __ addi(R4_ARG2, R4_ARG2, 32);
goetz@6458 1759 __ bdnz(l_4);
goetz@6458 1760 }
goetz@6458 1761
goetz@6458 1762 // copy 1 element at a time
goetz@6458 1763 __ bind(l_3);
goetz@6458 1764 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1765 __ beq(CCR0, l_1);
goetz@6458 1766
goetz@6458 1767 { // FasterArrayCopy
goetz@6458 1768 __ mtctr(R5_ARG3);
goetz@6458 1769 __ addi(R3_ARG1, R3_ARG1, -8);
goetz@6458 1770 __ addi(R4_ARG2, R4_ARG2, -8);
goetz@6458 1771
goetz@6458 1772 __ bind(l_2);
goetz@6458 1773 __ ldu(R0, 8, R3_ARG1);
goetz@6458 1774 __ stdu(R0, 8, R4_ARG2);
goetz@6458 1775 __ bdnz(l_2);
goetz@6458 1776
goetz@6458 1777 }
goetz@6458 1778 __ bind(l_1);
goetz@6458 1779 }
goetz@6458 1780
goetz@6458 1781 // Generate stub for disjoint long copy. If "aligned" is true, the
goetz@6458 1782 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1783 //
goetz@6458 1784 // Arguments for generated stub:
goetz@6458 1785 // from: R3_ARG1
goetz@6458 1786 // to: R4_ARG2
goetz@6458 1787 // count: R5_ARG3 treated as signed
goetz@6458 1788 //
goetz@6458 1789 address generate_disjoint_long_copy(bool aligned, const char * name) {
goetz@6458 1790 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1791 address start = __ function_entry();
goetz@6458 1792 generate_disjoint_long_copy_core(aligned);
goetz@6458 1793 __ blr();
goetz@6458 1794
goetz@6458 1795 return start;
goetz@6458 1796 }
goetz@6458 1797
goetz@6458 1798 // Generate core code for conjoint long copy (and oop copy on
goetz@6458 1799 // 64-bit). If "aligned" is true, the "from" and "to" addresses
goetz@6458 1800 // are assumed to be heapword aligned.
goetz@6458 1801 //
goetz@6458 1802 // Arguments:
goetz@6458 1803 // from: R3_ARG1
goetz@6458 1804 // to: R4_ARG2
goetz@6458 1805 // count: R5_ARG3 treated as signed
goetz@6458 1806 //
goetz@6458 1807 void generate_conjoint_long_copy_core(bool aligned) {
goetz@6458 1808 Register tmp1 = R6_ARG4;
goetz@6458 1809 Register tmp2 = R7_ARG5;
goetz@6458 1810 Register tmp3 = R8_ARG6;
goetz@6458 1811 Register tmp4 = R0;
goetz@6458 1812
goetz@6458 1813 Label l_1, l_2, l_3, l_4, l_5;
goetz@6458 1814
goetz@6458 1815 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1816 __ beq(CCR0, l_1);
goetz@6458 1817
goetz@6458 1818 { // FasterArrayCopy
goetz@6458 1819 __ sldi(R5_ARG3, R5_ARG3, 3);
goetz@6458 1820 __ add(R3_ARG1, R3_ARG1, R5_ARG3);
goetz@6458 1821 __ add(R4_ARG2, R4_ARG2, R5_ARG3);
goetz@6458 1822 __ srdi(R5_ARG3, R5_ARG3, 3);
goetz@6458 1823
goetz@6458 1824 __ cmpwi(CCR0, R5_ARG3, 3);
goetz@6458 1825 __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
goetz@6458 1826
goetz@6458 1827 __ srdi(tmp1, R5_ARG3, 2);
goetz@6458 1828 __ andi(R5_ARG3, R5_ARG3, 3);
goetz@6458 1829 __ mtctr(tmp1);
goetz@6458 1830
goetz@6458 1831 __ bind(l_4);
goetz@6458 1832 // Use unrolled version for mass copying (copy 4 elements at a time).
goetz@6458 1833 // Load feeding store gets zero latency on Power6, however not on Power5.
goetz@6458 1834 // Therefore, the following sequence is made for the good of both.
goetz@6458 1835 __ addi(R3_ARG1, R3_ARG1, -32);
goetz@6458 1836 __ addi(R4_ARG2, R4_ARG2, -32);
goetz@6458 1837 __ ld(tmp4, 24, R3_ARG1);
goetz@6458 1838 __ ld(tmp3, 16, R3_ARG1);
goetz@6458 1839 __ ld(tmp2, 8, R3_ARG1);
goetz@6458 1840 __ ld(tmp1, 0, R3_ARG1);
goetz@6458 1841 __ std(tmp4, 24, R4_ARG2);
goetz@6458 1842 __ std(tmp3, 16, R4_ARG2);
goetz@6458 1843 __ std(tmp2, 8, R4_ARG2);
goetz@6458 1844 __ std(tmp1, 0, R4_ARG2);
goetz@6458 1845 __ bdnz(l_4);
goetz@6458 1846
goetz@6458 1847 __ cmpwi(CCR0, R5_ARG3, 0);
goetz@6458 1848 __ beq(CCR0, l_1);
goetz@6458 1849
goetz@6458 1850 __ bind(l_5);
goetz@6458 1851 __ mtctr(R5_ARG3);
goetz@6458 1852 __ bind(l_3);
goetz@6458 1853 __ ld(R0, -8, R3_ARG1);
goetz@6458 1854 __ std(R0, -8, R4_ARG2);
goetz@6458 1855 __ addi(R3_ARG1, R3_ARG1, -8);
goetz@6458 1856 __ addi(R4_ARG2, R4_ARG2, -8);
goetz@6458 1857 __ bdnz(l_3);
goetz@6458 1858
goetz@6458 1859 }
goetz@6458 1860 __ bind(l_1);
goetz@6458 1861 }
goetz@6458 1862
goetz@6458 1863 // Generate stub for conjoint long copy. If "aligned" is true, the
goetz@6458 1864 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1865 //
goetz@6458 1866 // Arguments for generated stub:
goetz@6458 1867 // from: R3_ARG1
goetz@6458 1868 // to: R4_ARG2
goetz@6458 1869 // count: R5_ARG3 treated as signed
goetz@6458 1870 //
goetz@6458 1871 address generate_conjoint_long_copy(bool aligned, const char * name) {
goetz@6458 1872 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1873 address start = __ function_entry();
goetz@6458 1874
goetz@6511 1875 #if defined(ABI_ELFv2)
goetz@6511 1876 address nooverlap_target = aligned ?
goetz@6511 1877 StubRoutines::arrayof_jlong_disjoint_arraycopy() :
goetz@6511 1878 StubRoutines::jlong_disjoint_arraycopy();
goetz@6511 1879 #else
goetz@6458 1880 address nooverlap_target = aligned ?
goetz@6458 1881 ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
goetz@6458 1882 ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
goetz@6511 1883 #endif
goetz@6458 1884
goetz@6458 1885 array_overlap_test(nooverlap_target, 3);
goetz@6458 1886 generate_conjoint_long_copy_core(aligned);
goetz@6458 1887
goetz@6458 1888 __ blr();
goetz@6458 1889
goetz@6458 1890 return start;
goetz@6458 1891 }
goetz@6458 1892
goetz@6458 1893 // Generate stub for conjoint oop copy. If "aligned" is true, the
goetz@6458 1894 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1895 //
goetz@6458 1896 // Arguments for generated stub:
goetz@6458 1897 // from: R3_ARG1
goetz@6458 1898 // to: R4_ARG2
goetz@6458 1899 // count: R5_ARG3 treated as signed
goetz@6458 1900 // dest_uninitialized: G1 support
goetz@6458 1901 //
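  // Overall shape (illustrative sketch only, not emitted code): the stub wraps
  // one of the copy cores above in the GC write barriers, choosing the element
  // size from UseCompressedOops:
  //
  //   gen_write_ref_array_pre_barrier(...);             // G1 pre barrier
  //   array_overlap_test(...);                          // branch to disjoint stub if no overlap
  //   UseCompressedOops ? generate_conjoint_int_copy_core(...)    // narrow oops, 4 bytes
  //                     : generate_conjoint_long_copy_core(...);  // wide oops,   8 bytes
  //   gen_write_ref_array_post_barrier(...);            // card marks / G1 post barrier
  //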
goetz@6458 1902 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
goetz@6458 1903 StubCodeMark mark(this, "StubRoutines", name);
goetz@6458 1904
goetz@6511 1905 address start = __ function_entry();
goetz@6458 1906
goetz@6511 1907 #if defined(ABI_ELFv2)
goetz@6511 1908 address nooverlap_target = aligned ?
goetz@6511 1909 StubRoutines::arrayof_oop_disjoint_arraycopy() :
goetz@6511 1910 StubRoutines::oop_disjoint_arraycopy();
goetz@6511 1911 #else
goetz@6458 1912 address nooverlap_target = aligned ?
goetz@6458 1913 ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
goetz@6458 1914 ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
goetz@6511 1915 #endif
goetz@6458 1916
goetz@6458 1917 gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
goetz@6458 1918
goetz@6458 1919 // Save arguments.
goetz@6458 1920 __ mr(R9_ARG7, R4_ARG2);
goetz@6458 1921 __ mr(R10_ARG8, R5_ARG3);
goetz@6458 1922
goetz@6458 1923 if (UseCompressedOops) {
goetz@6458 1924 array_overlap_test(nooverlap_target, 2);
goetz@6458 1925 generate_conjoint_int_copy_core(aligned);
goetz@6458 1926 } else {
goetz@6458 1927 array_overlap_test(nooverlap_target, 3);
goetz@6458 1928 generate_conjoint_long_copy_core(aligned);
goetz@6458 1929 }
goetz@6458 1930
goetz@6495 1931 gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
goetz@6458 1932 return start;
goetz@6458 1933 }
goetz@6458 1934
goetz@6458 1935 // Generate stub for disjoint oop copy. If "aligned" is true, the
goetz@6458 1936 // "from" and "to" addresses are assumed to be heapword aligned.
goetz@6458 1937 //
goetz@6458 1938 // Arguments for generated stub:
goetz@6458 1939 // from: R3_ARG1
goetz@6458 1940 // to: R4_ARG2
goetz@6458 1941 // count: R5_ARG3 treated as signed
goetz@6458 1942 // dest_uninitialized: G1 support
goetz@6458 1943 //
goetz@6458 1944 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
goetz@6458 1945 StubCodeMark mark(this, "StubRoutines", name);
goetz@6511 1946 address start = __ function_entry();
goetz@6458 1947
goetz@6458 1948 gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
goetz@6458 1949
goetz@6458 1950 // Save some arguments; the copy core destroys them and they are
goetz@6458 1951 // needed for the post barrier.
goetz@6458 1952 __ mr(R9_ARG7, R4_ARG2);
goetz@6458 1953 __ mr(R10_ARG8, R5_ARG3);
goetz@6458 1954
goetz@6458 1955 if (UseCompressedOops) {
goetz@6458 1956 generate_disjoint_int_copy_core(aligned);
goetz@6458 1957 } else {
goetz@6458 1958 generate_disjoint_long_copy_core(aligned);
goetz@6458 1959 }
goetz@6458 1960
goetz@6495 1961 gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
goetz@6458 1962
goetz@6458 1963 return start;
goetz@6458 1964 }
goetz@6458 1965
goetz@6458 1966 void generate_arraycopy_stubs() {
goetz@6458 1967 // Note: the disjoint stubs must be generated first, some of
goetz@6458 1968 // the conjoint stubs use them.
goetz@6458 1969
goetz@6458 1970 // non-aligned disjoint versions
goetz@6458 1971 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
goetz@6458 1972 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
goetz@6458 1973 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
goetz@6458 1974 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
goetz@6458 1975 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
goetz@6458 1976 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
goetz@6458 1977
goetz@6458 1978 // aligned disjoint versions
goetz@6458 1979 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
goetz@6458 1980 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
goetz@6458 1981 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
goetz@6458 1982 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
goetz@6458 1983 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
goetz@6458 1984 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
goetz@6458 1985
goetz@6458 1986 // non-aligned conjoint versions
goetz@6458 1987 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
goetz@6458 1988 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
goetz@6458 1989 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
goetz@6458 1990 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy");
goetz@6458 1991 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
goetz@6458 1992 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
goetz@6458 1993
goetz@6458 1994 // aligned conjoint versions
goetz@6458 1995 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
goetz@6458 1996 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
goetz@6458 1997 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
goetz@6458 1998 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
goetz@6458 1999 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
goetz@6458 2000 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
goetz@6458 2001
goetz@6458 2002 // fill routines
goetz@6458 2003 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
goetz@6458 2004 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
goetz@6458 2005 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
goetz@6458 2006 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
goetz@6458 2007 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
goetz@6458 2008 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
goetz@6458 2009 }
goetz@6458 2010
goetz@6458 2011 // Safefetch stubs.
goetz@6458 2012 void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
goetz@6458 2013 // safefetch signatures:
goetz@6458 2014 // int SafeFetch32(int* adr, int errValue);
goetz@6458 2015 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
goetz@6458 2016 //
goetz@6458 2017 // arguments:
goetz@6458 2018 // R3_ARG1 = adr
goetz@6458 2019 // R4_ARG2 = errValue
goetz@6458 2020 //
goetz@6458 2021 // result:
goetz@6458 2022 // R3_RET = *adr or errValue
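    //
    // Typical caller-side use (illustrative sketch only; 'maybe_bad' is a
    // hypothetical address that may be unmapped):
    //
    //   int v = SafeFetch32((int*) maybe_bad, /*errValue*/ -1);
    //   // v is -1 if the load faulted, otherwise the value read from memory
    //   // (callers choose an errValue they can distinguish from real data).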
goetz@6458 2023
goetz@6458 2024 StubCodeMark mark(this, "StubRoutines", name);
goetz@6458 2025
goetz@6458 2026 // Entry point, pc or function descriptor.
goetz@6511 2027 *entry = __ function_entry();
goetz@6458 2028
goetz@6458 2029 // Load *adr into R4_ARG2, may fault.
goetz@6458 2030 *fault_pc = __ pc();
goetz@6458 2031 switch (size) {
goetz@6458 2032 case 4:
goetz@6458 2033 // int32_t, sign-extended
goetz@6458 2034 __ lwa(R4_ARG2, 0, R3_ARG1);
goetz@6458 2035 break;
goetz@6458 2036 case 8:
goetz@6458 2037 // int64_t
goetz@6458 2038 __ ld(R4_ARG2, 0, R3_ARG1);
goetz@6458 2039 break;
goetz@6458 2040 default:
goetz@6458 2041 ShouldNotReachHere();
goetz@6458 2042 }
goetz@6458 2043
goetz@6458 2044 // return errValue or *adr
goetz@6458 2045 *continuation_pc = __ pc();
goetz@6458 2046 __ mr(R3_RET, R4_ARG2);
goetz@6458 2047 __ blr();
goetz@6458 2048 }
goetz@6458 2049
goetz@6458 2050 // Initialization
goetz@6458 2051 void generate_initial() {
goetz@6458 2052 // Generates the initial stubs and initializes their entry points
goetz@6458 2053
goetz@6458 2054 // Entry points that exist in all platforms.
goetz@6458 2055 // Note: This is code that could be shared among different platforms - however the
goetz@6458 2056 // benefit seems to be smaller than the disadvantage of having a
goetz@6458 2057 // much more complicated generator structure. See also comment in
goetz@6458 2058 // stubRoutines.hpp.
goetz@6458 2059
goetz@6458 2060 StubRoutines::_forward_exception_entry = generate_forward_exception();
goetz@6458 2061 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
goetz@6458 2062 StubRoutines::_catch_exception_entry = generate_catch_exception();
goetz@6501 2063
goetz@6501 2064 // Build this early so it's available for the interpreter.
goetz@6501 2065 StubRoutines::_throw_StackOverflowError_entry =
goetz@6501 2066 generate_throw_exception("StackOverflowError throw_exception",
goetz@6501 2067 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
goetz@6458 2068 }
goetz@6458 2069
goetz@6458 2070 void generate_all() {
goetz@6458 2071 // Generates all stubs and initializes the entry points
goetz@6458 2072
goetz@6458 2073 // These entry points require SharedInfo::stack0 to be set up in
goetz@6458 2074 // non-core builds
goetz@6458 2075 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
goetz@6458 2076 // Handle IncompatibleClassChangeError in itable stubs.
goetz@6458 2077 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
goetz@6458 2078 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
goetz@6458 2079
goetz@6458 2080 StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
goetz@6458 2081
goetz@6458 2082 // support for verify_oop (must happen after universe_init)
goetz@6458 2083 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
goetz@6458 2084
goetz@6458 2085 // arraycopy stubs used by compilers
goetz@6458 2086 generate_arraycopy_stubs();
goetz@6458 2087
goetz@6508 2088 if (UseAESIntrinsics) {
goetz@6508 2089 guarantee(!UseAESIntrinsics, "not yet implemented.");
goetz@6508 2090 }
goetz@6508 2091
goetz@6458 2092 // PPC uses stubs for safefetch.
goetz@6458 2093 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
goetz@6458 2094 &StubRoutines::_safefetch32_fault_pc,
goetz@6458 2095 &StubRoutines::_safefetch32_continuation_pc);
goetz@6458 2096 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
goetz@6458 2097 &StubRoutines::_safefetchN_fault_pc,
goetz@6458 2098 &StubRoutines::_safefetchN_continuation_pc);
goetz@6458 2099 }
goetz@6458 2100
goetz@6458 2101 public:
goetz@6458 2102 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
goetz@6458 2103 // replace the standard masm with a special one:
goetz@6458 2104 _masm = new MacroAssembler(code);
goetz@6458 2105 if (all) {
goetz@6458 2106 generate_all();
goetz@6458 2107 } else {
goetz@6458 2108 generate_initial();
goetz@6458 2109 }
goetz@6458 2110 }
goetz@6458 2111 };
goetz@6458 2112
goetz@6458 2113 void StubGenerator_generate(CodeBuffer* code, bool all) {
goetz@6458 2114 StubGenerator g(code, all);
goetz@6458 2115 }
