src/cpu/ppc/vm/stubGenerator_ppc.cpp

changeset 7994:04ff2f6cd0eb (merge)
author: aoqi
date: Tue, 17 Oct 2017 12:58:25 +0800
parent 7535:7ae4e26cb1e0, child 8856:ac27a9c85bea
aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * Copyright 2012, 2014 SAP AG. All rights reserved.
aoqi@0 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 5 *
aoqi@0 6 * This code is free software; you can redistribute it and/or modify it
aoqi@0 7 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 8 * published by the Free Software Foundation.
aoqi@0 9 *
aoqi@0 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 13 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 14 * accompanied this code).
aoqi@0 15 *
aoqi@0 16 * You should have received a copy of the GNU General Public License version
aoqi@0 17 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 19 *
aoqi@0 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 21 * or visit www.oracle.com if you need additional information or have any
aoqi@0 22 * questions.
aoqi@0 23 *
aoqi@0 24 */
aoqi@0 25
aoqi@0 26 #include "precompiled.hpp"
aoqi@0 27 #include "asm/macroAssembler.inline.hpp"
aoqi@0 28 #include "interpreter/interpreter.hpp"
aoqi@0 29 #include "nativeInst_ppc.hpp"
aoqi@0 30 #include "oops/instanceOop.hpp"
aoqi@0 31 #include "oops/method.hpp"
aoqi@0 32 #include "oops/objArrayKlass.hpp"
aoqi@0 33 #include "oops/oop.inline.hpp"
aoqi@0 34 #include "prims/methodHandles.hpp"
aoqi@0 35 #include "runtime/frame.inline.hpp"
aoqi@0 36 #include "runtime/handles.inline.hpp"
aoqi@0 37 #include "runtime/sharedRuntime.hpp"
aoqi@0 38 #include "runtime/stubCodeGenerator.hpp"
aoqi@0 39 #include "runtime/stubRoutines.hpp"
aoqi@0 40 #include "utilities/top.hpp"
aoqi@0 41 #include "runtime/thread.inline.hpp"
aoqi@0 42
aoqi@0 43 #define __ _masm->
aoqi@0 44
aoqi@0 45 #ifdef PRODUCT
aoqi@0 46 #define BLOCK_COMMENT(str) // nothing
aoqi@0 47 #else
aoqi@0 48 #define BLOCK_COMMENT(str) __ block_comment(str)
aoqi@0 49 #endif
aoqi@0 50
aoqi@0 51 class StubGenerator: public StubCodeGenerator {
aoqi@0 52 private:
aoqi@0 53
aoqi@0 54 // Call stubs are used to call Java from C
aoqi@0 55 //
aoqi@0 56 // Arguments:
aoqi@0 57 //
aoqi@0 58 // R3 - call wrapper address : address
aoqi@0 59 // R4 - result : intptr_t*
aoqi@0 60 // R5 - result type : BasicType
aoqi@0 61 // R6 - method : Method
aoqi@0 62 // R7 - frame mgr entry point : address
aoqi@0 63 // R8 - parameter block : intptr_t*
aoqi@0 64 // R9 - parameter count in words : int
aoqi@0 65 // R10 - thread : Thread*
aoqi@0 66 //
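  // For orientation, a hedged sketch (not part of this file) of how the VM side
  // reaches this stub: JavaCalls::call_helper() invokes the generated code through
  // the function pointer returned by StubRoutines::call_stub(), passing roughly
  // the arguments listed above:
  //
  //   StubRoutines::call_stub()((address)&link,             // R3: call wrapper
  //                             result_val_address,         // R4: where to store the result
  //                             result_type,                // R5: BasicType of the result
  //                             method(),                   // R6: Method*
  //                             entry_point,                // R7: frame manager / native entry
  //                             args->parameters(),         // R8: parameter block
  //                             args->size_of_parameters(), // R9: count in words
  //                             thread);                    // R10: current thread
  //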
aoqi@0 67 address generate_call_stub(address& return_address) {
aoqi@0 68 // Set up a new C frame, copy the Java arguments, call the frame manager or
aoqi@0 69 // native_entry, and process the result.
aoqi@0 70
aoqi@0 71 StubCodeMark mark(this, "StubRoutines", "call_stub");
aoqi@0 72
aoqi@0 73 address start = __ function_entry();
aoqi@0 74
aoqi@0 75 // some sanity checks
aoqi@0 76 assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned");
aoqi@0 77 assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned");
aoqi@0 78 assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned");
aoqi@0 79 assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
aoqi@0 80 assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned");
aoqi@0 81
aoqi@0 82 Register r_arg_call_wrapper_addr = R3;
aoqi@0 83 Register r_arg_result_addr = R4;
aoqi@0 84 Register r_arg_result_type = R5;
aoqi@0 85 Register r_arg_method = R6;
aoqi@0 86 Register r_arg_entry = R7;
aoqi@0 87 Register r_arg_thread = R10;
aoqi@0 88
aoqi@0 89 Register r_temp = R24;
aoqi@0 90 Register r_top_of_arguments_addr = R25;
aoqi@0 91 Register r_entryframe_fp = R26;
aoqi@0 92
aoqi@0 93 {
aoqi@0 94 // Stack on entry to call_stub:
aoqi@0 95 //
aoqi@0 96 // F1 [C_FRAME]
aoqi@0 97 // ...
aoqi@0 98
aoqi@0 99 Register r_arg_argument_addr = R8;
aoqi@0 100 Register r_arg_argument_count = R9;
aoqi@0 101 Register r_frame_alignment_in_bytes = R27;
aoqi@0 102 Register r_argument_addr = R28;
aoqi@0 103 Register r_argumentcopy_addr = R29;
aoqi@0 104 Register r_argument_size_in_bytes = R30;
aoqi@0 105 Register r_frame_size = R23;
aoqi@0 106
aoqi@0 107 Label arguments_copied;
aoqi@0 108
aoqi@0 109 // Save LR/CR to caller's C_FRAME.
aoqi@0 110 __ save_LR_CR(R0);
aoqi@0 111
aoqi@0 112 // Zero extend arg_argument_count.
aoqi@0 113 __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
aoqi@0 114
aoqi@0 115 // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
aoqi@0 116 __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
aoqi@0 117
aoqi@0 118 // Keep copy of our frame pointer (caller's SP).
aoqi@0 119 __ mr(r_entryframe_fp, R1_SP);
aoqi@0 120
aoqi@0 121 BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
aoqi@0 122 // Push ENTRY_FRAME including arguments:
aoqi@0 123 //
aoqi@0 124 // F0 [TOP_IJAVA_FRAME_ABI]
aoqi@0 125 // alignment (optional)
aoqi@0 126 // [outgoing Java arguments]
aoqi@0 127 // [ENTRY_FRAME_LOCALS]
aoqi@0 128 // F1 [C_FRAME]
aoqi@0 129 // ...
aoqi@0 130
aoqi@0 131 // calculate frame size
aoqi@0 132
aoqi@0 133 // unaligned size of arguments
aoqi@0 134 __ sldi(r_argument_size_in_bytes,
aoqi@0 135 r_arg_argument_count, Interpreter::logStackElementSize);
aoqi@0 136 // arguments alignment (max 1 slot)
aoqi@0 137 // FIXME: use round_to() here
aoqi@0 138 __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
aoqi@0 139 __ sldi(r_frame_alignment_in_bytes,
aoqi@0 140 r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
aoqi@0 141
aoqi@0 142 // size = unaligned size of arguments + top abi's size
aoqi@0 143 __ addi(r_frame_size, r_argument_size_in_bytes,
aoqi@0 144 frame::top_ijava_frame_abi_size);
aoqi@0 145 // size += arguments alignment
aoqi@0 146 __ add(r_frame_size,
aoqi@0 147 r_frame_size, r_frame_alignment_in_bytes);
aoqi@0 148 // size += size of call_stub locals
aoqi@0 149 __ addi(r_frame_size,
aoqi@0 150 r_frame_size, frame::entry_frame_locals_size);
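      // Worked example of the size calculation above (a sketch; the symbolic
      // constants live in frame_ppc.hpp): for an odd argument count N,
      //   r_frame_size = 8*N                        // Java arguments (8-byte stack slots)
      //                + 8                          // one alignment slot
      //                + top_ijava_frame_abi_size
      //                + entry_frame_locals_size    // multiple of 16 (asserted above)
      // Assuming top_ijava_frame_abi_size is, like the structs asserted at the top
      // of this stub, a multiple of 16, the pushed ENTRY_FRAME keeps R1_SP 16-byte
      // aligned; for even N the alignment slot is omitted.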
aoqi@0 151
aoqi@0 152 // push ENTRY_FRAME
aoqi@0 153 __ push_frame(r_frame_size, r_temp);
aoqi@0 154
aoqi@0 155 // initialize call_stub locals (step 1)
aoqi@0 156 __ std(r_arg_call_wrapper_addr,
aoqi@0 157 _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
aoqi@0 158 __ std(r_arg_result_addr,
aoqi@0 159 _entry_frame_locals_neg(result_address), r_entryframe_fp);
aoqi@0 160 __ std(r_arg_result_type,
aoqi@0 161 _entry_frame_locals_neg(result_type), r_entryframe_fp);
aoqi@0 162 // we will save arguments_tos_address later
aoqi@0 163
aoqi@0 164
aoqi@0 165 BLOCK_COMMENT("Copy Java arguments");
aoqi@0 166 // copy Java arguments
aoqi@0 167
aoqi@0 168 // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
aoqi@0 169 // FIXME: why not simply use SP+frame::top_ijava_frame_size?
aoqi@0 170 __ addi(r_top_of_arguments_addr,
aoqi@0 171 R1_SP, frame::top_ijava_frame_abi_size);
aoqi@0 172 __ add(r_top_of_arguments_addr,
aoqi@0 173 r_top_of_arguments_addr, r_frame_alignment_in_bytes);
aoqi@0 174
aoqi@0 175 // any arguments to copy?
aoqi@0 176 __ cmpdi(CCR0, r_arg_argument_count, 0);
aoqi@0 177 __ beq(CCR0, arguments_copied);
aoqi@0 178
aoqi@0 179 // prepare loop and copy arguments in reverse order
aoqi@0 180 {
aoqi@0 181 // init CTR with arg_argument_count
aoqi@0 182 __ mtctr(r_arg_argument_count);
aoqi@0 183
aoqi@0 184 // let r_argumentcopy_addr point to last outgoing Java arguments
aoqi@0 185 __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
aoqi@0 186
aoqi@0 187 // let r_argument_addr point to last incoming java argument
aoqi@0 188 __ add(r_argument_addr,
aoqi@0 189 r_arg_argument_addr, r_argument_size_in_bytes);
aoqi@0 190 __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
aoqi@0 191
aoqi@0 192 // now loop while CTR > 0 and copy arguments
aoqi@0 193 {
aoqi@0 194 Label next_argument;
aoqi@0 195 __ bind(next_argument);
aoqi@0 196
aoqi@0 197 __ ld(r_temp, 0, r_argument_addr);
aoqi@0 198 // argument_addr--;
aoqi@0 199 __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
aoqi@0 200 __ std(r_temp, 0, r_argumentcopy_addr);
aoqi@0 201 // argumentcopy_addr++;
aoqi@0 202 __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
aoqi@0 203
aoqi@0 204 __ bdnz(next_argument);
aoqi@0 205 }
aoqi@0 206 }
aoqi@0 207
aoqi@0 208 // Arguments copied, continue.
aoqi@0 209 __ bind(arguments_copied);
aoqi@0 210 }
aoqi@0 211
aoqi@0 212 {
aoqi@0 213 BLOCK_COMMENT("Call frame manager or native entry.");
aoqi@0 214 // Call frame manager or native entry.
goetz@7222 215 Register r_new_arg_entry = R14;
aoqi@0 216 assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
aoqi@0 217 r_arg_method, r_arg_thread);
aoqi@0 218
aoqi@0 219 __ mr(r_new_arg_entry, r_arg_entry);
aoqi@0 220
aoqi@0 221 // Register state on entry to frame manager / native entry:
aoqi@0 222 //
aoqi@0 223 // tos - intptr_t* sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
aoqi@0 224 // R19_method - Method
aoqi@0 225 // R16_thread - JavaThread*
aoqi@0 226
aoqi@0 227 // Tos must point to last argument - element_size.
aoqi@0 228 #ifdef CC_INTERP
aoqi@0 229 const Register tos = R17_tos;
aoqi@0 230 #else
aoqi@0 231 const Register tos = R15_esp;
aoqi@0 232 #endif
aoqi@0 233 __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
aoqi@0 234
aoqi@0 235 // initialize call_stub locals (step 2)
aoqi@0 236 // now save tos as arguments_tos_address
aoqi@0 237 __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
aoqi@0 238
aoqi@0 239 // load argument registers for call
aoqi@0 240 __ mr(R19_method, r_arg_method);
aoqi@0 241 __ mr(R16_thread, r_arg_thread);
aoqi@0 242 assert(tos != r_arg_method, "trashed r_arg_method");
aoqi@0 243 assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
aoqi@0 244
aoqi@0 245 // Set R15_prev_state to 0 for simplifying checks in callee.
aoqi@0 246 #ifdef CC_INTERP
aoqi@0 247 __ li(R15_prev_state, 0);
aoqi@0 248 #else
aoqi@0 249 __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
aoqi@0 250 #endif
aoqi@0 251 // Stack on entry to frame manager / native entry:
aoqi@0 252 //
aoqi@0 253 // F0 [TOP_IJAVA_FRAME_ABI]
aoqi@0 254 // alignment (optional)
aoqi@0 255 // [outgoing Java arguments]
aoqi@0 256 // [ENTRY_FRAME_LOCALS]
aoqi@0 257 // F1 [C_FRAME]
aoqi@0 258 // ...
aoqi@0 259 //
aoqi@0 260
aoqi@0 261 // global toc register
aoqi@0 262 __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);
aoqi@0 263
aoqi@0 264 // Load narrow oop base.
aoqi@0 265 __ reinit_heapbase(R30, R11_scratch1);
aoqi@0 266
aoqi@0 267 // Remember the senderSP so the interpreter can pop c2i arguments off the stack
aoqi@0 268 // when called via a c2i.
aoqi@0 269
aoqi@0 270 // Pass initial_caller_sp to framemanager.
aoqi@0 271 __ mr(R21_tmp1, R1_SP);
aoqi@0 272
aoqi@0 273 // Do a light-weight C-call here, r_new_arg_entry holds the address
aoqi@0 274 // of the interpreter entry point (frame manager or native entry)
aoqi@0 275 // and save runtime-value of LR in return_address.
aoqi@0 276 assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
aoqi@0 277 "trashed r_new_arg_entry");
aoqi@0 278 return_address = __ call_stub(r_new_arg_entry);
aoqi@0 279 }
aoqi@0 280
aoqi@0 281 {
aoqi@0 282 BLOCK_COMMENT("Returned from frame manager or native entry.");
aoqi@0 283 // Returned from frame manager or native entry.
aoqi@0 284 // Now pop frame, process result, and return to caller.
aoqi@0 285
aoqi@0 286 // Stack on exit from frame manager / native entry:
aoqi@0 287 //
aoqi@0 288 // F0 [ABI]
aoqi@0 289 // ...
aoqi@0 290 // [ENTRY_FRAME_LOCALS]
aoqi@0 291 // F1 [C_FRAME]
aoqi@0 292 // ...
aoqi@0 293 //
aoqi@0 294 // Just pop the topmost frame ...
aoqi@0 295 //
aoqi@0 296
aoqi@0 297 Label ret_is_object;
aoqi@0 298 Label ret_is_long;
aoqi@0 299 Label ret_is_float;
aoqi@0 300 Label ret_is_double;
aoqi@0 301
aoqi@0 302 Register r_entryframe_fp = R30;
aoqi@0 303 Register r_lr = R7_ARG5;
aoqi@0 304 Register r_cr = R8_ARG6;
aoqi@0 305
aoqi@0 306 // Reload some volatile registers which we've spilled before the call
aoqi@0 307 // to frame manager / native entry.
aoqi@0 308 // Access all locals via frame pointer, because we know nothing about
aoqi@0 309 // the topmost frame's size.
aoqi@0 310 __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
aoqi@0 311 assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
aoqi@0 312 __ ld(r_arg_result_addr,
aoqi@0 313 _entry_frame_locals_neg(result_address), r_entryframe_fp);
aoqi@0 314 __ ld(r_arg_result_type,
aoqi@0 315 _entry_frame_locals_neg(result_type), r_entryframe_fp);
aoqi@0 316 __ ld(r_cr, _abi(cr), r_entryframe_fp);
aoqi@0 317 __ ld(r_lr, _abi(lr), r_entryframe_fp);
aoqi@0 318
aoqi@0 319 // pop frame and restore non-volatiles, LR and CR
aoqi@0 320 __ mr(R1_SP, r_entryframe_fp);
aoqi@0 321 __ mtcr(r_cr);
aoqi@0 322 __ mtlr(r_lr);
aoqi@0 323
aoqi@0 324 // Store result depending on type. Everything that is not
aoqi@0 325 // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
aoqi@0 326 __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
aoqi@0 327 __ cmpwi(CCR1, r_arg_result_type, T_LONG);
aoqi@0 328 __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
aoqi@0 329 __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
aoqi@0 330
aoqi@0 331 // restore non-volatile registers
aoqi@0 332 __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
aoqi@0 333
aoqi@0 334
aoqi@0 335 // Stack on exit from call_stub:
aoqi@0 336 //
aoqi@0 337 // 0 [C_FRAME]
aoqi@0 338 // ...
aoqi@0 339 //
aoqi@0 340 // no call_stub frames left.
aoqi@0 341
aoqi@0 342 // All non-volatiles have been restored at this point!!
aoqi@0 343 assert(R3_RET == R3, "R3_RET should be R3");
aoqi@0 344
aoqi@0 345 __ beq(CCR0, ret_is_object);
aoqi@0 346 __ beq(CCR1, ret_is_long);
aoqi@0 347 __ beq(CCR5, ret_is_float);
aoqi@0 348 __ beq(CCR6, ret_is_double);
aoqi@0 349
aoqi@0 350 // default:
aoqi@0 351 __ stw(R3_RET, 0, r_arg_result_addr);
aoqi@0 352 __ blr(); // return to caller
aoqi@0 353
aoqi@0 354 // case T_OBJECT:
aoqi@0 355 __ bind(ret_is_object);
aoqi@0 356 __ std(R3_RET, 0, r_arg_result_addr);
aoqi@0 357 __ blr(); // return to caller
aoqi@0 358
aoqi@0 359 // case T_LONG:
aoqi@0 360 __ bind(ret_is_long);
aoqi@0 361 __ std(R3_RET, 0, r_arg_result_addr);
aoqi@0 362 __ blr(); // return to caller
aoqi@0 363
aoqi@0 364 // case T_FLOAT:
aoqi@0 365 __ bind(ret_is_float);
aoqi@0 366 __ stfs(F1_RET, 0, r_arg_result_addr);
aoqi@0 367 __ blr(); // return to caller
aoqi@0 368
aoqi@0 369 // case T_DOUBLE:
aoqi@0 370 __ bind(ret_is_double);
aoqi@0 371 __ stfd(F1_RET, 0, r_arg_result_addr);
aoqi@0 372 __ blr(); // return to caller
aoqi@0 373 }
aoqi@0 374
aoqi@0 375 return start;
aoqi@0 376 }
aoqi@0 377
aoqi@0 378 // Return point for a Java call if there's an exception thrown in
aoqi@0 379 // Java code. The exception is caught and transformed into a
aoqi@0 380 // pending exception stored in JavaThread that can be tested from
aoqi@0 381 // within the VM.
aoqi@0 382 //
aoqi@0 383 address generate_catch_exception() {
aoqi@0 384 StubCodeMark mark(this, "StubRoutines", "catch_exception");
aoqi@0 385
aoqi@0 386 address start = __ pc();
aoqi@0 387
aoqi@0 388 // Registers alive
aoqi@0 389 //
aoqi@0 390 // R16_thread
aoqi@0 391 // R3_ARG1 - address of pending exception
aoqi@0 392 // R4_ARG2 - return address in call stub
aoqi@0 393
aoqi@0 394 const Register exception_file = R21_tmp1;
aoqi@0 395 const Register exception_line = R22_tmp2;
aoqi@0 396
aoqi@0 397 __ load_const(exception_file, (void*)__FILE__);
aoqi@0 398 __ load_const(exception_line, (void*)__LINE__);
aoqi@0 399
aoqi@0 400 __ std(R3_ARG1, thread_(pending_exception));
aoqi@0 401 // store into `char *'
aoqi@0 402 __ std(exception_file, thread_(exception_file));
aoqi@0 403 // store into `int'
aoqi@0 404 __ stw(exception_line, thread_(exception_line));
aoqi@0 405
aoqi@0 406 // complete return to VM
aoqi@0 407 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
aoqi@0 408
aoqi@0 409 __ mtlr(R4_ARG2);
aoqi@0 410 // continue in call stub
aoqi@0 411 __ blr();
aoqi@0 412
aoqi@0 413 return start;
aoqi@0 414 }
aoqi@0 415
aoqi@0 416 // Continuation point for runtime calls returning with a pending
aoqi@0 417 // exception. The pending exception check happened in the runtime
aoqi@0 418 // or native call stub. The pending exception in Thread is
aoqi@0 419 // converted into a Java-level exception.
aoqi@0 420 //
aoqi@0 421 address generate_forward_exception() {
aoqi@0 422 StubCodeMark mark(this, "StubRoutines", "forward_exception");
aoqi@0 423 address start = __ pc();
aoqi@0 424
aoqi@0 425 #if !defined(PRODUCT)
aoqi@0 426 if (VerifyOops) {
aoqi@0 427 // Get pending exception oop.
aoqi@0 428 __ ld(R3_ARG1,
aoqi@0 429 in_bytes(Thread::pending_exception_offset()),
aoqi@0 430 R16_thread);
aoqi@0 431 // Make sure that this code is only executed if there is a pending exception.
aoqi@0 432 {
aoqi@0 433 Label L;
aoqi@0 434 __ cmpdi(CCR0, R3_ARG1, 0);
aoqi@0 435 __ bne(CCR0, L);
aoqi@0 436 __ stop("StubRoutines::forward exception: no pending exception (1)");
aoqi@0 437 __ bind(L);
aoqi@0 438 }
aoqi@0 439 __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
aoqi@0 440 }
aoqi@0 441 #endif
aoqi@0 442
aoqi@0 443 // Save LR/CR and copy exception pc (LR) into R4_ARG2.
aoqi@0 444 __ save_LR_CR(R4_ARG2);
aoqi@0 445 __ push_frame_reg_args(0, R0);
aoqi@0 446 // Find exception handler.
aoqi@0 447 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
aoqi@0 448 SharedRuntime::exception_handler_for_return_address),
aoqi@0 449 R16_thread,
aoqi@0 450 R4_ARG2);
aoqi@0 451 // Copy handler's address.
aoqi@0 452 __ mtctr(R3_RET);
aoqi@0 453 __ pop_frame();
aoqi@0 454 __ restore_LR_CR(R0);
aoqi@0 455
aoqi@0 456 // Set up the arguments for the exception handler:
aoqi@0 457 // - R3_ARG1: exception oop
aoqi@0 458 // - R4_ARG2: exception pc.
aoqi@0 459
aoqi@0 460 // Load pending exception oop.
aoqi@0 461 __ ld(R3_ARG1,
aoqi@0 462 in_bytes(Thread::pending_exception_offset()),
aoqi@0 463 R16_thread);
aoqi@0 464
aoqi@0 465 // The exception pc is the return address in the caller.
aoqi@0 466 // Must load it into R4_ARG2.
aoqi@0 467 __ mflr(R4_ARG2);
aoqi@0 468
aoqi@0 469 #ifdef ASSERT
aoqi@0 470 // Make sure exception is set.
aoqi@0 471 {
aoqi@0 472 Label L;
aoqi@0 473 __ cmpdi(CCR0, R3_ARG1, 0);
aoqi@0 474 __ bne(CCR0, L);
aoqi@0 475 __ stop("StubRoutines::forward exception: no pending exception (2)");
aoqi@0 476 __ bind(L);
aoqi@0 477 }
aoqi@0 478 #endif
aoqi@0 479
aoqi@0 480 // Clear the pending exception.
aoqi@0 481 __ li(R0, 0);
aoqi@0 482 __ std(R0,
aoqi@0 483 in_bytes(Thread::pending_exception_offset()),
aoqi@0 484 R16_thread);
aoqi@0 485 // Jump to exception handler.
aoqi@0 486 __ bctr();
aoqi@0 487
aoqi@0 488 return start;
aoqi@0 489 }
aoqi@0 490
aoqi@0 491 #undef __
aoqi@0 492 #define __ masm->
aoqi@0 493 // Continuation point for throwing of implicit exceptions that are
aoqi@0 494 // not handled in the current activation. Fabricates an exception
aoqi@0 495 // oop and initiates normal exception dispatching in this
aoqi@0 496 // frame. Only callee-saved registers are preserved (through the
aoqi@0 497 // normal register window / RegisterMap handling). If the compiler
aoqi@0 498 // needs all registers to be preserved between the fault point and
aoqi@0 499 // the exception handler then it must assume responsibility for that
aoqi@0 500 // in AbstractCompiler::continuation_for_implicit_null_exception or
aoqi@0 501 // continuation_for_implicit_division_by_zero_exception. All other
aoqi@0 502 // implicit exceptions (e.g., NullPointerException or
aoqi@0 503 // AbstractMethodError on entry) are either at call sites or
aoqi@0 504 // otherwise assume that stack unwinding will be initiated, so
aoqi@0 505 // caller saved registers were assumed volatile in the compiler.
aoqi@0 506 //
aoqi@0 507 // Note that we generate only this stub into a RuntimeStub, because
aoqi@0 508 // it needs to be properly traversed and ignored during GC, so we
aoqi@0 509 // change the meaning of the "__" macro within this method.
aoqi@0 510 //
aoqi@0 511 // Note: the routine set_pc_not_at_call_for_caller in
aoqi@0 512 // SharedRuntime.cpp requires that this code be generated into a
aoqi@0 513 // RuntimeStub.
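  // A typical (hedged) usage sketch from this generator's stub-initialization code
  // elsewhere in this file:
  //
  //   StubRoutines::_throw_StackOverflowError_entry =
  //     generate_throw_exception("StackOverflowError throw_exception",
  //                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),
  //                              false);
  //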
aoqi@0 514 address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
aoqi@0 515 Register arg1 = noreg, Register arg2 = noreg) {
aoqi@0 516 CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
aoqi@0 517 MacroAssembler* masm = new MacroAssembler(&code);
aoqi@0 518
aoqi@0 519 OopMapSet* oop_maps = new OopMapSet();
aoqi@0 520 int frame_size_in_bytes = frame::abi_reg_args_size;
aoqi@0 521 OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
aoqi@0 522
aoqi@0 523 StubCodeMark mark(this, "StubRoutines", "throw_exception");
aoqi@0 524
aoqi@0 525 address start = __ pc();
aoqi@0 526
aoqi@0 527 __ save_LR_CR(R11_scratch1);
aoqi@0 528
aoqi@0 529 // Push a frame.
aoqi@0 530 __ push_frame_reg_args(0, R11_scratch1);
aoqi@0 531
aoqi@0 532 address frame_complete_pc = __ pc();
aoqi@0 533
aoqi@0 534 if (restore_saved_exception_pc) {
aoqi@0 535 __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
aoqi@0 536 }
aoqi@0 537
aoqi@0 538 // Note that we always have a runtime stub frame on the top of
aoqi@0 539 // stack by this point. Remember the offset of the instruction
aoqi@0 540 // whose address will be moved to R11_scratch1.
aoqi@0 541 address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
aoqi@0 542
aoqi@0 543 __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
aoqi@0 544
aoqi@0 545 __ mr(R3_ARG1, R16_thread);
aoqi@0 546 if (arg1 != noreg) {
aoqi@0 547 __ mr(R4_ARG2, arg1);
aoqi@0 548 }
aoqi@0 549 if (arg2 != noreg) {
aoqi@0 550 __ mr(R5_ARG3, arg2);
aoqi@0 551 }
aoqi@0 552 #if defined(ABI_ELFv2)
aoqi@0 553 __ call_c(runtime_entry, relocInfo::none);
aoqi@0 554 #else
aoqi@0 555 __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
aoqi@0 556 #endif
aoqi@0 557
aoqi@0 558 // Set an oopmap for the call site.
aoqi@0 559 oop_maps->add_gc_map((int)(gc_map_pc - start), map);
aoqi@0 560
aoqi@0 561 __ reset_last_Java_frame();
aoqi@0 562
aoqi@0 563 #ifdef ASSERT
aoqi@0 564 // Make sure that this code is only executed if there is a pending
aoqi@0 565 // exception.
aoqi@0 566 {
aoqi@0 567 Label L;
aoqi@0 568 __ ld(R0,
aoqi@0 569 in_bytes(Thread::pending_exception_offset()),
aoqi@0 570 R16_thread);
aoqi@0 571 __ cmpdi(CCR0, R0, 0);
aoqi@0 572 __ bne(CCR0, L);
aoqi@0 573 __ stop("StubRoutines::throw_exception: no pending exception");
aoqi@0 574 __ bind(L);
aoqi@0 575 }
aoqi@0 576 #endif
aoqi@0 577
aoqi@0 578 // Pop frame.
aoqi@0 579 __ pop_frame();
aoqi@0 580
aoqi@0 581 __ restore_LR_CR(R11_scratch1);
aoqi@0 582
aoqi@0 583 __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
aoqi@0 584 __ mtctr(R11_scratch1);
aoqi@0 585 __ bctr();
aoqi@0 586
aoqi@0 587 // Create runtime stub with OopMap.
aoqi@0 588 RuntimeStub* stub =
aoqi@0 589 RuntimeStub::new_runtime_stub(name, &code,
aoqi@0 590 /*frame_complete=*/ (int)(frame_complete_pc - start),
aoqi@0 591 frame_size_in_bytes/wordSize,
aoqi@0 592 oop_maps,
aoqi@0 593 false);
aoqi@0 594 return stub->entry_point();
aoqi@0 595 }
aoqi@0 596 #undef __
aoqi@0 597 #define __ _masm->
aoqi@0 598
aoqi@0 599 // Generate G1 pre-write barrier for array.
aoqi@0 600 //
aoqi@0 601 // Input:
aoqi@0 602 // from - register containing src address (only needed for spilling)
aoqi@0 603 // to - register containing starting address
aoqi@0 604 // count - register containing element count
aoqi@0 605 // tmp - scratch register
aoqi@0 606 //
aoqi@0 607 // Kills:
aoqi@0 608 // nothing
aoqi@0 609 //
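  // In C-like pseudocode, the generated G1 path below is roughly (a sketch only,
  // the active-flag load is spelled out in the assembly):
  //
  //   if (!dest_uninitialized && thread->satb_mark_queue_is_active()) {
  //     // SATB pre-barrier: record the oops that are about to be overwritten
  //     BarrierSet::static_write_ref_array_pre(to, count);
  //   }
  //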
aoqi@0 610 void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
aoqi@0 611 BarrierSet* const bs = Universe::heap()->barrier_set();
aoqi@0 612 switch (bs->kind()) {
aoqi@0 613 case BarrierSet::G1SATBCT:
aoqi@0 614 case BarrierSet::G1SATBCTLogging:
aoqi@0 615 // With G1, don't generate the call if we statically know that the target is uninitialized.
aoqi@0 616 if (!dest_uninitialized) {
aoqi@0 617 const int spill_slots = 4 * wordSize;
aoqi@0 618 const int frame_size = frame::abi_reg_args_size + spill_slots;
aoqi@0 619 Label filtered;
aoqi@0 620
aoqi@0 621 // Is marking active?
aoqi@0 622 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
aoqi@0 623 __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
aoqi@0 624 } else {
aoqi@0 625 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
aoqi@0 626 __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
aoqi@0 627 }
aoqi@0 628 __ cmpdi(CCR0, Rtmp1, 0);
aoqi@0 629 __ beq(CCR0, filtered);
aoqi@0 630
aoqi@0 631 __ save_LR_CR(R0);
aoqi@0 632 __ push_frame_reg_args(spill_slots, R0);
aoqi@0 633 __ std(from, frame_size - 1 * wordSize, R1_SP);
aoqi@0 634 __ std(to, frame_size - 2 * wordSize, R1_SP);
aoqi@0 635 __ std(count, frame_size - 3 * wordSize, R1_SP);
aoqi@0 636
aoqi@0 637 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
aoqi@0 638
aoqi@0 639 __ ld(from, frame_size - 1 * wordSize, R1_SP);
aoqi@0 640 __ ld(to, frame_size - 2 * wordSize, R1_SP);
aoqi@0 641 __ ld(count, frame_size - 3 * wordSize, R1_SP);
aoqi@0 642 __ pop_frame();
aoqi@0 643 __ restore_LR_CR(R0);
aoqi@0 644
aoqi@0 645 __ bind(filtered);
aoqi@0 646 }
aoqi@0 647 break;
aoqi@0 648 case BarrierSet::CardTableModRef:
aoqi@0 649 case BarrierSet::CardTableExtension:
aoqi@0 650 case BarrierSet::ModRef:
aoqi@0 651 break;
aoqi@0 652 default:
aoqi@0 653 ShouldNotReachHere();
aoqi@0 654 }
aoqi@0 655 }
aoqi@0 656
aoqi@0 657 // Generate CMS/G1 post-write barrier for array.
aoqi@0 658 //
aoqi@0 659 // Input:
aoqi@0 660 // addr - register containing starting address
aoqi@0 661 // count - register containing element count
aoqi@0 662 // tmp - scratch register
aoqi@0 663 //
aoqi@0 664 // The input registers and R0 are overwritten.
aoqi@0 665 //
aoqi@0 666 void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
aoqi@0 667 BarrierSet* const bs = Universe::heap()->barrier_set();
aoqi@0 668
aoqi@0 669 switch (bs->kind()) {
aoqi@0 670 case BarrierSet::G1SATBCT:
aoqi@0 671 case BarrierSet::G1SATBCTLogging:
aoqi@0 672 {
aoqi@0 673 if (branchToEnd) {
aoqi@0 674 __ save_LR_CR(R0);
aoqi@0 675 // We need this frame only to spill LR.
aoqi@0 676 __ push_frame_reg_args(0, R0);
aoqi@0 677 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
aoqi@0 678 __ pop_frame();
aoqi@0 679 __ restore_LR_CR(R0);
aoqi@0 680 } else {
aoqi@0 681 // Tail call: fake call from stub caller by branching without linking.
aoqi@0 682 address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
aoqi@0 683 __ mr_if_needed(R3_ARG1, addr);
aoqi@0 684 __ mr_if_needed(R4_ARG2, count);
aoqi@0 685 __ load_const(R11, entry_point, R0);
aoqi@0 686 __ call_c_and_return_to_caller(R11);
aoqi@0 687 }
aoqi@0 688 }
aoqi@0 689 break;
aoqi@0 690 case BarrierSet::CardTableModRef:
aoqi@0 691 case BarrierSet::CardTableExtension:
aoqi@0 692 {
aoqi@0 693 Label Lskip_loop, Lstore_loop;
aoqi@0 694 if (UseConcMarkSweepGC) {
aoqi@0 695 // TODO PPC port: contribute optimization / requires shared changes
aoqi@0 696 __ release();
aoqi@0 697 }
aoqi@0 698
aoqi@0 699 CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
aoqi@0 700 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
aoqi@0 701 assert_different_registers(addr, count, tmp);
aoqi@0 702
aoqi@0 703 __ sldi(count, count, LogBytesPerHeapOop);
aoqi@0 704 __ addi(count, count, -BytesPerHeapOop);
aoqi@0 705 __ add(count, addr, count);
aoqi@0 706 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
aoqi@0 707 __ srdi(addr, addr, CardTableModRefBS::card_shift);
aoqi@0 708 __ srdi(count, count, CardTableModRefBS::card_shift);
aoqi@0 709 __ subf(count, addr, count);
aoqi@0 710 assert_different_registers(R0, addr, count, tmp);
aoqi@0 711 __ load_const(tmp, (address)ct->byte_map_base);
aoqi@0 712 __ addic_(count, count, 1);
aoqi@0 713 __ beq(CCR0, Lskip_loop);
aoqi@0 714 __ li(R0, 0);
aoqi@0 715 __ mtctr(count);
aoqi@0 716 // Byte store loop
aoqi@0 717 __ bind(Lstore_loop);
aoqi@0 718 __ stbx(R0, tmp, addr);
aoqi@0 719 __ addi(addr, addr, 1);
aoqi@0 720 __ bdnz(Lstore_loop);
aoqi@0 721 __ bind(Lskip_loop);
aoqi@0 722
aoqi@0 723 if (!branchToEnd) __ blr();
aoqi@0 724 }
aoqi@0 725 break;
aoqi@0 726 case BarrierSet::ModRef:
aoqi@0 727 if (!branchToEnd) __ blr();
aoqi@0 728 break;
aoqi@0 729 default:
aoqi@0 730 ShouldNotReachHere();
aoqi@0 731 }
aoqi@0 732 }
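  // Card-table arithmetic used in the CardTableModRef case above, as a C sketch
  // (assuming the usual 512-byte cards, i.e. card_shift == 9):
  //
  //   jbyte* first = ct->byte_map_base + ((uintptr_t)addr >> 9);
  //   jbyte* last  = ct->byte_map_base +
  //                  (((uintptr_t)addr + count*BytesPerHeapOop - BytesPerHeapOop) >> 9);
  //   for (jbyte* p = first; p <= last; p++) *p = 0;  // 0 is what the loop stores (dirty)
  //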
aoqi@0 733
aoqi@0 734 // Support for void zero_words_aligned8(HeapWord* to, size_t count)
aoqi@0 735 //
aoqi@0 736 // Arguments:
aoqi@0 737 // to: R3_ARG1, base address, must be 8-byte aligned
aoqi@0 738 // count: R4_ARG2, number of 8-byte words to clear
aoqi@0 739 //
aoqi@0 740 // Destroys:
aoqi@0 741 // R5_ARG3..R7_ARG5, CTR, CCR0 and CCR1
aoqi@0 742 address generate_zero_words_aligned8() {
aoqi@0 743 StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
aoqi@0 744
aoqi@0 745 // Implemented as in ClearArray.
aoqi@0 746 address start = __ function_entry();
aoqi@0 747
aoqi@0 748 Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
aoqi@0 749 Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
aoqi@0 750 Register tmp1_reg = R5_ARG3;
aoqi@0 751 Register tmp2_reg = R6_ARG4;
aoqi@0 752 Register zero_reg = R7_ARG5;
aoqi@0 753
aoqi@0 754 // Procedure for large arrays (uses data cache block zero instruction).
aoqi@0 755 Label dwloop, fast, fastloop, restloop, lastdword, done;
aoqi@0 756 int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords);
aoqi@0 757 int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
aoqi@0 758
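    // Worked example of the alignment math below (a sketch, assuming 128-byte
    // cache lines, i.e. cl_dwords == 16): for base_ptr == ...0x48, base>>3 == 9,
    // so (16 - 9) % 16 == 7 dwords must be cleared one at a time before dcbz
    // may be used; 0x48 + 7*8 == 0x80, the next 128-byte boundary.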
aoqi@0 759 // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
aoqi@0 760 __ dcbtst(base_ptr_reg); // Indicate write access to first cache line ...
aoqi@0 761 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if number of dwords is even.
aoqi@0 762 __ srdi_(tmp1_reg, cnt_dwords_reg, 1); // number of double dwords
aoqi@0 763 __ load_const_optimized(zero_reg, 0L); // Use as zero register.
aoqi@0 764
aoqi@0 765 __ cmpdi(CCR1, tmp2_reg, 0); // cnt_dwords even?
aoqi@0 766 __ beq(CCR0, lastdword); // size <= 1
aoqi@0 767 __ mtctr(tmp1_reg); // Speculatively preload counter for rest loop (>0).
aoqi@0 768 __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
aoqi@0 769 __ neg(tmp1_reg, base_ptr_reg); // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
aoqi@0 770
aoqi@0 771 __ blt(CCR0, restloop); // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
aoqi@0 772 __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
aoqi@0 773
aoqi@0 774 __ beq(CCR0, fast); // already 128byte aligned
aoqi@0 775 __ mtctr(tmp1_reg); // Set ctr to hit 128byte boundary (0<ctr<cnt).
aoqi@0 776 __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
aoqi@0 777
aoqi@0 778 // Clear in first cache line dword-by-dword if not already 128byte aligned.
aoqi@0 779 __ bind(dwloop);
aoqi@0 780 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block.
aoqi@0 781 __ addi(base_ptr_reg, base_ptr_reg, 8);
aoqi@0 782 __ bdnz(dwloop);
aoqi@0 783
aoqi@0 784 // clear 128byte blocks
aoqi@0 785 __ bind(fast);
aoqi@0 786 __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
aoqi@0 787 __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if rest even
aoqi@0 788
aoqi@0 789 __ mtctr(tmp1_reg); // load counter
aoqi@0 790 __ cmpdi(CCR1, tmp2_reg, 0); // rest even?
aoqi@0 791 __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
aoqi@0 792
aoqi@0 793 __ bind(fastloop);
aoqi@0 794 __ dcbz(base_ptr_reg); // Clear 128byte aligned block.
aoqi@0 795 __ addi(base_ptr_reg, base_ptr_reg, cl_size);
aoqi@0 796 __ bdnz(fastloop);
aoqi@0 797
aoqi@0 798 //__ dcbtst(base_ptr_reg); // Indicate write access to last cache line.
aoqi@0 799 __ beq(CCR0, lastdword); // rest<=1
aoqi@0 800 __ mtctr(tmp1_reg); // load counter
aoqi@0 801
aoqi@0 802 // Clear rest.
aoqi@0 803 __ bind(restloop);
aoqi@0 804 __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block.
aoqi@0 805 __ std(zero_reg, 8, base_ptr_reg); // Clear 8byte aligned block.
aoqi@0 806 __ addi(base_ptr_reg, base_ptr_reg, 16);
aoqi@0 807 __ bdnz(restloop);
aoqi@0 808
aoqi@0 809 __ bind(lastdword);
aoqi@0 810 __ beq(CCR1, done);
aoqi@0 811 __ std(zero_reg, 0, base_ptr_reg);
aoqi@0 812 __ bind(done);
aoqi@0 813 __ blr(); // return
aoqi@0 814
aoqi@0 815 return start;
aoqi@0 816 }
aoqi@0 817
aoqi@0 818 // The following routine generates a subroutine to throw an asynchronous
aoqi@0 819 // UnknownError when an unsafe access gets a fault that could not be
aoqi@0 820 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
aoqi@0 821 //
aoqi@0 822 address generate_handler_for_unsafe_access() {
aoqi@0 823 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
aoqi@0 824 address start = __ function_entry();
aoqi@0 825 __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
aoqi@0 826 return start;
aoqi@0 827 }
aoqi@0 828
aoqi@0 829 #if !defined(PRODUCT)
aoqi@0 830 // Wrapper which calls oopDesc::is_oop_or_null()
aoqi@0 831 // Only called by MacroAssembler::verify_oop
aoqi@0 832 static void verify_oop_helper(const char* message, oop o) {
aoqi@0 833 if (!o->is_oop_or_null()) {
aoqi@0 834 fatal(message);
aoqi@0 835 }
aoqi@0 836 ++ StubRoutines::_verify_oop_count;
aoqi@0 837 }
aoqi@0 838 #endif
aoqi@0 839
aoqi@0 840 // Return address of code to be called from code generated by
aoqi@0 841 // MacroAssembler::verify_oop.
aoqi@0 842 //
aoqi@0 843 // Don't generate, rather use C++ code.
aoqi@0 844 address generate_verify_oop() {
aoqi@0 845 StubCodeMark mark(this, "StubRoutines", "verify_oop");
aoqi@0 846
aoqi@0 847 // this is actually a `FunctionDescriptor*'.
aoqi@0 848 address start = 0;
aoqi@0 849
aoqi@0 850 #if !defined(PRODUCT)
aoqi@0 851 start = CAST_FROM_FN_PTR(address, verify_oop_helper);
aoqi@0 852 #endif
aoqi@0 853
aoqi@0 854 return start;
aoqi@0 855 }
aoqi@0 856
aoqi@0 857 // Fairer handling of safepoints for native methods.
aoqi@0 858 //
aoqi@0 859 // Generate code which reads from the polling page. This special handling is needed as the
aoqi@0 860 // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
aoqi@0 861 // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
aoqi@0 862 // to read from the safepoint polling page.
aoqi@0 863 address generate_load_from_poll() {
aoqi@0 864 StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
aoqi@0 865 address start = __ function_entry();
aoqi@0 866 __ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port
aoqi@0 867 return start;
aoqi@0 868 }
aoqi@0 869
aoqi@0 870 // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
aoqi@0 871 //
aoqi@0 872 // The code is implemented (ported from SPARC) because we believe it benefits JVM98; however,
aoqi@0 873 // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
aoqi@0 874 //
aoqi@0 875 // The source code of is_range_check_if() shows that OptimizeFill relaxes the condition
aoqi@0 876 // for turning on loop predication, so the behavior of "array range check"
aoqi@0 877 // and "loop invariant check" can be influenced, which potentially boosted JVM98.
aoqi@0 878 //
aoqi@0 879 // Generate stub for array fill (bytes, shorts, or ints). If "aligned" is true, the
aoqi@0 880 // "to" address is assumed to be heapword aligned.
aoqi@0 881 //
aoqi@0 882 // Arguments for generated stub:
aoqi@0 883 // to: R3_ARG1
aoqi@0 884 // value: R4_ARG2
aoqi@0 885 // count: R5_ARG3 treated as signed
aoqi@0 886 //
aoqi@0 887 address generate_fill(BasicType t, bool aligned, const char* name) {
aoqi@0 888 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 889 address start = __ function_entry();
aoqi@0 890
aoqi@0 891 const Register to = R3_ARG1; // destination array address
aoqi@0 892 const Register value = R4_ARG2; // fill value
aoqi@0 893 const Register count = R5_ARG3; // elements count
aoqi@0 894 const Register temp = R6_ARG4; // temp register
aoqi@0 895
aoqi@0 896 //assert_clean_int(count, O3); // Make sure 'count' is clean int.
aoqi@0 897
aoqi@0 898 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
aoqi@0 899 Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
aoqi@0 900
aoqi@0 901 int shift = -1;
aoqi@0 902 switch (t) {
aoqi@0 903 case T_BYTE:
aoqi@0 904 shift = 2;
aoqi@0 905 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
aoqi@0 906 __ rldimi(value, value, 8, 48); // 8 bit -> 16 bit
aoqi@0 907 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
aoqi@0 908 __ blt(CCR0, L_fill_elements);
aoqi@0 909 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit
aoqi@0 910 break;
aoqi@0 911 case T_SHORT:
aoqi@0 912 shift = 1;
aoqi@0 913 // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
aoqi@0 914 __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit
aoqi@0 915 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
aoqi@0 916 __ blt(CCR0, L_fill_elements);
aoqi@0 917 break;
aoqi@0 918 case T_INT:
aoqi@0 919 shift = 0;
aoqi@0 920 __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
aoqi@0 921 __ blt(CCR0, L_fill_4_bytes);
aoqi@0 922 break;
aoqi@0 923 default: ShouldNotReachHere();
aoqi@0 924 }
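    // Value replication performed by the rldimi sequence above, as a worked
    // example (T_BYTE, value == 0xAB): 0xAB -> 0xABAB -> 0xABABABAB; the later
    // rldimi(value, value, 32, 0) widens it to 0xABABABABABABABAB so that the
    // 8-byte stores below fill whole dwords with the byte pattern.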
aoqi@0 925
aoqi@0 926 if (!aligned && (t == T_BYTE || t == T_SHORT)) {
aoqi@0 927 // Align the destination address to a 4-byte boundary.
aoqi@0 928 if (t == T_BYTE) {
aoqi@0 929 // One byte misalignment happens only for byte arrays.
aoqi@0 930 __ andi_(temp, to, 1);
aoqi@0 931 __ beq(CCR0, L_skip_align1);
aoqi@0 932 __ stb(value, 0, to);
aoqi@0 933 __ addi(to, to, 1);
aoqi@0 934 __ addi(count, count, -1);
aoqi@0 935 __ bind(L_skip_align1);
aoqi@0 936 }
aoqi@0 937 // Two bytes misalignment happens only for byte and short (char) arrays.
aoqi@0 938 __ andi_(temp, to, 2);
aoqi@0 939 __ beq(CCR0, L_skip_align2);
aoqi@0 940 __ sth(value, 0, to);
aoqi@0 941 __ addi(to, to, 2);
aoqi@0 942 __ addi(count, count, -(1 << (shift - 1)));
aoqi@0 943 __ bind(L_skip_align2);
aoqi@0 944 }
aoqi@0 945
aoqi@0 946 if (!aligned) {
aoqi@0 947 // Align to 8 bytes, we know we are 4 byte aligned to start.
aoqi@0 948 __ andi_(temp, to, 7);
aoqi@0 949 __ beq(CCR0, L_fill_32_bytes);
aoqi@0 950 __ stw(value, 0, to);
aoqi@0 951 __ addi(to, to, 4);
aoqi@0 952 __ addi(count, count, -(1 << shift));
aoqi@0 953 __ bind(L_fill_32_bytes);
aoqi@0 954 }
aoqi@0 955
aoqi@0 956 __ li(temp, 8<<shift); // Prepare for 32 byte loop.
aoqi@0 957 // Clone bytes int->long as above.
aoqi@0 958 __ rldimi(value, value, 32, 0); // 32 bit -> 64 bit
aoqi@0 959
aoqi@0 960 Label L_check_fill_8_bytes;
aoqi@0 961 // Fill 32-byte chunks.
aoqi@0 962 __ subf_(count, temp, count);
aoqi@0 963 __ blt(CCR0, L_check_fill_8_bytes);
aoqi@0 964
aoqi@0 965 Label L_fill_32_bytes_loop;
aoqi@0 966 __ align(32);
aoqi@0 967 __ bind(L_fill_32_bytes_loop);
aoqi@0 968
aoqi@0 969 __ std(value, 0, to);
aoqi@0 970 __ std(value, 8, to);
aoqi@0 971 __ subf_(count, temp, count); // Update count.
aoqi@0 972 __ std(value, 16, to);
aoqi@0 973 __ std(value, 24, to);
aoqi@0 974
aoqi@0 975 __ addi(to, to, 32);
aoqi@0 976 __ bge(CCR0, L_fill_32_bytes_loop);
aoqi@0 977
aoqi@0 978 __ bind(L_check_fill_8_bytes);
aoqi@0 979 __ add_(count, temp, count);
aoqi@0 980 __ beq(CCR0, L_exit);
aoqi@0 981 __ addic_(count, count, -(2 << shift));
aoqi@0 982 __ blt(CCR0, L_fill_4_bytes);
aoqi@0 983
aoqi@0 984 //
aoqi@0 985 // Length is too short, just fill 8 bytes at a time.
aoqi@0 986 //
aoqi@0 987 Label L_fill_8_bytes_loop;
aoqi@0 988 __ bind(L_fill_8_bytes_loop);
aoqi@0 989 __ std(value, 0, to);
aoqi@0 990 __ addic_(count, count, -(2 << shift));
aoqi@0 991 __ addi(to, to, 8);
aoqi@0 992 __ bge(CCR0, L_fill_8_bytes_loop);
aoqi@0 993
aoqi@0 994 // Fill trailing 4 bytes.
aoqi@0 995 __ bind(L_fill_4_bytes);
aoqi@0 996 __ andi_(temp, count, 1<<shift);
aoqi@0 997 __ beq(CCR0, L_fill_2_bytes);
aoqi@0 998
aoqi@0 999 __ stw(value, 0, to);
aoqi@0 1000 if (t == T_BYTE || t == T_SHORT) {
aoqi@0 1001 __ addi(to, to, 4);
aoqi@0 1002 // Fill trailing 2 bytes.
aoqi@0 1003 __ bind(L_fill_2_bytes);
aoqi@0 1004 __ andi_(temp, count, 1<<(shift-1));
aoqi@0 1005 __ beq(CCR0, L_fill_byte);
aoqi@0 1006 __ sth(value, 0, to);
aoqi@0 1007 if (t == T_BYTE) {
aoqi@0 1008 __ addi(to, to, 2);
aoqi@0 1009 // Fill trailing byte.
aoqi@0 1010 __ bind(L_fill_byte);
aoqi@0 1011 __ andi_(count, count, 1);
aoqi@0 1012 __ beq(CCR0, L_exit);
aoqi@0 1013 __ stb(value, 0, to);
aoqi@0 1014 } else {
aoqi@0 1015 __ bind(L_fill_byte);
aoqi@0 1016 }
aoqi@0 1017 } else {
aoqi@0 1018 __ bind(L_fill_2_bytes);
aoqi@0 1019 }
aoqi@0 1020 __ bind(L_exit);
aoqi@0 1021 __ blr();
aoqi@0 1022
aoqi@0 1023 // Handle copies less than 8 bytes. Int is handled elsewhere.
aoqi@0 1024 if (t == T_BYTE) {
aoqi@0 1025 __ bind(L_fill_elements);
aoqi@0 1026 Label L_fill_2, L_fill_4;
aoqi@0 1027 __ andi_(temp, count, 1);
aoqi@0 1028 __ beq(CCR0, L_fill_2);
aoqi@0 1029 __ stb(value, 0, to);
aoqi@0 1030 __ addi(to, to, 1);
aoqi@0 1031 __ bind(L_fill_2);
aoqi@0 1032 __ andi_(temp, count, 2);
aoqi@0 1033 __ beq(CCR0, L_fill_4);
aoqi@0 1034 __ stb(value, 0, to);
aoqi@0 1035 __ stb(value, 1, to);
aoqi@0 1036 __ addi(to, to, 2);
aoqi@0 1037 __ bind(L_fill_4);
aoqi@0 1038 __ andi_(temp, count, 4);
aoqi@0 1039 __ beq(CCR0, L_exit);
aoqi@0 1040 __ stb(value, 0, to);
aoqi@0 1041 __ stb(value, 1, to);
aoqi@0 1042 __ stb(value, 2, to);
aoqi@0 1043 __ stb(value, 3, to);
aoqi@0 1044 __ blr();
aoqi@0 1045 }
aoqi@0 1046
aoqi@0 1047 if (t == T_SHORT) {
aoqi@0 1048 Label L_fill_2;
aoqi@0 1049 __ bind(L_fill_elements);
aoqi@0 1050 __ andi_(temp, count, 1);
aoqi@0 1051 __ beq(CCR0, L_fill_2);
aoqi@0 1052 __ sth(value, 0, to);
aoqi@0 1053 __ addi(to, to, 2);
aoqi@0 1054 __ bind(L_fill_2);
aoqi@0 1055 __ andi_(temp, count, 2);
aoqi@0 1056 __ beq(CCR0, L_exit);
aoqi@0 1057 __ sth(value, 0, to);
aoqi@0 1058 __ sth(value, 2, to);
aoqi@0 1059 __ blr();
aoqi@0 1060 }
aoqi@0 1061 return start;
aoqi@0 1062 }
aoqi@0 1063
aoqi@0 1064
aoqi@0 1065 // Generate overlap test for array copy stubs.
aoqi@0 1066 //
aoqi@0 1067 // Input:
aoqi@0 1068 // R3_ARG1 - from
aoqi@0 1069 // R4_ARG2 - to
aoqi@0 1070 // R5_ARG3 - element count
aoqi@0 1071 //
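  // The generated test is equivalent to the following C sketch:
  //
  //   size_t byte_count = (size_t)count << log2_elem_size;
  //   if (from < to && (size_t)(to - from) < byte_count)
  //     goto l_overlap;          // dst overlaps the tail of src: copy backwards
  //   else
  //     goto no_overlap_target;  // safe to copy forwards
  //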
aoqi@0 1072 void array_overlap_test(address no_overlap_target, int log2_elem_size) {
aoqi@0 1073 Register tmp1 = R6_ARG4;
aoqi@0 1074 Register tmp2 = R7_ARG5;
aoqi@0 1075
aoqi@0 1076 Label l_overlap;
aoqi@0 1077 #ifdef ASSERT
aoqi@0 1078 __ srdi_(tmp2, R5_ARG3, 31);
aoqi@0 1079 __ asm_assert_eq("missing zero extend", 0xAFFE);
aoqi@0 1080 #endif
aoqi@0 1081
aoqi@0 1082 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
aoqi@0 1083 __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
aoqi@0 1084 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
aoqi@0 1085 __ cmpld(CCR1, tmp1, tmp2);
aoqi@0 1086 __ crand(/*CCR0 lt*/0, /*CCR1 lt*/4+0, /*CCR0 lt*/0);
aoqi@0 1087 __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size.
aoqi@0 1088
aoqi@0 1089 // need to copy forwards
aoqi@0 1090 if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
aoqi@0 1091 __ b(no_overlap_target);
aoqi@0 1092 } else {
aoqi@0 1093 __ load_const(tmp1, no_overlap_target, tmp2);
aoqi@0 1094 __ mtctr(tmp1);
aoqi@0 1095 __ bctr();
aoqi@0 1096 }
aoqi@0 1097
aoqi@0 1098 __ bind(l_overlap);
aoqi@0 1099 // need to copy backwards
aoqi@0 1100 }
aoqi@0 1101
aoqi@0 1102 // The guideline in the implementations of generate_disjoint_xxx_copy
aoqi@0 1103 // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
aoqi@0 1104 // single instructions, but to avoid alignment interrupts (see subsequent
aoqi@0 1105 // comment). Furthermore, we try to minimize misaligned accesses, even
aoqi@0 1106 // though they cause no alignment interrupt.
aoqi@0 1107 //
aoqi@0 1108 // In Big-Endian mode, the PowerPC architecture requires implementations to
aoqi@0 1109 // handle automatically misaligned integer halfword and word accesses,
aoqi@0 1110 // word-aligned integer doubleword accesses, and word-aligned floating-point
aoqi@0 1111 // accesses. Other accesses may or may not generate an Alignment interrupt
aoqi@0 1112 // depending on the implementation.
aoqi@0 1113 // Alignment interrupt handling may require on the order of hundreds of cycles,
aoqi@0 1114 // so every effort should be made to avoid misaligned memory values.
aoqi@0 1115 //
aoqi@0 1116 //
aoqi@0 1117 // Generate stub for disjoint byte copy. If "aligned" is true, the
aoqi@0 1118 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1119 //
aoqi@0 1120 // Arguments for generated stub:
aoqi@0 1121 // from: R3_ARG1
aoqi@0 1122 // to: R4_ARG2
aoqi@0 1123 // count: R5_ARG3 treated as signed
aoqi@0 1124 //
aoqi@0 1125 address generate_disjoint_byte_copy(bool aligned, const char * name) {
aoqi@0 1126 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1127 address start = __ function_entry();
aoqi@0 1128
aoqi@0 1129 Register tmp1 = R6_ARG4;
aoqi@0 1130 Register tmp2 = R7_ARG5;
aoqi@0 1131 Register tmp3 = R8_ARG6;
aoqi@0 1132 Register tmp4 = R9_ARG7;
aoqi@0 1133
aoqi@0 1134
aoqi@0 1135 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
aoqi@0 1136 // Don't try anything fancy if arrays don't have many elements.
aoqi@0 1137 __ li(tmp3, 0);
aoqi@0 1138 __ cmpwi(CCR0, R5_ARG3, 17);
aoqi@0 1139 __ ble(CCR0, l_6); // copy 4 at a time
aoqi@0 1140
aoqi@0 1141 if (!aligned) {
aoqi@0 1142 __ xorr(tmp1, R3_ARG1, R4_ARG2);
aoqi@0 1143 __ andi_(tmp1, tmp1, 3);
aoqi@0 1144 __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
aoqi@0 1145
aoqi@0 1146 // Copy elements if necessary to align to 4 bytes.
aoqi@0 1147 __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
aoqi@0 1148 __ andi_(tmp1, tmp1, 3);
aoqi@0 1149 __ beq(CCR0, l_2);
aoqi@0 1150
aoqi@0 1151 __ subf(R5_ARG3, tmp1, R5_ARG3);
aoqi@0 1152 __ bind(l_9);
aoqi@0 1153 __ lbz(tmp2, 0, R3_ARG1);
aoqi@0 1154 __ addic_(tmp1, tmp1, -1);
aoqi@0 1155 __ stb(tmp2, 0, R4_ARG2);
aoqi@0 1156 __ addi(R3_ARG1, R3_ARG1, 1);
aoqi@0 1157 __ addi(R4_ARG2, R4_ARG2, 1);
aoqi@0 1158 __ bne(CCR0, l_9);
aoqi@0 1159
aoqi@0 1160 __ bind(l_2);
aoqi@0 1161 }
aoqi@0 1162
aoqi@0 1163 // copy 8 elements at a time
aoqi@0 1164 __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
aoqi@0 1165 __ andi_(tmp1, tmp2, 7);
aoqi@0 1166 __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8
aoqi@0 1167
aoqi@0 1168 // copy a 4-element word if necessary to align to 8 bytes
aoqi@0 1169 __ andi_(R0, R3_ARG1, 7);
aoqi@0 1170 __ beq(CCR0, l_7);
aoqi@0 1171
aoqi@0 1172 __ lwzx(tmp2, R3_ARG1, tmp3);
aoqi@0 1173 __ addi(R5_ARG3, R5_ARG3, -4);
aoqi@0 1174 __ stwx(tmp2, R4_ARG2, tmp3);
aoqi@0 1175 { // FasterArrayCopy
aoqi@0 1176 __ addi(R3_ARG1, R3_ARG1, 4);
aoqi@0 1177 __ addi(R4_ARG2, R4_ARG2, 4);
aoqi@0 1178 }
aoqi@0 1179 __ bind(l_7);
aoqi@0 1180
aoqi@0 1181 { // FasterArrayCopy
aoqi@0 1182 __ cmpwi(CCR0, R5_ARG3, 31);
aoqi@0 1183 __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain
aoqi@0 1184
aoqi@0 1185 __ srdi(tmp1, R5_ARG3, 5);
aoqi@0 1186 __ andi_(R5_ARG3, R5_ARG3, 31);
aoqi@0 1187 __ mtctr(tmp1);
aoqi@0 1188
aoqi@0 1189 __ bind(l_8);
aoqi@0 1190 // Use unrolled version for mass copying (copy 32 elements a time)
aoqi@0 1191 // Load feeding store gets zero latency on Power6, however not on Power5.
aoqi@0 1192 // Therefore, the following sequence is made for the good of both.
aoqi@0 1193 __ ld(tmp1, 0, R3_ARG1);
aoqi@0 1194 __ ld(tmp2, 8, R3_ARG1);
aoqi@0 1195 __ ld(tmp3, 16, R3_ARG1);
aoqi@0 1196 __ ld(tmp4, 24, R3_ARG1);
aoqi@0 1197 __ std(tmp1, 0, R4_ARG2);
aoqi@0 1198 __ std(tmp2, 8, R4_ARG2);
aoqi@0 1199 __ std(tmp3, 16, R4_ARG2);
aoqi@0 1200 __ std(tmp4, 24, R4_ARG2);
aoqi@0 1201 __ addi(R3_ARG1, R3_ARG1, 32);
aoqi@0 1202 __ addi(R4_ARG2, R4_ARG2, 32);
aoqi@0 1203 __ bdnz(l_8);
aoqi@0 1204 }
aoqi@0 1205
aoqi@0 1206 __ bind(l_6);
aoqi@0 1207
aoqi@0 1208 // copy 4 elements at a time
aoqi@0 1209 __ cmpwi(CCR0, R5_ARG3, 4);
aoqi@0 1210 __ blt(CCR0, l_1);
aoqi@0 1211 __ srdi(tmp1, R5_ARG3, 2);
aoqi@0 1212 __ mtctr(tmp1); // is > 0
aoqi@0 1213 __ andi_(R5_ARG3, R5_ARG3, 3);
aoqi@0 1214
aoqi@0 1215 { // FasterArrayCopy
aoqi@0 1216 __ addi(R3_ARG1, R3_ARG1, -4);
aoqi@0 1217 __ addi(R4_ARG2, R4_ARG2, -4);
aoqi@0 1218 __ bind(l_3);
aoqi@0 1219 __ lwzu(tmp2, 4, R3_ARG1);
aoqi@0 1220 __ stwu(tmp2, 4, R4_ARG2);
aoqi@0 1221 __ bdnz(l_3);
aoqi@0 1222 __ addi(R3_ARG1, R3_ARG1, 4);
aoqi@0 1223 __ addi(R4_ARG2, R4_ARG2, 4);
aoqi@0 1224 }
aoqi@0 1225
aoqi@0 1226 // do single element copy
aoqi@0 1227 __ bind(l_1);
aoqi@0 1228 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1229 __ beq(CCR0, l_4);
aoqi@0 1230
aoqi@0 1231 { // FasterArrayCopy
aoqi@0 1232 __ mtctr(R5_ARG3);
aoqi@0 1233 __ addi(R3_ARG1, R3_ARG1, -1);
aoqi@0 1234 __ addi(R4_ARG2, R4_ARG2, -1);
aoqi@0 1235
aoqi@0 1236 __ bind(l_5);
aoqi@0 1237 __ lbzu(tmp2, 1, R3_ARG1);
aoqi@0 1238 __ stbu(tmp2, 1, R4_ARG2);
aoqi@0 1239 __ bdnz(l_5);
aoqi@0 1240 }
aoqi@0 1241
aoqi@0 1242 __ bind(l_4);
aoqi@0 1243 __ blr();
aoqi@0 1244
aoqi@0 1245 return start;
aoqi@0 1246 }
aoqi@0 1247
aoqi@0 1248 // Generate stub for conjoint byte copy. If "aligned" is true, the
aoqi@0 1249 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1250 //
aoqi@0 1251 // Arguments for generated stub:
aoqi@0 1252 // from: R3_ARG1
aoqi@0 1253 // to: R4_ARG2
aoqi@0 1254 // count: R5_ARG3 treated as signed
aoqi@0 1255 //
aoqi@0 1256 address generate_conjoint_byte_copy(bool aligned, const char * name) {
aoqi@0 1257 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1258 address start = __ function_entry();
aoqi@0 1259
aoqi@0 1260 Register tmp1 = R6_ARG4;
aoqi@0 1261 Register tmp2 = R7_ARG5;
aoqi@0 1262 Register tmp3 = R8_ARG6;
aoqi@0 1263
aoqi@0 1264 #if defined(ABI_ELFv2)
aoqi@0 1265 address nooverlap_target = aligned ?
aoqi@0 1266 StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
aoqi@0 1267 StubRoutines::jbyte_disjoint_arraycopy();
aoqi@0 1268 #else
aoqi@0 1269 address nooverlap_target = aligned ?
aoqi@0 1270 ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
aoqi@0 1271 ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
aoqi@0 1272 #endif
aoqi@0 1273
aoqi@0 1274 array_overlap_test(nooverlap_target, 0);
aoqi@0 1275 // Do reverse copy. We assume the case of actual overlap is rare enough
aoqi@0 1276 // that we don't have to optimize it.
aoqi@0 1277 Label l_1, l_2;
aoqi@0 1278
aoqi@0 1279 __ b(l_2);
aoqi@0 1280 __ bind(l_1);
aoqi@0 1281 __ stbx(tmp1, R4_ARG2, R5_ARG3);
aoqi@0 1282 __ bind(l_2);
aoqi@0 1283 __ addic_(R5_ARG3, R5_ARG3, -1);
aoqi@0 1284 __ lbzx(tmp1, R3_ARG1, R5_ARG3);
aoqi@0 1285 __ bge(CCR0, l_1);
aoqi@0 1286
aoqi@0 1287 __ blr();
aoqi@0 1288
aoqi@0 1289 return start;
aoqi@0 1290 }
aoqi@0 1291
aoqi@0 1292 // Generate stub for disjoint short copy. If "aligned" is true, the
aoqi@0 1293 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1294 //
aoqi@0 1295 // Arguments for generated stub:
aoqi@0 1296 // from: R3_ARG1
aoqi@0 1297 // to: R4_ARG2
aoqi@0 1298 // elm.count: R5_ARG3 treated as signed
aoqi@0 1299 //
aoqi@0 1300 // Strategy for aligned==true:
aoqi@0 1301 //
aoqi@0 1302 // If length <= 9:
aoqi@0 1303 // 1. copy 2 elements at a time (l_6)
aoqi@0 1304 // 2. copy last element if original element count was odd (l_1)
aoqi@0 1305 //
aoqi@0 1306 // If length > 9:
aoqi@0 1307 // 1. copy 4 elements at a time until less than 4 elements are left (l_7)
aoqi@0 1308 // 2. copy 2 elements at a time until less than 2 elements are left (l_6)
aoqi@0 1309 // 3. copy last element if one was left in step 2. (l_1)
aoqi@0 1310 //
aoqi@0 1311 //
aoqi@0 1312 // Strategy for aligned==false:
aoqi@0 1313 //
aoqi@0 1314 // If length <= 9: same as aligned==true case, but NOTE: load/stores
aoqi@0 1315 // can be unaligned (see comment below)
aoqi@0 1316 //
aoqi@0 1317 // If length > 9:
aoqi@0 1318 // 1. continue with step 6. if the alignment of from and to mod 4
aoqi@0 1319 // is different.
aoqi@0 1320 // 2. align from and to to 4 bytes by copying 1 element if necessary
aoqi@0 1321 // 3. at l_2 from and to are 4 byte aligned; continue with
aoqi@0 1322 // 5. if they cannot be aligned to 8 bytes because they have
aoqi@0 1323 // got different alignment mod 8.
aoqi@0 1324 // 4. at this point we know that both, from and to, have the same
aoqi@0 1325 // alignment mod 8, now copy one element if necessary to get
aoqi@0 1326 // 8 byte alignment of from and to.
aoqi@0 1327 // 5. copy 4 elements at a time until less than 4 elements are
aoqi@0 1328 // left; depending on step 3. all load/stores are aligned or
aoqi@0 1329 // either all loads or all stores are unaligned.
aoqi@0 1330 // 6. copy 2 elements at a time until less than 2 elements are
aoqi@0 1331 // left (l_6); arriving here from step 1., there is a chance
aoqi@0 1332 // that all accesses are unaligned.
aoqi@0 1333 // 7. copy last element if one was left in step 6. (l_1)
aoqi@0 1334 //
aoqi@0 1335 // There are unaligned data accesses using integer load/store
aoqi@0 1336 // instructions in this stub. POWER allows such accesses.
aoqi@0 1337 //
aoqi@0 1338 // According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
aoqi@0 1339 // Chapter 2: Effect of Operand Placement on Performance) unaligned
aoqi@0 1340 // integer load/stores have good performance. Only unaligned
aoqi@0 1341 // floating point load/stores can have poor performance.
aoqi@0 1342 //
aoqi@0 1343 // TODO:
aoqi@0 1344 //
aoqi@0 1345 // 1. check if aligning the backbranch target of loops is beneficial
aoqi@0 1346 //
aoqi@0 1347 address generate_disjoint_short_copy(bool aligned, const char * name) {
aoqi@0 1348 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1349
aoqi@0 1350 Register tmp1 = R6_ARG4;
aoqi@0 1351 Register tmp2 = R7_ARG5;
aoqi@0 1352 Register tmp3 = R8_ARG6;
aoqi@0 1353 Register tmp4 = R9_ARG7;
aoqi@0 1354
aoqi@0 1355 address start = __ function_entry();
aoqi@0 1356
aoqi@0 1357 Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
aoqi@0 1358 // don't try anything fancy if arrays don't have many elements
aoqi@0 1359 __ li(tmp3, 0);
aoqi@0 1360 __ cmpwi(CCR0, R5_ARG3, 9);
aoqi@0 1361 __ ble(CCR0, l_6); // copy 2 at a time
aoqi@0 1362
aoqi@0 1363 if (!aligned) {
aoqi@0 1364 __ xorr(tmp1, R3_ARG1, R4_ARG2);
aoqi@0 1365 __ andi_(tmp1, tmp1, 3);
aoqi@0 1366 __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
aoqi@0 1367
aoqi@0 1368 // At this point it is guaranteed that both, from and to have the same alignment mod 4.
aoqi@0 1369
aoqi@0 1370 // Copy 1 element if necessary to align to 4 bytes.
aoqi@0 1371 __ andi_(tmp1, R3_ARG1, 3);
aoqi@0 1372 __ beq(CCR0, l_2);
aoqi@0 1373
aoqi@0 1374 __ lhz(tmp2, 0, R3_ARG1);
aoqi@0 1375 __ addi(R3_ARG1, R3_ARG1, 2);
aoqi@0 1376 __ sth(tmp2, 0, R4_ARG2);
aoqi@0 1377 __ addi(R4_ARG2, R4_ARG2, 2);
aoqi@0 1378 __ addi(R5_ARG3, R5_ARG3, -1);
aoqi@0 1379 __ bind(l_2);
aoqi@0 1380
aoqi@0 1381 // At this point the positions of both, from and to, are at least 4 byte aligned.
aoqi@0 1382
aoqi@0 1383 // Copy 4 elements at a time.
aoqi@0 1384 // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
aoqi@0 1385 __ xorr(tmp2, R3_ARG1, R4_ARG2);
aoqi@0 1386 __ andi_(tmp1, tmp2, 7);
aoqi@0 1387 __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
aoqi@0 1388
aoqi@0 1389 // Copy a 2-element word if necessary to align to 8 bytes.
aoqi@0 1390 __ andi_(R0, R3_ARG1, 7);
aoqi@0 1391 __ beq(CCR0, l_7);
aoqi@0 1392
aoqi@0 1393 __ lwzx(tmp2, R3_ARG1, tmp3);
aoqi@0 1394 __ addi(R5_ARG3, R5_ARG3, -2);
aoqi@0 1395 __ stwx(tmp2, R4_ARG2, tmp3);
aoqi@0 1396 { // FasterArrayCopy
aoqi@0 1397 __ addi(R3_ARG1, R3_ARG1, 4);
aoqi@0 1398 __ addi(R4_ARG2, R4_ARG2, 4);
aoqi@0 1399 }
aoqi@0 1400 }
aoqi@0 1401
aoqi@0 1402 __ bind(l_7);
aoqi@0 1403
aoqi@0 1404 // Copy 16 elements (4 doublewords) at a time; either the loads or the
aoqi@0 1405 // stores can be unaligned if aligned == false.
aoqi@0 1406
aoqi@0 1407 { // FasterArrayCopy
aoqi@0 1408 __ cmpwi(CCR0, R5_ARG3, 15);
aoqi@0 1409 __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
aoqi@0 1410
aoqi@0 1411 __ srdi(tmp1, R5_ARG3, 4);
aoqi@0 1412 __ andi_(R5_ARG3, R5_ARG3, 15);
aoqi@0 1413 __ mtctr(tmp1);
aoqi@0 1414
aoqi@0 1415 __ bind(l_8);
aoqi@0 1416 // Use unrolled version for mass copying (copy 16 elements at a time).
aoqi@0 1417 // Load feeding store gets zero latency on Power6, however not on Power5.
aoqi@0 1418 // Therefore, the following sequence is made for the good of both.
aoqi@0 1419 __ ld(tmp1, 0, R3_ARG1);
aoqi@0 1420 __ ld(tmp2, 8, R3_ARG1);
aoqi@0 1421 __ ld(tmp3, 16, R3_ARG1);
aoqi@0 1422 __ ld(tmp4, 24, R3_ARG1);
aoqi@0 1423 __ std(tmp1, 0, R4_ARG2);
aoqi@0 1424 __ std(tmp2, 8, R4_ARG2);
aoqi@0 1425 __ std(tmp3, 16, R4_ARG2);
aoqi@0 1426 __ std(tmp4, 24, R4_ARG2);
aoqi@0 1427 __ addi(R3_ARG1, R3_ARG1, 32);
aoqi@0 1428 __ addi(R4_ARG2, R4_ARG2, 32);
aoqi@0 1429 __ bdnz(l_8);
aoqi@0 1430 }
aoqi@0 1431 __ bind(l_6);
aoqi@0 1432
aoqi@0 1433 // copy 2 elements at a time
aoqi@0 1434 { // FasterArrayCopy
aoqi@0 1435 __ cmpwi(CCR0, R5_ARG3, 2);
aoqi@0 1436 __ blt(CCR0, l_1);
aoqi@0 1437 __ srdi(tmp1, R5_ARG3, 1);
aoqi@0 1438 __ andi_(R5_ARG3, R5_ARG3, 1);
aoqi@0 1439
aoqi@0 1440 __ addi(R3_ARG1, R3_ARG1, -4);
aoqi@0 1441 __ addi(R4_ARG2, R4_ARG2, -4);
aoqi@0 1442 __ mtctr(tmp1);
aoqi@0 1443
aoqi@0 1444 __ bind(l_3);
aoqi@0 1445 __ lwzu(tmp2, 4, R3_ARG1);
aoqi@0 1446 __ stwu(tmp2, 4, R4_ARG2);
aoqi@0 1447 __ bdnz(l_3);
aoqi@0 1448
aoqi@0 1449 __ addi(R3_ARG1, R3_ARG1, 4);
aoqi@0 1450 __ addi(R4_ARG2, R4_ARG2, 4);
aoqi@0 1451 }
aoqi@0 1452
aoqi@0 1453 // do single element copy
aoqi@0 1454 __ bind(l_1);
aoqi@0 1455 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1456 __ beq(CCR0, l_4);
aoqi@0 1457
aoqi@0 1458 { // FasterArrayCopy
aoqi@0 1459 __ mtctr(R5_ARG3);
aoqi@0 1460 __ addi(R3_ARG1, R3_ARG1, -2);
aoqi@0 1461 __ addi(R4_ARG2, R4_ARG2, -2);
aoqi@0 1462
aoqi@0 1463 __ bind(l_5);
aoqi@0 1464 __ lhzu(tmp2, 2, R3_ARG1);
aoqi@0 1465 __ sthu(tmp2, 2, R4_ARG2);
aoqi@0 1466 __ bdnz(l_5);
aoqi@0 1467 }
aoqi@0 1468 __ bind(l_4);
aoqi@0 1469 __ blr();
aoqi@0 1470
aoqi@0 1471 return start;
aoqi@0 1472 }
aoqi@0 1473
aoqi@0 1474 // Generate stub for conjoint short copy. If "aligned" is true, the
aoqi@0 1475 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1476 //
aoqi@0 1477 // Arguments for generated stub:
aoqi@0 1478 // from: R3_ARG1
aoqi@0 1479 // to: R4_ARG2
aoqi@0 1480 // count: R5_ARG3 treated as signed
aoqi@0 1481 //
aoqi@0 1482 address generate_conjoint_short_copy(bool aligned, const char * name) {
aoqi@0 1483 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1484 address start = __ function_entry();
aoqi@0 1485
aoqi@0 1486 Register tmp1 = R6_ARG4;
aoqi@0 1487 Register tmp2 = R7_ARG5;
aoqi@0 1488 Register tmp3 = R8_ARG6;
aoqi@0 1489
aoqi@0 1490 #if defined(ABI_ELFv2)
aoqi@0 1491 address nooverlap_target = aligned ?
aoqi@0 1492 StubRoutines::arrayof_jshort_disjoint_arraycopy() :
aoqi@0 1493 StubRoutines::jshort_disjoint_arraycopy();
aoqi@0 1494 #else
aoqi@0 1495 address nooverlap_target = aligned ?
aoqi@0 1496 ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
aoqi@0 1497 ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
aoqi@0 1498 #endif
aoqi@0 1499
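// Branch to the disjoint stub above if the source and destination ranges do
// not actually overlap; the second argument is log2 of the element size
// (1 for jshorts).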
aoqi@0 1500 array_overlap_test(nooverlap_target, 1);
aoqi@0 1501
aoqi@0 1502 Label l_1, l_2;
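// Copy backwards, one halfword at a time. tmp1 is pre-decremented to the
// byte offset of the element to copy; the loop exits once it goes negative
// (the final lhzx, issued at offset -2, is simply never stored).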
aoqi@0 1503 __ sldi(tmp1, R5_ARG3, 1);
aoqi@0 1504 __ b(l_2);
aoqi@0 1505 __ bind(l_1);
aoqi@0 1506 __ sthx(tmp2, R4_ARG2, tmp1);
aoqi@0 1507 __ bind(l_2);
aoqi@0 1508 __ addic_(tmp1, tmp1, -2);
aoqi@0 1509 __ lhzx(tmp2, R3_ARG1, tmp1);
aoqi@0 1510 __ bge(CCR0, l_1);
aoqi@0 1511
aoqi@0 1512 __ blr();
aoqi@0 1513
aoqi@0 1514 return start;
aoqi@0 1515 }
aoqi@0 1516
aoqi@0 1517 // Generate core code for disjoint int copy (and oop copy on 32-bit). If "aligned"
aoqi@0 1518 // is true, the "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1519 //
aoqi@0 1520 // Arguments:
aoqi@0 1521 // from: R3_ARG1
aoqi@0 1522 // to: R4_ARG2
aoqi@0 1523 // count: R5_ARG3 treated as signed
aoqi@0 1524 //
aoqi@0 1525 void generate_disjoint_int_copy_core(bool aligned) {
aoqi@0 1526 Register tmp1 = R6_ARG4;
aoqi@0 1527 Register tmp2 = R7_ARG5;
aoqi@0 1528 Register tmp3 = R8_ARG6;
aoqi@0 1529 Register tmp4 = R0;
aoqi@0 1530
aoqi@0 1531 Label l_1, l_2, l_3, l_4, l_5, l_6;
aoqi@0 1532 // for short arrays, just do single element copy
aoqi@0 1533 __ li(tmp3, 0);
aoqi@0 1534 __ cmpwi(CCR0, R5_ARG3, 5);
aoqi@0 1535 __ ble(CCR0, l_2);
aoqi@0 1536
aoqi@0 1537 if (!aligned) {
aoqi@0 1538 // check if arrays have same alignment mod 8.
aoqi@0 1539 __ xorr(tmp1, R3_ARG1, R4_ARG2);
aoqi@0 1540 __ andi_(R0, tmp1, 7);
aoqi@0 1541 // Not the same alignment mod 8, but ld and std only need 4 byte alignment.
aoqi@0 1542 __ bne(CCR0, l_4); // alignment differs mod 8 -> skip the 8 byte alignment step
aoqi@0 1543
aoqi@0 1544 // copy 1 element to align from and to to an 8 byte boundary
aoqi@0 1545 __ andi_(R0, R3_ARG1, 7);
aoqi@0 1546 __ beq(CCR0, l_4);
aoqi@0 1547
aoqi@0 1548 __ lwzx(tmp2, R3_ARG1, tmp3);
aoqi@0 1549 __ addi(R5_ARG3, R5_ARG3, -1);
aoqi@0 1550 __ stwx(tmp2, R4_ARG2, tmp3);
aoqi@0 1551 { // FasterArrayCopy
aoqi@0 1552 __ addi(R3_ARG1, R3_ARG1, 4);
aoqi@0 1553 __ addi(R4_ARG2, R4_ARG2, 4);
aoqi@0 1554 }
aoqi@0 1555 __ bind(l_4);
aoqi@0 1556 }
aoqi@0 1557
aoqi@0 1558 { // FasterArrayCopy
aoqi@0 1559 __ cmpwi(CCR0, R5_ARG3, 7);
aoqi@0 1560 __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
aoqi@0 1561
aoqi@0 1562 __ srdi(tmp1, R5_ARG3, 3);
aoqi@0 1563 __ andi_(R5_ARG3, R5_ARG3, 7);
aoqi@0 1564 __ mtctr(tmp1);
aoqi@0 1565
aoqi@0 1566 __ bind(l_6);
aoqi@0 1567 // Use unrolled version for mass copying (copy 8 elements at a time).
aoqi@0 1568 // Load feeding store gets zero latency on Power6, however not on Power5.
aoqi@0 1569 // Therefore, the following sequence is made for the good of both.
aoqi@0 1570 __ ld(tmp1, 0, R3_ARG1);
aoqi@0 1571 __ ld(tmp2, 8, R3_ARG1);
aoqi@0 1572 __ ld(tmp3, 16, R3_ARG1);
aoqi@0 1573 __ ld(tmp4, 24, R3_ARG1);
aoqi@0 1574 __ std(tmp1, 0, R4_ARG2);
aoqi@0 1575 __ std(tmp2, 8, R4_ARG2);
aoqi@0 1576 __ std(tmp3, 16, R4_ARG2);
aoqi@0 1577 __ std(tmp4, 24, R4_ARG2);
aoqi@0 1578 __ addi(R3_ARG1, R3_ARG1, 32);
aoqi@0 1579 __ addi(R4_ARG2, R4_ARG2, 32);
aoqi@0 1580 __ bdnz(l_6);
aoqi@0 1581 }
aoqi@0 1582
aoqi@0 1583 // copy 1 element at a time
aoqi@0 1584 __ bind(l_2);
aoqi@0 1585 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1586 __ beq(CCR0, l_1);
aoqi@0 1587
aoqi@0 1588 { // FasterArrayCopy
aoqi@0 1589 __ mtctr(R5_ARG3);
aoqi@0 1590 __ addi(R3_ARG1, R3_ARG1, -4);
aoqi@0 1591 __ addi(R4_ARG2, R4_ARG2, -4);
aoqi@0 1592
aoqi@0 1593 __ bind(l_3);
aoqi@0 1594 __ lwzu(tmp2, 4, R3_ARG1);
aoqi@0 1595 __ stwu(tmp2, 4, R4_ARG2);
aoqi@0 1596 __ bdnz(l_3);
aoqi@0 1597 }
aoqi@0 1598
aoqi@0 1599 __ bind(l_1);
aoqi@0 1600 return;
aoqi@0 1601 }
aoqi@0 1602
aoqi@0 1603 // Generate stub for disjoint int copy. If "aligned" is true, the
aoqi@0 1604 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1605 //
aoqi@0 1606 // Arguments for generated stub:
aoqi@0 1607 // from: R3_ARG1
aoqi@0 1608 // to: R4_ARG2
aoqi@0 1609 // count: R5_ARG3 treated as signed
aoqi@0 1610 //
aoqi@0 1611 address generate_disjoint_int_copy(bool aligned, const char * name) {
aoqi@0 1612 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1613 address start = __ function_entry();
aoqi@0 1614 generate_disjoint_int_copy_core(aligned);
aoqi@0 1615 __ blr();
aoqi@0 1616 return start;
aoqi@0 1617 }
aoqi@0 1618
aoqi@0 1619 // Generate core code for conjoint int copy (and oop copy on
aoqi@0 1620 // 32-bit). If "aligned" is true, the "from" and "to" addresses
aoqi@0 1621 // are assumed to be heapword aligned.
aoqi@0 1622 //
aoqi@0 1623 // Arguments:
aoqi@0 1624 // from: R3_ARG1
aoqi@0 1625 // to: R4_ARG2
aoqi@0 1626 // count: R5_ARG3 treated as signed
aoqi@0 1627 //
aoqi@0 1628 void generate_conjoint_int_copy_core(bool aligned) {
aoqi@0 1629 // Do reverse copy. We assume the case of actual overlap is rare enough
aoqi@0 1630 // that we don't have to optimize it.
aoqi@0 1631
aoqi@0 1632 Label l_1, l_2, l_3, l_4, l_5, l_6;
aoqi@0 1633
aoqi@0 1634 Register tmp1 = R6_ARG4;
aoqi@0 1635 Register tmp2 = R7_ARG5;
aoqi@0 1636 Register tmp3 = R8_ARG6;
aoqi@0 1637 Register tmp4 = R0;
aoqi@0 1638
aoqi@0 1639 { // FasterArrayCopy
aoqi@0 1640 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1641 __ beq(CCR0, l_6);
aoqi@0 1642
aoqi@0 1643 __ sldi(R5_ARG3, R5_ARG3, 2);
aoqi@0 1644 __ add(R3_ARG1, R3_ARG1, R5_ARG3);
aoqi@0 1645 __ add(R4_ARG2, R4_ARG2, R5_ARG3);
aoqi@0 1646 __ srdi(R5_ARG3, R5_ARG3, 2);
aoqi@0 1647
aoqi@0 1648 __ cmpwi(CCR0, R5_ARG3, 7);
aoqi@0 1649 __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
aoqi@0 1650
aoqi@0 1651 __ srdi(tmp1, R5_ARG3, 3);
aoqi@0 1652 __ andi(R5_ARG3, R5_ARG3, 7);
aoqi@0 1653 __ mtctr(tmp1);
aoqi@0 1654
aoqi@0 1655 __ bind(l_4);
aoqi@0 1656 // Use unrolled version for mass copying (copy 8 elements at a time).
aoqi@0 1657 // Load feeding store gets zero latency on Power6, however not on Power5.
aoqi@0 1658 // Therefore, the following sequence is made for the good of both.
aoqi@0 1659 __ addi(R3_ARG1, R3_ARG1, -32);
aoqi@0 1660 __ addi(R4_ARG2, R4_ARG2, -32);
aoqi@0 1661 __ ld(tmp4, 24, R3_ARG1);
aoqi@0 1662 __ ld(tmp3, 16, R3_ARG1);
aoqi@0 1663 __ ld(tmp2, 8, R3_ARG1);
aoqi@0 1664 __ ld(tmp1, 0, R3_ARG1);
aoqi@0 1665 __ std(tmp4, 24, R4_ARG2);
aoqi@0 1666 __ std(tmp3, 16, R4_ARG2);
aoqi@0 1667 __ std(tmp2, 8, R4_ARG2);
aoqi@0 1668 __ std(tmp1, 0, R4_ARG2);
aoqi@0 1669 __ bdnz(l_4);
aoqi@0 1670
aoqi@0 1671 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1672 __ beq(CCR0, l_6);
aoqi@0 1673
aoqi@0 1674 __ bind(l_5);
aoqi@0 1675 __ mtctr(R5_ARG3);
aoqi@0 1676 __ bind(l_3);
aoqi@0 1677 __ lwz(R0, -4, R3_ARG1);
aoqi@0 1678 __ stw(R0, -4, R4_ARG2);
aoqi@0 1679 __ addi(R3_ARG1, R3_ARG1, -4);
aoqi@0 1680 __ addi(R4_ARG2, R4_ARG2, -4);
aoqi@0 1681 __ bdnz(l_3);
aoqi@0 1682
aoqi@0 1683 __ bind(l_6);
aoqi@0 1684 }
aoqi@0 1685 }
aoqi@0 1686
aoqi@0 1687 // Generate stub for conjoint int copy. If "aligned" is true, the
aoqi@0 1688 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1689 //
aoqi@0 1690 // Arguments for generated stub:
aoqi@0 1691 // from: R3_ARG1
aoqi@0 1692 // to: R4_ARG2
aoqi@0 1693 // count: R5_ARG3 treated as signed
aoqi@0 1694 //
aoqi@0 1695 address generate_conjoint_int_copy(bool aligned, const char * name) {
aoqi@0 1696 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1697 address start = __ function_entry();
aoqi@0 1698
aoqi@0 1699 #if defined(ABI_ELFv2)
aoqi@0 1700 address nooverlap_target = aligned ?
aoqi@0 1701 StubRoutines::arrayof_jint_disjoint_arraycopy() :
aoqi@0 1702 StubRoutines::jint_disjoint_arraycopy();
aoqi@0 1703 #else
aoqi@0 1704 address nooverlap_target = aligned ?
aoqi@0 1705 ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
aoqi@0 1706 ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
aoqi@0 1707 #endif
aoqi@0 1708
aoqi@0 1709 array_overlap_test(nooverlap_target, 2);
aoqi@0 1710
aoqi@0 1711 generate_conjoint_int_copy_core(aligned);
aoqi@0 1712
aoqi@0 1713 __ blr();
aoqi@0 1714
aoqi@0 1715 return start;
aoqi@0 1716 }
aoqi@0 1717
aoqi@0 1718 // Generate core code for disjoint long copy (and oop copy on
aoqi@0 1719 // 64-bit). If "aligned" is true, the "from" and "to" addresses
aoqi@0 1720 // are assumed to be heapword aligned.
aoqi@0 1721 //
aoqi@0 1722 // Arguments:
aoqi@0 1723 // from: R3_ARG1
aoqi@0 1724 // to: R4_ARG2
aoqi@0 1725 // count: R5_ARG3 treated as signed
aoqi@0 1726 //
aoqi@0 1727 void generate_disjoint_long_copy_core(bool aligned) {
aoqi@0 1728 Register tmp1 = R6_ARG4;
aoqi@0 1729 Register tmp2 = R7_ARG5;
aoqi@0 1730 Register tmp3 = R8_ARG6;
aoqi@0 1731 Register tmp4 = R0;
aoqi@0 1732
aoqi@0 1733 Label l_1, l_2, l_3, l_4;
aoqi@0 1734
aoqi@0 1735 { // FasterArrayCopy
aoqi@0 1736 __ cmpwi(CCR0, R5_ARG3, 3);
aoqi@0 1737 __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
aoqi@0 1738
aoqi@0 1739 __ srdi(tmp1, R5_ARG3, 2);
aoqi@0 1740 __ andi_(R5_ARG3, R5_ARG3, 3);
aoqi@0 1741 __ mtctr(tmp1);
aoqi@0 1742
aoqi@0 1743 __ bind(l_4);
aoqi@0 1744 // Use unrolled version for mass copying (copy 4 elements at a time).
aoqi@0 1745 // Load feeding store gets zero latency on Power6, however not on Power5.
aoqi@0 1746 // Therefore, the following sequence is made for the good of both.
aoqi@0 1747 __ ld(tmp1, 0, R3_ARG1);
aoqi@0 1748 __ ld(tmp2, 8, R3_ARG1);
aoqi@0 1749 __ ld(tmp3, 16, R3_ARG1);
aoqi@0 1750 __ ld(tmp4, 24, R3_ARG1);
aoqi@0 1751 __ std(tmp1, 0, R4_ARG2);
aoqi@0 1752 __ std(tmp2, 8, R4_ARG2);
aoqi@0 1753 __ std(tmp3, 16, R4_ARG2);
aoqi@0 1754 __ std(tmp4, 24, R4_ARG2);
aoqi@0 1755 __ addi(R3_ARG1, R3_ARG1, 32);
aoqi@0 1756 __ addi(R4_ARG2, R4_ARG2, 32);
aoqi@0 1757 __ bdnz(l_4);
aoqi@0 1758 }
aoqi@0 1759
aoqi@0 1760 // copy 1 element at a time
aoqi@0 1761 __ bind(l_3);
aoqi@0 1762 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1763 __ beq(CCR0, l_1);
aoqi@0 1764
aoqi@0 1765 { // FasterArrayCopy
aoqi@0 1766 __ mtctr(R5_ARG3);
aoqi@0 1767 __ addi(R3_ARG1, R3_ARG1, -8);
aoqi@0 1768 __ addi(R4_ARG2, R4_ARG2, -8);
aoqi@0 1769
aoqi@0 1770 __ bind(l_2);
aoqi@0 1771 __ ldu(R0, 8, R3_ARG1);
aoqi@0 1772 __ stdu(R0, 8, R4_ARG2);
aoqi@0 1773 __ bdnz(l_2);
aoqi@0 1774
aoqi@0 1775 }
aoqi@0 1776 __ bind(l_1);
aoqi@0 1777 }
aoqi@0 1778
aoqi@0 1779 // Generate stub for disjoint long copy. If "aligned" is true, the
aoqi@0 1780 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1781 //
aoqi@0 1782 // Arguments for generated stub:
aoqi@0 1783 // from: R3_ARG1
aoqi@0 1784 // to: R4_ARG2
aoqi@0 1785 // count: R5_ARG3 treated as signed
aoqi@0 1786 //
aoqi@0 1787 address generate_disjoint_long_copy(bool aligned, const char * name) {
aoqi@0 1788 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1789 address start = __ function_entry();
aoqi@0 1790 generate_disjoint_long_copy_core(aligned);
aoqi@0 1791 __ blr();
aoqi@0 1792
aoqi@0 1793 return start;
aoqi@0 1794 }
aoqi@0 1795
aoqi@0 1796 // Generate core code for conjoint long copy (and oop copy on
aoqi@0 1797 // 64-bit). If "aligned" is true, the "from" and "to" addresses
aoqi@0 1798 // are assumed to be heapword aligned.
aoqi@0 1799 //
aoqi@0 1800 // Arguments:
aoqi@0 1801 // from: R3_ARG1
aoqi@0 1802 // to: R4_ARG2
aoqi@0 1803 // count: R5_ARG3 treated as signed
aoqi@0 1804 //
aoqi@0 1805 void generate_conjoint_long_copy_core(bool aligned) {
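// Do reverse copy. As in the conjoint int copy above, we assume actual
// overlap is rare enough that it does not need special optimization.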
aoqi@0 1806 Register tmp1 = R6_ARG4;
aoqi@0 1807 Register tmp2 = R7_ARG5;
aoqi@0 1808 Register tmp3 = R8_ARG6;
aoqi@0 1809 Register tmp4 = R0;
aoqi@0 1810
aoqi@0 1811 Label l_1, l_2, l_3, l_4, l_5;
aoqi@0 1812
aoqi@0 1813 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1814 __ beq(CCR0, l_1);
aoqi@0 1815
aoqi@0 1816 { // FasterArrayCopy
aoqi@0 1817 __ sldi(R5_ARG3, R5_ARG3, 3);
aoqi@0 1818 __ add(R3_ARG1, R3_ARG1, R5_ARG3);
aoqi@0 1819 __ add(R4_ARG2, R4_ARG2, R5_ARG3);
aoqi@0 1820 __ srdi(R5_ARG3, R5_ARG3, 3);
aoqi@0 1821
aoqi@0 1822 __ cmpwi(CCR0, R5_ARG3, 3);
aoqi@0 1823 __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
aoqi@0 1824
aoqi@0 1825 __ srdi(tmp1, R5_ARG3, 2);
aoqi@0 1826 __ andi(R5_ARG3, R5_ARG3, 3);
aoqi@0 1827 __ mtctr(tmp1);
aoqi@0 1828
aoqi@0 1829 __ bind(l_4);
aoqi@0 1830 // Use unrolled version for mass copying (copy 4 elements at a time).
aoqi@0 1831 // Load feeding store gets zero latency on Power6, however not on Power5.
aoqi@0 1832 // Therefore, the following sequence is made for the good of both.
aoqi@0 1833 __ addi(R3_ARG1, R3_ARG1, -32);
aoqi@0 1834 __ addi(R4_ARG2, R4_ARG2, -32);
aoqi@0 1835 __ ld(tmp4, 24, R3_ARG1);
aoqi@0 1836 __ ld(tmp3, 16, R3_ARG1);
aoqi@0 1837 __ ld(tmp2, 8, R3_ARG1);
aoqi@0 1838 __ ld(tmp1, 0, R3_ARG1);
aoqi@0 1839 __ std(tmp4, 24, R4_ARG2);
aoqi@0 1840 __ std(tmp3, 16, R4_ARG2);
aoqi@0 1841 __ std(tmp2, 8, R4_ARG2);
aoqi@0 1842 __ std(tmp1, 0, R4_ARG2);
aoqi@0 1843 __ bdnz(l_4);
aoqi@0 1844
aoqi@0 1845 __ cmpwi(CCR0, R5_ARG3, 0);
aoqi@0 1846 __ beq(CCR0, l_1);
aoqi@0 1847
aoqi@0 1848 __ bind(l_5);
aoqi@0 1849 __ mtctr(R5_ARG3);
aoqi@0 1850 __ bind(l_3);
aoqi@0 1851 __ ld(R0, -8, R3_ARG1);
aoqi@0 1852 __ std(R0, -8, R4_ARG2);
aoqi@0 1853 __ addi(R3_ARG1, R3_ARG1, -8);
aoqi@0 1854 __ addi(R4_ARG2, R4_ARG2, -8);
aoqi@0 1855 __ bdnz(l_3);
aoqi@0 1856
aoqi@0 1857 }
aoqi@0 1858 __ bind(l_1);
aoqi@0 1859 }
aoqi@0 1860
aoqi@0 1861 // Generate stub for conjoint long copy. If "aligned" is true, the
aoqi@0 1862 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1863 //
aoqi@0 1864 // Arguments for generated stub:
aoqi@0 1865 // from: R3_ARG1
aoqi@0 1866 // to: R4_ARG2
aoqi@0 1867 // count: R5_ARG3 treated as signed
aoqi@0 1868 //
aoqi@0 1869 address generate_conjoint_long_copy(bool aligned, const char * name) {
aoqi@0 1870 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1871 address start = __ function_entry();
aoqi@0 1872
aoqi@0 1873 #if defined(ABI_ELFv2)
aoqi@0 1874 address nooverlap_target = aligned ?
aoqi@0 1875 StubRoutines::arrayof_jlong_disjoint_arraycopy() :
aoqi@0 1876 StubRoutines::jlong_disjoint_arraycopy();
aoqi@0 1877 #else
aoqi@0 1878 address nooverlap_target = aligned ?
aoqi@0 1879 ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
aoqi@0 1880 ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
aoqi@0 1881 #endif
aoqi@0 1882
aoqi@0 1883 array_overlap_test(nooverlap_target, 3);
aoqi@0 1884 generate_conjoint_long_copy_core(aligned);
aoqi@0 1885
aoqi@0 1886 __ blr();
aoqi@0 1887
aoqi@0 1888 return start;
aoqi@0 1889 }
aoqi@0 1890
aoqi@0 1891 // Generate stub for conjoint oop copy. If "aligned" is true, the
aoqi@0 1892 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1893 //
aoqi@0 1894 // Arguments for generated stub:
aoqi@0 1895 // from: R3_ARG1
aoqi@0 1896 // to: R4_ARG2
aoqi@0 1897 // count: R5_ARG3 treated as signed
aoqi@0 1898 // dest_uninitialized: G1 support
aoqi@0 1899 //
aoqi@0 1900 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
aoqi@0 1901 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1902
aoqi@0 1903 address start = __ function_entry();
aoqi@0 1904
aoqi@0 1905 #if defined(ABI_ELFv2)
aoqi@0 1906 address nooverlap_target = aligned ?
aoqi@0 1907 StubRoutines::arrayof_oop_disjoint_arraycopy() :
aoqi@0 1908 StubRoutines::oop_disjoint_arraycopy();
aoqi@0 1909 #else
aoqi@0 1910 address nooverlap_target = aligned ?
aoqi@0 1911 ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
aoqi@0 1912 ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
aoqi@0 1913 #endif
aoqi@0 1914
aoqi@0 1915 gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
aoqi@0 1916
aoqi@0 1917 // Save arguments.
aoqi@0 1918 __ mr(R9_ARG7, R4_ARG2);
aoqi@0 1919 __ mr(R10_ARG8, R5_ARG3);
aoqi@0 1920
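// With compressed oops each element is 4 bytes wide, so the int copy core
// can be reused; otherwise elements are 8 bytes and the long copy core is used.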
aoqi@0 1921 if (UseCompressedOops) {
aoqi@0 1922 array_overlap_test(nooverlap_target, 2);
aoqi@0 1923 generate_conjoint_int_copy_core(aligned);
aoqi@0 1924 } else {
aoqi@0 1925 array_overlap_test(nooverlap_target, 3);
aoqi@0 1926 generate_conjoint_long_copy_core(aligned);
aoqi@0 1927 }
aoqi@0 1928
aoqi@0 1929 gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
aoqi@0 1930 return start;
aoqi@0 1931 }
aoqi@0 1932
aoqi@0 1933 // Generate stub for disjoint oop copy. If "aligned" is true, the
aoqi@0 1934 // "from" and "to" addresses are assumed to be heapword aligned.
aoqi@0 1935 //
aoqi@0 1936 // Arguments for generated stub:
aoqi@0 1937 // from: R3_ARG1
aoqi@0 1938 // to: R4_ARG2
aoqi@0 1939 // count: R5_ARG3 treated as signed
aoqi@0 1940 // dest_uninitialized: G1 support
aoqi@0 1941 //
aoqi@0 1942 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
aoqi@0 1943 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 1944 address start = __ function_entry();
aoqi@0 1945
aoqi@0 1946 gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
aoqi@0 1947
aoqi@0 1948 // Save some arguments; the disjoint copy cores destroy them, but they
aoqi@0 1949 // are needed for the post barrier.
aoqi@0 1950 __ mr(R9_ARG7, R4_ARG2);
aoqi@0 1951 __ mr(R10_ARG8, R5_ARG3);
aoqi@0 1952
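// As above: 4 byte elements with compressed oops, 8 byte elements otherwise.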
aoqi@0 1953 if (UseCompressedOops) {
aoqi@0 1954 generate_disjoint_int_copy_core(aligned);
aoqi@0 1955 } else {
aoqi@0 1956 generate_disjoint_long_copy_core(aligned);
aoqi@0 1957 }
aoqi@0 1958
aoqi@0 1959 gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
aoqi@0 1960
aoqi@0 1961 return start;
aoqi@0 1962 }
aoqi@0 1963
aoqi@0 1964 void generate_arraycopy_stubs() {
aoqi@0 1965 // Note: the disjoint stubs must be generated first, some of
aoqi@0 1966 // the conjoint stubs use them.
aoqi@0 1967
aoqi@0 1968 // non-aligned disjoint versions
aoqi@0 1969 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
aoqi@0 1970 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
aoqi@0 1971 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
aoqi@0 1972 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
aoqi@0 1973 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
aoqi@0 1974 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
aoqi@0 1975
aoqi@0 1976 // aligned disjoint versions
aoqi@0 1977 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
aoqi@0 1978 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
aoqi@0 1979 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
aoqi@0 1980 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
aoqi@0 1981 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
aoqi@0 1982 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
aoqi@0 1983
aoqi@0 1984 // non-aligned conjoint versions
aoqi@0 1985 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
aoqi@0 1986 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
aoqi@0 1987 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
aoqi@0 1988 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy");
aoqi@0 1989 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
aoqi@0 1990 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
aoqi@0 1991
aoqi@0 1992 // aligned conjoint versions
aoqi@0 1993 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
aoqi@0 1994 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
aoqi@0 1995 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
aoqi@0 1996 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
aoqi@0 1997 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
aoqi@0 1998 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
aoqi@0 1999
aoqi@0 2000 // fill routines
aoqi@0 2001 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
aoqi@0 2002 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
aoqi@0 2003 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
aoqi@0 2004 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
aoqi@0 2005 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
aoqi@0 2006 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
aoqi@0 2007 }
aoqi@0 2008
aoqi@0 2009 // Safefetch stubs.
aoqi@0 2010 void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
aoqi@0 2011 // safefetch signatures:
aoqi@0 2012 // int SafeFetch32(int* adr, int errValue);
aoqi@0 2013 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
aoqi@0 2014 //
aoqi@0 2015 // arguments:
aoqi@0 2016 // R3_ARG1 = adr
aoqi@0 2017 // R4_ARG2 = errValue
aoqi@0 2018 //
aoqi@0 2019 // result:
aoqi@0 2020 // R3_RET = *adr or errValue
aoqi@0 2021
aoqi@0 2022 StubCodeMark mark(this, "StubRoutines", name);
aoqi@0 2023
aoqi@0 2024 // Entry point, pc or function descriptor.
aoqi@0 2025 *entry = __ function_entry();
aoqi@0 2026
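// If the load below faults, the VM's signal handler is expected to resume
// execution at *continuation_pc; R4_ARG2 still holds errValue in that case,
// so the stub returns errValue instead of crashing.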
aoqi@0 2027 // Load *adr into R4_ARG2, may fault.
aoqi@0 2028 *fault_pc = __ pc();
aoqi@0 2029 switch (size) {
aoqi@0 2030 case 4:
aoqi@0 2031 // int32_t, sign-extended
aoqi@0 2032 __ lwa(R4_ARG2, 0, R3_ARG1);
aoqi@0 2033 break;
aoqi@0 2034 case 8:
aoqi@0 2035 // int64_t
aoqi@0 2036 __ ld(R4_ARG2, 0, R3_ARG1);
aoqi@0 2037 break;
aoqi@0 2038 default:
aoqi@0 2039 ShouldNotReachHere();
aoqi@0 2040 }
aoqi@0 2041
aoqi@0 2042 // return errValue or *adr
aoqi@0 2043 *continuation_pc = __ pc();
aoqi@0 2044 __ mr(R3_RET, R4_ARG2);
aoqi@0 2045 __ blr();
aoqi@0 2046 }
aoqi@0 2047
aoqi@0 2048 // Initialization
aoqi@0 2049 void generate_initial() {
aoqi@0 2050 // Generates the initial stubs and initializes the entry points
aoqi@0 2051
aoqi@0 2052 // Entry points that exist in all platforms.
aoqi@0 2053 // Note: This is code that could be shared among different platforms - however the
aoqi@0 2054 // benefit seems to be smaller than the disadvantage of having a
aoqi@0 2055 // much more complicated generator structure. See also comment in
aoqi@0 2056 // stubRoutines.hpp.
aoqi@0 2057
aoqi@0 2058 StubRoutines::_forward_exception_entry = generate_forward_exception();
aoqi@0 2059 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
aoqi@0 2060 StubRoutines::_catch_exception_entry = generate_catch_exception();
aoqi@0 2061
aoqi@0 2062 // Build this early so it's available for the interpreter.
aoqi@0 2063 StubRoutines::_throw_StackOverflowError_entry =
aoqi@0 2064 generate_throw_exception("StackOverflowError throw_exception",
aoqi@0 2065 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
aoqi@0 2066 }
aoqi@0 2067
aoqi@0 2068 void generate_all() {
aoqi@0 2069 // Generates all stubs and initializes the entry points
aoqi@0 2070
aoqi@0 2071 // These entry points require SharedInfo::stack0 to be set up in
aoqi@0 2072 // non-core builds
aoqi@0 2073 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
aoqi@0 2074 // Handle IncompatibleClassChangeError in itable stubs.
aoqi@0 2075 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
aoqi@0 2076 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
aoqi@0 2077
aoqi@0 2078 StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
aoqi@0 2079
aoqi@0 2080 // support for verify_oop (must happen after universe_init)
aoqi@0 2081 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
aoqi@0 2082
aoqi@0 2083 // arraycopy stubs used by compilers
aoqi@0 2084 generate_arraycopy_stubs();
aoqi@0 2085
aoqi@0 2086 if (UseAESIntrinsics) {
aoqi@0 2087 guarantee(!UseAESIntrinsics, "not yet implemented.");
aoqi@0 2088 }
aoqi@0 2089
aoqi@0 2090 // Safefetch stubs.
aoqi@0 2091 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
aoqi@0 2092 &StubRoutines::_safefetch32_fault_pc,
aoqi@0 2093 &StubRoutines::_safefetch32_continuation_pc);
aoqi@0 2094 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
aoqi@0 2095 &StubRoutines::_safefetchN_fault_pc,
aoqi@0 2096 &StubRoutines::_safefetchN_continuation_pc);
aoqi@0 2097 }
aoqi@0 2098
aoqi@0 2099 public:
aoqi@0 2100 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
aoqi@0 2101 // replace the standard masm with a special one:
aoqi@0 2102 _masm = new MacroAssembler(code);
aoqi@0 2103 if (all) {
aoqi@0 2104 generate_all();
aoqi@0 2105 } else {
aoqi@0 2106 generate_initial();
aoqi@0 2107 }
aoqi@0 2108 }
aoqi@0 2109 };
aoqi@0 2110
aoqi@0 2111 void StubGenerator_generate(CodeBuffer* code, bool all) {
aoqi@0 2112 StubGenerator g(code, all);
aoqi@0 2113 }
