--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Apr 27 01:25:04 2016 +0800
@@ -0,0 +1,3043 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_x86.hpp"
+#include "oops/instanceOop.hpp"
+#include "oops/method.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/top.hpp"
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+// Declaration and definition of StubGenerator (no .hpp file).
+// For a more detailed description of the stub routine structure
+// see the comment in stubRoutines.hpp
+
+#define __ _masm->
+#define a__ ((Assembler*)_masm)->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+const int MXCSR_MASK          = 0xFFC0;  // Mask out any pending exceptions
+const int FPU_CNTRL_WRD_MASK  = 0xFFFF;
+
+// -------------------------------------------------------------------------------------------------------------------------
+// Stub Code definitions
+
+static address handle_unsafe_access() {
+  JavaThread* thread = JavaThread::current();
+  address pc  = thread->saved_exception_pc();
+  // pc is the instruction which we must emulate
+  // doing a no-op is fine: return garbage from the load
+  // therefore, compute npc
+  address npc = Assembler::locate_next_instruction(pc);
+
+  // request an async exception
+  thread->set_pending_unsafe_access_error();
+
+  // return address of next instruction to execute
+  return npc;
+}
+
+class StubGenerator: public StubCodeGenerator {
+ private:
+
+#ifdef PRODUCT
+#define inc_counter_np(counter) ((void)0)
+#else
+  void inc_counter_np_(int& counter) {
+    __ incrementl(ExternalAddress((address)&counter));
+  }
+#define inc_counter_np(counter) \
+  BLOCK_COMMENT("inc_counter " #counter); \
+  inc_counter_np_(counter);
+#endif //PRODUCT
+
+  void inc_copy_counter_np(BasicType t) {
+#ifndef PRODUCT
+    switch (t) {
+    case T_BYTE:   inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);  return;
+    case T_SHORT:  inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
+    case T_INT:    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);   return;
+    case T_LONG:   inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);  return;
+    case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr);    return;
+    }
+    ShouldNotReachHere();
+#endif //PRODUCT
+  }
+
+  //------------------------------------------------------------------------------------------------------------------------
+  // Call stubs are used to call Java from C
+  //
+  //    [ return_from_Java     ] <--- rsp
+  //    [ argument word n      ]
+  //      ...
+  // -N [ argument word 1      ]
+  // -7 [ Possible padding for stack alignment ]
+  // -6 [ Possible padding for stack alignment ]
+  // -5 [ Possible padding for stack alignment ]
+  // -4 [ mxcsr save           ] <--- rsp_after_call
+  // -3 [ saved rbx,           ]
+  // -2 [ saved rsi            ]
+  // -1 [ saved rdi            ]
+  //  0 [ saved rbp,           ] <--- rbp,
+  //  1 [ return address       ]
+  //  2 [ ptr. to call wrapper ]
+  //  3 [ result               ]
+  //  4 [ result_type          ]
+  //  5 [ method               ]
+  //  6 [ entry_point          ]
+  //  7 [ parameters           ]
+  //  8 [ parameter_size       ]
+  //  9 [ thread               ]
+
+
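+  // Note: slots 2..9 in the layout above line up with the C-side call stub
+  // signature declared in stubRoutines.hpp; roughly (a sketch for reference,
+  // argument names may differ slightly from the actual declaration):
+  //
+  //   typedef void (*CallStub)(
+  //     address   link,                // ptr. to call wrapper  (slot 2)
+  //     intptr_t* result,              //                       (slot 3)
+  //     BasicType result_type,         //                       (slot 4)
+  //     Method*   method,              //                       (slot 5)
+  //     address   entry_point,         //                       (slot 6)
+  //     intptr_t* parameters,          //                       (slot 7)
+  //     int       size_of_parameters,  //                       (slot 8)
+  //     TRAPS);                        // current thread        (slot 9)
+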
+  address generate_call_stub(address& return_address) {
+    StubCodeMark mark(this, "StubRoutines", "call_stub");
+    address start = __ pc();
+
+    // stub code parameters / addresses
+    assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
+    bool  sse_save = false;
+    const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
+    const int     locals_count_in_bytes  (4*wordSize);
+    const Address mxcsr_save    (rbp, -4 * wordSize);
+    const Address saved_rbx     (rbp, -3 * wordSize);
+    const Address saved_rsi     (rbp, -2 * wordSize);
+    const Address saved_rdi     (rbp, -1 * wordSize);
+    const Address result        (rbp,  3 * wordSize);
+    const Address result_type   (rbp,  4 * wordSize);
+    const Address method        (rbp,  5 * wordSize);
+    const Address entry_point   (rbp,  6 * wordSize);
+    const Address parameters    (rbp,  7 * wordSize);
+    const Address parameter_size(rbp,  8 * wordSize);
+    const Address thread        (rbp,  9 * wordSize); // same as in generate_catch_exception()!
+    sse_save = UseSSE > 0;
+
+    // stub code
+    __ enter();
+    __ movptr(rcx, parameter_size);                   // parameter counter
+    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
+    __ addptr(rcx, locals_count_in_bytes);            // reserve space for register saves
+    __ subptr(rsp, rcx);
+    __ andptr(rsp, -(StackAlignmentInBytes));         // Align stack
+
+    // save rdi, rsi, & rbx, according to C calling conventions
+    __ movptr(saved_rdi, rdi);
+    __ movptr(saved_rsi, rsi);
+    __ movptr(saved_rbx, rbx);
+    // save and initialize %mxcsr
+    if (sse_save) {
+      Label skip_ldmx;
+      __ stmxcsr(mxcsr_save);
+      __ movl(rax, mxcsr_save);
+      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
+      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
+      __ cmp32(rax, mxcsr_std);
+      __ jcc(Assembler::equal, skip_ldmx);
+      __ ldmxcsr(mxcsr_std);
+      __ bind(skip_ldmx);
+    }
+
+    // make sure the control word is correct.
1.186 + __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 1.187 + 1.188 +#ifdef ASSERT 1.189 + // make sure we have no pending exceptions 1.190 + { Label L; 1.191 + __ movptr(rcx, thread); 1.192 + __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1.193 + __ jcc(Assembler::equal, L); 1.194 + __ stop("StubRoutines::call_stub: entered with pending exception"); 1.195 + __ bind(L); 1.196 + } 1.197 +#endif 1.198 + 1.199 + // pass parameters if any 1.200 + BLOCK_COMMENT("pass parameters if any"); 1.201 + Label parameters_done; 1.202 + __ movl(rcx, parameter_size); // parameter counter 1.203 + __ testl(rcx, rcx); 1.204 + __ jcc(Assembler::zero, parameters_done); 1.205 + 1.206 + // parameter passing loop 1.207 + 1.208 + Label loop; 1.209 + // Copy Java parameters in reverse order (receiver last) 1.210 + // Note that the argument order is inverted in the process 1.211 + // source is rdx[rcx: N-1..0] 1.212 + // dest is rsp[rbx: 0..N-1] 1.213 + 1.214 + __ movptr(rdx, parameters); // parameter pointer 1.215 + __ xorptr(rbx, rbx); 1.216 + 1.217 + __ BIND(loop); 1.218 + 1.219 + // get parameter 1.220 + __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize)); 1.221 + __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(), 1.222 + Interpreter::expr_offset_in_bytes(0)), rax); // store parameter 1.223 + __ increment(rbx); 1.224 + __ decrement(rcx); 1.225 + __ jcc(Assembler::notZero, loop); 1.226 + 1.227 + // call Java function 1.228 + __ BIND(parameters_done); 1.229 + __ movptr(rbx, method); // get Method* 1.230 + __ movptr(rax, entry_point); // get entry_point 1.231 + __ mov(rsi, rsp); // set sender sp 1.232 + BLOCK_COMMENT("call Java function"); 1.233 + __ call(rax); 1.234 + 1.235 + BLOCK_COMMENT("call_stub_return_address:"); 1.236 + return_address = __ pc(); 1.237 + 1.238 +#ifdef COMPILER2 1.239 + { 1.240 + Label L_skip; 1.241 + if (UseSSE >= 2) { 1.242 + __ verify_FPU(0, "call_stub_return"); 1.243 + } else { 1.244 + for (int i = 1; i < 8; i++) { 1.245 + __ ffree(i); 1.246 + } 1.247 + 1.248 + // UseSSE <= 1 so double result should be left on TOS 1.249 + __ movl(rsi, result_type); 1.250 + __ cmpl(rsi, T_DOUBLE); 1.251 + __ jcc(Assembler::equal, L_skip); 1.252 + if (UseSSE == 0) { 1.253 + // UseSSE == 0 so float result should be left on TOS 1.254 + __ cmpl(rsi, T_FLOAT); 1.255 + __ jcc(Assembler::equal, L_skip); 1.256 + } 1.257 + __ ffree(0); 1.258 + } 1.259 + __ BIND(L_skip); 1.260 + } 1.261 +#endif // COMPILER2 1.262 + 1.263 + // store result depending on type 1.264 + // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 1.265 + __ movptr(rdi, result); 1.266 + Label is_long, is_float, is_double, exit; 1.267 + __ movl(rsi, result_type); 1.268 + __ cmpl(rsi, T_LONG); 1.269 + __ jcc(Assembler::equal, is_long); 1.270 + __ cmpl(rsi, T_FLOAT); 1.271 + __ jcc(Assembler::equal, is_float); 1.272 + __ cmpl(rsi, T_DOUBLE); 1.273 + __ jcc(Assembler::equal, is_double); 1.274 + 1.275 + // handle T_INT case 1.276 + __ movl(Address(rdi, 0), rax); 1.277 + __ BIND(exit); 1.278 + 1.279 + // check that FPU stack is empty 1.280 + __ verify_FPU(0, "generate_call_stub"); 1.281 + 1.282 + // pop parameters 1.283 + __ lea(rsp, rsp_after_call); 1.284 + 1.285 + // restore %mxcsr 1.286 + if (sse_save) { 1.287 + __ ldmxcsr(mxcsr_save); 1.288 + } 1.289 + 1.290 + // restore rdi, rsi and rbx, 1.291 + __ movptr(rbx, saved_rbx); 1.292 + __ movptr(rsi, saved_rsi); 1.293 + __ movptr(rdi, saved_rdi); 1.294 + __ addptr(rsp, 4*wordSize); 1.295 + 
1.296 + // return 1.297 + __ pop(rbp); 1.298 + __ ret(0); 1.299 + 1.300 + // handle return types different from T_INT 1.301 + __ BIND(is_long); 1.302 + __ movl(Address(rdi, 0 * wordSize), rax); 1.303 + __ movl(Address(rdi, 1 * wordSize), rdx); 1.304 + __ jmp(exit); 1.305 + 1.306 + __ BIND(is_float); 1.307 + // interpreter uses xmm0 for return values 1.308 + if (UseSSE >= 1) { 1.309 + __ movflt(Address(rdi, 0), xmm0); 1.310 + } else { 1.311 + __ fstp_s(Address(rdi, 0)); 1.312 + } 1.313 + __ jmp(exit); 1.314 + 1.315 + __ BIND(is_double); 1.316 + // interpreter uses xmm0 for return values 1.317 + if (UseSSE >= 2) { 1.318 + __ movdbl(Address(rdi, 0), xmm0); 1.319 + } else { 1.320 + __ fstp_d(Address(rdi, 0)); 1.321 + } 1.322 + __ jmp(exit); 1.323 + 1.324 + return start; 1.325 + } 1.326 + 1.327 + 1.328 + //------------------------------------------------------------------------------------------------------------------------ 1.329 + // Return point for a Java call if there's an exception thrown in Java code. 1.330 + // The exception is caught and transformed into a pending exception stored in 1.331 + // JavaThread that can be tested from within the VM. 1.332 + // 1.333 + // Note: Usually the parameters are removed by the callee. In case of an exception 1.334 + // crossing an activation frame boundary, that is not the case if the callee 1.335 + // is compiled code => need to setup the rsp. 1.336 + // 1.337 + // rax,: exception oop 1.338 + 1.339 + address generate_catch_exception() { 1.340 + StubCodeMark mark(this, "StubRoutines", "catch_exception"); 1.341 + const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()! 1.342 + const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()! 1.343 + address start = __ pc(); 1.344 + 1.345 + // get thread directly 1.346 + __ movptr(rcx, thread); 1.347 +#ifdef ASSERT 1.348 + // verify that threads correspond 1.349 + { Label L; 1.350 + __ get_thread(rbx); 1.351 + __ cmpptr(rbx, rcx); 1.352 + __ jcc(Assembler::equal, L); 1.353 + __ stop("StubRoutines::catch_exception: threads must correspond"); 1.354 + __ bind(L); 1.355 + } 1.356 +#endif 1.357 + // set pending exception 1.358 + __ verify_oop(rax); 1.359 + __ movptr(Address(rcx, Thread::pending_exception_offset()), rax ); 1.360 + __ lea(Address(rcx, Thread::exception_file_offset ()), 1.361 + ExternalAddress((address)__FILE__)); 1.362 + __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ ); 1.363 + // complete return to VM 1.364 + assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before"); 1.365 + __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 1.366 + 1.367 + return start; 1.368 + } 1.369 + 1.370 + 1.371 + //------------------------------------------------------------------------------------------------------------------------ 1.372 + // Continuation point for runtime calls returning with a pending exception. 1.373 + // The pending exception check happened in the runtime or native call stub. 1.374 + // The pending exception in Thread is converted into a Java-level exception. 1.375 + // 1.376 + // Contract with Java-level exception handlers: 1.377 + // rax: exception 1.378 + // rdx: throwing pc 1.379 + // 1.380 + // NOTE: At entry of this stub, exception-pc must be on stack !! 
1.381 + 1.382 + address generate_forward_exception() { 1.383 + StubCodeMark mark(this, "StubRoutines", "forward exception"); 1.384 + address start = __ pc(); 1.385 + const Register thread = rcx; 1.386 + 1.387 + // other registers used in this stub 1.388 + const Register exception_oop = rax; 1.389 + const Register handler_addr = rbx; 1.390 + const Register exception_pc = rdx; 1.391 + 1.392 + // Upon entry, the sp points to the return address returning into Java 1.393 + // (interpreted or compiled) code; i.e., the return address becomes the 1.394 + // throwing pc. 1.395 + // 1.396 + // Arguments pushed before the runtime call are still on the stack but 1.397 + // the exception handler will reset the stack pointer -> ignore them. 1.398 + // A potential result in registers can be ignored as well. 1.399 + 1.400 +#ifdef ASSERT 1.401 + // make sure this code is only executed if there is a pending exception 1.402 + { Label L; 1.403 + __ get_thread(thread); 1.404 + __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 1.405 + __ jcc(Assembler::notEqual, L); 1.406 + __ stop("StubRoutines::forward exception: no pending exception (1)"); 1.407 + __ bind(L); 1.408 + } 1.409 +#endif 1.410 + 1.411 + // compute exception handler into rbx, 1.412 + __ get_thread(thread); 1.413 + __ movptr(exception_pc, Address(rsp, 0)); 1.414 + BLOCK_COMMENT("call exception_handler_for_return_address"); 1.415 + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); 1.416 + __ mov(handler_addr, rax); 1.417 + 1.418 + // setup rax & rdx, remove return address & clear pending exception 1.419 + __ get_thread(thread); 1.420 + __ pop(exception_pc); 1.421 + __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); 1.422 + __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 1.423 + 1.424 +#ifdef ASSERT 1.425 + // make sure exception is set 1.426 + { Label L; 1.427 + __ testptr(exception_oop, exception_oop); 1.428 + __ jcc(Assembler::notEqual, L); 1.429 + __ stop("StubRoutines::forward exception: no pending exception (2)"); 1.430 + __ bind(L); 1.431 + } 1.432 +#endif 1.433 + 1.434 + // Verify that there is really a valid exception in RAX. 
1.435 + __ verify_oop(exception_oop); 1.436 + 1.437 + // continue at exception handler (return address removed) 1.438 + // rax: exception 1.439 + // rbx: exception handler 1.440 + // rdx: throwing pc 1.441 + __ jmp(handler_addr); 1.442 + 1.443 + return start; 1.444 + } 1.445 + 1.446 + 1.447 + //---------------------------------------------------------------------------------------------------- 1.448 + // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest) 1.449 + // 1.450 + // xchg exists as far back as 8086, lock needed for MP only 1.451 + // Stack layout immediately after call: 1.452 + // 1.453 + // 0 [ret addr ] <--- rsp 1.454 + // 1 [ ex ] 1.455 + // 2 [ dest ] 1.456 + // 1.457 + // Result: *dest <- ex, return (old *dest) 1.458 + // 1.459 + // Note: win32 does not currently use this code 1.460 + 1.461 + address generate_atomic_xchg() { 1.462 + StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 1.463 + address start = __ pc(); 1.464 + 1.465 + __ push(rdx); 1.466 + Address exchange(rsp, 2 * wordSize); 1.467 + Address dest_addr(rsp, 3 * wordSize); 1.468 + __ movl(rax, exchange); 1.469 + __ movptr(rdx, dest_addr); 1.470 + __ xchgl(rax, Address(rdx, 0)); 1.471 + __ pop(rdx); 1.472 + __ ret(0); 1.473 + 1.474 + return start; 1.475 + } 1.476 + 1.477 + //---------------------------------------------------------------------------------------------------- 1.478 + // Support for void verify_mxcsr() 1.479 + // 1.480 + // This routine is used with -Xcheck:jni to verify that native 1.481 + // JNI code does not return to Java code without restoring the 1.482 + // MXCSR register to our expected state. 1.483 + 1.484 + 1.485 + address generate_verify_mxcsr() { 1.486 + StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 1.487 + address start = __ pc(); 1.488 + 1.489 + const Address mxcsr_save(rsp, 0); 1.490 + 1.491 + if (CheckJNICalls && UseSSE > 0 ) { 1.492 + Label ok_ret; 1.493 + ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 1.494 + __ push(rax); 1.495 + __ subptr(rsp, wordSize); // allocate a temp location 1.496 + __ stmxcsr(mxcsr_save); 1.497 + __ movl(rax, mxcsr_save); 1.498 + __ andl(rax, MXCSR_MASK); 1.499 + __ cmp32(rax, mxcsr_std); 1.500 + __ jcc(Assembler::equal, ok_ret); 1.501 + 1.502 + __ warn("MXCSR changed by native JNI code."); 1.503 + 1.504 + __ ldmxcsr(mxcsr_std); 1.505 + 1.506 + __ bind(ok_ret); 1.507 + __ addptr(rsp, wordSize); 1.508 + __ pop(rax); 1.509 + } 1.510 + 1.511 + __ ret(0); 1.512 + 1.513 + return start; 1.514 + } 1.515 + 1.516 + 1.517 + //--------------------------------------------------------------------------- 1.518 + // Support for void verify_fpu_cntrl_wrd() 1.519 + // 1.520 + // This routine is used with -Xcheck:jni to verify that native 1.521 + // JNI code does not return to Java code without restoring the 1.522 + // FP control word to our expected state. 
1.523 + 1.524 + address generate_verify_fpu_cntrl_wrd() { 1.525 + StubCodeMark mark(this, "StubRoutines", "verify_spcw"); 1.526 + address start = __ pc(); 1.527 + 1.528 + const Address fpu_cntrl_wrd_save(rsp, 0); 1.529 + 1.530 + if (CheckJNICalls) { 1.531 + Label ok_ret; 1.532 + __ push(rax); 1.533 + __ subptr(rsp, wordSize); // allocate a temp location 1.534 + __ fnstcw(fpu_cntrl_wrd_save); 1.535 + __ movl(rax, fpu_cntrl_wrd_save); 1.536 + __ andl(rax, FPU_CNTRL_WRD_MASK); 1.537 + ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std()); 1.538 + __ cmp32(rax, fpu_std); 1.539 + __ jcc(Assembler::equal, ok_ret); 1.540 + 1.541 + __ warn("Floating point control word changed by native JNI code."); 1.542 + 1.543 + __ fldcw(fpu_std); 1.544 + 1.545 + __ bind(ok_ret); 1.546 + __ addptr(rsp, wordSize); 1.547 + __ pop(rax); 1.548 + } 1.549 + 1.550 + __ ret(0); 1.551 + 1.552 + return start; 1.553 + } 1.554 + 1.555 + //--------------------------------------------------------------------------- 1.556 + // Wrapper for slow-case handling of double-to-integer conversion 1.557 + // d2i or f2i fast case failed either because it is nan or because 1.558 + // of under/overflow. 1.559 + // Input: FPU TOS: float value 1.560 + // Output: rax, (rdx): integer (long) result 1.561 + 1.562 + address generate_d2i_wrapper(BasicType t, address fcn) { 1.563 + StubCodeMark mark(this, "StubRoutines", "d2i_wrapper"); 1.564 + address start = __ pc(); 1.565 + 1.566 + // Capture info about frame layout 1.567 + enum layout { FPUState_off = 0, 1.568 + rbp_off = FPUStateSizeInWords, 1.569 + rdi_off, 1.570 + rsi_off, 1.571 + rcx_off, 1.572 + rbx_off, 1.573 + saved_argument_off, 1.574 + saved_argument_off2, // 2nd half of double 1.575 + framesize 1.576 + }; 1.577 + 1.578 + assert(FPUStateSizeInWords == 27, "update stack layout"); 1.579 + 1.580 + // Save outgoing argument to stack across push_FPU_state() 1.581 + __ subptr(rsp, wordSize * 2); 1.582 + __ fstp_d(Address(rsp, 0)); 1.583 + 1.584 + // Save CPU & FPU state 1.585 + __ push(rbx); 1.586 + __ push(rcx); 1.587 + __ push(rsi); 1.588 + __ push(rdi); 1.589 + __ push(rbp); 1.590 + __ push_FPU_state(); 1.591 + 1.592 + // push_FPU_state() resets the FP top of stack 1.593 + // Load original double into FP top of stack 1.594 + __ fld_d(Address(rsp, saved_argument_off * wordSize)); 1.595 + // Store double into stack as outgoing argument 1.596 + __ subptr(rsp, wordSize*2); 1.597 + __ fst_d(Address(rsp, 0)); 1.598 + 1.599 + // Prepare FPU for doing math in C-land 1.600 + __ empty_FPU_stack(); 1.601 + // Call the C code to massage the double. Result in EAX 1.602 + if (t == T_INT) 1.603 + { BLOCK_COMMENT("SharedRuntime::d2i"); } 1.604 + else if (t == T_LONG) 1.605 + { BLOCK_COMMENT("SharedRuntime::d2l"); } 1.606 + __ call_VM_leaf( fcn, 2 ); 1.607 + 1.608 + // Restore CPU & FPU state 1.609 + __ pop_FPU_state(); 1.610 + __ pop(rbp); 1.611 + __ pop(rdi); 1.612 + __ pop(rsi); 1.613 + __ pop(rcx); 1.614 + __ pop(rbx); 1.615 + __ addptr(rsp, wordSize * 2); 1.616 + 1.617 + __ ret(0); 1.618 + 1.619 + return start; 1.620 + } 1.621 + 1.622 + 1.623 + //--------------------------------------------------------------------------- 1.624 + // The following routine generates a subroutine to throw an asynchronous 1.625 + // UnknownError when an unsafe access gets a fault that could not be 1.626 + // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.) 
1.627 + address generate_handler_for_unsafe_access() { 1.628 + StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 1.629 + address start = __ pc(); 1.630 + 1.631 + __ push(0); // hole for return address-to-be 1.632 + __ pusha(); // push registers 1.633 + Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 1.634 + BLOCK_COMMENT("call handle_unsafe_access"); 1.635 + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 1.636 + __ movptr(next_pc, rax); // stuff next address 1.637 + __ popa(); 1.638 + __ ret(0); // jump to next address 1.639 + 1.640 + return start; 1.641 + } 1.642 + 1.643 + 1.644 + //---------------------------------------------------------------------------------------------------- 1.645 + // Non-destructive plausibility checks for oops 1.646 + 1.647 + address generate_verify_oop() { 1.648 + StubCodeMark mark(this, "StubRoutines", "verify_oop"); 1.649 + address start = __ pc(); 1.650 + 1.651 + // Incoming arguments on stack after saving rax,: 1.652 + // 1.653 + // [tos ]: saved rdx 1.654 + // [tos + 1]: saved EFLAGS 1.655 + // [tos + 2]: return address 1.656 + // [tos + 3]: char* error message 1.657 + // [tos + 4]: oop object to verify 1.658 + // [tos + 5]: saved rax, - saved by caller and bashed 1.659 + 1.660 + Label exit, error; 1.661 + __ pushf(); 1.662 + __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 1.663 + __ push(rdx); // save rdx 1.664 + // make sure object is 'reasonable' 1.665 + __ movptr(rax, Address(rsp, 4 * wordSize)); // get object 1.666 + __ testptr(rax, rax); 1.667 + __ jcc(Assembler::zero, exit); // if obj is NULL it is ok 1.668 + 1.669 + // Check if the oop is in the right area of memory 1.670 + const int oop_mask = Universe::verify_oop_mask(); 1.671 + const int oop_bits = Universe::verify_oop_bits(); 1.672 + __ mov(rdx, rax); 1.673 + __ andptr(rdx, oop_mask); 1.674 + __ cmpptr(rdx, oop_bits); 1.675 + __ jcc(Assembler::notZero, error); 1.676 + 1.677 + // make sure klass is 'reasonable', which is not zero. 
+    __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, error);           // if klass is NULL it is broken
+
+    // return if everything seems ok
+    __ bind(exit);
+    __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
+    __ pop(rdx);                                // restore rdx
+    __ popf();                                  // restore EFLAGS
+    __ ret(3 * wordSize);                       // pop arguments
+
+    // handle errors
+    __ bind(error);
+    __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
+    __ pop(rdx);                                // get saved rdx back
+    __ popf();                                  // get saved EFLAGS off stack -- will be ignored
+    __ pusha();                                 // push registers (eip = return address & msg are already pushed)
+    BLOCK_COMMENT("call MacroAssembler::debug");
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
+    __ popa();
+    __ ret(3 * wordSize);                       // pop arguments
+    return start;
+  }
+
+  //
+  // Generate pre-barrier for array stores
+  //
+  // Input:
+  //   start   -  starting address
+  //   count   -  element count
+  void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) {
+    assert_different_registers(start, count);
+    BarrierSet* bs = Universe::heap()->barrier_set();
+    switch (bs->kind()) {
+      case BarrierSet::G1SATBCT:
+      case BarrierSet::G1SATBCTLogging:
+        // With G1, don't generate the call if we statically know that the target is uninitialized
+        if (!uninitialized_target) {
+          __ pusha();                      // push registers
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
+                          start, count);
+          __ popa();
+        }
+        break;
+      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableExtension:
+      case BarrierSet::ModRef:
+        break;
+      default      :
+        ShouldNotReachHere();
+
+    }
+  }
+
+
+  //
+  // Generate a post-barrier for an array store
+  //
+  //     start    -  starting address
+  //     count    -  element count
+  //
+  // The two input registers are overwritten.
1.740 + // 1.741 + void gen_write_ref_array_post_barrier(Register start, Register count) { 1.742 + BarrierSet* bs = Universe::heap()->barrier_set(); 1.743 + assert_different_registers(start, count); 1.744 + switch (bs->kind()) { 1.745 + case BarrierSet::G1SATBCT: 1.746 + case BarrierSet::G1SATBCTLogging: 1.747 + { 1.748 + __ pusha(); // push registers 1.749 + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 1.750 + start, count); 1.751 + __ popa(); 1.752 + } 1.753 + break; 1.754 + 1.755 + case BarrierSet::CardTableModRef: 1.756 + case BarrierSet::CardTableExtension: 1.757 + { 1.758 + CardTableModRefBS* ct = (CardTableModRefBS*)bs; 1.759 + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1.760 + 1.761 + Label L_loop; 1.762 + const Register end = count; // elements count; end == start+count-1 1.763 + assert_different_registers(start, end); 1.764 + 1.765 + __ lea(end, Address(start, count, Address::times_ptr, -wordSize)); 1.766 + __ shrptr(start, CardTableModRefBS::card_shift); 1.767 + __ shrptr(end, CardTableModRefBS::card_shift); 1.768 + __ subptr(end, start); // end --> count 1.769 + __ BIND(L_loop); 1.770 + intptr_t disp = (intptr_t) ct->byte_map_base; 1.771 + Address cardtable(start, count, Address::times_1, disp); 1.772 + __ movb(cardtable, 0); 1.773 + __ decrement(count); 1.774 + __ jcc(Assembler::greaterEqual, L_loop); 1.775 + } 1.776 + break; 1.777 + case BarrierSet::ModRef: 1.778 + break; 1.779 + default : 1.780 + ShouldNotReachHere(); 1.781 + 1.782 + } 1.783 + } 1.784 + 1.785 + 1.786 + // Copy 64 bytes chunks 1.787 + // 1.788 + // Inputs: 1.789 + // from - source array address 1.790 + // to_from - destination array address - from 1.791 + // qword_count - 8-bytes element count, negative 1.792 + // 1.793 + void xmm_copy_forward(Register from, Register to_from, Register qword_count) { 1.794 + assert( UseSSE >= 2, "supported cpu only" ); 1.795 + Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; 1.796 + // Copy 64-byte chunks 1.797 + __ jmpb(L_copy_64_bytes); 1.798 + __ align(OptoLoopAlignment); 1.799 + __ BIND(L_copy_64_bytes_loop); 1.800 + 1.801 + if (UseUnalignedLoadStores) { 1.802 + if (UseAVX >= 2) { 1.803 + __ vmovdqu(xmm0, Address(from, 0)); 1.804 + __ vmovdqu(Address(from, to_from, Address::times_1, 0), xmm0); 1.805 + __ vmovdqu(xmm1, Address(from, 32)); 1.806 + __ vmovdqu(Address(from, to_from, Address::times_1, 32), xmm1); 1.807 + } else { 1.808 + __ movdqu(xmm0, Address(from, 0)); 1.809 + __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0); 1.810 + __ movdqu(xmm1, Address(from, 16)); 1.811 + __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1); 1.812 + __ movdqu(xmm2, Address(from, 32)); 1.813 + __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2); 1.814 + __ movdqu(xmm3, Address(from, 48)); 1.815 + __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3); 1.816 + } 1.817 + } else { 1.818 + __ movq(xmm0, Address(from, 0)); 1.819 + __ movq(Address(from, to_from, Address::times_1, 0), xmm0); 1.820 + __ movq(xmm1, Address(from, 8)); 1.821 + __ movq(Address(from, to_from, Address::times_1, 8), xmm1); 1.822 + __ movq(xmm2, Address(from, 16)); 1.823 + __ movq(Address(from, to_from, Address::times_1, 16), xmm2); 1.824 + __ movq(xmm3, Address(from, 24)); 1.825 + __ movq(Address(from, to_from, Address::times_1, 24), xmm3); 1.826 + __ movq(xmm4, Address(from, 32)); 1.827 + __ movq(Address(from, to_from, Address::times_1, 32), xmm4); 1.828 + __ movq(xmm5, Address(from, 40)); 
1.829 + __ movq(Address(from, to_from, Address::times_1, 40), xmm5); 1.830 + __ movq(xmm6, Address(from, 48)); 1.831 + __ movq(Address(from, to_from, Address::times_1, 48), xmm6); 1.832 + __ movq(xmm7, Address(from, 56)); 1.833 + __ movq(Address(from, to_from, Address::times_1, 56), xmm7); 1.834 + } 1.835 + 1.836 + __ addl(from, 64); 1.837 + __ BIND(L_copy_64_bytes); 1.838 + __ subl(qword_count, 8); 1.839 + __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); 1.840 + 1.841 + if (UseUnalignedLoadStores && (UseAVX >= 2)) { 1.842 + // clean upper bits of YMM registers 1.843 + __ vzeroupper(); 1.844 + } 1.845 + __ addl(qword_count, 8); 1.846 + __ jccb(Assembler::zero, L_exit); 1.847 + // 1.848 + // length is too short, just copy qwords 1.849 + // 1.850 + __ BIND(L_copy_8_bytes); 1.851 + __ movq(xmm0, Address(from, 0)); 1.852 + __ movq(Address(from, to_from, Address::times_1), xmm0); 1.853 + __ addl(from, 8); 1.854 + __ decrement(qword_count); 1.855 + __ jcc(Assembler::greater, L_copy_8_bytes); 1.856 + __ BIND(L_exit); 1.857 + } 1.858 + 1.859 + // Copy 64 bytes chunks 1.860 + // 1.861 + // Inputs: 1.862 + // from - source array address 1.863 + // to_from - destination array address - from 1.864 + // qword_count - 8-bytes element count, negative 1.865 + // 1.866 + void mmx_copy_forward(Register from, Register to_from, Register qword_count) { 1.867 + assert( VM_Version::supports_mmx(), "supported cpu only" ); 1.868 + Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; 1.869 + // Copy 64-byte chunks 1.870 + __ jmpb(L_copy_64_bytes); 1.871 + __ align(OptoLoopAlignment); 1.872 + __ BIND(L_copy_64_bytes_loop); 1.873 + __ movq(mmx0, Address(from, 0)); 1.874 + __ movq(mmx1, Address(from, 8)); 1.875 + __ movq(mmx2, Address(from, 16)); 1.876 + __ movq(Address(from, to_from, Address::times_1, 0), mmx0); 1.877 + __ movq(mmx3, Address(from, 24)); 1.878 + __ movq(Address(from, to_from, Address::times_1, 8), mmx1); 1.879 + __ movq(mmx4, Address(from, 32)); 1.880 + __ movq(Address(from, to_from, Address::times_1, 16), mmx2); 1.881 + __ movq(mmx5, Address(from, 40)); 1.882 + __ movq(Address(from, to_from, Address::times_1, 24), mmx3); 1.883 + __ movq(mmx6, Address(from, 48)); 1.884 + __ movq(Address(from, to_from, Address::times_1, 32), mmx4); 1.885 + __ movq(mmx7, Address(from, 56)); 1.886 + __ movq(Address(from, to_from, Address::times_1, 40), mmx5); 1.887 + __ movq(Address(from, to_from, Address::times_1, 48), mmx6); 1.888 + __ movq(Address(from, to_from, Address::times_1, 56), mmx7); 1.889 + __ addptr(from, 64); 1.890 + __ BIND(L_copy_64_bytes); 1.891 + __ subl(qword_count, 8); 1.892 + __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); 1.893 + __ addl(qword_count, 8); 1.894 + __ jccb(Assembler::zero, L_exit); 1.895 + // 1.896 + // length is too short, just copy qwords 1.897 + // 1.898 + __ BIND(L_copy_8_bytes); 1.899 + __ movq(mmx0, Address(from, 0)); 1.900 + __ movq(Address(from, to_from, Address::times_1), mmx0); 1.901 + __ addptr(from, 8); 1.902 + __ decrement(qword_count); 1.903 + __ jcc(Assembler::greater, L_copy_8_bytes); 1.904 + __ BIND(L_exit); 1.905 + __ emms(); 1.906 + } 1.907 + 1.908 + address generate_disjoint_copy(BasicType t, bool aligned, 1.909 + Address::ScaleFactor sf, 1.910 + address* entry, const char *name, 1.911 + bool dest_uninitialized = false) { 1.912 + __ align(CodeEntryAlignment); 1.913 + StubCodeMark mark(this, "StubRoutines", name); 1.914 + address start = __ pc(); 1.915 + 1.916 + Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; 1.917 + 
Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes; 1.918 + 1.919 + int shift = Address::times_ptr - sf; 1.920 + 1.921 + const Register from = rsi; // source array address 1.922 + const Register to = rdi; // destination array address 1.923 + const Register count = rcx; // elements count 1.924 + const Register to_from = to; // (to - from) 1.925 + const Register saved_to = rdx; // saved destination array address 1.926 + 1.927 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.928 + __ push(rsi); 1.929 + __ push(rdi); 1.930 + __ movptr(from , Address(rsp, 12+ 4)); 1.931 + __ movptr(to , Address(rsp, 12+ 8)); 1.932 + __ movl(count, Address(rsp, 12+ 12)); 1.933 + 1.934 + if (entry != NULL) { 1.935 + *entry = __ pc(); // Entry point from conjoint arraycopy stub. 1.936 + BLOCK_COMMENT("Entry:"); 1.937 + } 1.938 + 1.939 + if (t == T_OBJECT) { 1.940 + __ testl(count, count); 1.941 + __ jcc(Assembler::zero, L_0_count); 1.942 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 1.943 + __ mov(saved_to, to); // save 'to' 1.944 + } 1.945 + 1.946 + __ subptr(to, from); // to --> to_from 1.947 + __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 1.948 + __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp 1.949 + if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 1.950 + // align source address at 4 bytes address boundary 1.951 + if (t == T_BYTE) { 1.952 + // One byte misalignment happens only for byte arrays 1.953 + __ testl(from, 1); 1.954 + __ jccb(Assembler::zero, L_skip_align1); 1.955 + __ movb(rax, Address(from, 0)); 1.956 + __ movb(Address(from, to_from, Address::times_1, 0), rax); 1.957 + __ increment(from); 1.958 + __ decrement(count); 1.959 + __ BIND(L_skip_align1); 1.960 + } 1.961 + // Two bytes misalignment happens only for byte and short (char) arrays 1.962 + __ testl(from, 2); 1.963 + __ jccb(Assembler::zero, L_skip_align2); 1.964 + __ movw(rax, Address(from, 0)); 1.965 + __ movw(Address(from, to_from, Address::times_1, 0), rax); 1.966 + __ addptr(from, 2); 1.967 + __ subl(count, 1<<(shift-1)); 1.968 + __ BIND(L_skip_align2); 1.969 + } 1.970 + if (!VM_Version::supports_mmx()) { 1.971 + __ mov(rax, count); // save 'count' 1.972 + __ shrl(count, shift); // bytes count 1.973 + __ addptr(to_from, from);// restore 'to' 1.974 + __ rep_mov(); 1.975 + __ subptr(to_from, from);// restore 'to_from' 1.976 + __ mov(count, rax); // restore 'count' 1.977 + __ jmpb(L_copy_2_bytes); // all dwords were copied 1.978 + } else { 1.979 + if (!UseUnalignedLoadStores) { 1.980 + // align to 8 bytes, we know we are 4 byte aligned to start 1.981 + __ testptr(from, 4); 1.982 + __ jccb(Assembler::zero, L_copy_64_bytes); 1.983 + __ movl(rax, Address(from, 0)); 1.984 + __ movl(Address(from, to_from, Address::times_1, 0), rax); 1.985 + __ addptr(from, 4); 1.986 + __ subl(count, 1<<shift); 1.987 + } 1.988 + __ BIND(L_copy_64_bytes); 1.989 + __ mov(rax, count); 1.990 + __ shrl(rax, shift+1); // 8 bytes chunk count 1.991 + // 1.992 + // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop 1.993 + // 1.994 + if (UseXMMForArrayCopy) { 1.995 + xmm_copy_forward(from, to_from, rax); 1.996 + } else { 1.997 + mmx_copy_forward(from, to_from, rax); 1.998 + } 1.999 + } 1.1000 + // copy tailing dword 1.1001 + __ BIND(L_copy_4_bytes); 1.1002 + __ testl(count, 1<<shift); 1.1003 + __ jccb(Assembler::zero, L_copy_2_bytes); 1.1004 + __ movl(rax, Address(from, 0)); 1.1005 + __ movl(Address(from, to_from, Address::times_1, 0), rax); 
+    if (t == T_BYTE || t == T_SHORT) {
+      __ addptr(from, 4);
+      __ BIND(L_copy_2_bytes);
+      // copy tailing word
+      __ testl(count, 1<<(shift-1));
+      __ jccb(Assembler::zero, L_copy_byte);
+      __ movw(rax, Address(from, 0));
+      __ movw(Address(from, to_from, Address::times_1, 0), rax);
+      if (t == T_BYTE) {
+        __ addptr(from, 2);
+        __ BIND(L_copy_byte);
+        // copy tailing byte
+        __ testl(count, 1);
+        __ jccb(Assembler::zero, L_exit);
+        __ movb(rax, Address(from, 0));
+        __ movb(Address(from, to_from, Address::times_1, 0), rax);
+        __ BIND(L_exit);
+      } else {
+        __ BIND(L_copy_byte);
+      }
+    } else {
+      __ BIND(L_copy_2_bytes);
+    }
+
+    if (t == T_OBJECT) {
+      __ movl(count, Address(rsp, 12+12)); // reread 'count'
+      __ mov(to, saved_to);                // restore 'to'
+      gen_write_ref_array_post_barrier(to, count);
+      __ BIND(L_0_count);
+    }
+    inc_copy_counter_np(t);
+    __ pop(rdi);
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ xorptr(rax, rax); // return 0
+    __ ret(0);
+    return start;
+  }
+
+
+  address generate_fill(BasicType t, bool aligned, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    BLOCK_COMMENT("Entry:");
+
+    const Register to       = rdi;  // destination array address
+    const Register value    = rdx;  // value
+    const Register count    = rsi;  // elements count
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ push(rdi);
+    __ movptr(to   , Address(rsp, 12+ 4));
+    __ movl(value, Address(rsp, 12+ 8));
+    __ movl(count, Address(rsp, 12+ 12));
+
+    __ generate_fill(t, aligned, to, value, count, rax, xmm0);
+
+    __ pop(rdi);
+    __ pop(rsi);
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+    return start;
+  }
+
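+  // Note: the fill stub above only marshals (to, value, count) from the incoming
+  // stack arguments; the actual fill loop is emitted by MacroAssembler::generate_fill().
+  // These stubs typically back the compiler's array-fill fast paths (Arrays.fill-style loops).
+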
+  address generate_conjoint_copy(BasicType t, bool aligned,
+                                 Address::ScaleFactor sf,
+                                 address nooverlap_target,
+                                 address* entry, const char *name,
+                                 bool dest_uninitialized = false) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
+    Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;
+
+    int shift = Address::times_ptr - sf;
+
+    const Register src   = rax;  // source array address
+    const Register dst   = rdx;  // destination array address
+    const Register from  = rsi;  // source array address
+    const Register to    = rdi;  // destination array address
+    const Register count = rcx;  // elements count
+    const Register end   = rax;  // array end address
+
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+    __ push(rsi);
+    __ push(rdi);
+    __ movptr(src , Address(rsp, 12+ 4));   // from
+    __ movptr(dst , Address(rsp, 12+ 8));   // to
+    __ movl2ptr(count, Address(rsp, 12+12)); // count
+
+    if (entry != NULL) {
+      *entry = __ pc(); // Entry point from generic arraycopy stub.
+      BLOCK_COMMENT("Entry:");
+    }
+
+    // nooverlap_target expects arguments in rsi and rdi.
+    __ mov(from, src);
+    __ mov(to  , dst);
+
+    // arrays overlap test: dispatch to disjoint stub if necessary.
+    RuntimeAddress nooverlap(nooverlap_target);
+    __ cmpptr(dst, src);
+    __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
+    __ jump_cc(Assembler::belowEqual, nooverlap);
+    __ cmpptr(dst, end);
+    __ jump_cc(Assembler::aboveEqual, nooverlap);
+
+    if (t == T_OBJECT) {
+      __ testl(count, count);
+      __ jcc(Assembler::zero, L_0_count);
+      gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized);
+    }
+
+    // copy from high to low
+    __ cmpl(count, 2<<shift);      // Short arrays (< 8 bytes) copy by element
+    __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
+    if (t == T_BYTE || t == T_SHORT) {
+      // Align the end of destination array at 4 bytes address boundary
+      __ lea(end, Address(dst, count, sf, 0));
+      if (t == T_BYTE) {
+        // One byte misalignment happens only for byte arrays
+        __ testl(end, 1);
+        __ jccb(Assembler::zero, L_skip_align1);
+        __ decrement(count);
+        __ movb(rdx, Address(from, count, sf, 0));
+        __ movb(Address(to, count, sf, 0), rdx);
+        __ BIND(L_skip_align1);
+      }
+      // Two bytes misalignment happens only for byte and short (char) arrays
+      __ testl(end, 2);
+      __ jccb(Assembler::zero, L_skip_align2);
+      __ subptr(count, 1<<(shift-1));
+      __ movw(rdx, Address(from, count, sf, 0));
+      __ movw(Address(to, count, sf, 0), rdx);
+      __ BIND(L_skip_align2);
+      __ cmpl(count, 2<<shift);      // Short arrays (< 8 bytes) copy by element
+      __ jcc(Assembler::below, L_copy_4_bytes);
+    }
+
+    if (!VM_Version::supports_mmx()) {
+      __ std();
+      __ mov(rax, count); // Save 'count'
+      __ mov(rdx, to);    // Save 'to'
+      __ lea(rsi, Address(from, count, sf, -4));
+      __ lea(rdi, Address(to  , count, sf, -4));
+      __ shrptr(count, shift); // bytes count
+      __ rep_mov();
+      __ cld();
+      __ mov(count, rax); // restore 'count'
+      __ andl(count, (1<<shift)-1); // mask the number of rest elements
+      __ movptr(from, Address(rsp, 12+4)); // reread 'from'
+      __ mov(to, rdx); // restore 'to'
+      __ jmpb(L_copy_2_bytes); // all dwords were copied
+    } else {
+      // Align to 8 bytes the end of array. It is aligned to 4 bytes already.
1.1166 + __ testptr(end, 4); 1.1167 + __ jccb(Assembler::zero, L_copy_8_bytes); 1.1168 + __ subl(count, 1<<shift); 1.1169 + __ movl(rdx, Address(from, count, sf, 0)); 1.1170 + __ movl(Address(to, count, sf, 0), rdx); 1.1171 + __ jmpb(L_copy_8_bytes); 1.1172 + 1.1173 + __ align(OptoLoopAlignment); 1.1174 + // Move 8 bytes 1.1175 + __ BIND(L_copy_8_bytes_loop); 1.1176 + if (UseXMMForArrayCopy) { 1.1177 + __ movq(xmm0, Address(from, count, sf, 0)); 1.1178 + __ movq(Address(to, count, sf, 0), xmm0); 1.1179 + } else { 1.1180 + __ movq(mmx0, Address(from, count, sf, 0)); 1.1181 + __ movq(Address(to, count, sf, 0), mmx0); 1.1182 + } 1.1183 + __ BIND(L_copy_8_bytes); 1.1184 + __ subl(count, 2<<shift); 1.1185 + __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); 1.1186 + __ addl(count, 2<<shift); 1.1187 + if (!UseXMMForArrayCopy) { 1.1188 + __ emms(); 1.1189 + } 1.1190 + } 1.1191 + __ BIND(L_copy_4_bytes); 1.1192 + // copy prefix qword 1.1193 + __ testl(count, 1<<shift); 1.1194 + __ jccb(Assembler::zero, L_copy_2_bytes); 1.1195 + __ movl(rdx, Address(from, count, sf, -4)); 1.1196 + __ movl(Address(to, count, sf, -4), rdx); 1.1197 + 1.1198 + if (t == T_BYTE || t == T_SHORT) { 1.1199 + __ subl(count, (1<<shift)); 1.1200 + __ BIND(L_copy_2_bytes); 1.1201 + // copy prefix dword 1.1202 + __ testl(count, 1<<(shift-1)); 1.1203 + __ jccb(Assembler::zero, L_copy_byte); 1.1204 + __ movw(rdx, Address(from, count, sf, -2)); 1.1205 + __ movw(Address(to, count, sf, -2), rdx); 1.1206 + if (t == T_BYTE) { 1.1207 + __ subl(count, 1<<(shift-1)); 1.1208 + __ BIND(L_copy_byte); 1.1209 + // copy prefix byte 1.1210 + __ testl(count, 1); 1.1211 + __ jccb(Assembler::zero, L_exit); 1.1212 + __ movb(rdx, Address(from, 0)); 1.1213 + __ movb(Address(to, 0), rdx); 1.1214 + __ BIND(L_exit); 1.1215 + } else { 1.1216 + __ BIND(L_copy_byte); 1.1217 + } 1.1218 + } else { 1.1219 + __ BIND(L_copy_2_bytes); 1.1220 + } 1.1221 + if (t == T_OBJECT) { 1.1222 + __ movl2ptr(count, Address(rsp, 12+12)); // reread count 1.1223 + gen_write_ref_array_post_barrier(to, count); 1.1224 + __ BIND(L_0_count); 1.1225 + } 1.1226 + inc_copy_counter_np(t); 1.1227 + __ pop(rdi); 1.1228 + __ pop(rsi); 1.1229 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.1230 + __ xorptr(rax, rax); // return 0 1.1231 + __ ret(0); 1.1232 + return start; 1.1233 + } 1.1234 + 1.1235 + 1.1236 + address generate_disjoint_long_copy(address* entry, const char *name) { 1.1237 + __ align(CodeEntryAlignment); 1.1238 + StubCodeMark mark(this, "StubRoutines", name); 1.1239 + address start = __ pc(); 1.1240 + 1.1241 + Label L_copy_8_bytes, L_copy_8_bytes_loop; 1.1242 + const Register from = rax; // source array address 1.1243 + const Register to = rdx; // destination array address 1.1244 + const Register count = rcx; // elements count 1.1245 + const Register to_from = rdx; // (to - from) 1.1246 + 1.1247 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.1248 + __ movptr(from , Address(rsp, 8+0)); // from 1.1249 + __ movptr(to , Address(rsp, 8+4)); // to 1.1250 + __ movl2ptr(count, Address(rsp, 8+8)); // count 1.1251 + 1.1252 + *entry = __ pc(); // Entry point from conjoint arraycopy stub. 
1.1253 + BLOCK_COMMENT("Entry:"); 1.1254 + 1.1255 + __ subptr(to, from); // to --> to_from 1.1256 + if (VM_Version::supports_mmx()) { 1.1257 + if (UseXMMForArrayCopy) { 1.1258 + xmm_copy_forward(from, to_from, count); 1.1259 + } else { 1.1260 + mmx_copy_forward(from, to_from, count); 1.1261 + } 1.1262 + } else { 1.1263 + __ jmpb(L_copy_8_bytes); 1.1264 + __ align(OptoLoopAlignment); 1.1265 + __ BIND(L_copy_8_bytes_loop); 1.1266 + __ fild_d(Address(from, 0)); 1.1267 + __ fistp_d(Address(from, to_from, Address::times_1)); 1.1268 + __ addptr(from, 8); 1.1269 + __ BIND(L_copy_8_bytes); 1.1270 + __ decrement(count); 1.1271 + __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); 1.1272 + } 1.1273 + inc_copy_counter_np(T_LONG); 1.1274 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.1275 + __ xorptr(rax, rax); // return 0 1.1276 + __ ret(0); 1.1277 + return start; 1.1278 + } 1.1279 + 1.1280 + address generate_conjoint_long_copy(address nooverlap_target, 1.1281 + address* entry, const char *name) { 1.1282 + __ align(CodeEntryAlignment); 1.1283 + StubCodeMark mark(this, "StubRoutines", name); 1.1284 + address start = __ pc(); 1.1285 + 1.1286 + Label L_copy_8_bytes, L_copy_8_bytes_loop; 1.1287 + const Register from = rax; // source array address 1.1288 + const Register to = rdx; // destination array address 1.1289 + const Register count = rcx; // elements count 1.1290 + const Register end_from = rax; // source array end address 1.1291 + 1.1292 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.1293 + __ movptr(from , Address(rsp, 8+0)); // from 1.1294 + __ movptr(to , Address(rsp, 8+4)); // to 1.1295 + __ movl2ptr(count, Address(rsp, 8+8)); // count 1.1296 + 1.1297 + *entry = __ pc(); // Entry point from generic arraycopy stub. 1.1298 + BLOCK_COMMENT("Entry:"); 1.1299 + 1.1300 + // arrays overlap test 1.1301 + __ cmpptr(to, from); 1.1302 + RuntimeAddress nooverlap(nooverlap_target); 1.1303 + __ jump_cc(Assembler::belowEqual, nooverlap); 1.1304 + __ lea(end_from, Address(from, count, Address::times_8, 0)); 1.1305 + __ cmpptr(to, end_from); 1.1306 + __ movptr(from, Address(rsp, 8)); // from 1.1307 + __ jump_cc(Assembler::aboveEqual, nooverlap); 1.1308 + 1.1309 + __ jmpb(L_copy_8_bytes); 1.1310 + 1.1311 + __ align(OptoLoopAlignment); 1.1312 + __ BIND(L_copy_8_bytes_loop); 1.1313 + if (VM_Version::supports_mmx()) { 1.1314 + if (UseXMMForArrayCopy) { 1.1315 + __ movq(xmm0, Address(from, count, Address::times_8)); 1.1316 + __ movq(Address(to, count, Address::times_8), xmm0); 1.1317 + } else { 1.1318 + __ movq(mmx0, Address(from, count, Address::times_8)); 1.1319 + __ movq(Address(to, count, Address::times_8), mmx0); 1.1320 + } 1.1321 + } else { 1.1322 + __ fild_d(Address(from, count, Address::times_8)); 1.1323 + __ fistp_d(Address(to, count, Address::times_8)); 1.1324 + } 1.1325 + __ BIND(L_copy_8_bytes); 1.1326 + __ decrement(count); 1.1327 + __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop); 1.1328 + 1.1329 + if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) { 1.1330 + __ emms(); 1.1331 + } 1.1332 + inc_copy_counter_np(T_LONG); 1.1333 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.1334 + __ xorptr(rax, rax); // return 0 1.1335 + __ ret(0); 1.1336 + return start; 1.1337 + } 1.1338 + 1.1339 + 1.1340 + // Helper for generating a dynamic type check. 1.1341 + // The sub_klass must be one of {rbx, rdx, rsi}. 1.1342 + // The temp is killed. 
1.1343 + void generate_type_check(Register sub_klass, 1.1344 + Address& super_check_offset_addr, 1.1345 + Address& super_klass_addr, 1.1346 + Register temp, 1.1347 + Label* L_success, Label* L_failure) { 1.1348 + BLOCK_COMMENT("type_check:"); 1.1349 + 1.1350 + Label L_fallthrough; 1.1351 +#define LOCAL_JCC(assembler_con, label_ptr) \ 1.1352 + if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \ 1.1353 + else __ jcc(assembler_con, L_fallthrough) /*omit semi*/ 1.1354 + 1.1355 + // The following is a strange variation of the fast path which requires 1.1356 + // one less register, because needed values are on the argument stack. 1.1357 + // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp, 1.1358 + // L_success, L_failure, NULL); 1.1359 + assert_different_registers(sub_klass, temp); 1.1360 + 1.1361 + int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1.1362 + 1.1363 + // if the pointers are equal, we are done (e.g., String[] elements) 1.1364 + __ cmpptr(sub_klass, super_klass_addr); 1.1365 + LOCAL_JCC(Assembler::equal, L_success); 1.1366 + 1.1367 + // check the supertype display: 1.1368 + __ movl2ptr(temp, super_check_offset_addr); 1.1369 + Address super_check_addr(sub_klass, temp, Address::times_1, 0); 1.1370 + __ movptr(temp, super_check_addr); // load displayed supertype 1.1371 + __ cmpptr(temp, super_klass_addr); // test the super type 1.1372 + LOCAL_JCC(Assembler::equal, L_success); 1.1373 + 1.1374 + // if it was a primary super, we can just fail immediately 1.1375 + __ cmpl(super_check_offset_addr, sc_offset); 1.1376 + LOCAL_JCC(Assembler::notEqual, L_failure); 1.1377 + 1.1378 + // The repne_scan instruction uses fixed registers, which will get spilled. 1.1379 + // We happen to know this works best when super_klass is in rax. 
1.1380 + Register super_klass = temp; 1.1381 + __ movptr(super_klass, super_klass_addr); 1.1382 + __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, 1.1383 + L_success, L_failure); 1.1384 + 1.1385 + __ bind(L_fallthrough); 1.1386 + 1.1387 + if (L_success == NULL) { BLOCK_COMMENT("L_success:"); } 1.1388 + if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); } 1.1389 + 1.1390 +#undef LOCAL_JCC 1.1391 + } 1.1392 + 1.1393 + // 1.1394 + // Generate checkcasting array copy stub 1.1395 + // 1.1396 + // Input: 1.1397 + // 4(rsp) - source array address 1.1398 + // 8(rsp) - destination array address 1.1399 + // 12(rsp) - element count, can be zero 1.1400 + // 16(rsp) - size_t ckoff (super_check_offset) 1.1401 + // 20(rsp) - oop ckval (super_klass) 1.1402 + // 1.1403 + // Output: 1.1404 + // rax, == 0 - success 1.1405 + // rax, == -1^K - failure, where K is partial transfer count 1.1406 + // 1.1407 + address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) { 1.1408 + __ align(CodeEntryAlignment); 1.1409 + StubCodeMark mark(this, "StubRoutines", name); 1.1410 + address start = __ pc(); 1.1411 + 1.1412 + Label L_load_element, L_store_element, L_do_card_marks, L_done; 1.1413 + 1.1414 + // register use: 1.1415 + // rax, rdx, rcx -- loop control (end_from, end_to, count) 1.1416 + // rdi, rsi -- element access (oop, klass) 1.1417 + // rbx, -- temp 1.1418 + const Register from = rax; // source array address 1.1419 + const Register to = rdx; // destination array address 1.1420 + const Register length = rcx; // elements count 1.1421 + const Register elem = rdi; // each oop copied 1.1422 + const Register elem_klass = rsi; // each elem._klass (sub_klass) 1.1423 + const Register temp = rbx; // lone remaining temp 1.1424 + 1.1425 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.1426 + 1.1427 + __ push(rsi); 1.1428 + __ push(rdi); 1.1429 + __ push(rbx); 1.1430 + 1.1431 + Address from_arg(rsp, 16+ 4); // from 1.1432 + Address to_arg(rsp, 16+ 8); // to 1.1433 + Address length_arg(rsp, 16+12); // elements count 1.1434 + Address ckoff_arg(rsp, 16+16); // super_check_offset 1.1435 + Address ckval_arg(rsp, 16+20); // super_klass 1.1436 + 1.1437 + // Load up: 1.1438 + __ movptr(from, from_arg); 1.1439 + __ movptr(to, to_arg); 1.1440 + __ movl2ptr(length, length_arg); 1.1441 + 1.1442 + if (entry != NULL) { 1.1443 + *entry = __ pc(); // Entry point from generic arraycopy stub. 1.1444 + BLOCK_COMMENT("Entry:"); 1.1445 + } 1.1446 + 1.1447 + //--------------------------------------------------------------- 1.1448 + // Assembler stub will be used for this call to arraycopy 1.1449 + // if the two arrays are subtypes of Object[] but the 1.1450 + // destination array type is not equal to or a supertype 1.1451 + // of the source type. Each element must be separately 1.1452 + // checked. 1.1453 + 1.1454 + // Loop-invariant addresses. They are exclusive end pointers. 1.1455 + Address end_from_addr(from, length, Address::times_ptr, 0); 1.1456 + Address end_to_addr(to, length, Address::times_ptr, 0); 1.1457 + 1.1458 + Register end_from = from; // re-use 1.1459 + Register end_to = to; // re-use 1.1460 + Register count = length; // re-use 1.1461 + 1.1462 + // Loop-variant addresses. They assume post-incremented count < 0. 
1.1463 + Address from_element_addr(end_from, count, Address::times_ptr, 0); 1.1464 + Address to_element_addr(end_to, count, Address::times_ptr, 0); 1.1465 + Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); 1.1466 + 1.1467 + // Copy from low to high addresses, indexed from the end of each array. 1.1468 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 1.1469 + __ lea(end_from, end_from_addr); 1.1470 + __ lea(end_to, end_to_addr); 1.1471 + assert(length == count, ""); // else fix next line: 1.1472 + __ negptr(count); // negate and test the length 1.1473 + __ jccb(Assembler::notZero, L_load_element); 1.1474 + 1.1475 + // Empty array: Nothing to do. 1.1476 + __ xorptr(rax, rax); // return 0 on (trivial) success 1.1477 + __ jmp(L_done); 1.1478 + 1.1479 + // ======== begin loop ======== 1.1480 + // (Loop is rotated; its entry is L_load_element.) 1.1481 + // Loop control: 1.1482 + // for (count = -count; count != 0; count++) 1.1483 + // Base pointers src, dst are biased by 8*count,to last element. 1.1484 + __ align(OptoLoopAlignment); 1.1485 + 1.1486 + __ BIND(L_store_element); 1.1487 + __ movptr(to_element_addr, elem); // store the oop 1.1488 + __ increment(count); // increment the count toward zero 1.1489 + __ jccb(Assembler::zero, L_do_card_marks); 1.1490 + 1.1491 + // ======== loop entry is here ======== 1.1492 + __ BIND(L_load_element); 1.1493 + __ movptr(elem, from_element_addr); // load the oop 1.1494 + __ testptr(elem, elem); 1.1495 + __ jccb(Assembler::zero, L_store_element); 1.1496 + 1.1497 + // (Could do a trick here: Remember last successful non-null 1.1498 + // element stored and make a quick oop equality check on it.) 1.1499 + 1.1500 + __ movptr(elem_klass, elem_klass_addr); // query the object klass 1.1501 + generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp, 1.1502 + &L_store_element, NULL); 1.1503 + // (On fall-through, we have failed the element type check.) 1.1504 + // ======== end loop ======== 1.1505 + 1.1506 + // It was a real error; we must depend on the caller to finish the job. 1.1507 + // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops. 1.1508 + // Emit GC store barriers for the oops we have copied (length_arg + count), 1.1509 + // and report their number to the caller. 1.1510 + assert_different_registers(to, count, rax); 1.1511 + Label L_post_barrier; 1.1512 + __ addl(count, length_arg); // transfers = (length - remaining) 1.1513 + __ movl2ptr(rax, count); // save the value 1.1514 + __ notptr(rax); // report (-1^K) to caller (does not affect flags) 1.1515 + __ jccb(Assembler::notZero, L_post_barrier); 1.1516 + __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 1.1517 + 1.1518 + // Come here on success only. 1.1519 + __ BIND(L_do_card_marks); 1.1520 + __ xorptr(rax, rax); // return 0 on success 1.1521 + __ movl2ptr(count, length_arg); 1.1522 + 1.1523 + __ BIND(L_post_barrier); 1.1524 + __ movptr(to, to_arg); // reload 1.1525 + gen_write_ref_array_post_barrier(to, count); 1.1526 + 1.1527 + // Common exit point (success or failure). 
1.1528 + __ BIND(L_done); 1.1529 + __ pop(rbx); 1.1530 + __ pop(rdi); 1.1531 + __ pop(rsi); 1.1532 + inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); 1.1533 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.1534 + __ ret(0); 1.1535 + 1.1536 + return start; 1.1537 + } 1.1538 + 1.1539 + // 1.1540 + // Generate 'unsafe' array copy stub 1.1541 + // Though just as safe as the other stubs, it takes an unscaled 1.1542 + // size_t argument instead of an element count. 1.1543 + // 1.1544 + // Input: 1.1545 + // 4(rsp) - source array address 1.1546 + // 8(rsp) - destination array address 1.1547 + // 12(rsp) - byte count, can be zero 1.1548 + // 1.1549 + // Output: 1.1550 + // rax, == 0 - success 1.1551 + // rax, == -1 - need to call System.arraycopy 1.1552 + // 1.1553 + // Examines the alignment of the operands and dispatches 1.1554 + // to a long, int, short, or byte copy loop. 1.1555 + // 1.1556 + address generate_unsafe_copy(const char *name, 1.1557 + address byte_copy_entry, 1.1558 + address short_copy_entry, 1.1559 + address int_copy_entry, 1.1560 + address long_copy_entry) { 1.1561 + 1.1562 + Label L_long_aligned, L_int_aligned, L_short_aligned; 1.1563 + 1.1564 + __ align(CodeEntryAlignment); 1.1565 + StubCodeMark mark(this, "StubRoutines", name); 1.1566 + address start = __ pc(); 1.1567 + 1.1568 + const Register from = rax; // source array address 1.1569 + const Register to = rdx; // destination array address 1.1570 + const Register count = rcx; // elements count 1.1571 + 1.1572 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.1573 + __ push(rsi); 1.1574 + __ push(rdi); 1.1575 + Address from_arg(rsp, 12+ 4); // from 1.1576 + Address to_arg(rsp, 12+ 8); // to 1.1577 + Address count_arg(rsp, 12+12); // byte count 1.1578 + 1.1579 + // Load up: 1.1580 + __ movptr(from , from_arg); 1.1581 + __ movptr(to , to_arg); 1.1582 + __ movl2ptr(count, count_arg); 1.1583 + 1.1584 + // bump this on entry, not on exit: 1.1585 + inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 1.1586 + 1.1587 + const Register bits = rsi; 1.1588 + __ mov(bits, from); 1.1589 + __ orptr(bits, to); 1.1590 + __ orptr(bits, count); 1.1591 + 1.1592 + __ testl(bits, BytesPerLong-1); 1.1593 + __ jccb(Assembler::zero, L_long_aligned); 1.1594 + 1.1595 + __ testl(bits, BytesPerInt-1); 1.1596 + __ jccb(Assembler::zero, L_int_aligned); 1.1597 + 1.1598 + __ testl(bits, BytesPerShort-1); 1.1599 + __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 1.1600 + 1.1601 + __ BIND(L_short_aligned); 1.1602 + __ shrptr(count, LogBytesPerShort); // size => short_count 1.1603 + __ movl(count_arg, count); // update 'count' 1.1604 + __ jump(RuntimeAddress(short_copy_entry)); 1.1605 + 1.1606 + __ BIND(L_int_aligned); 1.1607 + __ shrptr(count, LogBytesPerInt); // size => int_count 1.1608 + __ movl(count_arg, count); // update 'count' 1.1609 + __ jump(RuntimeAddress(int_copy_entry)); 1.1610 + 1.1611 + __ BIND(L_long_aligned); 1.1612 + __ shrptr(count, LogBytesPerLong); // size => qword_count 1.1613 + __ movl(count_arg, count); // update 'count' 1.1614 + __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. 1.1615 + __ pop(rsi); 1.1616 + __ jump(RuntimeAddress(long_copy_entry)); 1.1617 + 1.1618 + return start; 1.1619 + } 1.1620 + 1.1621 + 1.1622 + // Perform range checks on the proposed arraycopy. 1.1623 + // Smashes src_pos and dst_pos. (Uses them up for temps.) 
1.1624 + void arraycopy_range_checks(Register src, 1.1625 + Register src_pos, 1.1626 + Register dst, 1.1627 + Register dst_pos, 1.1628 + Address& length, 1.1629 + Label& L_failed) { 1.1630 + BLOCK_COMMENT("arraycopy_range_checks:"); 1.1631 + const Register src_end = src_pos; // source array end position 1.1632 + const Register dst_end = dst_pos; // destination array end position 1.1633 + __ addl(src_end, length); // src_pos + length 1.1634 + __ addl(dst_end, length); // dst_pos + length 1.1635 + 1.1636 + // if (src_pos + length > arrayOop(src)->length() ) FAIL; 1.1637 + __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes())); 1.1638 + __ jcc(Assembler::above, L_failed); 1.1639 + 1.1640 + // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 1.1641 + __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes())); 1.1642 + __ jcc(Assembler::above, L_failed); 1.1643 + 1.1644 + BLOCK_COMMENT("arraycopy_range_checks done"); 1.1645 + } 1.1646 + 1.1647 + 1.1648 + // 1.1649 + // Generate generic array copy stubs 1.1650 + // 1.1651 + // Input: 1.1652 + // 4(rsp) - src oop 1.1653 + // 8(rsp) - src_pos 1.1654 + // 12(rsp) - dst oop 1.1655 + // 16(rsp) - dst_pos 1.1656 + // 20(rsp) - element count 1.1657 + // 1.1658 + // Output: 1.1659 + // rax, == 0 - success 1.1660 + // rax, == -1^K - failure, where K is partial transfer count 1.1661 + // 1.1662 + address generate_generic_copy(const char *name, 1.1663 + address entry_jbyte_arraycopy, 1.1664 + address entry_jshort_arraycopy, 1.1665 + address entry_jint_arraycopy, 1.1666 + address entry_oop_arraycopy, 1.1667 + address entry_jlong_arraycopy, 1.1668 + address entry_checkcast_arraycopy) { 1.1669 + Label L_failed, L_failed_0, L_objArray; 1.1670 + 1.1671 + { int modulus = CodeEntryAlignment; 1.1672 + int target = modulus - 5; // 5 = sizeof jmp(L_failed) 1.1673 + int advance = target - (__ offset() % modulus); 1.1674 + if (advance < 0) advance += modulus; 1.1675 + if (advance > 0) __ nop(advance); 1.1676 + } 1.1677 + StubCodeMark mark(this, "StubRoutines", name); 1.1678 + 1.1679 + // Short-hop target to L_failed. Makes for denser prologue code. 1.1680 + __ BIND(L_failed_0); 1.1681 + __ jmp(L_failed); 1.1682 + assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 1.1683 + 1.1684 + __ align(CodeEntryAlignment); 1.1685 + address start = __ pc(); 1.1686 + 1.1687 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.1688 + __ push(rsi); 1.1689 + __ push(rdi); 1.1690 + 1.1691 + // bump this on entry, not on exit: 1.1692 + inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 1.1693 + 1.1694 + // Input values 1.1695 + Address SRC (rsp, 12+ 4); 1.1696 + Address SRC_POS (rsp, 12+ 8); 1.1697 + Address DST (rsp, 12+12); 1.1698 + Address DST_POS (rsp, 12+16); 1.1699 + Address LENGTH (rsp, 12+20); 1.1700 + 1.1701 + //----------------------------------------------------------------------- 1.1702 + // Assembler stub will be used for this call to arraycopy 1.1703 + // if the following conditions are met: 1.1704 + // 1.1705 + // (1) src and dst must not be null. 1.1706 + // (2) src_pos must not be negative. 1.1707 + // (3) dst_pos must not be negative. 1.1708 + // (4) length must not be negative. 1.1709 + // (5) src klass and dst klass should be the same and not NULL. 1.1710 + // (6) src and dst should be arrays. 1.1711 + // (7) src_pos + length must not exceed length of src. 1.1712 + // (8) dst_pos + length must not exceed length of dst. 
1.1713 + // 1.1714 + 1.1715 + const Register src = rax; // source array oop 1.1716 + const Register src_pos = rsi; 1.1717 + const Register dst = rdx; // destination array oop 1.1718 + const Register dst_pos = rdi; 1.1719 + const Register length = rcx; // transfer count 1.1720 + 1.1721 + // if (src == NULL) return -1; 1.1722 + __ movptr(src, SRC); // src oop 1.1723 + __ testptr(src, src); 1.1724 + __ jccb(Assembler::zero, L_failed_0); 1.1725 + 1.1726 + // if (src_pos < 0) return -1; 1.1727 + __ movl2ptr(src_pos, SRC_POS); // src_pos 1.1728 + __ testl(src_pos, src_pos); 1.1729 + __ jccb(Assembler::negative, L_failed_0); 1.1730 + 1.1731 + // if (dst == NULL) return -1; 1.1732 + __ movptr(dst, DST); // dst oop 1.1733 + __ testptr(dst, dst); 1.1734 + __ jccb(Assembler::zero, L_failed_0); 1.1735 + 1.1736 + // if (dst_pos < 0) return -1; 1.1737 + __ movl2ptr(dst_pos, DST_POS); // dst_pos 1.1738 + __ testl(dst_pos, dst_pos); 1.1739 + __ jccb(Assembler::negative, L_failed_0); 1.1740 + 1.1741 + // if (length < 0) return -1; 1.1742 + __ movl2ptr(length, LENGTH); // length 1.1743 + __ testl(length, length); 1.1744 + __ jccb(Assembler::negative, L_failed_0); 1.1745 + 1.1746 + // if (src->klass() == NULL) return -1; 1.1747 + Address src_klass_addr(src, oopDesc::klass_offset_in_bytes()); 1.1748 + Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes()); 1.1749 + const Register rcx_src_klass = rcx; // array klass 1.1750 + __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes())); 1.1751 + 1.1752 +#ifdef ASSERT 1.1753 + // assert(src->klass() != NULL); 1.1754 + BLOCK_COMMENT("assert klasses not null"); 1.1755 + { Label L1, L2; 1.1756 + __ testptr(rcx_src_klass, rcx_src_klass); 1.1757 + __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL 1.1758 + __ bind(L1); 1.1759 + __ stop("broken null klass"); 1.1760 + __ bind(L2); 1.1761 + __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD); 1.1762 + __ jccb(Assembler::equal, L1); // this would be broken also 1.1763 + BLOCK_COMMENT("assert done"); 1.1764 + } 1.1765 +#endif //ASSERT 1.1766 + 1.1767 + // Load layout helper (32-bits) 1.1768 + // 1.1769 + // |array_tag| | header_size | element_type | |log2_element_size| 1.1770 + // 32 30 24 16 8 2 0 1.1771 + // 1.1772 + // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 1.1773 + // 1.1774 + 1.1775 + int lh_offset = in_bytes(Klass::layout_helper_offset()); 1.1776 + Address src_klass_lh_addr(rcx_src_klass, lh_offset); 1.1777 + 1.1778 + // Handle objArrays completely differently... 1.1779 + jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 1.1780 + __ cmpl(src_klass_lh_addr, objArray_lh); 1.1781 + __ jcc(Assembler::equal, L_objArray); 1.1782 + 1.1783 + // if (src->klass() != dst->klass()) return -1; 1.1784 + __ cmpptr(rcx_src_klass, dst_klass_addr); 1.1785 + __ jccb(Assembler::notEqual, L_failed_0); 1.1786 + 1.1787 + const Register rcx_lh = rcx; // layout helper 1.1788 + assert(rcx_lh == rcx_src_klass, "known alias"); 1.1789 + __ movl(rcx_lh, src_klass_lh_addr); 1.1790 + 1.1791 + // if (!src->is_Array()) return -1; 1.1792 + __ cmpl(rcx_lh, Klass::_lh_neutral_value); 1.1793 + __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp 1.1794 + 1.1795 + // At this point, it is known to be a typeArray (array_tag 0x3). 
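// Decoding sketch for the typeArray case handled below (it mirrors the layout
// helper diagram above and the Klass::_lh_* constants used by the code):
//
//   header_size       = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
//   log2_element_size =  lh & _lh_log2_element_size_mask;
//   src_addr = (char*)src + header_size + (src_pos << log2_element_size);
//   dst_addr = (char*)dst + header_size + (dst_pos << log2_element_size);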
1.1796 +#ifdef ASSERT 1.1797 + { Label L; 1.1798 + __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 1.1799 + __ jcc(Assembler::greaterEqual, L); // signed cmp 1.1800 + __ stop("must be a primitive array"); 1.1801 + __ bind(L); 1.1802 + } 1.1803 +#endif 1.1804 + 1.1805 + assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh); 1.1806 + arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1.1807 + 1.1808 + // TypeArrayKlass 1.1809 + // 1.1810 + // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 1.1811 + // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 1.1812 + // 1.1813 + const Register rsi_offset = rsi; // array offset 1.1814 + const Register src_array = src; // src array offset 1.1815 + const Register dst_array = dst; // dst array offset 1.1816 + const Register rdi_elsize = rdi; // log2 element size 1.1817 + 1.1818 + __ mov(rsi_offset, rcx_lh); 1.1819 + __ shrptr(rsi_offset, Klass::_lh_header_size_shift); 1.1820 + __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset 1.1821 + __ addptr(src_array, rsi_offset); // src array offset 1.1822 + __ addptr(dst_array, rsi_offset); // dst array offset 1.1823 + __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize 1.1824 + 1.1825 + // next registers should be set before the jump to corresponding stub 1.1826 + const Register from = src; // source array address 1.1827 + const Register to = dst; // destination array address 1.1828 + const Register count = rcx; // elements count 1.1829 + // some of them should be duplicated on stack 1.1830 +#define FROM Address(rsp, 12+ 4) 1.1831 +#define TO Address(rsp, 12+ 8) // Not used now 1.1832 +#define COUNT Address(rsp, 12+12) // Only for oop arraycopy 1.1833 + 1.1834 + BLOCK_COMMENT("scale indexes to element size"); 1.1835 + __ movl2ptr(rsi, SRC_POS); // src_pos 1.1836 + __ shlptr(rsi); // src_pos << rcx (log2 elsize) 1.1837 + assert(src_array == from, ""); 1.1838 + __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize 1.1839 + __ movl2ptr(rdi, DST_POS); // dst_pos 1.1840 + __ shlptr(rdi); // dst_pos << rcx (log2 elsize) 1.1841 + assert(dst_array == to, ""); 1.1842 + __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize 1.1843 + __ movptr(FROM, from); // src_addr 1.1844 + __ mov(rdi_elsize, rcx_lh); // log2 elsize 1.1845 + __ movl2ptr(count, LENGTH); // elements count 1.1846 + 1.1847 + BLOCK_COMMENT("choose copy loop based on element size"); 1.1848 + __ cmpl(rdi_elsize, 0); 1.1849 + 1.1850 + __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy)); 1.1851 + __ cmpl(rdi_elsize, LogBytesPerShort); 1.1852 + __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy)); 1.1853 + __ cmpl(rdi_elsize, LogBytesPerInt); 1.1854 + __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy)); 1.1855 +#ifdef ASSERT 1.1856 + __ cmpl(rdi_elsize, LogBytesPerLong); 1.1857 + __ jccb(Assembler::notEqual, L_failed); 1.1858 +#endif 1.1859 + __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it. 
1.1860 + __ pop(rsi); 1.1861 + __ jump(RuntimeAddress(entry_jlong_arraycopy)); 1.1862 + 1.1863 + __ BIND(L_failed); 1.1864 + __ xorptr(rax, rax); 1.1865 + __ notptr(rax); // return -1 1.1866 + __ pop(rdi); 1.1867 + __ pop(rsi); 1.1868 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.1869 + __ ret(0); 1.1870 + 1.1871 + // ObjArrayKlass 1.1872 + __ BIND(L_objArray); 1.1873 + // live at this point: rcx_src_klass, src[_pos], dst[_pos] 1.1874 + 1.1875 + Label L_plain_copy, L_checkcast_copy; 1.1876 + // test array classes for subtyping 1.1877 + __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality 1.1878 + __ jccb(Assembler::notEqual, L_checkcast_copy); 1.1879 + 1.1880 + // Identically typed arrays can be copied without element-wise checks. 1.1881 + assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass); 1.1882 + arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1.1883 + 1.1884 + __ BIND(L_plain_copy); 1.1885 + __ movl2ptr(count, LENGTH); // elements count 1.1886 + __ movl2ptr(src_pos, SRC_POS); // reload src_pos 1.1887 + __ lea(from, Address(src, src_pos, Address::times_ptr, 1.1888 + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 1.1889 + __ movl2ptr(dst_pos, DST_POS); // reload dst_pos 1.1890 + __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1.1891 + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 1.1892 + __ movptr(FROM, from); // src_addr 1.1893 + __ movptr(TO, to); // dst_addr 1.1894 + __ movl(COUNT, count); // count 1.1895 + __ jump(RuntimeAddress(entry_oop_arraycopy)); 1.1896 + 1.1897 + __ BIND(L_checkcast_copy); 1.1898 + // live at this point: rcx_src_klass, dst[_pos], src[_pos] 1.1899 + { 1.1900 + // Handy offsets: 1.1901 + int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 1.1902 + int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1.1903 + 1.1904 + Register rsi_dst_klass = rsi; 1.1905 + Register rdi_temp = rdi; 1.1906 + assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos"); 1.1907 + assert(rdi_temp == dst_pos, "expected alias w/ dst_pos"); 1.1908 + Address dst_klass_lh_addr(rsi_dst_klass, lh_offset); 1.1909 + 1.1910 + // Before looking at dst.length, make sure dst is also an objArray. 1.1911 + __ movptr(rsi_dst_klass, dst_klass_addr); 1.1912 + __ cmpl(dst_klass_lh_addr, objArray_lh); 1.1913 + __ jccb(Assembler::notEqual, L_failed); 1.1914 + 1.1915 + // It is safe to examine both src.length and dst.length. 1.1916 + __ movl2ptr(src_pos, SRC_POS); // reload rsi 1.1917 + arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed); 1.1918 + // (Now src_pos and dst_pos are killed, but not src and dst.) 1.1919 + 1.1920 + // We'll need this temp (don't forget to pop it after the type check). 1.1921 + __ push(rbx); 1.1922 + Register rbx_src_klass = rbx; 1.1923 + 1.1924 + __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx 1.1925 + __ movptr(rsi_dst_klass, dst_klass_addr); 1.1926 + Address super_check_offset_addr(rsi_dst_klass, sco_offset); 1.1927 + Label L_fail_array_check; 1.1928 + generate_type_check(rbx_src_klass, 1.1929 + super_check_offset_addr, dst_klass_addr, 1.1930 + rdi_temp, NULL, &L_fail_array_check); 1.1931 + // (On fall-through, we have passed the array type check.) 
1.1932 + __ pop(rbx); 1.1933 + __ jmp(L_plain_copy); 1.1934 + 1.1935 + __ BIND(L_fail_array_check); 1.1936 + // Reshuffle arguments so we can call checkcast_arraycopy: 1.1937 + 1.1938 + // match initial saves for checkcast_arraycopy 1.1939 + // push(rsi); // already done; see above 1.1940 + // push(rdi); // already done; see above 1.1941 + // push(rbx); // already done; see above 1.1942 + 1.1943 + // Marshal outgoing arguments now, freeing registers. 1.1944 + Address from_arg(rsp, 16+ 4); // from 1.1945 + Address to_arg(rsp, 16+ 8); // to 1.1946 + Address length_arg(rsp, 16+12); // elements count 1.1947 + Address ckoff_arg(rsp, 16+16); // super_check_offset 1.1948 + Address ckval_arg(rsp, 16+20); // super_klass 1.1949 + 1.1950 + Address SRC_POS_arg(rsp, 16+ 8); 1.1951 + Address DST_POS_arg(rsp, 16+16); 1.1952 + Address LENGTH_arg(rsp, 16+20); 1.1953 + // push rbx, changed the incoming offsets (why not just use rbp,??) 1.1954 + // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, ""); 1.1955 + 1.1956 + __ movptr(rbx, Address(rsi_dst_klass, ek_offset)); 1.1957 + __ movl2ptr(length, LENGTH_arg); // reload elements count 1.1958 + __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos 1.1959 + __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos 1.1960 + 1.1961 + __ movptr(ckval_arg, rbx); // destination element type 1.1962 + __ movl(rbx, Address(rbx, sco_offset)); 1.1963 + __ movl(ckoff_arg, rbx); // corresponding class check offset 1.1964 + 1.1965 + __ movl(length_arg, length); // outgoing length argument 1.1966 + 1.1967 + __ lea(from, Address(src, src_pos, Address::times_ptr, 1.1968 + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1.1969 + __ movptr(from_arg, from); 1.1970 + 1.1971 + __ lea(to, Address(dst, dst_pos, Address::times_ptr, 1.1972 + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 1.1973 + __ movptr(to_arg, to); 1.1974 + __ jump(RuntimeAddress(entry_checkcast_arraycopy)); 1.1975 + } 1.1976 + 1.1977 + return start; 1.1978 + } 1.1979 + 1.1980 + void generate_arraycopy_stubs() { 1.1981 + address entry; 1.1982 + address entry_jbyte_arraycopy; 1.1983 + address entry_jshort_arraycopy; 1.1984 + address entry_jint_arraycopy; 1.1985 + address entry_oop_arraycopy; 1.1986 + address entry_jlong_arraycopy; 1.1987 + address entry_checkcast_arraycopy; 1.1988 + 1.1989 + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = 1.1990 + generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, 1.1991 + "arrayof_jbyte_disjoint_arraycopy"); 1.1992 + StubRoutines::_arrayof_jbyte_arraycopy = 1.1993 + generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, 1.1994 + NULL, "arrayof_jbyte_arraycopy"); 1.1995 + StubRoutines::_jbyte_disjoint_arraycopy = 1.1996 + generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, 1.1997 + "jbyte_disjoint_arraycopy"); 1.1998 + StubRoutines::_jbyte_arraycopy = 1.1999 + generate_conjoint_copy(T_BYTE, false, Address::times_1, entry, 1.2000 + &entry_jbyte_arraycopy, "jbyte_arraycopy"); 1.2001 + 1.2002 + StubRoutines::_arrayof_jshort_disjoint_arraycopy = 1.2003 + generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry, 1.2004 + "arrayof_jshort_disjoint_arraycopy"); 1.2005 + StubRoutines::_arrayof_jshort_arraycopy = 1.2006 + generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, 1.2007 + NULL, "arrayof_jshort_arraycopy"); 1.2008 + StubRoutines::_jshort_disjoint_arraycopy = 1.2009 + generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, 1.2010 + "jshort_disjoint_arraycopy"); 1.2011 + StubRoutines::_jshort_arraycopy = 1.2012 + 
generate_conjoint_copy(T_SHORT, false, Address::times_2, entry, 1.2013 + &entry_jshort_arraycopy, "jshort_arraycopy"); 1.2014 + 1.2015 + // Next arrays are always aligned on 4 bytes at least. 1.2016 + StubRoutines::_jint_disjoint_arraycopy = 1.2017 + generate_disjoint_copy(T_INT, true, Address::times_4, &entry, 1.2018 + "jint_disjoint_arraycopy"); 1.2019 + StubRoutines::_jint_arraycopy = 1.2020 + generate_conjoint_copy(T_INT, true, Address::times_4, entry, 1.2021 + &entry_jint_arraycopy, "jint_arraycopy"); 1.2022 + 1.2023 + StubRoutines::_oop_disjoint_arraycopy = 1.2024 + generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 1.2025 + "oop_disjoint_arraycopy"); 1.2026 + StubRoutines::_oop_arraycopy = 1.2027 + generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 1.2028 + &entry_oop_arraycopy, "oop_arraycopy"); 1.2029 + 1.2030 + StubRoutines::_oop_disjoint_arraycopy_uninit = 1.2031 + generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 1.2032 + "oop_disjoint_arraycopy_uninit", 1.2033 + /*dest_uninitialized*/true); 1.2034 + StubRoutines::_oop_arraycopy_uninit = 1.2035 + generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 1.2036 + NULL, "oop_arraycopy_uninit", 1.2037 + /*dest_uninitialized*/true); 1.2038 + 1.2039 + StubRoutines::_jlong_disjoint_arraycopy = 1.2040 + generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); 1.2041 + StubRoutines::_jlong_arraycopy = 1.2042 + generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, 1.2043 + "jlong_arraycopy"); 1.2044 + 1.2045 + StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 1.2046 + StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 1.2047 + StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 1.2048 + StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 1.2049 + StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 1.2050 + StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 1.2051 + 1.2052 + StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 1.2053 + StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 1.2054 + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 1.2055 + StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 1.2056 + 1.2057 + StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 1.2058 + StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 1.2059 + StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 1.2060 + StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 1.2061 + 1.2062 + StubRoutines::_checkcast_arraycopy = 1.2063 + generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 1.2064 + StubRoutines::_checkcast_arraycopy_uninit = 1.2065 + generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); 1.2066 + 1.2067 + StubRoutines::_unsafe_arraycopy = 1.2068 + generate_unsafe_copy("unsafe_arraycopy", 1.2069 + entry_jbyte_arraycopy, 1.2070 + entry_jshort_arraycopy, 1.2071 + entry_jint_arraycopy, 1.2072 + entry_jlong_arraycopy); 1.2073 + 1.2074 + StubRoutines::_generic_arraycopy = 1.2075 + generate_generic_copy("generic_arraycopy", 1.2076 + entry_jbyte_arraycopy, 1.2077 + entry_jshort_arraycopy, 1.2078 + 
entry_jint_arraycopy, 1.2079 + entry_oop_arraycopy, 1.2080 + entry_jlong_arraycopy, 1.2081 + entry_checkcast_arraycopy); 1.2082 + } 1.2083 + 1.2084 + void generate_math_stubs() { 1.2085 + { 1.2086 + StubCodeMark mark(this, "StubRoutines", "log"); 1.2087 + StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 1.2088 + 1.2089 + __ fld_d(Address(rsp, 4)); 1.2090 + __ flog(); 1.2091 + __ ret(0); 1.2092 + } 1.2093 + { 1.2094 + StubCodeMark mark(this, "StubRoutines", "log10"); 1.2095 + StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc(); 1.2096 + 1.2097 + __ fld_d(Address(rsp, 4)); 1.2098 + __ flog10(); 1.2099 + __ ret(0); 1.2100 + } 1.2101 + { 1.2102 + StubCodeMark mark(this, "StubRoutines", "sin"); 1.2103 + StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 1.2104 + 1.2105 + __ fld_d(Address(rsp, 4)); 1.2106 + __ trigfunc('s'); 1.2107 + __ ret(0); 1.2108 + } 1.2109 + { 1.2110 + StubCodeMark mark(this, "StubRoutines", "cos"); 1.2111 + StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 1.2112 + 1.2113 + __ fld_d(Address(rsp, 4)); 1.2114 + __ trigfunc('c'); 1.2115 + __ ret(0); 1.2116 + } 1.2117 + { 1.2118 + StubCodeMark mark(this, "StubRoutines", "tan"); 1.2119 + StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 1.2120 + 1.2121 + __ fld_d(Address(rsp, 4)); 1.2122 + __ trigfunc('t'); 1.2123 + __ ret(0); 1.2124 + } 1.2125 + { 1.2126 + StubCodeMark mark(this, "StubRoutines", "exp"); 1.2127 + StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 1.2128 + 1.2129 + __ fld_d(Address(rsp, 4)); 1.2130 + __ exp_with_fallback(0); 1.2131 + __ ret(0); 1.2132 + } 1.2133 + { 1.2134 + StubCodeMark mark(this, "StubRoutines", "pow"); 1.2135 + StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 1.2136 + 1.2137 + __ fld_d(Address(rsp, 12)); 1.2138 + __ fld_d(Address(rsp, 4)); 1.2139 + __ pow_with_fallback(0); 1.2140 + __ ret(0); 1.2141 + } 1.2142 + } 1.2143 + 1.2144 + // AES intrinsic stubs 1.2145 + enum {AESBlockSize = 16}; 1.2146 + 1.2147 + address generate_key_shuffle_mask() { 1.2148 + __ align(16); 1.2149 + StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 1.2150 + address start = __ pc(); 1.2151 + __ emit_data(0x00010203, relocInfo::none, 0 ); 1.2152 + __ emit_data(0x04050607, relocInfo::none, 0 ); 1.2153 + __ emit_data(0x08090a0b, relocInfo::none, 0 ); 1.2154 + __ emit_data(0x0c0d0e0f, relocInfo::none, 0 ); 1.2155 + return start; 1.2156 + } 1.2157 + 1.2158 + // Utility routine for loading a 128-bit key word in little endian format 1.2159 + // can optionally specify that the shuffle mask is already in an xmmregister 1.2160 + void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 1.2161 + __ movdqu(xmmdst, Address(key, offset)); 1.2162 + if (xmm_shuf_mask != NULL) { 1.2163 + __ pshufb(xmmdst, xmm_shuf_mask); 1.2164 + } else { 1.2165 + __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 1.2166 + } 1.2167 + } 1.2168 + 1.2169 + // aesenc using specified key+offset 1.2170 + // can optionally specify that the shuffle mask is already in an xmmregister 1.2171 + void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 1.2172 + load_key(xmmtmp, key, offset, xmm_shuf_mask); 1.2173 + __ aesenc(xmmdst, xmmtmp); 1.2174 + } 1.2175 + 1.2176 + // aesdec using specified key+offset 1.2177 + // can optionally specify that the shuffle mask is already in an xmmregister 1.2178 + void aes_dec_key(XMMRegister xmmdst, XMMRegister 
xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 1.2179 + load_key(xmmtmp, key, offset, xmm_shuf_mask); 1.2180 + __ aesdec(xmmdst, xmmtmp); 1.2181 + } 1.2182 + 1.2183 + 1.2184 + // Arguments: 1.2185 + // 1.2186 + // Inputs: 1.2187 + // c_rarg0 - source byte array address 1.2188 + // c_rarg1 - destination byte array address 1.2189 + // c_rarg2 - K (key) in little endian int array 1.2190 + // 1.2191 + address generate_aescrypt_encryptBlock() { 1.2192 + assert(UseAES, "need AES instructions and misaligned SSE support"); 1.2193 + __ align(CodeEntryAlignment); 1.2194 + StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 1.2195 + Label L_doLast; 1.2196 + address start = __ pc(); 1.2197 + 1.2198 + const Register from = rdx; // source array address 1.2199 + const Register to = rdx; // destination array address 1.2200 + const Register key = rcx; // key array address 1.2201 + const Register keylen = rax; 1.2202 + const Address from_param(rbp, 8+0); 1.2203 + const Address to_param (rbp, 8+4); 1.2204 + const Address key_param (rbp, 8+8); 1.2205 + 1.2206 + const XMMRegister xmm_result = xmm0; 1.2207 + const XMMRegister xmm_key_shuf_mask = xmm1; 1.2208 + const XMMRegister xmm_temp1 = xmm2; 1.2209 + const XMMRegister xmm_temp2 = xmm3; 1.2210 + const XMMRegister xmm_temp3 = xmm4; 1.2211 + const XMMRegister xmm_temp4 = xmm5; 1.2212 + 1.2213 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.2214 + __ movptr(from, from_param); 1.2215 + __ movptr(key, key_param); 1.2216 + 1.2217 + // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 1.2218 + __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1.2219 + 1.2220 + __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 1.2221 + __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 1.2222 + __ movptr(to, to_param); 1.2223 + 1.2224 + // For encryption, the java expanded key ordering is just what we need 1.2225 + 1.2226 + load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 1.2227 + __ pxor(xmm_result, xmm_temp1); 1.2228 + 1.2229 + load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 1.2230 + load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 1.2231 + load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 1.2232 + load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 1.2233 + 1.2234 + __ aesenc(xmm_result, xmm_temp1); 1.2235 + __ aesenc(xmm_result, xmm_temp2); 1.2236 + __ aesenc(xmm_result, xmm_temp3); 1.2237 + __ aesenc(xmm_result, xmm_temp4); 1.2238 + 1.2239 + load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 1.2240 + load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 1.2241 + load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 1.2242 + load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 1.2243 + 1.2244 + __ aesenc(xmm_result, xmm_temp1); 1.2245 + __ aesenc(xmm_result, xmm_temp2); 1.2246 + __ aesenc(xmm_result, xmm_temp3); 1.2247 + __ aesenc(xmm_result, xmm_temp4); 1.2248 + 1.2249 + load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 1.2250 + load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 1.2251 + 1.2252 + __ cmpl(keylen, 44); 1.2253 + __ jccb(Assembler::equal, L_doLast); 1.2254 + 1.2255 + __ aesenc(xmm_result, xmm_temp1); 1.2256 + __ aesenc(xmm_result, xmm_temp2); 1.2257 + 1.2258 + load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 1.2259 + load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 1.2260 + 1.2261 + __ cmpl(keylen, 52); 1.2262 + __ jccb(Assembler::equal, L_doLast); 1.2263 + 1.2264 + __ 
aesenc(xmm_result, xmm_temp1); 1.2265 + __ aesenc(xmm_result, xmm_temp2); 1.2266 + 1.2267 + load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 1.2268 + load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 1.2269 + 1.2270 + __ BIND(L_doLast); 1.2271 + __ aesenc(xmm_result, xmm_temp1); 1.2272 + __ aesenclast(xmm_result, xmm_temp2); 1.2273 + __ movdqu(Address(to, 0), xmm_result); // store the result 1.2274 + __ xorptr(rax, rax); // return 0 1.2275 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.2276 + __ ret(0); 1.2277 + 1.2278 + return start; 1.2279 + } 1.2280 + 1.2281 + 1.2282 + // Arguments: 1.2283 + // 1.2284 + // Inputs: 1.2285 + // c_rarg0 - source byte array address 1.2286 + // c_rarg1 - destination byte array address 1.2287 + // c_rarg2 - K (key) in little endian int array 1.2288 + // 1.2289 + address generate_aescrypt_decryptBlock() { 1.2290 + assert(UseAES, "need AES instructions and misaligned SSE support"); 1.2291 + __ align(CodeEntryAlignment); 1.2292 + StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 1.2293 + Label L_doLast; 1.2294 + address start = __ pc(); 1.2295 + 1.2296 + const Register from = rdx; // source array address 1.2297 + const Register to = rdx; // destination array address 1.2298 + const Register key = rcx; // key array address 1.2299 + const Register keylen = rax; 1.2300 + const Address from_param(rbp, 8+0); 1.2301 + const Address to_param (rbp, 8+4); 1.2302 + const Address key_param (rbp, 8+8); 1.2303 + 1.2304 + const XMMRegister xmm_result = xmm0; 1.2305 + const XMMRegister xmm_key_shuf_mask = xmm1; 1.2306 + const XMMRegister xmm_temp1 = xmm2; 1.2307 + const XMMRegister xmm_temp2 = xmm3; 1.2308 + const XMMRegister xmm_temp3 = xmm4; 1.2309 + const XMMRegister xmm_temp4 = xmm5; 1.2310 + 1.2311 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.2312 + __ movptr(from, from_param); 1.2313 + __ movptr(key, key_param); 1.2314 + 1.2315 + // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 1.2316 + __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1.2317 + 1.2318 + __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 1.2319 + __ movdqu(xmm_result, Address(from, 0)); 1.2320 + __ movptr(to, to_param); 1.2321 + 1.2322 + // for decryption java expanded key ordering is rotated one position from what we want 1.2323 + // so we start from 0x10 here and hit 0x00 last 1.2324 + // we don't know if the key is aligned, hence not using load-execute form 1.2325 + load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 1.2326 + load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 1.2327 + load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 1.2328 + load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 1.2329 + 1.2330 + __ pxor (xmm_result, xmm_temp1); 1.2331 + __ aesdec(xmm_result, xmm_temp2); 1.2332 + __ aesdec(xmm_result, xmm_temp3); 1.2333 + __ aesdec(xmm_result, xmm_temp4); 1.2334 + 1.2335 + load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 1.2336 + load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 1.2337 + load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 1.2338 + load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 1.2339 + 1.2340 + __ aesdec(xmm_result, xmm_temp1); 1.2341 + __ aesdec(xmm_result, xmm_temp2); 1.2342 + __ aesdec(xmm_result, xmm_temp3); 1.2343 + __ aesdec(xmm_result, xmm_temp4); 1.2344 + 1.2345 + load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 1.2346 + load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 
1.2347 + load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 1.2348 + 1.2349 + __ cmpl(keylen, 44); 1.2350 + __ jccb(Assembler::equal, L_doLast); 1.2351 + 1.2352 + __ aesdec(xmm_result, xmm_temp1); 1.2353 + __ aesdec(xmm_result, xmm_temp2); 1.2354 + 1.2355 + load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 1.2356 + load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 1.2357 + 1.2358 + __ cmpl(keylen, 52); 1.2359 + __ jccb(Assembler::equal, L_doLast); 1.2360 + 1.2361 + __ aesdec(xmm_result, xmm_temp1); 1.2362 + __ aesdec(xmm_result, xmm_temp2); 1.2363 + 1.2364 + load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 1.2365 + load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 1.2366 + 1.2367 + __ BIND(L_doLast); 1.2368 + __ aesdec(xmm_result, xmm_temp1); 1.2369 + __ aesdec(xmm_result, xmm_temp2); 1.2370 + 1.2371 + // for decryption the aesdeclast operation is always on key+0x00 1.2372 + __ aesdeclast(xmm_result, xmm_temp3); 1.2373 + __ movdqu(Address(to, 0), xmm_result); // store the result 1.2374 + __ xorptr(rax, rax); // return 0 1.2375 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.2376 + __ ret(0); 1.2377 + 1.2378 + return start; 1.2379 + } 1.2380 + 1.2381 + void handleSOERegisters(bool saving) { 1.2382 + const int saveFrameSizeInBytes = 4 * wordSize; 1.2383 + const Address saved_rbx (rbp, -3 * wordSize); 1.2384 + const Address saved_rsi (rbp, -2 * wordSize); 1.2385 + const Address saved_rdi (rbp, -1 * wordSize); 1.2386 + 1.2387 + if (saving) { 1.2388 + __ subptr(rsp, saveFrameSizeInBytes); 1.2389 + __ movptr(saved_rsi, rsi); 1.2390 + __ movptr(saved_rdi, rdi); 1.2391 + __ movptr(saved_rbx, rbx); 1.2392 + } else { 1.2393 + // restoring 1.2394 + __ movptr(rsi, saved_rsi); 1.2395 + __ movptr(rdi, saved_rdi); 1.2396 + __ movptr(rbx, saved_rbx); 1.2397 + } 1.2398 + } 1.2399 + 1.2400 + // Arguments: 1.2401 + // 1.2402 + // Inputs: 1.2403 + // c_rarg0 - source byte array address 1.2404 + // c_rarg1 - destination byte array address 1.2405 + // c_rarg2 - K (key) in little endian int array 1.2406 + // c_rarg3 - r vector byte array address 1.2407 + // c_rarg4 - input length 1.2408 + // 1.2409 + // Output: 1.2410 + // rax - input length 1.2411 + // 1.2412 + address generate_cipherBlockChaining_encryptAESCrypt() { 1.2413 + assert(UseAES, "need AES instructions and misaligned SSE support"); 1.2414 + __ align(CodeEntryAlignment); 1.2415 + StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 1.2416 + address start = __ pc(); 1.2417 + 1.2418 + Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 1.2419 + const Register from = rsi; // source array address 1.2420 + const Register to = rdx; // destination array address 1.2421 + const Register key = rcx; // key array address 1.2422 + const Register rvec = rdi; // r byte array initialized from initvector array address 1.2423 + // and left with the results of the last encryption block 1.2424 + const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 1.2425 + const Register pos = rax; 1.2426 + 1.2427 + // xmm register assignments for the loops below 1.2428 + const XMMRegister xmm_result = xmm0; 1.2429 + const XMMRegister xmm_temp = xmm1; 1.2430 + // first 6 keys preloaded into xmm2-xmm7 1.2431 + const int XMM_REG_NUM_KEY_FIRST = 2; 1.2432 + const int XMM_REG_NUM_KEY_LAST = 7; 1.2433 + const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 1.2434 + 1.2435 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.2436 + 
handleSOERegisters(true /*saving*/); 1.2437 + 1.2438 + // load registers from incoming parameters 1.2439 + const Address from_param(rbp, 8+0); 1.2440 + const Address to_param (rbp, 8+4); 1.2441 + const Address key_param (rbp, 8+8); 1.2442 + const Address rvec_param (rbp, 8+12); 1.2443 + const Address len_param (rbp, 8+16); 1.2444 + __ movptr(from , from_param); 1.2445 + __ movptr(to , to_param); 1.2446 + __ movptr(key , key_param); 1.2447 + __ movptr(rvec , rvec_param); 1.2448 + __ movptr(len_reg , len_param); 1.2449 + 1.2450 + const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 1.2451 + __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 1.2452 + // load up xmm regs 2 thru 7 with keys 0-5 1.2453 + for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2454 + load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 1.2455 + offset += 0x10; 1.2456 + } 1.2457 + 1.2458 + __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 1.2459 + 1.2460 + // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 1.2461 + __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1.2462 + __ cmpl(rax, 44); 1.2463 + __ jcc(Assembler::notEqual, L_key_192_256); 1.2464 + 1.2465 + // 128 bit code follows here 1.2466 + __ movl(pos, 0); 1.2467 + __ align(OptoLoopAlignment); 1.2468 + __ BIND(L_loopTop_128); 1.2469 + __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 1.2470 + __ pxor (xmm_result, xmm_temp); // xor with the current r vector 1.2471 + 1.2472 + __ pxor (xmm_result, xmm_key0); // do the aes rounds 1.2473 + for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2474 + __ aesenc(xmm_result, as_XMMRegister(rnum)); 1.2475 + } 1.2476 + for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) { 1.2477 + aes_enc_key(xmm_result, xmm_temp, key, key_offset); 1.2478 + } 1.2479 + load_key(xmm_temp, key, 0xa0); 1.2480 + __ aesenclast(xmm_result, xmm_temp); 1.2481 + 1.2482 + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 1.2483 + // no need to store r to memory until we exit 1.2484 + __ addptr(pos, AESBlockSize); 1.2485 + __ subptr(len_reg, AESBlockSize); 1.2486 + __ jcc(Assembler::notEqual, L_loopTop_128); 1.2487 + 1.2488 + __ BIND(L_exit); 1.2489 + __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 1.2490 + 1.2491 + handleSOERegisters(false /*restoring*/); 1.2492 + __ movptr(rax, len_param); // return length 1.2493 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.2494 + __ ret(0); 1.2495 + 1.2496 + __ BIND(L_key_192_256); 1.2497 + // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 1.2498 + __ cmpl(rax, 52); 1.2499 + __ jcc(Assembler::notEqual, L_key_256); 1.2500 + 1.2501 + // 192-bit code follows here (could be changed to use more xmm registers) 1.2502 + __ movl(pos, 0); 1.2503 + __ align(OptoLoopAlignment); 1.2504 + __ BIND(L_loopTop_192); 1.2505 + __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 1.2506 + __ pxor (xmm_result, xmm_temp); // xor with the current r vector 1.2507 + 1.2508 + __ pxor (xmm_result, xmm_key0); // do the aes rounds 1.2509 + for (int rnum = XMM_REG_NUM_KEY_FIRST 
+ 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2510 + __ aesenc(xmm_result, as_XMMRegister(rnum)); 1.2511 + } 1.2512 + for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) { 1.2513 + aes_enc_key(xmm_result, xmm_temp, key, key_offset); 1.2514 + } 1.2515 + load_key(xmm_temp, key, 0xc0); 1.2516 + __ aesenclast(xmm_result, xmm_temp); 1.2517 + 1.2518 + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 1.2519 + // no need to store r to memory until we exit 1.2520 + __ addptr(pos, AESBlockSize); 1.2521 + __ subptr(len_reg, AESBlockSize); 1.2522 + __ jcc(Assembler::notEqual, L_loopTop_192); 1.2523 + __ jmp(L_exit); 1.2524 + 1.2525 + __ BIND(L_key_256); 1.2526 + // 256-bit code follows here (could be changed to use more xmm registers) 1.2527 + __ movl(pos, 0); 1.2528 + __ align(OptoLoopAlignment); 1.2529 + __ BIND(L_loopTop_256); 1.2530 + __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 1.2531 + __ pxor (xmm_result, xmm_temp); // xor with the current r vector 1.2532 + 1.2533 + __ pxor (xmm_result, xmm_key0); // do the aes rounds 1.2534 + for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2535 + __ aesenc(xmm_result, as_XMMRegister(rnum)); 1.2536 + } 1.2537 + for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) { 1.2538 + aes_enc_key(xmm_result, xmm_temp, key, key_offset); 1.2539 + } 1.2540 + load_key(xmm_temp, key, 0xe0); 1.2541 + __ aesenclast(xmm_result, xmm_temp); 1.2542 + 1.2543 + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 1.2544 + // no need to store r to memory until we exit 1.2545 + __ addptr(pos, AESBlockSize); 1.2546 + __ subptr(len_reg, AESBlockSize); 1.2547 + __ jcc(Assembler::notEqual, L_loopTop_256); 1.2548 + __ jmp(L_exit); 1.2549 + 1.2550 + return start; 1.2551 + } 1.2552 + 1.2553 + 1.2554 + // CBC AES Decryption. 1.2555 + // In 32-bit stub, because of lack of registers we do not try to parallelize 4 blocks at a time. 
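// CBC recap (illustrative; C[-1] denotes the IV held in the r vector):
//   encrypt: C[i] = E(K, P[i] ^ C[i-1])   inherently serial, as in the stub above
//   decrypt: P[i] = D(K, C[i]) ^ C[i-1]   blocks are independent, so a version with
//                                         more registers could decrypt several blocks
//                                         per iteration; here we do one at a time.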
1.2556 + // 1.2557 + // Arguments: 1.2558 + // 1.2559 + // Inputs: 1.2560 + // c_rarg0 - source byte array address 1.2561 + // c_rarg1 - destination byte array address 1.2562 + // c_rarg2 - K (key) in little endian int array 1.2563 + // c_rarg3 - r vector byte array address 1.2564 + // c_rarg4 - input length 1.2565 + // 1.2566 + // Output: 1.2567 + // rax - input length 1.2568 + // 1.2569 + 1.2570 + address generate_cipherBlockChaining_decryptAESCrypt() { 1.2571 + assert(UseAES, "need AES instructions and misaligned SSE support"); 1.2572 + __ align(CodeEntryAlignment); 1.2573 + StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 1.2574 + address start = __ pc(); 1.2575 + 1.2576 + Label L_exit, L_key_192_256, L_key_256; 1.2577 + Label L_singleBlock_loopTop_128; 1.2578 + Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 1.2579 + const Register from = rsi; // source array address 1.2580 + const Register to = rdx; // destination array address 1.2581 + const Register key = rcx; // key array address 1.2582 + const Register rvec = rdi; // r byte array initialized from initvector array address 1.2583 + // and left with the results of the last encryption block 1.2584 + const Register len_reg = rbx; // src len (must be multiple of blocksize 16) 1.2585 + const Register pos = rax; 1.2586 + 1.2587 + // xmm register assignments for the loops below 1.2588 + const XMMRegister xmm_result = xmm0; 1.2589 + const XMMRegister xmm_temp = xmm1; 1.2590 + // first 6 keys preloaded into xmm2-xmm7 1.2591 + const int XMM_REG_NUM_KEY_FIRST = 2; 1.2592 + const int XMM_REG_NUM_KEY_LAST = 7; 1.2593 + const int FIRST_NON_REG_KEY_offset = 0x70; 1.2594 + const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 1.2595 + 1.2596 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.2597 + handleSOERegisters(true /*saving*/); 1.2598 + 1.2599 + // load registers from incoming parameters 1.2600 + const Address from_param(rbp, 8+0); 1.2601 + const Address to_param (rbp, 8+4); 1.2602 + const Address key_param (rbp, 8+8); 1.2603 + const Address rvec_param (rbp, 8+12); 1.2604 + const Address len_param (rbp, 8+16); 1.2605 + __ movptr(from , from_param); 1.2606 + __ movptr(to , to_param); 1.2607 + __ movptr(key , key_param); 1.2608 + __ movptr(rvec , rvec_param); 1.2609 + __ movptr(len_reg , len_param); 1.2610 + 1.2611 + // the java expanded key ordering is rotated one position from what we want 1.2612 + // so we start from 0x10 here and hit 0x00 last 1.2613 + const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 1.2614 + __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 1.2615 + // load up xmm regs 2 thru 6 with first 5 keys 1.2616 + for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2617 + load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 1.2618 + offset += 0x10; 1.2619 + } 1.2620 + 1.2621 + // inside here, use the rvec register to point to previous block cipher 1.2622 + // with which we xor at the end of each newly decrypted block 1.2623 + const Register prev_block_cipher_ptr = rvec; 1.2624 + 1.2625 + // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 1.2626 + __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 1.2627 + __ cmpl(rax, 44); 1.2628 + __ jcc(Assembler::notEqual, L_key_192_256); 1.2629 + 1.2630 + 
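// Key-length dispatch (sketch): the Java expanded key holds 4*(rounds+1) ints, so
//   44 ints -> AES-128 (10 rounds), 52 -> AES-192 (12), 60 -> AES-256 (14),
// which is why the stub branches on 44 here and on 52 further down.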
1.2631 + // 128-bit code follows here, parallelized 1.2632 + __ movl(pos, 0); 1.2633 + __ align(OptoLoopAlignment); 1.2634 + __ BIND(L_singleBlock_loopTop_128); 1.2635 + __ cmpptr(len_reg, 0); // any blocks left?? 1.2636 + __ jcc(Assembler::equal, L_exit); 1.2637 + __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 1.2638 + __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 1.2639 + for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2640 + __ aesdec(xmm_result, as_XMMRegister(rnum)); 1.2641 + } 1.2642 + for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) { // 128-bit runs up to key offset a0 1.2643 + aes_dec_key(xmm_result, xmm_temp, key, key_offset); 1.2644 + } 1.2645 + load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 1.2646 + __ aesdeclast(xmm_result, xmm_temp); 1.2647 + __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 1.2648 + __ pxor (xmm_result, xmm_temp); // xor with the current r vector 1.2649 + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 1.2650 + // no need to store r to memory until we exit 1.2651 + __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 1.2652 + __ addptr(pos, AESBlockSize); 1.2653 + __ subptr(len_reg, AESBlockSize); 1.2654 + __ jmp(L_singleBlock_loopTop_128); 1.2655 + 1.2656 + 1.2657 + __ BIND(L_exit); 1.2658 + __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 1.2659 + __ movptr(rvec , rvec_param); // restore this since used in loop 1.2660 + __ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object 1.2661 + handleSOERegisters(false /*restoring*/); 1.2662 + __ movptr(rax, len_param); // return length 1.2663 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.2664 + __ ret(0); 1.2665 + 1.2666 + 1.2667 + __ BIND(L_key_192_256); 1.2668 + // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 1.2669 + __ cmpl(rax, 52); 1.2670 + __ jcc(Assembler::notEqual, L_key_256); 1.2671 + 1.2672 + // 192-bit code follows here (could be optimized to use parallelism) 1.2673 + __ movl(pos, 0); 1.2674 + __ align(OptoLoopAlignment); 1.2675 + __ BIND(L_singleBlock_loopTop_192); 1.2676 + __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 1.2677 + __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 1.2678 + for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2679 + __ aesdec(xmm_result, as_XMMRegister(rnum)); 1.2680 + } 1.2681 + for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) { // 192-bit runs up to key offset c0 1.2682 + aes_dec_key(xmm_result, xmm_temp, key, key_offset); 1.2683 + } 1.2684 + load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 1.2685 + __ aesdeclast(xmm_result, xmm_temp); 1.2686 + __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 1.2687 + __ pxor (xmm_result, xmm_temp); // xor with the current r vector 1.2688 + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 1.2689 + // no need to store r to memory until we exit 1.2690 + __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 1.2691 + __ addptr(pos, AESBlockSize); 1.2692 + __ subptr(len_reg, 
AESBlockSize); 1.2693 + __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 1.2694 + __ jmp(L_exit); 1.2695 + 1.2696 + __ BIND(L_key_256); 1.2697 + // 256-bit code follows here (could be optimized to use parallelism) 1.2698 + __ movl(pos, 0); 1.2699 + __ align(OptoLoopAlignment); 1.2700 + __ BIND(L_singleBlock_loopTop_256); 1.2701 + __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 1.2702 + __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 1.2703 + for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) { 1.2704 + __ aesdec(xmm_result, as_XMMRegister(rnum)); 1.2705 + } 1.2706 + for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) { // 256-bit runs up to key offset e0 1.2707 + aes_dec_key(xmm_result, xmm_temp, key, key_offset); 1.2708 + } 1.2709 + load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0 1.2710 + __ aesdeclast(xmm_result, xmm_temp); 1.2711 + __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00)); 1.2712 + __ pxor (xmm_result, xmm_temp); // xor with the current r vector 1.2713 + __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 1.2714 + // no need to store r to memory until we exit 1.2715 + __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr 1.2716 + __ addptr(pos, AESBlockSize); 1.2717 + __ subptr(len_reg, AESBlockSize); 1.2718 + __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 1.2719 + __ jmp(L_exit); 1.2720 + 1.2721 + return start; 1.2722 + } 1.2723 + 1.2724 + /** 1.2725 + * Arguments: 1.2726 + * 1.2727 + * Inputs: 1.2728 + * rsp(4) - int crc 1.2729 + * rsp(8) - byte* buf 1.2730 + * rsp(12) - int length 1.2731 + * 1.2732 + * Ouput: 1.2733 + * rax - int crc result 1.2734 + */ 1.2735 + address generate_updateBytesCRC32() { 1.2736 + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 1.2737 + 1.2738 + __ align(CodeEntryAlignment); 1.2739 + StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 1.2740 + 1.2741 + address start = __ pc(); 1.2742 + 1.2743 + const Register crc = rdx; // crc 1.2744 + const Register buf = rsi; // source java byte array address 1.2745 + const Register len = rcx; // length 1.2746 + const Register table = rdi; // crc_table address (reuse register) 1.2747 + const Register tmp = rbx; 1.2748 + assert_different_registers(crc, buf, len, table, tmp, rax); 1.2749 + 1.2750 + BLOCK_COMMENT("Entry:"); 1.2751 + __ enter(); // required for proper stackwalking of RuntimeStub frame 1.2752 + __ push(rsi); 1.2753 + __ push(rdi); 1.2754 + __ push(rbx); 1.2755 + 1.2756 + Address crc_arg(rbp, 8 + 0); 1.2757 + Address buf_arg(rbp, 8 + 4); 1.2758 + Address len_arg(rbp, 8 + 8); 1.2759 + 1.2760 + // Load up: 1.2761 + __ movl(crc, crc_arg); 1.2762 + __ movptr(buf, buf_arg); 1.2763 + __ movl(len, len_arg); 1.2764 + 1.2765 + __ kernel_crc32(crc, buf, len, table, tmp); 1.2766 + 1.2767 + __ movl(rax, crc); 1.2768 + __ pop(rbx); 1.2769 + __ pop(rdi); 1.2770 + __ pop(rsi); 1.2771 + __ leave(); // required for proper stackwalking of RuntimeStub frame 1.2772 + __ ret(0); 1.2773 + 1.2774 + return start; 1.2775 + } 1.2776 + 1.2777 + // Safefetch stubs. 
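// Conceptually (illustrative sketch, not the generated code): the stub behaves as if
//
//   int SafeFetch32(int* adr, int errValue) {
//     return *adr;     // if this load faults, the VM's fault handler resumes at
//   }                  // continuation_pc with rax still holding errValue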
1.2778 + void generate_safefetch(const char* name, int size, address* entry, 1.2779 + address* fault_pc, address* continuation_pc) { 1.2780 + // safefetch signatures: 1.2781 + // int SafeFetch32(int* adr, int errValue); 1.2782 + // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 1.2783 + 1.2784 + StubCodeMark mark(this, "StubRoutines", name); 1.2785 + 1.2786 + // Entry point, pc or function descriptor. 1.2787 + *entry = __ pc(); 1.2788 + 1.2789 + __ movl(rax, Address(rsp, 0x8)); 1.2790 + __ movl(rcx, Address(rsp, 0x4)); 1.2791 + // Load *adr into eax, may fault. 1.2792 + *fault_pc = __ pc(); 1.2793 + switch (size) { 1.2794 + case 4: 1.2795 + // int32_t 1.2796 + __ movl(rax, Address(rcx, 0)); 1.2797 + break; 1.2798 + case 8: 1.2799 + // int64_t 1.2800 + Unimplemented(); 1.2801 + break; 1.2802 + default: 1.2803 + ShouldNotReachHere(); 1.2804 + } 1.2805 + 1.2806 + // Return errValue or *adr. 1.2807 + *continuation_pc = __ pc(); 1.2808 + __ ret(0); 1.2809 + } 1.2810 + 1.2811 + public: 1.2812 + // Information about frame layout at time of blocking runtime call. 1.2813 + // Note that we only have to preserve callee-saved registers since 1.2814 + // the compilers are responsible for supplying a continuation point 1.2815 + // if they expect all registers to be preserved. 1.2816 + enum layout { 1.2817 + thread_off, // last_java_sp 1.2818 + arg1_off, 1.2819 + arg2_off, 1.2820 + rbp_off, // callee saved register 1.2821 + ret_pc, 1.2822 + framesize 1.2823 + }; 1.2824 + 1.2825 + private: 1.2826 + 1.2827 +#undef __ 1.2828 +#define __ masm-> 1.2829 + 1.2830 + //------------------------------------------------------------------------------------------------------------------------ 1.2831 + // Continuation point for throwing of implicit exceptions that are not handled in 1.2832 + // the current activation. Fabricates an exception oop and initiates normal 1.2833 + // exception dispatching in this frame. 1.2834 + // 1.2835 + // Previously the compiler (c2) allowed for callee save registers on Java calls. 1.2836 + // This is no longer true after adapter frames were removed but could possibly 1.2837 + // be brought back in the future if the interpreter code was reworked and it 1.2838 + // was deemed worthwhile. The comment below was left to describe what must 1.2839 + // happen here if callee saves were resurrected. As it stands now this stub 1.2840 + // could actually be a vanilla BufferBlob and have now oopMap at all. 1.2841 + // Since it doesn't make much difference we've chosen to leave it the 1.2842 + // way it was in the callee save days and keep the comment. 1.2843 + 1.2844 + // If we need to preserve callee-saved values we need a callee-saved oop map and 1.2845 + // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs. 1.2846 + // If the compiler needs all registers to be preserved between the fault 1.2847 + // point and the exception handler then it must assume responsibility for that in 1.2848 + // AbstractCompiler::continuation_for_implicit_null_exception or 1.2849 + // continuation_for_implicit_division_by_zero_exception. All other implicit 1.2850 + // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are 1.2851 + // either at call sites or otherwise assume that stack unwinding will be initiated, 1.2852 + // so caller saved registers were assumed volatile in the compiler. 
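// Net effect of each stub produced here (sketch in pseudocode; names are illustrative):
//
//   set_last_Java_frame(rsp, rbp);
//   runtime_entry(thread, arg1, arg2);              // fabricates and installs the pending exception
//   reset_last_Java_frame();
//   jump StubRoutines::forward_exception_entry();   // unwinds and dispatches it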
1.2811 + public:
1.2812 + // Information about frame layout at time of blocking runtime call.
1.2813 + // Note that we only have to preserve callee-saved registers since
1.2814 + // the compilers are responsible for supplying a continuation point
1.2815 + // if they expect all registers to be preserved.
1.2816 + enum layout {
1.2817 + thread_off, // last_java_sp
1.2818 + arg1_off,
1.2819 + arg2_off,
1.2820 + rbp_off, // callee saved register
1.2821 + ret_pc,
1.2822 + framesize
1.2823 + };
1.2824 +
1.2825 + private:
1.2826 +
1.2827 +#undef __
1.2828 +#define __ masm->
1.2829 +
1.2830 + //------------------------------------------------------------------------------------------------------------------------
1.2831 + // Continuation point for throwing of implicit exceptions that are not handled in
1.2832 + // the current activation. Fabricates an exception oop and initiates normal
1.2833 + // exception dispatching in this frame.
1.2834 + //
1.2835 + // Previously the compiler (c2) allowed for callee save registers on Java calls.
1.2836 + // This is no longer true after adapter frames were removed but could possibly
1.2837 + // be brought back in the future if the interpreter code was reworked and it
1.2838 + // was deemed worthwhile. The comment below was left to describe what must
1.2839 + // happen here if callee saves were resurrected. As it stands now this stub
1.2840 + // could actually be a vanilla BufferBlob and have no oopMap at all.
1.2841 + // Since it doesn't make much difference we've chosen to leave it the
1.2842 + // way it was in the callee save days and keep the comment.
1.2843 +
1.2844 + // If we need to preserve callee-saved values we need a callee-saved oop map and
1.2845 + // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
1.2846 + // If the compiler needs all registers to be preserved between the fault
1.2847 + // point and the exception handler then it must assume responsibility for that in
1.2848 + // AbstractCompiler::continuation_for_implicit_null_exception or
1.2849 + // continuation_for_implicit_division_by_zero_exception. All other implicit
1.2850 + // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
1.2851 + // either at call sites or otherwise assume that stack unwinding will be initiated,
1.2852 + // so caller saved registers were assumed volatile in the compiler.
1.2853 + address generate_throw_exception(const char* name, address runtime_entry,
1.2854 + Register arg1 = noreg, Register arg2 = noreg) {
1.2855 +
1.2856 + int insts_size = 256;
1.2857 + int locs_size = 32;
1.2858 +
1.2859 + CodeBuffer code(name, insts_size, locs_size);
1.2860 + OopMapSet* oop_maps = new OopMapSet();
1.2861 + MacroAssembler* masm = new MacroAssembler(&code);
1.2862 +
1.2863 + address start = __ pc();
1.2864 +
1.2865 + // This is an inlined and slightly modified version of call_VM
1.2866 + // which has the ability to fetch the return PC out of
1.2867 + // thread-local storage and also sets up last_Java_sp slightly
1.2868 + // differently than the real call_VM
1.2869 + Register java_thread = rbx;
1.2870 + __ get_thread(java_thread);
1.2871 +
1.2872 + __ enter(); // required for proper stackwalking of RuntimeStub frame
1.2873 +
1.2874 + // pc and rbp, already pushed
1.2875 + __ subptr(rsp, (framesize-2) * wordSize); // prolog
1.2876 +
1.2877 + // Frame is now completed as far as size and linkage.
1.2878 +
1.2879 + int frame_complete = __ pc() - start;
1.2880 +
1.2881 + // push java thread (becomes first argument of C function)
1.2882 + __ movptr(Address(rsp, thread_off * wordSize), java_thread);
1.2883 + if (arg1 != noreg) {
1.2884 + __ movptr(Address(rsp, arg1_off * wordSize), arg1);
1.2885 + }
1.2886 + if (arg2 != noreg) {
1.2887 + assert(arg1 != noreg, "missing reg arg");
1.2888 + __ movptr(Address(rsp, arg2_off * wordSize), arg2);
1.2889 + }
1.2890 +
1.2891 + // Set up last_Java_sp and last_Java_fp
1.2892 + __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
1.2893 +
1.2894 + // Call runtime
1.2895 + BLOCK_COMMENT("call runtime_entry");
1.2896 + __ call(RuntimeAddress(runtime_entry));
1.2897 + // Generate oop map
1.2898 + OopMap* map = new OopMap(framesize, 0);
1.2899 + oop_maps->add_gc_map(__ pc() - start, map);
1.2900 +
1.2901 + // restore the thread (cannot use the pushed argument since arguments
1.2902 + // may be overwritten by C code generated by an optimizing compiler);
1.2903 + // however can use the register value directly if it is callee saved.
1.2904 + __ get_thread(java_thread);
1.2905 +
1.2906 + __ reset_last_Java_frame(java_thread, true, false);
1.2907 +
1.2908 + __ leave(); // required for proper stackwalking of RuntimeStub frame
1.2909 +
1.2910 + // check for pending exceptions
1.2911 +#ifdef ASSERT
1.2912 + Label L;
1.2913 + __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
1.2914 + __ jcc(Assembler::notEqual, L);
1.2915 + __ should_not_reach_here();
1.2916 + __ bind(L);
1.2917 +#endif /* ASSERT */
1.2918 + __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1.2919 +
1.2920 +
1.2921 + RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
1.2922 + return stub->entry_point();
1.2923 + }
1.2924 +
1.2925 +
1.2926 + void create_control_words() {
1.2927 + // Round to nearest, 53-bit mode, exceptions masked
1.2928 + StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
1.2929 + // Round to zero, 53-bit mode, exceptions masked
1.2930 + StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
1.2931 + // Round to nearest, 24-bit mode, exceptions masked
1.2932 + StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
1.2933 + // Round to nearest, 64-bit mode, exceptions masked
1.2934 + StubRoutines::_fpu_cntrl_wrd_64 = 0x037F;
1.2935 + // Round to nearest, all exceptions masked
1.2936 + StubRoutines::_mxcsr_std = 0x1F80;
1.2937 + // Note: the following two constants are 80-bit values;
1.2938 + // their layout is critical for correct loading by the FPU.
1.2939 + // Bias for strict fp multiply/divide
1.2940 + StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
1.2941 + StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
1.2942 + StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
1.2943 + // Un-Bias for strict fp multiply/divide
1.2944 + StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
1.2945 + StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
1.2946 + StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
1.2947 + }
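A short aside on the SSE value set above, as a sketch under the standard MXCSR bit layout (the checking helper below is hypothetical and not part of this changeset): 0x1F80 sets the six exception-mask bits (bits 7..12), leaves rounding control (bits 13..14) at 00, i.e. round to nearest, and keeps FTZ/DAZ clear, which is the IEEE-754 behaviour Java requires. The MXCSR_MASK constant defined near the top of this file (0xFFC0) strips the sticky status flags in bits 0..5 before a comparison of this kind.

    #include <xmmintrin.h>
    // Hypothetical standalone check, in the spirit of the verify_mxcsr stub:
    static bool mxcsr_is_vm_standard() {
      return (_mm_getcsr() & 0xFFC0) == 0x1F80;   // ignore sticky exception flags (bits 0..5)
    }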
1.2948 +
1.2949 + //---------------------------------------------------------------------------
1.2950 + // Initialization
1.2951 +
1.2952 + void generate_initial() {
1.2953 + // Generates the initial stubs and initializes the entry points
1.2954 +
1.2955 + //------------------------------------------------------------------------------------------------------------------------
1.2956 + // entry points that exist in all platforms
1.2957 + // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
1.2958 + // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
1.2959 + StubRoutines::_forward_exception_entry = generate_forward_exception();
1.2960 +
1.2961 + StubRoutines::_call_stub_entry =
1.2962 + generate_call_stub(StubRoutines::_call_stub_return_address);
1.2963 + // is referenced by megamorphic call
1.2964 + StubRoutines::_catch_exception_entry = generate_catch_exception();
1.2965 +
1.2966 + // These are currently used by Solaris/Intel
1.2967 + StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
1.2968 +
1.2969 + StubRoutines::_handler_for_unsafe_access_entry =
1.2970 + generate_handler_for_unsafe_access();
1.2971 +
1.2972 + // platform dependent
1.2973 + create_control_words();
1.2974 +
1.2975 + StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
1.2976 + StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
1.2977 + StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT,
1.2978 + CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
1.2979 + StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
1.2980 + CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1.2981 +
1.2982 + // Build this early so it's available for the interpreter
1.2983 + StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
1.2984 +
1.2985 + if (UseCRC32Intrinsics) {
1.2986 + // set the table address before generating the stubs that use it
1.2987 + StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
1.2988 + StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
1.2989 + }
1.2990 + }
1.2991 +
1.2992 +
1.2993 + void generate_all() {
1.2994 + // Generates all stubs and initializes the entry points
1.2995 +
1.2996 + // These entry points require SharedInfo::stack0 to be set up in non-core builds
1.2997 + // and need to be relocatable, so they each fabricate a RuntimeStub internally.
1.2998 + StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
1.2999 + StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
1.3000 + StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
1.3001 +
1.3002 + //------------------------------------------------------------------------------------------------------------------------
1.3003 + // entry points that are platform specific
1.3004 +
1.3005 + // support for verify_oop (must happen after universe_init)
1.3006 + StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
1.3007 +
1.3008 + // arraycopy stubs used by compilers
1.3009 + generate_arraycopy_stubs();
1.3010 +
1.3011 + generate_math_stubs();
1.3012 +
1.3013 + // don't bother generating these AES intrinsic stubs unless global flag is set
1.3014 + if (UseAESIntrinsics) {
1.3015 + StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // might be needed by the others
1.3016 +
1.3017 + StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
1.3018 + StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
1.3019 + StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
1.3020 + StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
1.3021 + }
1.3022 +
1.3023 + // Safefetch stubs.
1.3024 + generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
1.3025 + &StubRoutines::_safefetch32_fault_pc,
1.3026 + &StubRoutines::_safefetch32_continuation_pc);
1.3027 + StubRoutines::_safefetchN_entry = StubRoutines::_safefetch32_entry;
1.3028 + StubRoutines::_safefetchN_fault_pc = StubRoutines::_safefetch32_fault_pc;
1.3029 + StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
1.3030 + }
1.3031 +
1.3032 +
1.3033 + public:
1.3034 + StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
1.3035 + if (all) {
1.3036 + generate_all();
1.3037 + } else {
1.3038 + generate_initial();
1.3039 + }
1.3040 + }
1.3041 +}; // end class declaration
1.3042 +
1.3043 +
1.3044 +void StubGenerator_generate(CodeBuffer* code, bool all) {
1.3045 + StubGenerator g(code, all);
1.3046 +}
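For orientation only (an assumption about the surrounding tree rather than something this changeset adds): StubGenerator_generate is the hook that shared runtime code is expected to call in two phases, once early with all == false for the stubs the interpreter needs, and once after universe initialization with all == true for the rest. The call-site names below are from memory of stubRoutines.cpp and may differ in this tree.

    // Sketch of the expected call sites (hypothetical, for illustration only):
    //   StubRoutines::initialize1():  StubGenerator_generate(&code_buffer, false);   // -> generate_initial()
    //   StubRoutines::initialize2():  StubGenerator_generate(&code_buffer, true);    // -> generate_all()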